get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/83976/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83976,
    "url": "http://patches.dpdk.org/api/patches/83976/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201111064936.768604-37-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201111064936.768604-37-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201111064936.768604-37-jiawenwu@trustnetic.com",
    "date": "2020-11-11T06:49:35",
    "name": "[v2,36/37] net/txgbe: add security offload in Rx and Tx process",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "9a3d9d3f03ca79819c6a8c0d836764152da77dca",
    "submitter": {
        "id": 1932,
        "url": "http://patches.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201111064936.768604-37-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 13798,
            "url": "http://patches.dpdk.org/api/series/13798/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13798",
            "date": "2020-11-11T06:49:00",
            "name": "net: add txgbe PMD part 2",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/13798/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/83976/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/83976/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DD88AA09D2;\n\tWed, 11 Nov 2020 07:59:32 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5D2F5C864;\n\tWed, 11 Nov 2020 07:48:29 +0100 (CET)",
            "from smtpbg511.qq.com (smtpbg511.qq.com [203.205.250.109])\n by dpdk.org (Postfix) with ESMTP id 6BF22C325\n for <dev@dpdk.org>; Wed, 11 Nov 2020 07:48:11 +0100 (CET)",
            "from localhost.localdomain.com (unknown [183.129.236.74])\n by esmtp10.qq.com (ESMTP) with\n id ; Wed, 11 Nov 2020 14:48:07 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp27t1605077287tq5dk59m",
        "X-QQ-SSF": "01400000000000C0C000B00A0000000",
        "X-QQ-FEAT": "tOSmGYdYic2Wbl2cx77VSAl2oz0a7AseNOw5TYEaeifHfQCVSehEmdXceRi6v\n q7ZreJ6oFUVma96x0qhKZgYnUrTAOcueJjY0Ph7E5jRWb1qJQm2YYdCNq1QsO6WnwVBguTq\n upFe9AwQb3p4FggwkmCHmj+iqmbV6AKKwtSbLED71fXEtzMfktxs+u/yzqJBGkZ4TtGGuJ4\n MP2sk2sKX7lUEzQqi+k78ESxRLuzt8owOSDbzNJXAfMYk2fBaa3W2P2O9+/m8RjZCjkra6t\n PePilKwS4OfdXspmyEbWCH1HYg335D2mHeZpLFbna14W11kr8epHFcLCSd/pvUNkdfBQujk\n 4fuejZRp49uTFgXKL59Pqdwpxdo1A==",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Wed, 11 Nov 2020 14:49:35 +0800",
        "Message-Id": "<20201111064936.768604-37-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.18.4",
        "In-Reply-To": "<20201111064936.768604-1-jiawenwu@trustnetic.com>",
        "References": "<20201111064936.768604-1-jiawenwu@trustnetic.com>",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign5",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH v2 36/37] net/txgbe: add security offload in Rx\n\tand Tx process",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add security offload in Rx and Tx process.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n drivers/net/txgbe/txgbe_ipsec.c | 106 ++++++++++++++++++++++++++++++++\n drivers/net/txgbe/txgbe_ipsec.h |   1 +\n drivers/net/txgbe/txgbe_rxtx.c  |  93 +++++++++++++++++++++++++++-\n drivers/net/txgbe/txgbe_rxtx.h  |  13 ++++\n 4 files changed, 211 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c\nindex 0bdd1c061..f8c54f3d4 100644\n--- a/drivers/net/txgbe/txgbe_ipsec.c\n+++ b/drivers/net/txgbe/txgbe_ipsec.c\n@@ -19,6 +19,55 @@\n \t(a).ipv6[2] == (b).ipv6[2] && \\\n \t(a).ipv6[3] == (b).ipv6[3])\n \n+static void\n+txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)\n+{\n+\tstruct txgbe_hw *hw = TXGBE_DEV_HW(dev);\n+\tstruct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);\n+\tint i = 0;\n+\n+\t/* clear Rx IP table*/\n+\tfor (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {\n+\t\tuint16_t index = i << 3;\n+\t\tuint32_t reg_val = TXGBE_IPSRXIDX_WRITE |\n+\t\t\t\tTXGBE_IPSRXIDX_TB_IP | index;\n+\t\twr32(hw, TXGBE_IPSRXADDR(0), 0);\n+\t\twr32(hw, TXGBE_IPSRXADDR(1), 0);\n+\t\twr32(hw, TXGBE_IPSRXADDR(2), 0);\n+\t\twr32(hw, TXGBE_IPSRXADDR(3), 0);\n+\t\twr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);\n+\t}\n+\n+\t/* clear Rx SPI and Rx/Tx SA tables*/\n+\tfor (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {\n+\t\tuint32_t index = i << 3;\n+\t\tuint32_t reg_val = TXGBE_IPSRXIDX_WRITE |\n+\t\t\t\tTXGBE_IPSRXIDX_TB_SPI | index;\n+\t\twr32(hw, TXGBE_IPSRXSPI, 0);\n+\t\twr32(hw, TXGBE_IPSRXADDRIDX, 0);\n+\t\twr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);\n+\t\treg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index;\n+\t\twr32(hw, TXGBE_IPSRXKEY(0), 0);\n+\t\twr32(hw, TXGBE_IPSRXKEY(1), 0);\n+\t\twr32(hw, TXGBE_IPSRXKEY(2), 0);\n+\t\twr32(hw, TXGBE_IPSRXKEY(3), 0);\n+\t\twr32(hw, TXGBE_IPSRXSALT, 0);\n+\t\twr32(hw, TXGBE_IPSRXMODE, 0);\n+\t\twr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);\n+\t\treg_val = TXGBE_IPSTXIDX_WRITE | index;\n+\t\twr32(hw, TXGBE_IPSTXKEY(0), 0);\n+\t\twr32(hw, TXGBE_IPSTXKEY(1), 0);\n+\t\twr32(hw, TXGBE_IPSTXKEY(2), 0);\n+\t\twr32(hw, TXGBE_IPSTXKEY(3), 0);\n+\t\twr32(hw, TXGBE_IPSTXSALT, 0);\n+\t\twr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);\n+\t}\n+\n+\tmemset(priv->rx_ip_tbl, 0, 
sizeof(priv->rx_ip_tbl));\n+\tmemset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));\n+\tmemset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));\n+}\n+\n static int\n txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)\n {\n@@ -552,6 +601,63 @@ txgbe_crypto_capabilities_get(void *device __rte_unused)\n \treturn txgbe_security_capabilities;\n }\n \n+int\n+txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)\n+{\n+\tstruct txgbe_hw *hw = TXGBE_DEV_HW(dev);\n+\tuint32_t reg;\n+\tuint64_t rx_offloads;\n+\tuint64_t tx_offloads;\n+\n+\trx_offloads = dev->data->dev_conf.rxmode.offloads;\n+\ttx_offloads = dev->data->dev_conf.txmode.offloads;\n+\n+\t/* sanity checks */\n+\tif (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {\n+\t\tPMD_DRV_LOG(ERR, \"RSC and IPsec not supported\");\n+\t\treturn -1;\n+\t}\n+\tif (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {\n+\t\tPMD_DRV_LOG(ERR, \"HW CRC strip needs to be enabled for IPsec\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Set TXGBE_SECTXBUFFAF to 0x14 as required in the datasheet*/\n+\twr32(hw, TXGBE_SECTXBUFAF, 0x14);\n+\n+\t/* IFG needs to be set to 3 when we are using security. 
Otherwise a Tx\n+\t * hang will occur with heavy traffic.\n+\t */\n+\treg = rd32(hw, TXGBE_SECTXIFG);\n+\treg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3);\n+\twr32(hw, TXGBE_SECTXIFG, reg);\n+\n+\treg = rd32(hw, TXGBE_SECRXCTL);\n+\treg |= TXGBE_SECRXCTL_CRCSTRIP;\n+\twr32(hw, TXGBE_SECRXCTL, reg);\n+\n+\tif (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {\n+\t\twr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);\n+\t\treg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);\n+\t\tif (reg != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Error enabling Rx Crypto\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\tif (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {\n+\t\twr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);\n+\t\treg = rd32(hw, TXGBE_SECTXCTL);\n+\t\tif (reg != TXGBE_SECTXCTL_STFWD) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Error enabling Rx Crypto\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\ttxgbe_crypto_clear_ipsec_tables(dev);\n+\n+\treturn 0;\n+}\n+\n static struct rte_security_ops txgbe_security_ops = {\n \t.session_create = txgbe_crypto_create_session,\n \t.session_get_size = txgbe_crypto_session_get_size,\ndiff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h\nindex d022a255f..54c27d8ce 100644\n--- a/drivers/net/txgbe/txgbe_ipsec.h\n+++ b/drivers/net/txgbe/txgbe_ipsec.h\n@@ -90,5 +90,6 @@ struct txgbe_ipsec {\n };\n \n int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);\n+int txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);\n \n #endif /*TXGBE_IPSEC_H_*/\ndiff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c\nindex 857feba9b..2111a4850 100644\n--- a/drivers/net/txgbe/txgbe_rxtx.c\n+++ b/drivers/net/txgbe/txgbe_rxtx.c\n@@ -20,6 +20,7 @@\n #include <rte_debug.h>\n #include <rte_ethdev.h>\n #include <rte_ethdev_driver.h>\n+#include <rte_security_driver.h>\n #include <rte_memzone.h>\n #include <rte_atomic.h>\n #include <rte_mempool.h>\n@@ -60,6 +61,9 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |\n \t\tPKT_TX_MACSEC |\n 
#endif\n \t\tPKT_TX_OUTER_IP_CKSUM |\n+#ifdef RTE_LIB_SECURITY\n+\t\tPKT_TX_SEC_OFFLOAD |\n+#endif\n \t\tTXGBE_TX_IEEE1588_TMST);\n \n #define TXGBE_TX_OFFLOAD_NOTSUP_MASK \\\n@@ -314,7 +318,8 @@ txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n static inline void\n txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,\n \t\tvolatile struct txgbe_tx_ctx_desc *ctx_txd,\n-\t\tuint64_t ol_flags, union txgbe_tx_offload tx_offload)\n+\t\tuint64_t ol_flags, union txgbe_tx_offload tx_offload,\n+\t\t__rte_unused uint64_t *mdata)\n {\n \tunion txgbe_tx_offload tx_offload_mask;\n \tuint32_t type_tucmd_mlhl;\n@@ -408,6 +413,19 @@ txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,\n \t\tvlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);\n \t}\n \n+#ifdef RTE_LIB_SECURITY\n+\tif (ol_flags & PKT_TX_SEC_OFFLOAD) {\n+\t\tunion txgbe_crypto_tx_desc_md *md =\n+\t\t\t\t(union txgbe_crypto_tx_desc_md *)mdata;\n+\t\ttunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);\n+\t\ttype_tucmd_mlhl |= md->enc ?\n+\t\t\t(TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0;\n+\t\ttype_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len);\n+\t\ttx_offload_mask.sa_idx |= ~0;\n+\t\ttx_offload_mask.sec_pad_len |= ~0;\n+\t}\n+#endif\n+\n \ttxq->ctx_cache[ctx_idx].flags = ol_flags;\n \ttxq->ctx_cache[ctx_idx].tx_offload.data[0] =\n \t\ttx_offload_mask.data[0] & tx_offload.data[0];\n@@ -704,6 +722,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tuint32_t ctx = 0;\n \tuint32_t new_ctx;\n \tunion txgbe_tx_offload tx_offload;\n+#ifdef RTE_LIB_SECURITY\n+\tuint8_t use_ipsec;\n+#endif\n \n \ttx_offload.data[0] = 0;\n \ttx_offload.data[1] = 0;\n@@ -730,6 +751,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t * are needed for offload functionality.\n \t\t */\n \t\tol_flags = tx_pkt->ol_flags;\n+#ifdef RTE_LIB_SECURITY\n+\t\tuse_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);\n+#endif\n \n \t\t/* If hardware offload required */\n \t\ttx_ol_req = ol_flags 
& TXGBE_TX_OFFLOAD_MASK;\n@@ -745,6 +769,16 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\ttx_offload.outer_l3_len = tx_pkt->outer_l3_len;\n \t\t\ttx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);\n \n+#ifdef RTE_LIB_SECURITY\n+\t\t\tif (use_ipsec) {\n+\t\t\t\tunion txgbe_crypto_tx_desc_md *ipsec_mdata =\n+\t\t\t\t\t(union txgbe_crypto_tx_desc_md *)\n+\t\t\t\t\t\trte_security_dynfield(tx_pkt);\n+\t\t\t\ttx_offload.sa_idx = ipsec_mdata->sa_idx;\n+\t\t\t\ttx_offload.sec_pad_len = ipsec_mdata->pad_len;\n+\t\t\t}\n+#endif\n+\n \t\t\t/* If new context need be built or reuse the exist ctx*/\n \t\t\tctx = what_ctx_update(txq, tx_ol_req, tx_offload);\n \t\t\t/* Only allocate context descriptor if required */\n@@ -898,7 +932,8 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\t}\n \n \t\t\t\ttxgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,\n-\t\t\t\t\ttx_offload);\n+\t\t\t\t\ttx_offload,\n+\t\t\t\t\trte_security_dynfield(tx_pkt));\n \n \t\t\t\ttxe->last_id = tx_last;\n \t\t\t\ttx_id = txe->next_id;\n@@ -917,6 +952,10 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t}\n \n \t\tolinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);\n+#ifdef RTE_LIB_SECURITY\n+\t\tif (use_ipsec)\n+\t\t\tolinfo_status |= TXGBE_TXD_IPSEC;\n+#endif\n \n \t\tm_seg = tx_pkt;\n \t\tdo {\n@@ -1101,6 +1140,14 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)\n \t\tpkt_flags |= PKT_RX_EIP_CKSUM_BAD;\n \t}\n \n+#ifdef RTE_LIB_SECURITY\n+\tif (rx_status & TXGBE_RXD_STAT_SECP) {\n+\t\tpkt_flags |= PKT_RX_SEC_OFFLOAD;\n+\t\tif (rx_status & TXGBE_RXD_ERR_SECERR)\n+\t\t\tpkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;\n+\t}\n+#endif\n+\n \treturn pkt_flags;\n }\n \n@@ -1929,6 +1976,11 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)\n \n \toffloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;\n \n+#ifdef RTE_LIB_SECURITY\n+\tif (dev->security_ctx)\n+\t\toffloads |= DEV_RX_OFFLOAD_SECURITY;\n+#endif\n+\n \treturn offloads;\n }\n \n@@ -2030,6 +2082,9 @@ 
txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)\n {\n \tstruct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;\n \tif (txq->offloads == 0 &&\n+#ifdef RTE_LIB_SECURITY\n+\t\t!(txq->using_ipsec) &&\n+#endif\n \t\ttxq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)\n \t\treturn txgbe_tx_done_cleanup_simple(txq, free_cnt);\n \n@@ -2113,6 +2168,9 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)\n {\n \t/* Use a simple Tx queue (no offloads, no multi segs) if possible */\n \tif (txq->offloads == 0 &&\n+#ifdef RTE_LIB_SECURITY\n+\t\t\t!(txq->using_ipsec) &&\n+#endif\n \t\t\ttxq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {\n \t\tPMD_INIT_LOG(DEBUG, \"Using simple tx code path\");\n \t\tdev->tx_pkt_burst = txgbe_xmit_pkts_simple;\n@@ -2167,6 +2225,10 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)\n \n \ttx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;\n \n+#ifdef RTE_LIB_SECURITY\n+\tif (dev->security_ctx)\n+\t\ttx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;\n+#endif\n \treturn tx_offload_capa;\n }\n \n@@ -2265,6 +2327,10 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->offloads = offloads;\n \ttxq->ops = &def_txq_ops;\n \ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+#ifdef RTE_LIB_SECURITY\n+\ttxq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &\n+\t\t\tDEV_TX_OFFLOAD_SECURITY);\n+#endif\n \n \t/* Modification to set tail pointer for virtual function\n \t * if vf is detected.\n@@ -4065,6 +4131,7 @@ txgbe_set_rsc(struct rte_eth_dev *dev)\n void __rte_cold\n txgbe_set_rx_function(struct rte_eth_dev *dev)\n {\n+\tuint16_t i;\n \tstruct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);\n \n \t/*\n@@ -4125,6 +4192,15 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)\n \n \t\tdev->rx_pkt_burst = txgbe_recv_pkts;\n \t}\n+\n+#ifdef RTE_LIB_SECURITY\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tstruct txgbe_rx_queue *rxq = dev->data->rx_queues[i];\n+\n+\t\trxq->using_ipsec = 
!!(dev->data->dev_conf.rxmode.offloads &\n+\t\t\t\tDEV_RX_OFFLOAD_SECURITY);\n+\t}\n+#endif\n }\n \n /*\n@@ -4395,6 +4471,19 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)\n \t    dev->data->dev_conf.lpbk_mode)\n \t\ttxgbe_setup_loopback_link_raptor(hw);\n \n+#ifdef RTE_LIB_SECURITY\n+\tif ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||\n+\t    (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {\n+\t\tret = txgbe_crypto_enable_ipsec(dev);\n+\t\tif (ret != 0) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"txgbe_crypto_enable_ipsec fails with %d.\",\n+\t\t\t\t    ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+#endif\n+\n \treturn 0;\n }\n \ndiff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h\nindex 6e0e86ce5..203bdeb88 100644\n--- a/drivers/net/txgbe/txgbe_rxtx.h\n+++ b/drivers/net/txgbe/txgbe_rxtx.h\n@@ -293,6 +293,10 @@ struct txgbe_rx_queue {\n \tuint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */\n \tuint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */\n \tuint16_t rx_free_trigger; /**< triggers rx buffer allocation */\n+#ifdef RTE_LIB_SECURITY\n+\tuint8_t            using_ipsec;\n+\t/**< indicates that IPsec RX feature is in use */\n+#endif\n \tuint16_t            rx_free_thresh; /**< max free RX desc to hold. */\n \tuint16_t            queue_id; /**< RX queue index. */\n \tuint16_t            reg_idx;  /**< RX queue register index. */\n@@ -336,6 +340,11 @@ union txgbe_tx_offload {\n \t\tuint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */\n \t\tuint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */\n \t\tuint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. 
*/\n+#ifdef RTE_LIB_SECURITY\n+\t\t/* inline ipsec related*/\n+\t\tuint64_t sa_idx:8;\t/**< TX SA database entry index */\n+\t\tuint64_t sec_pad_len:4;\t/**< padding length */\n+#endif\n \t};\n };\n \n@@ -388,6 +397,10 @@ struct txgbe_tx_queue {\n \tstruct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];\n \tconst struct txgbe_txq_ops *ops;       /**< txq ops */\n \tuint8_t             tx_deferred_start; /**< not in global dev start. */\n+#ifdef RTE_LIB_SECURITY\n+\tuint8_t\t\t    using_ipsec;\n+\t/**< indicates that IPsec TX feature is in use */\n+#endif\n };\n \n struct txgbe_txq_ops {\n",
    "prefixes": [
        "v2",
        "36/37"
    ]
}