get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
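
A rough usage sketch (not part of the page itself): the same endpoint can be driven from Python with the requests library. The API token and the target state below are placeholders, and PATCH/PUT require maintainer permissions on the project.

    # Hedged example; the URL is taken from this page, token and state are assumptions.
    import requests

    URL = "http://patches.dpdk.org/api/patches/97742/"

    # GET: show the patch (read access needs no authentication)
    patch = requests.get(URL, timeout=30).json()
    print(patch["name"], "->", patch["state"])

    # PATCH: partial update, e.g. changing the patch state. Patchwork uses
    # token authentication ("Authorization: Token <key>"); the token here is
    # a placeholder and must be replaced with a real API token.
    headers = {"Authorization": "Token REPLACE_WITH_API_TOKEN"}
    resp = requests.patch(URL, json={"state": "accepted"},
                          headers=headers, timeout=30)
    resp.raise_for_status()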

GET /api/patches/97742/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97742,
    "url": "http://patches.dpdk.org/api/patches/97742/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210902021505.17607-17-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210902021505.17607-17-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210902021505.17607-17-ndabilpuram@marvell.com",
    "date": "2021-09-02T02:14:54",
    "name": "[16/27] net/cnxk: add inline security support for cn10k",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c2fb9b100ff85407f8e5c117f6caa6ddfda32ee6",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210902021505.17607-17-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 18612,
            "url": "http://patches.dpdk.org/api/series/18612/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=18612",
            "date": "2021-09-02T02:14:38",
            "name": "net/cnxk: support for inline ipsec",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/18612/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/97742/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/97742/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C35C4A0C4C;\n\tThu,  2 Sep 2021 04:18:34 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C3BD641183;\n\tThu,  2 Sep 2021 04:17:35 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id A2FFD40698\n for <dev@dpdk.org>; Thu,  2 Sep 2021 04:17:34 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with SMTP id\n 181HQCpp011801;\n Wed, 1 Sep 2021 19:17:34 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com with ESMTP id 3atdwq9htp-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Wed, 01 Sep 2021 19:17:33 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Wed, 1 Sep 2021 19:17:31 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.18 via Frontend\n Transport; Wed, 1 Sep 2021 19:17:31 -0700",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id 8AEBB3F7050;\n Wed,  1 Sep 2021 19:17:28 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=9DTCrhW2O6z5G/cXb1TSwRdqvaYX04Pg8ExMrBrSLgw=;\n b=JT5a+0tOUhgbzayR+/LEiaoUYjEsPkZUJ+1UsS2f0dhbsV1/q2Qn+jpxWXPgS7CvRGLf\n 1FC3/zj7/trr1ovgFmD63cLqK6ov6WyXtOEk0fyFais6A4cNbRg8m4qetwTIF80ZgmX0\n QCjcyGN7WYfjW2TfU9EgThPbErRnzy4F9W8DIOCZ1CAvtf+DQT5OhfmUi469eq3J3zEa\n MXZae/PKJR+ve+p6bF78vXdXW0WQiBKHx4e+M+ZlZXyfeZq1sIQSRAxun7AJiobzs2+J\n lc/SfmP4oel+mYzk42+FvLJQ3CwVByiaoRGU+e9/XJXISL0gmU4o4++HyhqJO4MzFxdI wQ==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "Nithin Dabilpuram <ndabilpuram@marvell.com>, Kiran Kumar K\n <kirankumark@marvell.com>, Sunil Kumar Kori <skori@marvell.com>, Satha Rao\n <skoteshwar@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,\n \"Shijith Thotton\" <sthotton@marvell.com>, Anatoly Burakov\n <anatoly.burakov@intel.com>",
        "CC": "<jerinj@marvell.com>, <schalla@marvell.com>, <dev@dpdk.org>",
        "Date": "Thu, 2 Sep 2021 07:44:54 +0530",
        "Message-ID": "<20210902021505.17607-17-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20210902021505.17607-1-ndabilpuram@marvell.com>",
        "References": "<20210902021505.17607-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "dzPfWECBRdulXd0dGXIFPnpLhwaIvtI1",
        "X-Proofpoint-GUID": "dzPfWECBRdulXd0dGXIFPnpLhwaIvtI1",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.182.1,Aquarius:18.0.790,Hydra:6.0.391,FMLib:17.0.607.475\n definitions=2021-09-01_05,2021-09-01_01,2020-04-07_01",
        "Subject": "[dpdk-dev] [PATCH 16/27] net/cnxk: add inline security support for\n cn10k",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for inline inbound and outbound IPSec for SA create,\ndestroy and other NIX / CPT LF configurations.\n\nThis patch also changes dpdk-devbind.py to list new inline\ndevice as misc device.\n\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\n---\n doc/guides/nics/cnxk.rst                 | 102 ++++++++\n drivers/event/cnxk/cnxk_eventdev_adptr.c |  36 ++-\n drivers/net/cnxk/cn10k_ethdev.c          |  36 ++-\n drivers/net/cnxk/cn10k_ethdev.h          |  43 ++++\n drivers/net/cnxk/cn10k_ethdev_sec.c      | 426 +++++++++++++++++++++++++++++++\n drivers/net/cnxk/cn10k_rx.h              |   1 +\n drivers/net/cnxk/cn10k_tx.h              |   1 +\n drivers/net/cnxk/meson.build             |   1 +\n usertools/dpdk-devbind.py                |   8 +-\n 9 files changed, 649 insertions(+), 5 deletions(-)\n create mode 100644 drivers/net/cnxk/cn10k_ethdev_sec.c",
    "diff": "diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst\nindex 90d27db..b542437 100644\n--- a/doc/guides/nics/cnxk.rst\n+++ b/doc/guides/nics/cnxk.rst\n@@ -34,6 +34,7 @@ Features of the CNXK Ethdev PMD are:\n - Vector Poll mode driver\n - Debug utilities - Context dump and error interrupt support\n - Support Rx interrupt\n+- Inline IPsec processing support\n \n Prerequisites\n -------------\n@@ -185,6 +186,74 @@ Runtime Config Options\n \n       -a 0002:02:00.0,tag_as_xor=1\n \n+- ``Max SPI for inbound inline IPsec`` (default ``255``)\n+\n+   Max SPI supported for inbound inline IPsec processing can be specified by\n+   ``ipsec_in_max_spi`` ``devargs`` parameter.\n+\n+   For example::\n+\n+      -a 0002:02:00.0,ipsec_in_max_spi=128\n+\n+   With the above configuration, application can enable inline IPsec processing\n+   for 128 inbound SAs (SPI 0-127).\n+\n+- ``Max SA's for outbound inline IPsec`` (default ``4096``)\n+\n+   Max number of SA's supported for outbound inline IPsec processing can be\n+   specified by ``ipsec_out_max_sa`` ``devargs`` parameter.\n+\n+   For example::\n+\n+      -a 0002:02:00.0,ipsec_out_max_sa=128\n+\n+   With the above configuration, application can enable inline IPsec processing\n+   for 128 outbound SAs.\n+\n+- ``Outbound CPT LF queue size`` (default ``8200``)\n+\n+   Size of Outbound CPT LF queue in number of descriptors can be specified by\n+   ``outb_nb_desc`` ``devargs`` parameter.\n+\n+   For example::\n+\n+      -a 0002:02:00.0,outb_nb_desc=16384\n+\n+    With the above configuration, Outbound CPT LF will be created to accommodate\n+    at max 16384 descriptors at any given time.\n+\n+- ``Outbound CPT LF count`` (default ``1``)\n+\n+   Number of CPT LF's to attach for Outbound processing can be specified by\n+   ``outb_nb_crypto_qs`` ``devargs`` parameter.\n+\n+   For example::\n+\n+      -a 0002:02:00.0,outb_nb_crypto_qs=2\n+\n+   With the above confiuration, two CPT LF's are setup and distributed among\n+   all the Tx queues for outbound processing.\n+\n+- ``Force using inline ipsec device for inbound`` (default ``0``)\n+\n+   In CN10K, in event mode, driver can work in two modes,\n+\n+   1. Inbound encrypted traffic received by probed ipsec inline device while\n+      plain traffic post decryption is received by ethdev.\n+\n+   2. Both Inbound encrypted traffic and plain traffic post decryption are\n+      received by ethdev.\n+\n+   By default event mode works without using inline device i.e mode ``2``.\n+   This behaviour can be changed to pick mode ``1`` by using\n+   ``force_inb_inl_dev`` ``devargs`` parameter.\n+\n+   For example::\n+\n+      -a 0002:02:00.0,force_inb_inl_dev=1 -a 0002:03:00.0,force_inb_inl_dev=1\n+\n+   With the above configuration, inbound encrypted traffic from both the ports\n+   is received by ipsec inline device.\n \n .. note::\n \n@@ -250,6 +319,39 @@ Example usage in testpmd::\n    testpmd> flow create 0 ingress pattern eth / raw relative is 0 pattern \\\n           spec ab pattern mask ab offset is 4 / end actions queue index 1 / end\n \n+Inline device support for CN10K\n+-------------------------------\n+\n+CN10K HW provides a misc device Inline device that supports ethernet devices in\n+providing following features.\n+\n+  - Aggregate all the inline IPsec inbound traffic from all the CN10K ethernet\n+    devices to be processed by the single inline IPSec device. 
This allows\n+    single rte security session to accept traffic from multiple ports.\n+\n+  - Support for event generation on outbound inline IPsec processing errors.\n+\n+  - Support CN106xx poll mode of operation for inline IPSec inbound processing.\n+\n+Inline IPsec device is identified by PCI PF vendid:devid ``177D:A0F0`` or\n+VF ``177D:A0F1``.\n+\n+Runtime Config Options for inline device\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+- ``Max SPI for inbound inline IPsec`` (default ``255``)\n+\n+   Max SPI supported for inbound inline IPsec processing can be specified by\n+   ``ipsec_in_max_spi`` ``devargs`` parameter.\n+\n+   For example::\n+\n+      -a 0002:1d:00.0,ipsec_in_max_spi=128\n+\n+   With the above configuration, application can enable inline IPsec processing\n+   for 128 inbound SAs (SPI 0-127) for traffic aggregated on inline device.\n+\n+\n Debugging Options\n -----------------\n \ndiff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c\nindex baf2f2a..a34efbb 100644\n--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c\n+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c\n@@ -123,7 +123,9 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,\n \t\t    uint16_t port_id, const struct rte_event *ev,\n \t\t    uint8_t custom_flowid)\n {\n+\tstruct roc_nix *nix = &cnxk_eth_dev->nix;\n \tstruct roc_nix_rq *rq;\n+\tint rc;\n \n \trq = &cnxk_eth_dev->rqs[rq_id];\n \trq->sso_ena = 1;\n@@ -140,7 +142,24 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,\n \t\trq->tag_mask |= ev->flow_id;\n \t}\n \n-\treturn roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);\n+\trc = roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (rq_id == 0 && roc_nix_inl_inb_is_enabled(nix)) {\n+\t\tuint32_t sec_tag_const;\n+\n+\t\t/* IPSec tag const is 8-bit left shifted value of tag_mask\n+\t\t * as it applies to bit 32:8 of tag only.\n+\t\t */\n+\t\tsec_tag_const = rq->tag_mask >> 8;\n+\t\trc = roc_nix_inl_inb_tag_update(nix, sec_tag_const,\n+\t\t\t\t\t\tev->sched_type);\n+\t\tif (rc)\n+\t\t\tplt_err(\"Failed to set tag conf for ipsec, rc=%d\", rc);\n+\t}\n+\n+\treturn rc;\n }\n \n static int\n@@ -186,6 +205,7 @@ cnxk_sso_rx_adapter_queue_add(\n \t\trox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,\n \t\t\t\t      rxq_sp->qconf.mp->pool_id, true,\n \t\t\t\t      dev->force_ena_bp);\n+\t\tcnxk_eth_dev->nb_rxq_sso++;\n \t}\n \n \tif (rc < 0) {\n@@ -196,6 +216,14 @@ cnxk_sso_rx_adapter_queue_add(\n \n \tdev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;\n \n+\t/* Switch to use PF/VF's NIX LF instead of inline device for inbound\n+\t * when all the RQ's are switched to event dev mode. 
We do this only\n+\t * when using inline device is not forced by dev args.\n+\t */\n+\tif (!cnxk_eth_dev->inb.force_inl_dev &&\n+\t    cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)\n+\t\tcnxk_nix_inb_mode_set(cnxk_eth_dev, false);\n+\n \treturn 0;\n }\n \n@@ -220,12 +248,18 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,\n \t\trox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,\n \t\t\t\t      rxq_sp->qconf.mp->pool_id, false,\n \t\t\t\t      dev->force_ena_bp);\n+\t\tcnxk_eth_dev->nb_rxq_sso--;\n \t}\n \n \tif (rc < 0)\n \t\tplt_err(\"Failed to clear Rx adapter config port=%d, q=%d\",\n \t\t\teth_dev->data->port_id, rx_queue_id);\n \n+\t/* Removing RQ from Rx adapter implies need to use\n+\t * inline device for CQ/Poll mode.\n+\t */\n+\tcnxk_nix_inb_mode_set(cnxk_eth_dev, true);\n+\n \treturn rc;\n }\n \ndiff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c\nindex 7caec6c..fa2343c 100644\n--- a/drivers/net/cnxk/cn10k_ethdev.c\n+++ b/drivers/net/cnxk/cn10k_ethdev.c\n@@ -36,6 +36,9 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)\n \tif (!dev->ptype_disable)\n \t\tflags |= NIX_RX_OFFLOAD_PTYPE_F;\n \n+\tif (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)\n+\t\tflags |= NIX_RX_OFFLOAD_SECURITY_F;\n+\n \treturn flags;\n }\n \n@@ -101,6 +104,9 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)\n \tif ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))\n \t\tflags |= NIX_TX_OFFLOAD_TSTAMP_F;\n \n+\tif (conf & DEV_TX_OFFLOAD_SECURITY)\n+\t\tflags |= NIX_TX_OFFLOAD_SECURITY_F;\n+\n \treturn flags;\n }\n \n@@ -181,8 +187,11 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n \t\t\t const struct rte_eth_txconf *tx_conf)\n {\n \tstruct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);\n+\tstruct roc_nix *nix = &dev->nix;\n+\tstruct roc_cpt_lf *inl_lf;\n \tstruct cn10k_eth_txq *txq;\n \tstruct roc_nix_sq *sq;\n+\tuint16_t crypto_qid;\n \tint rc;\n \n \tRTE_SET_USED(socket);\n@@ -198,11 +207,24 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n \ttxq = eth_dev->data->tx_queues[qid];\n \ttxq->fc_mem = sq->fc;\n \t/* Store lmt base in tx queue for easy access */\n-\ttxq->lmt_base = dev->nix.lmt_base;\n+\ttxq->lmt_base = nix->lmt_base;\n \ttxq->io_addr = sq->io_addr;\n \ttxq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;\n \ttxq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;\n \n+\t/* Fetch CPT LF info for outbound if present */\n+\tif (dev->outb.lf_base) {\n+\t\tcrypto_qid = qid % dev->outb.nb_crypto_qs;\n+\t\tinl_lf = dev->outb.lf_base + crypto_qid;\n+\n+\t\ttxq->cpt_io_addr = inl_lf->io_addr;\n+\t\ttxq->cpt_fc = inl_lf->fc_addr;\n+\t\ttxq->cpt_desc = inl_lf->nb_desc * 0.7;\n+\t\ttxq->sa_base = (uint64_t)dev->outb.sa_base;\n+\t\ttxq->sa_base |= eth_dev->data->port_id;\n+\t\tPLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));\n+\t}\n+\n \tnix_form_default_desc(dev, txq, qid);\n \ttxq->lso_tun_fmt = dev->lso_tun_fmt;\n \treturn 0;\n@@ -215,6 +237,7 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n \t\t\t struct rte_mempool *mp)\n {\n \tstruct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);\n+\tstruct cnxk_eth_rxq_sp *rxq_sp;\n \tstruct cn10k_eth_rxq *rxq;\n \tstruct roc_nix_rq *rq;\n \tstruct roc_nix_cq *cq;\n@@ -250,6 +273,15 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n \trxq->data_off = rq->first_skip;\n \trxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);\n \n+\t/* Setup security related info */\n+\tif (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F) 
{\n+\t\trxq->lmt_base = dev->nix.lmt_base;\n+\t\trxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,\n+\t\t\t\t\t\t\t   dev->inb.inl_dev);\n+\t}\n+\trxq_sp = cnxk_eth_rxq_to_sp(rxq);\n+\trxq->aura_handle = rxq_sp->qconf.mp->pool_id;\n+\n \t/* Lookup mem */\n \trxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();\n \treturn 0;\n@@ -500,6 +532,8 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \tnix_eth_dev_ops_override();\n \tnpc_flow_ops_override();\n \n+\tcn10k_eth_sec_ops_override();\n+\n \t/* Common probe */\n \trc = cnxk_nix_probe(pci_drv, pci_dev);\n \tif (rc)\ndiff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h\nindex 8b6e0f2..a888364 100644\n--- a/drivers/net/cnxk/cn10k_ethdev.h\n+++ b/drivers/net/cnxk/cn10k_ethdev.h\n@@ -5,6 +5,7 @@\n #define __CN10K_ETHDEV_H__\n \n #include <cnxk_ethdev.h>\n+#include <cnxk_security.h>\n \n struct cn10k_eth_txq {\n \tuint64_t send_hdr_w0;\n@@ -15,6 +16,10 @@ struct cn10k_eth_txq {\n \trte_iova_t io_addr;\n \tuint16_t sqes_per_sqb_log2;\n \tint16_t nb_sqb_bufs_adj;\n+\trte_iova_t cpt_io_addr;\n+\tuint64_t sa_base;\n+\tuint64_t *cpt_fc;\n+\tuint16_t cpt_desc;\n \tuint64_t cmd[4];\n \tuint64_t lso_tun_fmt;\n } __plt_cache_aligned;\n@@ -30,12 +35,50 @@ struct cn10k_eth_rxq {\n \tuint32_t qmask;\n \tuint32_t available;\n \tuint16_t data_off;\n+\tuint64_t sa_base;\n+\tuint64_t lmt_base;\n+\tuint64_t aura_handle;\n \tuint16_t rq;\n \tstruct cnxk_timesync_info *tstamp;\n } __plt_cache_aligned;\n \n+/* Private data in sw rsvd area of struct roc_ot_ipsec_inb_sa */\n+struct cn10k_inb_priv_data {\n+\tvoid *userdata;\n+\tstruct cnxk_eth_sec_sess *eth_sec;\n+};\n+\n+/* Private data in sw rsvd area of struct roc_ot_ipsec_outb_sa */\n+struct cn10k_outb_priv_data {\n+\tvoid *userdata;\n+\t/* Rlen computation data */\n+\tstruct cnxk_ipsec_outb_rlens rlens;\n+\t/* Back pinter to eth sec session */\n+\tstruct cnxk_eth_sec_sess *eth_sec;\n+\t/* SA index */\n+\tuint32_t sa_idx;\n+};\n+\n+struct cn10k_sec_sess_priv {\n+\tunion {\n+\t\tstruct {\n+\t\t\tuint32_t sa_idx;\n+\t\t\tuint8_t inb_sa : 1;\n+\t\t\tuint8_t rsvd1 : 2;\n+\t\t\tuint8_t roundup_byte : 5;\n+\t\t\tuint8_t roundup_len;\n+\t\t\tuint16_t partial_len;\n+\t\t};\n+\n+\t\tuint64_t u64;\n+\t};\n+} __rte_packed;\n+\n /* Rx and Tx routines */\n void cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev);\n void cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev);\n \n+/* Security context setup */\n+void cn10k_eth_sec_ops_override(void);\n+\n #endif /* __CN10K_ETHDEV_H__ */\ndiff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c\nnew file mode 100644\nindex 0000000..3ffd824\n--- /dev/null\n+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c\n@@ -0,0 +1,426 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2021 Marvell.\n+ */\n+\n+#include <rte_cryptodev.h>\n+#include <rte_eventdev.h>\n+#include <rte_security.h>\n+#include <rte_security_driver.h>\n+\n+#include <cn10k_ethdev.h>\n+#include <cnxk_security.h>\n+\n+static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {\n+\t{\t/* AES GCM */\n+\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\n+\t\t{.sym = {\n+\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,\n+\t\t\t{.aead = {\n+\t\t\t\t.algo = RTE_CRYPTO_AEAD_AES_GCM,\n+\t\t\t\t.block_size = 16,\n+\t\t\t\t.key_size = {\n+\t\t\t\t\t.min = 16,\n+\t\t\t\t\t.max = 32,\n+\t\t\t\t\t.increment = 8\n+\t\t\t\t},\n+\t\t\t\t.digest_size = {\n+\t\t\t\t\t.min = 16,\n+\t\t\t\t\t.max = 16,\n+\t\t\t\t\t.increment = 
0\n+\t\t\t\t},\n+\t\t\t\t.aad_size = {\n+\t\t\t\t\t.min = 8,\n+\t\t\t\t\t.max = 12,\n+\t\t\t\t\t.increment = 4\n+\t\t\t\t},\n+\t\t\t\t.iv_size = {\n+\t\t\t\t\t.min = 12,\n+\t\t\t\t\t.max = 12,\n+\t\t\t\t\t.increment = 0\n+\t\t\t\t}\n+\t\t\t}, }\n+\t\t}, }\n+\t},\n+\tRTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()\n+};\n+\n+static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {\n+\t{\t/* IPsec Inline Protocol ESP Tunnel Ingress */\n+\t\t.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,\n+\t\t.protocol = RTE_SECURITY_PROTOCOL_IPSEC,\n+\t\t.ipsec = {\n+\t\t\t.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,\n+\t\t\t.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,\n+\t\t\t.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,\n+\t\t\t.options = { 0 }\n+\t\t},\n+\t\t.crypto_capabilities = cn10k_eth_sec_crypto_caps,\n+\t\t.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA\n+\t},\n+\t{\t/* IPsec Inline Protocol ESP Tunnel Egress */\n+\t\t.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,\n+\t\t.protocol = RTE_SECURITY_PROTOCOL_IPSEC,\n+\t\t.ipsec = {\n+\t\t\t.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,\n+\t\t\t.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,\n+\t\t\t.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,\n+\t\t\t.options = { 0 }\n+\t\t},\n+\t\t.crypto_capabilities = cn10k_eth_sec_crypto_caps,\n+\t\t.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA\n+\t},\n+\t{\n+\t\t.action = RTE_SECURITY_ACTION_TYPE_NONE\n+\t}\n+};\n+\n+static void\n+cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)\n+{\n+\tstruct rte_eth_event_ipsec_desc desc;\n+\tstruct cn10k_sec_sess_priv sess_priv;\n+\tstruct cn10k_outb_priv_data *priv;\n+\tstruct roc_ot_ipsec_outb_sa *sa;\n+\tstruct cpt_cn10k_res_s *res;\n+\tstruct rte_eth_dev *eth_dev;\n+\tstruct cnxk_eth_dev *dev;\n+\tuint16_t dlen_adj, rlen;\n+\tstruct rte_mbuf *mbuf;\n+\tuintptr_t sa_base;\n+\tuintptr_t nixtx;\n+\tuint8_t port;\n+\n+\tRTE_SET_USED(args);\n+\n+\tswitch ((gw[0] >> 28) & 0xF) {\n+\tcase RTE_EVENT_TYPE_ETHDEV:\n+\t\t/* Event from inbound inline dev due to IPSEC packet bad L4 */\n+\t\tmbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));\n+\t\tplt_nix_dbg(\"Received mbuf %p from inline dev inbound\", mbuf);\n+\t\trte_pktmbuf_free(mbuf);\n+\t\treturn;\n+\tcase RTE_EVENT_TYPE_CPU:\n+\t\t/* Check for subtype */\n+\t\tif (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {\n+\t\t\t/* Event from outbound inline error */\n+\t\t\tmbuf = (struct rte_mbuf *)gw[1];\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* Fall through */\n+\tdefault:\n+\t\tplt_err(\"Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx\",\n+\t\t\tgw[0], gw[1]);\n+\t\treturn;\n+\t}\n+\n+\t/* Get ethdev port from tag */\n+\tport = gw[0] & 0xFF;\n+\teth_dev = &rte_eth_devices[port];\n+\tdev = cnxk_eth_pmd_priv(eth_dev);\n+\n+\tsess_priv.u64 = *rte_security_dynfield(mbuf);\n+\t/* Calculate dlen adj */\n+\tdlen_adj = mbuf->pkt_len - mbuf->l2_len;\n+\trlen = (dlen_adj + sess_priv.roundup_len) +\n+\t       (sess_priv.roundup_byte - 1);\n+\trlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);\n+\trlen += sess_priv.partial_len;\n+\tdlen_adj = rlen - dlen_adj;\n+\n+\t/* Find the res area residing on next cacheline after end of data */\n+\tnixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;\n+\tnixtx += BIT_ULL(7);\n+\tnixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);\n+\tres = (struct cpt_cn10k_res_s *)nixtx;\n+\n+\tplt_nix_dbg(\"Outbound error, mbuf %p, sa_index %u, compcode %x uc %x\",\n+\t\t    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);\n+\n+\tsess_priv.u64 = *rte_security_dynfield(mbuf);\n+\n+\tsa_base 
= dev->outb.sa_base;\n+\tsa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);\n+\tpriv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);\n+\n+\tmemset(&desc, 0, sizeof(desc));\n+\n+\tswitch (res->uc_compcode) {\n+\tcase ROC_IE_OT_UCC_ERR_SA_OVERFLOW:\n+\t\tdesc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;\n+\t\tbreak;\n+\tdefault:\n+\t\tplt_warn(\"Outbound error, mbuf %p, sa_index %u, \"\n+\t\t\t \"compcode %x uc %x\", mbuf, sess_priv.sa_idx,\n+\t\t\t res->compcode, res->uc_compcode);\n+\t\tdesc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;\n+\t\tbreak;\n+\t}\n+\n+\tdesc.metadata = (uint64_t)priv->userdata;\n+\trte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);\n+\trte_pktmbuf_free(mbuf);\n+}\n+\n+static int\n+cn10k_eth_sec_session_create(void *device,\n+\t\t\t     struct rte_security_session_conf *conf,\n+\t\t\t     struct rte_security_session *sess,\n+\t\t\t     struct rte_mempool *mempool)\n+{\n+\tstruct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;\n+\tstruct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);\n+\tstruct rte_security_ipsec_xform *ipsec;\n+\tstruct cn10k_sec_sess_priv sess_priv;\n+\tstruct rte_crypto_sym_xform *crypto;\n+\tstruct cnxk_eth_sec_sess *eth_sec;\n+\tbool inbound, inl_dev;\n+\tint rc = 0;\n+\n+\tif (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)\n+\t\treturn -ENOTSUP;\n+\n+\tif (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)\n+\t\treturn -ENOTSUP;\n+\n+\tif (rte_security_dynfield_register() < 0)\n+\t\treturn -ENOTSUP;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\troc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);\n+\n+\tipsec = &conf->ipsec;\n+\tcrypto = conf->crypto_xform;\n+\tinbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);\n+\tinl_dev = !!dev->inb.inl_dev;\n+\n+\t/* Search if a session already exits */\n+\tif (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {\n+\t\tplt_err(\"%s SA with SPI %u already in use\",\n+\t\t\tinbound ? 
\"Inbound\" : \"Outbound\", ipsec->spi);\n+\t\treturn -EEXIST;\n+\t}\n+\n+\tif (rte_mempool_get(mempool, (void **)&eth_sec)) {\n+\t\tplt_err(\"Could not allocate security session private data\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tmemset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));\n+\tsess_priv.u64 = 0;\n+\n+\t/* Acquire lock on inline dev for inbound */\n+\tif (inbound && inl_dev)\n+\t\troc_nix_inl_dev_lock();\n+\n+\tif (inbound) {\n+\t\tstruct cn10k_inb_priv_data *inb_priv;\n+\t\tstruct roc_ot_ipsec_inb_sa *inb_sa;\n+\t\tuintptr_t sa;\n+\n+\t\tPLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <\n+\t\t\t\t  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);\n+\n+\t\t/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */\n+\t\tsa = roc_nix_inl_inb_sa_get(&dev->nix, inl_dev, ipsec->spi);\n+\t\tif (!sa && dev->inb.inl_dev) {\n+\t\t\tplt_err(\"Failed to create ingress sa, inline dev \"\n+\t\t\t\t\"not found or spi not in range\");\n+\t\t\trc = -ENOTSUP;\n+\t\t\tgoto mempool_put;\n+\t\t} else if (!sa) {\n+\t\t\tplt_err(\"Failed to create ingress sa\");\n+\t\t\trc = -EFAULT;\n+\t\t\tgoto mempool_put;\n+\t\t}\n+\n+\t\tinb_sa = (struct roc_ot_ipsec_inb_sa *)sa;\n+\n+\t\t/* Check if SA is already in use */\n+\t\tif (inb_sa->w2.s.valid) {\n+\t\t\tplt_err(\"Inbound SA with SPI %u already in use\",\n+\t\t\t\tipsec->spi);\n+\t\t\trc = -EBUSY;\n+\t\t\tgoto mempool_put;\n+\t\t}\n+\n+\t\tmemset(inb_sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));\n+\n+\t\t/* Fill inbound sa params */\n+\t\trc = cnxk_ot_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to init inbound sa, rc=%d\", rc);\n+\t\t\tgoto mempool_put;\n+\t\t}\n+\n+\t\tinb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);\n+\t\t/* Back pointer to get eth_sec */\n+\t\tinb_priv->eth_sec = eth_sec;\n+\t\t/* Save userdata in inb private area */\n+\t\tinb_priv->userdata = conf->userdata;\n+\n+\t\t/* Save SA index/SPI in cookie for now */\n+\t\tinb_sa->w1.s.cookie = rte_cpu_to_be_32(ipsec->spi);\n+\n+\t\t/* Prepare session priv */\n+\t\tsess_priv.inb_sa = 1;\n+\t\tsess_priv.sa_idx = ipsec->spi;\n+\n+\t\t/* Pointer from eth_sec -> inb_sa */\n+\t\teth_sec->sa = inb_sa;\n+\t\teth_sec->sess = sess;\n+\t\teth_sec->sa_idx = ipsec->spi;\n+\t\teth_sec->spi = ipsec->spi;\n+\t\teth_sec->inl_dev = !!dev->inb.inl_dev;\n+\t\teth_sec->inb = true;\n+\n+\t\tTAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);\n+\t\tdev->inb.nb_sess++;\n+\t} else {\n+\t\tstruct cn10k_outb_priv_data *outb_priv;\n+\t\tstruct roc_ot_ipsec_outb_sa *outb_sa;\n+\t\tstruct cnxk_ipsec_outb_rlens *rlens;\n+\t\tuint64_t sa_base = dev->outb.sa_base;\n+\t\tuint32_t sa_idx;\n+\n+\t\tPLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <\n+\t\t\t\t  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);\n+\n+\t\t/* Alloc an sa index */\n+\t\trc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);\n+\t\tif (rc)\n+\t\t\tgoto mempool_put;\n+\n+\t\toutb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);\n+\t\toutb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);\n+\t\trlens = &outb_priv->rlens;\n+\n+\t\tmemset(outb_sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));\n+\n+\t\t/* Fill outbound sa params */\n+\t\trc = cnxk_ot_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to init outbound sa, rc=%d\", rc);\n+\t\t\trc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);\n+\t\t\tgoto mempool_put;\n+\t\t}\n+\n+\t\t/* Save userdata */\n+\t\toutb_priv->userdata = conf->userdata;\n+\t\toutb_priv->sa_idx = sa_idx;\n+\t\toutb_priv->eth_sec = eth_sec;\n+\n+\t\t/* Save rlen info 
*/\n+\t\tcnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);\n+\n+\t\t/* Prepare session priv */\n+\t\tsess_priv.sa_idx = outb_priv->sa_idx;\n+\t\tsess_priv.roundup_byte = rlens->roundup_byte;\n+\t\tsess_priv.roundup_len = rlens->roundup_len;\n+\t\tsess_priv.partial_len = rlens->partial_len;\n+\n+\t\t/* Pointer from eth_sec -> outb_sa */\n+\t\teth_sec->sa = outb_sa;\n+\t\teth_sec->sess = sess;\n+\t\teth_sec->sa_idx = sa_idx;\n+\t\teth_sec->spi = ipsec->spi;\n+\n+\t\tTAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);\n+\t\tdev->outb.nb_sess++;\n+\t}\n+\n+\t/* Sync session in context cache */\n+\troc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,\n+\t\t\t    ROC_NIX_INL_SA_OP_RELOAD);\n+\n+\tif (inbound && inl_dev)\n+\t\troc_nix_inl_dev_unlock();\n+\n+\tplt_nix_dbg(\"Created %s session with spi=%u, sa_idx=%u inl_dev=%u\",\n+\t\t    inbound ? \"inbound\" : \"outbound\", eth_sec->spi,\n+\t\t    eth_sec->sa_idx, eth_sec->inl_dev);\n+\t/*\n+\t * Update fast path info in priv area.\n+\t */\n+\tset_sec_session_private_data(sess, (void *)sess_priv.u64);\n+\n+\treturn 0;\n+mempool_put:\n+\tif (inbound && inl_dev)\n+\t\troc_nix_inl_dev_unlock();\n+\trte_mempool_put(mempool, eth_sec);\n+\treturn rc;\n+}\n+\n+static int\n+cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)\n+{\n+\tstruct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;\n+\tstruct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);\n+\tstruct roc_ot_ipsec_inb_sa *inb_sa;\n+\tstruct roc_ot_ipsec_outb_sa *outb_sa;\n+\tstruct cnxk_eth_sec_sess *eth_sec;\n+\tstruct rte_mempool *mp;\n+\n+\teth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);\n+\tif (!eth_sec)\n+\t\treturn -ENOENT;\n+\n+\tif (eth_sec->inl_dev)\n+\t\troc_nix_inl_dev_lock();\n+\n+\tif (eth_sec->inb) {\n+\t\tinb_sa = eth_sec->sa;\n+\t\t/* Disable SA */\n+\t\tinb_sa->w2.s.valid = 0;\n+\n+\t\tTAILQ_REMOVE(&dev->inb.list, eth_sec, entry);\n+\t\tdev->inb.nb_sess--;\n+\t} else {\n+\t\toutb_sa = eth_sec->sa;\n+\t\t/* Disable SA */\n+\t\toutb_sa->w2.s.valid = 0;\n+\n+\t\t/* Release Outbound SA index */\n+\t\tcnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);\n+\t\tTAILQ_REMOVE(&dev->outb.list, eth_sec, entry);\n+\t\tdev->outb.nb_sess--;\n+\t}\n+\n+\t/* Sync session in context cache */\n+\troc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,\n+\t\t\t    ROC_NIX_INL_SA_OP_RELOAD);\n+\n+\tif (eth_sec->inl_dev)\n+\t\troc_nix_inl_dev_unlock();\n+\n+\tplt_nix_dbg(\"Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u\",\n+\t\t    eth_sec->inb ? 
\"inbound\" : \"outbound\", eth_sec->spi,\n+\t\t    eth_sec->sa_idx, eth_sec->inl_dev);\n+\n+\t/* Put eth_sec object back to pool */\n+\tmp = rte_mempool_from_obj(eth_sec);\n+\tset_sec_session_private_data(sess, NULL);\n+\trte_mempool_put(mp, eth_sec);\n+\treturn 0;\n+}\n+\n+static const struct rte_security_capability *\n+cn10k_eth_sec_capabilities_get(void *device __rte_unused)\n+{\n+\treturn cn10k_eth_sec_capabilities;\n+}\n+\n+void\n+cn10k_eth_sec_ops_override(void)\n+{\n+\tstatic int init_once;\n+\n+\tif (init_once)\n+\t\treturn;\n+\tinit_once = 1;\n+\n+\t/* Update platform specific ops */\n+\tcnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;\n+\tcnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;\n+\tcnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;\n+}\ndiff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h\nindex 68219b8..d27a231 100644\n--- a/drivers/net/cnxk/cn10k_rx.h\n+++ b/drivers/net/cnxk/cn10k_rx.h\n@@ -16,6 +16,7 @@\n #define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)\n #define NIX_RX_OFFLOAD_TSTAMP_F\t     BIT(4)\n #define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)\n+#define NIX_RX_OFFLOAD_SECURITY_F    BIT(6)\n \n /* Flags to control cqe_to_mbuf conversion function.\n  * Defining it from backwards to denote its been\ndiff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h\nindex f75cae0..8577a7b 100644\n--- a/drivers/net/cnxk/cn10k_tx.h\n+++ b/drivers/net/cnxk/cn10k_tx.h\n@@ -13,6 +13,7 @@\n #define NIX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)\n #define NIX_TX_OFFLOAD_TSO_F\t      BIT(4)\n #define NIX_TX_OFFLOAD_TSTAMP_F\t      BIT(5)\n+#define NIX_TX_OFFLOAD_SECURITY_F     BIT(6)\n \n /* Flags to control xmit_prepare function.\n  * Defining it from backwards to denote its been\ndiff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build\nindex 6cc30c3..d1d4b4e 100644\n--- a/drivers/net/cnxk/meson.build\n+++ b/drivers/net/cnxk/meson.build\n@@ -37,6 +37,7 @@ sources += files(\n # CN10K\n sources += files(\n         'cn10k_ethdev.c',\n+        'cn10k_ethdev_sec.c',\n         'cn10k_rte_flow.c',\n         'cn10k_rx.c',\n         'cn10k_rx_mseg.c',\ndiff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py\nindex 74d16e4..5f0e817 100755\n--- a/usertools/dpdk-devbind.py\n+++ b/usertools/dpdk-devbind.py\n@@ -49,6 +49,8 @@\n              'SVendor': None, 'SDevice': None}\n cnxk_bphy_cgx = {'Class': '08', 'Vendor': '177d', 'Device': 'a059,a060',\n                  'SVendor': None, 'SDevice': None}\n+cnxk_inl_dev = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f0,a0f1',\n+                'SVendor': None, 'SDevice': None}\n \n intel_dlb = {'Class': '0b', 'Vendor': '8086', 'Device': '270b,2710,2714',\n              'SVendor': None, 'SDevice': None}\n@@ -73,9 +75,9 @@\n mempool_devices = [cavium_fpa, octeontx2_npa]\n compress_devices = [cavium_zip]\n regex_devices = [octeontx2_ree]\n-misc_devices = [cnxk_bphy, cnxk_bphy_cgx, intel_ioat_bdw, intel_ioat_skx, intel_ioat_icx, intel_idxd_spr,\n-                intel_ntb_skx, intel_ntb_icx,\n-                octeontx2_dma]\n+misc_devices = [cnxk_bphy, cnxk_bphy_cgx, cnxk_inl_dev, intel_ioat_bdw,\n+\t        intel_ioat_skx, intel_ioat_icx, intel_idxd_spr, intel_ntb_skx,\n+\t\tintel_ntb_icx, octeontx2_dma]\n \n # global dict ethernet devices present. Dictionary indexed by PCI address.\n # Each device within this is itself a dictionary of device properties\n",
    "prefixes": [
        "16/27"
    ]
}
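
The mbox URL in the response above points at the raw patch email. A minimal sketch (assumptions: git is installed and the current working directory is a DPDK checkout) of downloading that mbox and applying it locally:

    # Fetch the mbox listed in the "mbox" field and feed it to git am.
    import subprocess
    import requests

    MBOX_URL = ("http://patches.dpdk.org/project/dpdk/patch/"
                "20210902021505.17607-17-ndabilpuram@marvell.com/mbox/")

    mbox = requests.get(MBOX_URL, timeout=30)
    mbox.raise_for_status()

    # git am reads the mailbox from stdin when no file argument is given;
    # run "git am --abort" to back out if the patch does not apply cleanly.
    subprocess.run(["git", "am"], input=mbox.content, check=True)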