get:
Show a patch.

patch:
Update a patch (partial update of its writable fields).

put:
Update a patch (full update of its writable fields).
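
A minimal sketch of exercising these operations from Python with the requests library; the token value and the state change are placeholders, and the "Token" header format assumes Patchwork's token authentication is enabled for write access:

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "<api-token>"  # placeholder; write access needs an authenticated account

# get: show a patch (returns the JSON body reproduced below)
patch = requests.get(f"{BASE}/patches/124768/").json()
print(patch["name"], patch["state"])

# patch: update a patch in place, e.g. its state (put works the same way,
# but is meant to carry the full set of writable fields)
resp = requests.patch(
    f"{BASE}/patches/124768/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()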

GET /api/patches/124768/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 124768,
    "url": "http://patches.dpdk.org/api/patches/124768/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230303081013.589868-11-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230303081013.589868-11-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230303081013.589868-11-ndabilpuram@marvell.com",
    "date": "2023-03-03T08:10:09",
    "name": "[11/15] common/cnxk: support of per NIX LF meta aura",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "c4ad0fddbaaae27b012dea0c7d08b435c38947c7",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230303081013.589868-11-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 27237,
            "url": "http://patches.dpdk.org/api/series/27237/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=27237",
            "date": "2023-03-03T08:09:59",
            "name": "[01/15] net/cnxk: resolve sefgault caused during transmit completion",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/27237/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/124768/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/124768/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 56A6841DC3;\n\tFri,  3 Mar 2023 09:12:03 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0FFBF42D3F;\n\tFri,  3 Mar 2023 09:11:26 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id B2B2642D3B\n for <dev@dpdk.org>; Fri,  3 Mar 2023 09:11:24 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 3234WwRY024923 for <dev@dpdk.org>; Fri, 3 Mar 2023 00:11:24 -0800",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3p1wr9xbkh-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Fri, 03 Mar 2023 00:11:23 -0800",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.42;\n Fri, 3 Mar 2023 00:11:21 -0800",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.42 via Frontend\n Transport; Fri, 3 Mar 2023 00:11:21 -0800",
            "from hyd1588t430.caveonetworks.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id 251EE5B693A;\n Fri,  3 Mar 2023 00:11:14 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=wErRZ1m4AWgsBneVKajJj6NNId3gUHJ0XVVSr42Wf0U=;\n b=XCBKhfdr7Q7RnWzhzMKtGmh+NeULCa6in/QDJ1hT/afWLgq/jvsn1e7T/bjR/0ZWtW0A\n fuyqLSGERMGIRG59v5qYibUulVRy8mCk3oNUBmPmY08KeZJtqWWHWqp/ItELT7vtJVLC\n 8kDWOHqyf4EUaozC1ZuKeVVEuyOOY+FEtYzApGe9CHiUvrzDP/mbpgpfHOTBkzxEngX2\n 65AA2cw5zx+kpgCpuSaLALB0Z1F8LASNrSJV/amTBjmGeWD8QEOjemlpAOKzvnCz0esv\n 6w6tAGWCzNijT9jSzVFqe3o85hxhoZqnpk+8j+q/xzine6aes4pws/jSeA6h1q88y+jZ JQ==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "Nithin Dabilpuram <ndabilpuram@marvell.com>, Kiran Kumar K\n <kirankumark@marvell.com>, Sunil Kumar Kori <skori@marvell.com>, Satha Rao\n <skoteshwar@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,\n \"Shijith Thotton\" <sthotton@marvell.com>",
        "CC": "<jerinj@marvell.com>, <dev@dpdk.org>, Rahul Bhansali\n <rbhansali@marvell.com>",
        "Subject": "[PATCH 11/15] common/cnxk: support of per NIX LF meta aura",
        "Date": "Fri, 3 Mar 2023 13:40:09 +0530",
        "Message-ID": "<20230303081013.589868-11-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230303081013.589868-1-ndabilpuram@marvell.com>",
        "References": "<20230303081013.589868-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "5ddo1GOTYcGlWvjvgU6X6KMQnjm68DT4",
        "X-Proofpoint-GUID": "5ddo1GOTYcGlWvjvgU6X6KMQnjm68DT4",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.219,Aquarius:18.0.942,Hydra:6.0.573,FMLib:17.11.170.22\n definitions=2023-03-03_01,2023-03-02_02,2023-02-09_01",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Rahul Bhansali <rbhansali@marvell.com>\n\nSupports creation of individual meta aura per NIX port for\nCN106-B0/CN103xx SoC.\n\nIndividual pool buffer size can be passed using meta_buf_sz\ndevargs parameter per NIX for local meta aura creation.\n\nSigned-off-by: Rahul Bhansali <rbhansali@marvell.com>\n---\n doc/guides/nics/cnxk.rst               |  14 ++\n drivers/common/cnxk/roc_features.h     |   6 +\n drivers/common/cnxk/roc_nix.h          |   5 +\n drivers/common/cnxk/roc_nix_fc.c       |   7 +-\n drivers/common/cnxk/roc_nix_inl.c      | 232 +++++++++++++++++++------\n drivers/common/cnxk/roc_nix_inl.h      |   7 +-\n drivers/common/cnxk/roc_nix_queue.c    |   6 +-\n drivers/event/cnxk/cn10k_eventdev.c    |  10 +-\n drivers/event/cnxk/cn10k_worker.h      |  11 +-\n drivers/event/cnxk/cn9k_eventdev.c     |   7 +-\n drivers/event/cnxk/cnxk_tim_evdev.c    |   2 +-\n drivers/event/cnxk/cnxk_tim_evdev.h    |   2 +-\n drivers/net/cnxk/cn10k_ethdev.c        |   2 +\n drivers/net/cnxk/cnxk_ethdev.c         |   4 +\n drivers/net/cnxk/cnxk_ethdev.h         |   6 +-\n drivers/net/cnxk/cnxk_ethdev_devargs.c |  23 +++\n drivers/net/cnxk/cnxk_ethdev_dp.h      |  13 ++\n drivers/net/cnxk/cnxk_ethdev_sec.c     |  21 ++-\n drivers/net/cnxk/cnxk_lookup.c         |  37 +++-\n 19 files changed, 330 insertions(+), 85 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst\nindex 267010e760..9229056f6f 100644\n--- a/doc/guides/nics/cnxk.rst\n+++ b/doc/guides/nics/cnxk.rst\n@@ -402,6 +402,20 @@ Runtime Config Options\n \n       -a 0002:01:00.1,tx_compl_ena=1\n \n+- ``Meta buffer size per ethdev port for inline inbound IPsec second pass``\n+\n+   Size of meta buffer allocated for inline inbound IPsec second pass per\n+   ethdev port can be specified by ``meta_buf_sz`` ``devargs`` parameter.\n+   Default value is computed runtime based on pkt mbuf pools created and in use.\n+   This option is for OCTEON CN106-B0/CN103XX SoC family.\n+\n+   For example::\n+\n+      -a 0002:02:00.0,meta_buf_sz=512\n+\n+   With the above configuration, PMD would allocate meta buffers of size 512 for\n+   inline inbound IPsec processing second pass.\n+\n .. note::\n \n    Above devarg parameters are configurable per device, user needs to pass the\ndiff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h\nindex 27bccd6b9c..7796fef91b 100644\n--- a/drivers/common/cnxk/roc_features.h\n+++ b/drivers/common/cnxk/roc_features.h\n@@ -16,6 +16,12 @@ roc_feature_nix_has_inl_rq_mask(void)\n \treturn (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());\n }\n \n+static inline bool\n+roc_feature_nix_has_own_meta_aura(void)\n+{\n+\treturn (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());\n+}\n+\n static inline bool\n roc_feature_nix_has_late_bp(void)\n {\ndiff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h\nindex f04dd63e27..0ec98ad630 100644\n--- a/drivers/common/cnxk/roc_nix.h\n+++ b/drivers/common/cnxk/roc_nix.h\n@@ -434,12 +434,17 @@ struct roc_nix {\n \tuint32_t dwrr_mtu;\n \tbool ipsec_out_sso_pffunc;\n \tbool custom_sa_action;\n+\tbool local_meta_aura_ena;\n+\tuint32_t meta_buf_sz;\n \t/* End of input parameters */\n \t/* LMT line base for \"Per Core Tx LMT line\" mode*/\n \tuintptr_t lmt_base;\n \tbool io_enabled;\n \tbool rx_ptp_ena;\n \tuint16_t cints;\n+\tuint32_t buf_sz;\n+\tuint64_t meta_aura_handle;\n+\tuintptr_t meta_mempool;\n \n #define ROC_NIX_MEM_SZ (6 * 1056)\n \tuint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;\ndiff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c\nindex 7574a88bf6..cec83b31f3 100644\n--- a/drivers/common/cnxk/roc_nix_fc.c\n+++ b/drivers/common/cnxk/roc_nix_fc.c\n@@ -295,11 +295,16 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)\n \tif (sso_ena < 0)\n \t\treturn -EINVAL;\n \n-\tif (sso_ena)\n+\tif (sso_ena) {\n \t\troc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,\n \t\t\t\t      fc_cfg->rq_cfg.enable, true,\n \t\t\t\t      fc_cfg->rq_cfg.tc);\n \n+\t\tif (roc_nix->local_meta_aura_ena)\n+\t\t\troc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,\n+\t\t\t\t\t      fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);\n+\t}\n+\n \t/* Copy RQ config to CQ config as they are occupying same area */\n \tmemset(&tmp, 0, sizeof(tmp));\n \ttmp.type = ROC_NIX_FC_CQ_CFG;\ndiff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c\nindex 19f500ee54..076d83e8d5 100644\n--- a/drivers/common/cnxk/roc_nix_inl.c\n+++ b/drivers/common/cnxk/roc_nix_inl.c\n@@ -20,97 +20,134 @@ PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==\n \t\t  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);\n \n static int\n-nix_inl_meta_aura_destroy(void)\n+nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)\n {\n \tstruct idev_cfg *idev = idev_get_cfg();\n \tstruct idev_nix_inl_cfg 
*inl_cfg;\n+\tchar mempool_name[24] = {'\\0'};\n+\tchar *mp_name = NULL;\n+\tuint64_t *meta_aura;\n \tint rc;\n \n \tif (!idev)\n \t\treturn -EINVAL;\n \n \tinl_cfg = &idev->inl_cfg;\n+\tif (roc_nix->local_meta_aura_ena) {\n+\t\tmeta_aura = &roc_nix->meta_aura_handle;\n+\t\tsnprintf(mempool_name, sizeof(mempool_name), \"NIX_INL_META_POOL_%d\",\n+\t\t\t roc_nix->port_id + 1);\n+\t\tmp_name = mempool_name;\n+\t} else {\n+\t\tmeta_aura = &inl_cfg->meta_aura;\n+\t}\n+\n \t/* Destroy existing Meta aura */\n-\tif (inl_cfg->meta_aura) {\n+\tif (*meta_aura) {\n \t\tuint64_t avail, limit;\n \n \t\t/* Check if all buffers are back to pool */\n-\t\tavail = roc_npa_aura_op_available(inl_cfg->meta_aura);\n-\t\tlimit = roc_npa_aura_op_limit_get(inl_cfg->meta_aura);\n+\t\tavail = roc_npa_aura_op_available(*meta_aura);\n+\t\tlimit = roc_npa_aura_op_limit_get(*meta_aura);\n \t\tif (avail != limit)\n \t\t\tplt_warn(\"Not all buffers are back to meta pool,\"\n \t\t\t\t \" %\" PRIu64 \" != %\" PRIu64, avail, limit);\n \n-\t\trc = meta_pool_cb(&inl_cfg->meta_aura, 0, 0, true);\n+\t\trc = meta_pool_cb(meta_aura, &roc_nix->meta_mempool, 0, 0, true, mp_name);\n \t\tif (rc) {\n \t\t\tplt_err(\"Failed to destroy meta aura, rc=%d\", rc);\n \t\t\treturn rc;\n \t\t}\n-\t\tinl_cfg->meta_aura = 0;\n-\t\tinl_cfg->buf_sz = 0;\n-\t\tinl_cfg->nb_bufs = 0;\n-\t\tinl_cfg->refs = 0;\n+\n+\t\tif (!roc_nix->local_meta_aura_ena) {\n+\t\t\tinl_cfg->meta_aura = 0;\n+\t\t\tinl_cfg->buf_sz = 0;\n+\t\t\tinl_cfg->nb_bufs = 0;\n+\t\t} else\n+\t\t\troc_nix->buf_sz = 0;\n \t}\n \n \treturn 0;\n }\n \n static int\n-nix_inl_meta_aura_create(struct idev_cfg *idev, uint16_t first_skip)\n+nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,\n+\t\t\t uint64_t *meta_aura)\n {\n \tuint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);\n \tstruct idev_nix_inl_cfg *inl_cfg;\n \tstruct nix_inl_dev *nix_inl_dev;\n+\tint port_id = roc_nix->port_id;\n+\tchar mempool_name[24] = {'\\0'};\n+\tstruct roc_nix_rq *inl_rq;\n \tuint32_t nb_bufs, buf_sz;\n+\tchar *mp_name = NULL;\n+\tuint16_t inl_rq_id;\n+\tuintptr_t mp;\n \tint rc;\n \n \tinl_cfg = &idev->inl_cfg;\n \tnix_inl_dev = idev->nix_inl_dev;\n \n-\t/* Override meta buf count from devargs if present */\n-\tif (nix_inl_dev && nix_inl_dev->nb_meta_bufs)\n-\t\tnb_bufs = nix_inl_dev->nb_meta_bufs;\n-\telse\n-\t\tnb_bufs = roc_npa_buf_type_limit_get(mask);\n-\n-\t/* Override meta buf size from devargs if present */\n-\tif (nix_inl_dev && nix_inl_dev->meta_buf_sz)\n-\t\tbuf_sz = nix_inl_dev->meta_buf_sz;\n-\telse\n-\t\tbuf_sz = first_skip + NIX_INL_META_SIZE;\n+\tif (roc_nix->local_meta_aura_ena) {\n+\t\t/* Per LF Meta Aura */\n+\t\tinl_rq_id = nix_inl_dev->nb_rqs > 1 ? 
port_id : 0;\n+\t\tinl_rq = &nix_inl_dev->rqs[inl_rq_id];\n+\n+\t\tnb_bufs = roc_npa_aura_op_limit_get(inl_rq->aura_handle);\n+\t\tif (inl_rq->spb_ena)\n+\t\t\tnb_bufs += roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);\n+\n+\t\t/* Override meta buf size from NIX devargs if present */\n+\t\tif (roc_nix->meta_buf_sz)\n+\t\t\tbuf_sz = roc_nix->meta_buf_sz;\n+\t\telse\n+\t\t\tbuf_sz = first_skip + NIX_INL_META_SIZE;\n+\n+\t\t/* Create Metapool name */\n+\t\tsnprintf(mempool_name, sizeof(mempool_name), \"NIX_INL_META_POOL_%d\",\n+\t\t\t roc_nix->port_id + 1);\n+\t\tmp_name = mempool_name;\n+\t} else {\n+\t\t/* Global Meta Aura (Aura 0) */\n+\t\t/* Override meta buf count from devargs if present */\n+\t\tif (nix_inl_dev && nix_inl_dev->nb_meta_bufs)\n+\t\t\tnb_bufs = nix_inl_dev->nb_meta_bufs;\n+\t\telse\n+\t\t\tnb_bufs = roc_npa_buf_type_limit_get(mask);\n+\n+\t\t/* Override meta buf size from devargs if present */\n+\t\tif (nix_inl_dev && nix_inl_dev->meta_buf_sz)\n+\t\t\tbuf_sz = nix_inl_dev->meta_buf_sz;\n+\t\telse\n+\t\t\tbuf_sz = first_skip + NIX_INL_META_SIZE;\n+\t}\n \n \t/* Allocate meta aura */\n-\trc = meta_pool_cb(&inl_cfg->meta_aura, buf_sz, nb_bufs, false);\n+\trc = meta_pool_cb(meta_aura, &mp, buf_sz, nb_bufs, false, mp_name);\n \tif (rc) {\n \t\tplt_err(\"Failed to allocate meta aura, rc=%d\", rc);\n \t\treturn rc;\n \t}\n+\troc_nix->meta_mempool = mp;\n+\n+\tif (!roc_nix->local_meta_aura_ena) {\n+\t\tinl_cfg->buf_sz = buf_sz;\n+\t\tinl_cfg->nb_bufs = nb_bufs;\n+\t} else\n+\t\troc_nix->buf_sz = buf_sz;\n \n-\tinl_cfg->buf_sz = buf_sz;\n-\tinl_cfg->nb_bufs = nb_bufs;\n \treturn 0;\n }\n \n-int\n-roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)\n+static int\n+nix_inl_global_meta_buffer_validate(struct idev_cfg *idev, struct roc_nix_rq *rq)\n {\n-\tstruct idev_cfg *idev = idev_get_cfg();\n \tstruct idev_nix_inl_cfg *inl_cfg;\n \tuint32_t actual, expected;\n \tuint64_t mask, type_mask;\n-\tint rc;\n \n-\tif (!idev || !meta_pool_cb)\n-\t\treturn -EFAULT;\n \tinl_cfg = &idev->inl_cfg;\n-\n-\t/* Create meta aura if not present */\n-\tif (!inl_cfg->meta_aura) {\n-\t\trc = nix_inl_meta_aura_create(idev, rq->first_skip);\n-\t\tif (rc)\n-\t\t\treturn rc;\n-\t}\n-\n \t/* Validate if we have enough meta buffers */\n \tmask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);\n \texpected = roc_npa_buf_type_limit_get(mask);\n@@ -145,7 +182,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)\n \t\t\texpected = roc_npa_buf_type_limit_get(mask);\n \n \t\t\tif (actual < expected) {\n-\t\t\t\tplt_err(\"VWQE aura shared b/w Inline inbound and non-Inline inbound \"\n+\t\t\t\tplt_err(\"VWQE aura shared b/w Inline inbound and non-Inline \"\n \t\t\t\t\t\"ports needs vwqe bufs(%u) minimum of all pkt bufs (%u)\",\n \t\t\t\t\tactual, expected);\n \t\t\t\treturn -EIO;\n@@ -164,6 +201,71 @@ roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)\n \t\t\t}\n \t\t}\n \t}\n+\treturn 0;\n+}\n+\n+static int\n+nix_inl_local_meta_buffer_validate(struct roc_nix *roc_nix, struct roc_nix_rq *rq)\n+{\n+\t/* Validate if we have enough space for meta buffer */\n+\tif (roc_nix->buf_sz && (rq->first_skip + NIX_INL_META_SIZE > roc_nix->buf_sz)) {\n+\t\tplt_err(\"Meta buffer size %u not sufficient to meet RQ first skip %u\",\n+\t\t\troc_nix->buf_sz, rq->first_skip);\n+\t\treturn -EIO;\n+\t}\n+\n+\t/* TODO: Validate VWQE buffers */\n+\n+\treturn 0;\n+}\n+\n+int\n+roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)\n+{\n+\tstruct nix *nix = roc_nix_to_nix_priv(roc_nix);\n+\tstruct idev_cfg *idev = 
idev_get_cfg();\n+\tstruct idev_nix_inl_cfg *inl_cfg;\n+\tbool aura_setup = false;\n+\tuint64_t *meta_aura;\n+\tint rc;\n+\n+\tif (!idev || !meta_pool_cb)\n+\t\treturn -EFAULT;\n+\n+\tinl_cfg = &idev->inl_cfg;\n+\n+\t/* Create meta aura if not present */\n+\tif (roc_nix->local_meta_aura_ena)\n+\t\tmeta_aura = &roc_nix->meta_aura_handle;\n+\telse\n+\t\tmeta_aura = &inl_cfg->meta_aura;\n+\n+\tif (!(*meta_aura)) {\n+\t\trc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\taura_setup = true;\n+\t}\n+\t/* Update rq meta aura handle */\n+\trq->meta_aura_handle = *meta_aura;\n+\n+\tif (roc_nix->local_meta_aura_ena) {\n+\t\trc = nix_inl_local_meta_buffer_validate(roc_nix, rq);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\t/* Check for TC config on RQ 0 when local meta aura is used as\n+\t\t * inline meta aura creation is delayed.\n+\t\t */\n+\t\tif (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)\n+\t\t\troc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,\n+\t\t\t\t\t      true, true, nix->rqs[0]->tc);\n+\t} else {\n+\t\trc = nix_inl_global_meta_buffer_validate(idev, rq);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n \n \treturn 0;\n }\n@@ -426,6 +528,7 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)\n \tstruct idev_nix_inl_cfg *inl_cfg;\n \tuint64_t aura_handle;\n \tint rc = -ENOSPC;\n+\tuint32_t buf_sz;\n \tint i;\n \n \tif (!idev)\n@@ -473,10 +576,21 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)\n \tmsk_req->rq_mask.xqe_drop_ena = 0;\n \tmsk_req->rq_mask.spb_ena = 0;\n \n-\taura_handle = roc_npa_zero_aura_handle();\n+\tif (roc_nix->local_meta_aura_ena) {\n+\t\taura_handle = roc_nix->meta_aura_handle;\n+\t\tbuf_sz = roc_nix->buf_sz;\n+\t\tif (!aura_handle && enable) {\n+\t\t\tplt_err(\"NULL meta aura handle\");\n+\t\t\tgoto exit;\n+\t\t}\n+\t} else {\n+\t\taura_handle = roc_npa_zero_aura_handle();\n+\t\tbuf_sz = inl_cfg->buf_sz;\n+\t}\n+\n \tmsk_req->ipsec_cfg1.spb_cpt_aura = roc_npa_aura_handle_to_aura(aura_handle);\n \tmsk_req->ipsec_cfg1.rq_mask_enable = enable;\n-\tmsk_req->ipsec_cfg1.spb_cpt_sizem1 = (inl_cfg->buf_sz >> 7) - 1;\n+\tmsk_req->ipsec_cfg1.spb_cpt_sizem1 = (buf_sz >> 7) - 1;\n \tmsk_req->ipsec_cfg1.spb_cpt_enable = enable;\n \n \trc = mbox_process(mbox);\n@@ -539,7 +653,8 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)\n \n \tif (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {\n \t\tnix->need_meta_aura = true;\n-\t\tidev->inl_cfg.refs++;\n+\t\tif (!roc_nix->local_meta_aura_ena)\n+\t\t\tidev->inl_cfg.refs++;\n \t}\n \n \tnix->inl_inb_ena = true;\n@@ -562,9 +677,13 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)\n \tnix->inl_inb_ena = false;\n \tif (nix->need_meta_aura) {\n \t\tnix->need_meta_aura = false;\n-\t\tidev->inl_cfg.refs--;\n-\t\tif (!idev->inl_cfg.refs)\n-\t\t\tnix_inl_meta_aura_destroy();\n+\t\tif (roc_nix->local_meta_aura_ena) {\n+\t\t\tnix_inl_meta_aura_destroy(roc_nix);\n+\t\t} else {\n+\t\t\tidev->inl_cfg.refs--;\n+\t\t\tif (!idev->inl_cfg.refs)\n+\t\t\t\tnix_inl_meta_aura_destroy(roc_nix);\n+\t\t}\n \t}\n \n \tif (roc_feature_nix_has_inl_rq_mask()) {\n@@ -968,7 +1087,7 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)\n \n \t/* Check meta aura */\n \tif (enable && nix->need_meta_aura) {\n-\t\trc = roc_nix_inl_meta_aura_check(rq);\n+\t\trc = roc_nix_inl_meta_aura_check(rq->roc_nix, rq);\n \t\tif (rc)\n \t\t\treturn rc;\n \t}\n@@ -1058,7 +1177,7 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)\n \t\t\treturn rc;\n 
\n \t\tif (enable && nix->need_meta_aura)\n-\t\t\treturn roc_nix_inl_meta_aura_check(inl_rq);\n+\t\t\treturn roc_nix_inl_meta_aura_check(roc_nix, inl_rq);\n \t}\n \treturn 0;\n }\n@@ -1084,15 +1203,22 @@ roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)\n \t * managed outside RoC.\n \t */\n \tnix->inl_inb_ena = ena;\n-\tif (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {\n-\t\tif (ena) {\n-\t\t\tnix->need_meta_aura = true;\n+\n+\tif (roc_model_is_cn9k() || roc_errata_nix_no_meta_aura())\n+\t\treturn;\n+\n+\tif (ena) {\n+\t\tnix->need_meta_aura = true;\n+\t\tif (!roc_nix->local_meta_aura_ena)\n \t\t\tidev->inl_cfg.refs++;\n-\t\t} else if (nix->need_meta_aura) {\n-\t\t\tnix->need_meta_aura = false;\n+\t} else if (nix->need_meta_aura) {\n+\t\tnix->need_meta_aura = false;\n+\t\tif (roc_nix->local_meta_aura_ena) {\n+\t\t\tnix_inl_meta_aura_destroy(roc_nix);\n+\t\t} else {\n \t\t\tidev->inl_cfg.refs--;\n \t\t\tif (!idev->inl_cfg.refs)\n-\t\t\t\tnix_inl_meta_aura_destroy();\n+\t\t\t\tnix_inl_meta_aura_destroy(roc_nix);\n \t\t}\n \t}\n }\ndiff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h\nindex 105a9e4ec4..6220ba6773 100644\n--- a/drivers/common/cnxk/roc_nix_inl.h\n+++ b/drivers/common/cnxk/roc_nix_inl.h\n@@ -118,8 +118,9 @@ roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa)\n typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,\n \t\t\t\t\t  uint32_t soft_exp_event);\n \n-typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,\n-\t\t\t\t\t  bool destroy);\n+typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle,  uintptr_t *mpool,\n+\t\t\t\t\t  uint32_t blk_sz, uint32_t nb_bufs, bool destroy,\n+\t\t\t\t\t  const char *mempool_name);\n \n struct roc_nix_inl_dev {\n \t/* Input parameters */\n@@ -181,7 +182,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,\n int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,\n \t\t\t\t       bool inb_inl_dev);\n int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);\n-int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq);\n+int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq);\n \n /* NIX Inline Outbound API */\n int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);\ndiff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c\nindex 33b2cdf90f..464ee0b984 100644\n--- a/drivers/common/cnxk/roc_nix_queue.c\n+++ b/drivers/common/cnxk/roc_nix_queue.c\n@@ -102,7 +102,7 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)\n \n \t/* Check for meta aura if RQ is enabled */\n \tif (enable && nix->need_meta_aura)\n-\t\trc = roc_nix_inl_meta_aura_check(rq);\n+\t\trc = roc_nix_inl_meta_aura_check(rq->roc_nix, rq);\n \treturn rc;\n }\n \n@@ -691,7 +691,7 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)\n \n \t/* Check for meta aura if RQ is enabled */\n \tif (ena && nix->need_meta_aura) {\n-\t\trc = roc_nix_inl_meta_aura_check(rq);\n+\t\trc = roc_nix_inl_meta_aura_check(roc_nix, rq);\n \t\tif (rc)\n \t\t\treturn rc;\n \t}\n@@ -745,7 +745,7 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)\n \n \t/* Check for meta aura if RQ is enabled */\n \tif (ena && nix->need_meta_aura) {\n-\t\trc = roc_nix_inl_meta_aura_check(rq);\n+\t\trc = roc_nix_inl_meta_aura_check(roc_nix, rq);\n \t\tif (rc)\n \t\t\treturn rc;\n \t}\ndiff --git a/drivers/event/cnxk/cn10k_eventdev.c 
b/drivers/event/cnxk/cn10k_eventdev.c\nindex 8e74edff55..b1cf43ee57 100644\n--- a/drivers/event/cnxk/cn10k_eventdev.c\n+++ b/drivers/event/cnxk/cn10k_eventdev.c\n@@ -843,7 +843,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,\n }\n \n static void\n-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)\n+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)\n {\n \tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n \tint i;\n@@ -855,8 +855,6 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, u\n \t\tws->tstamp = dev->tstamp;\n \t\tif (lookup_mem)\n \t\t\tws->lookup_mem = lookup_mem;\n-\t\tif (meta_aura)\n-\t\t\tws->meta_aura = meta_aura;\n \t}\n }\n \n@@ -867,7 +865,6 @@ cn10k_sso_rx_adapter_queue_add(\n \tconst struct rte_event_eth_rx_adapter_queue_conf *queue_conf)\n {\n \tstruct cn10k_eth_rxq *rxq;\n-\tuint64_t meta_aura;\n \tvoid *lookup_mem;\n \tint rc;\n \n@@ -881,8 +878,7 @@ cn10k_sso_rx_adapter_queue_add(\n \t\treturn -EINVAL;\n \trxq = eth_dev->data->rx_queues[0];\n \tlookup_mem = rxq->lookup_mem;\n-\tmeta_aura = rxq->meta_aura;\n-\tcn10k_sso_set_priv_mem(event_dev, lookup_mem, meta_aura);\n+\tcn10k_sso_set_priv_mem(event_dev, lookup_mem);\n \tcn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n \n \treturn 0;\n@@ -1056,7 +1052,7 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,\n \tcn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n \n \tret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);\n-\tcn10k_sso_set_priv_mem(event_dev, NULL, 0);\n+\tcn10k_sso_set_priv_mem(event_dev, NULL);\n \n \treturn ret;\n }\ndiff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h\nindex 2bea1f6ca6..06c71c6092 100644\n--- a/drivers/event/cnxk/cn10k_worker.h\n+++ b/drivers/event/cnxk/cn10k_worker.h\n@@ -55,9 +55,10 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc\n \tstruct cnxk_timesync_info *tstamp = ws->tstamp[port_id];\n \tvoid *lookup_mem = ws->lookup_mem;\n \tuintptr_t lbase = ws->lmt_base;\n+\tuint64_t meta_aura = 0, laddr;\n \tstruct rte_event_vector *vec;\n-\tuint64_t meta_aura, laddr;\n \tuint16_t nb_mbufs, non_vec;\n+\tstruct rte_mempool *mp;\n \tuint16_t lmt_id, d_off;\n \tstruct rte_mbuf **wqe;\n \tstruct rte_mbuf *mbuf;\n@@ -77,7 +78,12 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc\n \tif (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)\n \t\tmbuf_init |= 8;\n \n-\tmeta_aura = ws->meta_aura;\n+\tif (flags & NIX_RX_OFFLOAD_SECURITY_F) {\n+\t\tmp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port_id, lookup_mem);\n+\t\tif (mp)\n+\t\t\tmeta_aura = mp->pool_id;\n+\t}\n+\n \tnb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);\n \tnb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,\n \t\t\t\t\t      flags | NIX_RX_VWQE_F,\n@@ -94,7 +100,6 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc\n \t\t/* Pick first mbuf's aura handle assuming all\n \t\t * mbufs are from a vec and are from same RQ.\n \t\t */\n-\t\tmeta_aura = ws->meta_aura;\n \t\tif (!meta_aura)\n \t\t\tmeta_aura = mbuf->pool->pool_id;\n \t\tROC_LMT_BASE_ID_GET(lbase, lmt_id);\ndiff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c\nindex 131d42a95b..7e8339bd3a 100644\n--- a/drivers/event/cnxk/cn9k_eventdev.c\n+++ 
b/drivers/event/cnxk/cn9k_eventdev.c\n@@ -945,8 +945,7 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,\n }\n \n static void\n-cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,\n-\t\t      uint64_t aura __rte_unused)\n+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)\n {\n \tstruct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);\n \tint i;\n@@ -992,7 +991,7 @@ cn9k_sso_rx_adapter_queue_add(\n \n \trxq = eth_dev->data->rx_queues[0];\n \tlookup_mem = rxq->lookup_mem;\n-\tcn9k_sso_set_priv_mem(event_dev, lookup_mem, 0);\n+\tcn9k_sso_set_priv_mem(event_dev, lookup_mem);\n \tcn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n \n \treturn 0;\n@@ -1141,7 +1140,7 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,\n \tcn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);\n \n \tret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);\n-\tcn9k_sso_set_priv_mem(event_dev, NULL, 0);\n+\tcn9k_sso_set_priv_mem(event_dev, NULL);\n \n \treturn ret;\n }\ndiff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c\nindex fac3806e14..121480df15 100644\n--- a/drivers/event/cnxk/cnxk_tim_evdev.c\n+++ b/drivers/event/cnxk/cnxk_tim_evdev.c\n@@ -265,7 +265,7 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)\n \tcnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,\n \t\t\t      RTE_EVENT_TYPE_TIMER);\n \tcnxk_sso_xae_reconfigure(dev->event_dev);\n-\tsso_set_priv_mem_fn(dev->event_dev, NULL, 0);\n+\tsso_set_priv_mem_fn(dev->event_dev, NULL);\n \n \tplt_tim_dbg(\n \t\t\"Total memory used %\" PRIu64 \"MB\\n\",\ndiff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h\nindex 7253a37d3d..3a0b036cb4 100644\n--- a/drivers/event/cnxk/cnxk_tim_evdev.h\n+++ b/drivers/event/cnxk/cnxk_tim_evdev.h\n@@ -81,7 +81,7 @@\n \t(TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))\n \n typedef void (*cnxk_sso_set_priv_mem_t)(const struct rte_eventdev *event_dev,\n-\t\t\t\t\tvoid *lookup_mem, uint64_t aura);\n+\t\t\t\t\tvoid *lookup_mem);\n \n struct cnxk_tim_ctl {\n \tuint16_t ring;\ndiff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c\nindex 2dbca698af..019c8299ce 100644\n--- a/drivers/net/cnxk/cn10k_ethdev.c\n+++ b/drivers/net/cnxk/cn10k_ethdev.c\n@@ -362,6 +362,8 @@ cn10k_nix_rx_queue_meta_aura_update(struct rte_eth_dev *eth_dev)\n \t\t\trxq->meta_aura = rxq_sp->qconf.mp->pool_id;\n \t\t}\n \t}\n+\t/* Store mempool in lookup mem */\n+\tcnxk_nix_lookup_mem_metapool_set(dev);\n }\n \n static int\ndiff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c\nindex d8ccd307a8..1cae3084e1 100644\n--- a/drivers/net/cnxk/cnxk_ethdev.c\n+++ b/drivers/net/cnxk/cnxk_ethdev.c\n@@ -275,6 +275,8 @@ nix_security_release(struct cnxk_eth_dev *dev)\n \t\t\tplt_err(\"Failed to cleanup nix inline inb, rc=%d\", rc);\n \t\tret |= rc;\n \n+\t\tcnxk_nix_lookup_mem_metapool_clear(dev);\n+\n \t\tif (dev->inb.sa_dptr) {\n \t\t\tplt_free(dev->inb.sa_dptr);\n \t\t\tdev->inb.sa_dptr = NULL;\n@@ -1852,6 +1854,8 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)\n \tnix->pci_dev = pci_dev;\n \tnix->hw_vlan_ins = true;\n \tnix->port_id = eth_dev->data->port_id;\n+\tif (roc_feature_nix_has_own_meta_aura())\n+\t\tnix->local_meta_aura_ena = true;\n \trc = roc_nix_dev_init(nix);\n \tif (rc) {\n \t\tplt_err(\"Failed to initialize roc nix rc=%d\", rc);\ndiff --git a/drivers/net/cnxk/cnxk_ethdev.h 
b/drivers/net/cnxk/cnxk_ethdev.h\nindex f0eab4244c..12c56ccd55 100644\n--- a/drivers/net/cnxk/cnxk_ethdev.h\n+++ b/drivers/net/cnxk/cnxk_ethdev.h\n@@ -594,6 +594,8 @@ int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,\n int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);\n int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);\n int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);\n+int cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev);\n+int cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev);\n __rte_internal\n int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);\n struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,\n@@ -601,8 +603,8 @@ struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,\n struct cnxk_eth_sec_sess *\n cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,\n \t\t\t      struct rte_security_session *sess);\n-int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,\n-\t\t\t      bool destroy);\n+int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,\n+\t\t\t      uint32_t nb_bufs, bool destroy, const char *mempool_name);\n \n /* Congestion Management */\n int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);\ndiff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c\nindex dbf5bd847d..e1a0845ece 100644\n--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c\n+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c\n@@ -182,6 +182,22 @@ parse_sqb_count(const char *key, const char *value, void *extra_args)\n \treturn 0;\n }\n \n+static int\n+parse_meta_bufsize(const char *key, const char *value, void *extra_args)\n+{\n+\tRTE_SET_USED(key);\n+\tuint32_t val;\n+\n+\terrno = 0;\n+\tval = strtoul(value, NULL, 0);\n+\tif (errno)\n+\t\tval = 0;\n+\n+\t*(uint32_t *)extra_args = val;\n+\n+\treturn 0;\n+}\n+\n static int\n parse_switch_header_type(const char *key, const char *value, void *extra_args)\n {\n@@ -248,6 +264,7 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args)\n #define CNXK_FLOW_PRE_L2_INFO\t\"flow_pre_l2_info\"\n #define CNXK_CUSTOM_SA_ACT\t\"custom_sa_act\"\n #define CNXK_SQB_SLACK\t\t\"sqb_slack\"\n+#define CNXK_NIX_META_BUF_SZ\t\"meta_buf_sz\"\n \n int\n cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)\n@@ -270,6 +287,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)\n \tuint16_t tx_compl_ena = 0;\n \tuint16_t custom_sa_act = 0;\n \tstruct rte_kvargs *kvlist;\n+\tuint32_t meta_buf_sz = 0;\n \tuint16_t no_inl_dev = 0;\n \tuint8_t lock_rx_ctx = 0;\n \n@@ -319,6 +337,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)\n \t\t\t   &custom_sa_act);\n \trte_kvargs_process(kvlist, CNXK_SQB_SLACK, &parse_sqb_count,\n \t\t\t   &sqb_slack);\n+\trte_kvargs_process(kvlist, CNXK_NIX_META_BUF_SZ, &parse_meta_bufsize, &meta_buf_sz);\n \trte_kvargs_free(kvlist);\n \n null_devargs:\n@@ -337,6 +356,10 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)\n \tdev->nix.lock_rx_ctx = lock_rx_ctx;\n \tdev->nix.custom_sa_action = custom_sa_act;\n \tdev->nix.sqb_slack = sqb_slack;\n+\n+\tif (roc_feature_nix_has_own_meta_aura())\n+\t\tdev->nix.meta_buf_sz = meta_buf_sz;\n+\n \tdev->npc.flow_prealloc_size = flow_prealloc_size;\n \tdev->npc.flow_max_priority = flow_max_priority;\n 
\tdev->npc.switch_header_type = switch_header_type;\ndiff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h\nindex a812c78eda..c1f99a2616 100644\n--- a/drivers/net/cnxk/cnxk_ethdev_dp.h\n+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h\n@@ -34,6 +34,9 @@\n #define ERRCODE_ERRLEN_WIDTH 12\n #define ERR_ARRAY_SZ\t     ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))\n \n+#define SA_BASE_TBL_SZ\t(RTE_MAX_ETHPORTS * sizeof(uintptr_t))\n+#define MEMPOOL_TBL_SZ\t(RTE_MAX_ETHPORTS * sizeof(uintptr_t))\n+\n #define CNXK_NIX_UDP_TUN_BITMASK                                               \\\n \t((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) |                               \\\n \t (1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))\n@@ -164,4 +167,14 @@ cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem)\n \treturn *((const uintptr_t *)sa_base_tbl + port);\n }\n \n+static __rte_always_inline uintptr_t\n+cnxk_nix_inl_metapool_get(uint16_t port, const void *lookup_mem)\n+{\n+\tuintptr_t metapool_tbl;\n+\n+\tmetapool_tbl = (uintptr_t)lookup_mem;\n+\tmetapool_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;\n+\treturn *((const uintptr_t *)metapool_tbl + port);\n+}\n+\n #endif /* __CNXK_ETHDEV_DP_H__ */\ndiff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c\nindex 6c71f9554b..aa8a378a00 100644\n--- a/drivers/net/cnxk/cnxk_ethdev_sec.c\n+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c\n@@ -38,15 +38,22 @@ bitmap_ctzll(uint64_t slab)\n }\n \n int\n-cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs, bool destroy)\n+cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,\n+\t\t\t  uint32_t nb_bufs, bool destroy, const char *mempool_name)\n {\n-\tconst char *mp_name = CNXK_NIX_INL_META_POOL_NAME;\n+\tconst char *mp_name = NULL;\n \tstruct rte_pktmbuf_pool_private mbp_priv;\n \tstruct npa_aura_s *aura;\n \tstruct rte_mempool *mp;\n \tuint16_t first_skip;\n \tint rc;\n \n+\t/* Null Mempool name indicates to allocate Zero aura. 
*/\n+\tif (!mempool_name)\n+\t\tmp_name = CNXK_NIX_INL_META_POOL_NAME;\n+\telse\n+\t\tmp_name = mempool_name;\n+\n \t/* Destroy the mempool if requested */\n \tif (destroy) {\n \t\tmp = rte_mempool_lookup(mp_name);\n@@ -62,6 +69,7 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bu\n \t\trte_mempool_free(mp);\n \n \t\t*aura_handle = 0;\n+\t\t*mpool = 0;\n \t\treturn 0;\n \t}\n \n@@ -83,10 +91,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bu\n \t\tgoto free_mp;\n \t}\n \taura->ena = 1;\n-\taura->pool_addr = 0x0;\n+\tif (!mempool_name)\n+\t\taura->pool_addr = 0;\n+\telse\n+\t\taura->pool_addr = 1; /* Any non zero value, so that alloc from next free Index */\n \n-\trc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),\n-\t\t\t\t\taura);\n+\trc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(), aura);\n \tif (rc) {\n \t\tplt_err(\"Failed to setup mempool ops for meta, rc=%d\", rc);\n \t\tgoto free_aura;\n@@ -108,6 +118,7 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bu\n \n \trte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);\n \t*aura_handle = mp->pool_id;\n+\t*mpool = (uintptr_t)mp;\n \treturn 0;\n free_aura:\n \tplt_free(aura);\ndiff --git a/drivers/net/cnxk/cnxk_lookup.c b/drivers/net/cnxk/cnxk_lookup.c\nindex 6d561f194f..c0a7129a9c 100644\n--- a/drivers/net/cnxk/cnxk_lookup.c\n+++ b/drivers/net/cnxk/cnxk_lookup.c\n@@ -7,8 +7,7 @@\n \n #include \"cnxk_ethdev.h\"\n \n-#define SA_BASE_TBL_SZ\t(RTE_MAX_ETHPORTS * sizeof(uintptr_t))\n-#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ)\n+#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ + MEMPOOL_TBL_SZ)\n const uint32_t *\n cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)\n {\n@@ -371,3 +370,37 @@ cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev)\n \t*((uintptr_t *)sa_base_tbl + port) = 0;\n \treturn 0;\n }\n+\n+int\n+cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev)\n+{\n+\tvoid *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();\n+\tuint16_t port = dev->eth_dev->data->port_id;\n+\tuintptr_t mp_tbl;\n+\n+\tif (!lookup_mem)\n+\t\treturn -EIO;\n+\n+\t/* Set Mempool in lookup mem */\n+\tmp_tbl = (uintptr_t)lookup_mem;\n+\tmp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;\n+\t*((uintptr_t *)mp_tbl + port) = dev->nix.meta_mempool;\n+\treturn 0;\n+}\n+\n+int\n+cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev)\n+{\n+\tvoid *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();\n+\tuint16_t port = dev->eth_dev->data->port_id;\n+\tuintptr_t mp_tbl;\n+\n+\tif (!lookup_mem)\n+\t\treturn -EIO;\n+\n+\t/* Clear Mempool in lookup mem */\n+\tmp_tbl = (uintptr_t)lookup_mem;\n+\tmp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;\n+\t*((uintptr_t *)mp_tbl + port) = dev->nix.meta_mempool;\n+\treturn 0;\n+}\n",
    "prefixes": [
        "11/15"
    ]
}
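
The mbox URL in the response above is the most direct way to consume the patch itself. An illustrative sketch, assuming a local clone of git://dpdk.org/dpdk in ./dpdk, that fetches the mbox and applies it with git am:

import subprocess
import urllib.request

mbox_url = ("http://patches.dpdk.org/project/dpdk/patch/"
            "20230303081013.589868-11-ndabilpuram@marvell.com/mbox/")
with urllib.request.urlopen(mbox_url) as resp:
    mbox = resp.read()

# git am reads the mbox from stdin and applies it on top of the current branch
subprocess.run(["git", "am"], input=mbox, check=True, cwd="dpdk")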