get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update of the writable fields).
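
A minimal sketch of driving these endpoints from Python, assuming the requests library and, for the write methods, a Patchwork API token sent as an "Authorization: Token <token>" header. The state and archived values used in the PATCH body are taken from the sample response below; the exact set of writable fields and the permissions required depend on the Patchwork instance.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "REPLACE_ME"  # hypothetical token; updating patches normally requires maintainer rights

# GET: show a patch (reads need no authentication)
patch = requests.get(f"{BASE}/patches/90626/").json()
print(patch["name"], patch["state"])

# PATCH: update selected fields, e.g. the patch state and archived flag
resp = requests.patch(
    f"{BASE}/patches/90626/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()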

GET /api/patches/90626/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 90626,
    "url": "http://patches.dpdk.org/api/patches/90626/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210406114131.25874-14-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210406114131.25874-14-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210406114131.25874-14-ndabilpuram@marvell.com",
    "date": "2021-04-06T11:40:52",
    "name": "[v4,13/52] common/cnxk: add npa pool HW ops",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ec171d3b3a45f3914999ab6300c7c948e41aa66a",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210406114131.25874-14-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 16128,
            "url": "http://patches.dpdk.org/api/series/16128/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=16128",
            "date": "2021-04-06T11:40:39",
            "name": "Add Marvell CNXK common driver",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/16128/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/90626/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/90626/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3CFD6A0546;\n\tTue,  6 Apr 2021 13:43:46 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 03DF7140EEC;\n\tTue,  6 Apr 2021 13:42:22 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 5B945140EEC\n for <dev@dpdk.org>; Tue,  6 Apr 2021 13:42:20 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 136BdwBC008328 for <dev@dpdk.org>; Tue, 6 Apr 2021 04:42:19 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0a-0016f401.pphosted.com with ESMTP id 37r72p2dpt-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Tue, 06 Apr 2021 04:42:19 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Tue, 6 Apr 2021 04:42:18 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Tue, 6 Apr 2021 04:42:18 -0700",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id A70A13F704D;\n Tue,  6 Apr 2021 04:42:15 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=OTK1njqqa4ylK+axjlz35dZu2qCGSMZKqBvHAye+0ag=;\n b=RR9R4WzYBcGEVK+XpnoQ+vF3Q5p4bFd0QJ+FZ/4PhrkJAv1Aw8WDeq3o0B7jjQXc0DgF\n m7bP61lwSeSjcfvu5iOmMJSxjNegTCZ236KnPWav5wjfxjuFCMxb0NXHTcy2QFjV62AP\n iGan1vyd61Uc1x91TClwAfhk1VFernbbR6NExqy+pDkKy0dQin3ZQmyLmKMn7aUK6CVN\n XY09Y8fnjvZR2D+x1X+N44tTaGljYzPyKrpF4Yj5wKMugaU5Ok9opC7Ji2QLaox4yoXJ\n nJvot8hsGJhn7Sm8TSVY20euQDWQ6YrSdl87BDBGjnYFCyqTrqCw4/pd75Jq7kJkdKuX ZQ==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<jerinj@marvell.com>, <skori@marvell.com>, <skoteshwar@marvell.com>,\n <pbhagavatula@marvell.com>, <kirankumark@marvell.com>,\n <psatheesh@marvell.com>, <asekhar@marvell.com>",
        "Date": "Tue, 6 Apr 2021 17:10:52 +0530",
        "Message-ID": "<20210406114131.25874-14-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20210406114131.25874-1-ndabilpuram@marvell.com>",
        "References": "<20210305133918.8005-1-ndabilpuram@marvell.com>\n <20210406114131.25874-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "1Gx9NzXrKEGu8bQpDUHdrjwcVaDNezJR",
        "X-Proofpoint-ORIG-GUID": "1Gx9NzXrKEGu8bQpDUHdrjwcVaDNezJR",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.369, 18.0.761\n definitions=2021-04-06_02:2021-04-01,\n 2021-04-06 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v4 13/52] common/cnxk: add npa pool HW ops",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Ashwin Sekhar T K <asekhar@marvell.com>\n\nAdd APIs for creating, destroying, modifying\nNPA pools.\n\nSigned-off-by: Ashwin Sekhar T K <asekhar@marvell.com>\n---\n drivers/common/cnxk/roc_npa.c   | 421 ++++++++++++++++++++++++++++++++++++++++\n drivers/common/cnxk/roc_npa.h   | 146 ++++++++++++++\n drivers/common/cnxk/version.map |   5 +\n 3 files changed, 572 insertions(+)",
    "diff": "diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c\nindex 0d4a56a..80f5a78 100644\n--- a/drivers/common/cnxk/roc_npa.c\n+++ b/drivers/common/cnxk/roc_npa.c\n@@ -5,6 +5,427 @@\n #include \"roc_api.h\"\n #include \"roc_priv.h\"\n \n+void\n+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,\n+\t\t\t  uint64_t end_iova)\n+{\n+\tconst uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t       NPA_LF_POOL_OP_PTR_START0;\n+\tconst uint64_t end = roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t     NPA_LF_POOL_OP_PTR_END0;\n+\tuint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);\n+\tstruct npa_lf *lf = idev_npa_obj_get();\n+\tstruct npa_aura_lim *lim;\n+\n+\tPLT_ASSERT(lf);\n+\tlim = lf->aura_lim;\n+\n+\tlim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);\n+\tlim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);\n+\n+\troc_store_pair(lim[reg].ptr_start, reg, start);\n+\troc_store_pair(lim[reg].ptr_end, reg, end);\n+}\n+\n+static int\n+npa_aura_pool_init(struct mbox *mbox, uint32_t aura_id, struct npa_aura_s *aura,\n+\t\t   struct npa_pool_s *pool)\n+{\n+\tstruct npa_aq_enq_req *aura_init_req, *pool_init_req;\n+\tstruct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;\n+\tstruct mbox_dev *mdev = &mbox->dev[0];\n+\tint rc = -ENOSPC, off;\n+\n+\taura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);\n+\tif (aura_init_req == NULL)\n+\t\treturn rc;\n+\taura_init_req->aura_id = aura_id;\n+\taura_init_req->ctype = NPA_AQ_CTYPE_AURA;\n+\taura_init_req->op = NPA_AQ_INSTOP_INIT;\n+\tmbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));\n+\n+\tpool_init_req = mbox_alloc_msg_npa_aq_enq(mbox);\n+\tif (pool_init_req == NULL)\n+\t\treturn rc;\n+\tpool_init_req->aura_id = aura_id;\n+\tpool_init_req->ctype = NPA_AQ_CTYPE_POOL;\n+\tpool_init_req->op = NPA_AQ_INSTOP_INIT;\n+\tmbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));\n+\n+\trc = mbox_process(mbox);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n+\toff = mbox->rx_start +\n+\t      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);\n+\taura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);\n+\toff = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;\n+\tpool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);\n+\n+\tif (aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)\n+\t\treturn 0;\n+\telse\n+\t\treturn NPA_ERR_AURA_POOL_INIT;\n+}\n+\n+static int\n+npa_aura_pool_fini(struct mbox *mbox, uint32_t aura_id, uint64_t aura_handle)\n+{\n+\tstruct npa_aq_enq_req *aura_req, *pool_req;\n+\tstruct npa_aq_enq_rsp *aura_rsp, *pool_rsp;\n+\tstruct mbox_dev *mdev = &mbox->dev[0];\n+\tstruct ndc_sync_op *ndc_req;\n+\tint rc = -ENOSPC, off;\n+\tuint64_t ptr;\n+\n+\t/* Procedure for disabling an aura/pool */\n+\tplt_delay_us(10);\n+\n+\t/* Clear all the pointers from the aura */\n+\tdo {\n+\t\tptr = roc_npa_aura_op_alloc(aura_handle, 0);\n+\t} while (ptr);\n+\n+\tpool_req = mbox_alloc_msg_npa_aq_enq(mbox);\n+\tif (pool_req == NULL)\n+\t\treturn rc;\n+\tpool_req->aura_id = aura_id;\n+\tpool_req->ctype = NPA_AQ_CTYPE_POOL;\n+\tpool_req->op = NPA_AQ_INSTOP_WRITE;\n+\tpool_req->pool.ena = 0;\n+\tpool_req->pool_mask.ena = ~pool_req->pool_mask.ena;\n+\n+\taura_req = mbox_alloc_msg_npa_aq_enq(mbox);\n+\tif (aura_req == NULL)\n+\t\treturn rc;\n+\taura_req->aura_id = aura_id;\n+\taura_req->ctype = NPA_AQ_CTYPE_AURA;\n+\taura_req->op = NPA_AQ_INSTOP_WRITE;\n+\taura_req->aura.ena = 0;\n+\taura_req->aura_mask.ena = ~aura_req->aura_mask.ena;\n+\n+\trc = 
mbox_process(mbox);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n+\toff = mbox->rx_start +\n+\t      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);\n+\tpool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);\n+\n+\toff = mbox->rx_start + pool_rsp->hdr.next_msgoff;\n+\taura_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);\n+\n+\tif (aura_rsp->hdr.rc != 0 || pool_rsp->hdr.rc != 0)\n+\t\treturn NPA_ERR_AURA_POOL_FINI;\n+\n+\t/* Sync NDC-NPA for LF */\n+\tndc_req = mbox_alloc_msg_ndc_sync_op(mbox);\n+\tif (ndc_req == NULL)\n+\t\treturn -ENOSPC;\n+\tndc_req->npa_lf_sync = 1;\n+\trc = mbox_process(mbox);\n+\tif (rc) {\n+\t\tplt_err(\"Error on NDC-NPA LF sync, rc %d\", rc);\n+\t\treturn NPA_ERR_AURA_POOL_FINI;\n+\t}\n+\treturn 0;\n+}\n+\n+static inline char *\n+npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)\n+{\n+\tsnprintf(name, PLT_MEMZONE_NAMESIZE, \"roc_npa_stack_%x_%d\", lf->pf_func,\n+\t\t pool_id);\n+\treturn name;\n+}\n+\n+static inline const struct plt_memzone *\n+npa_stack_dma_alloc(struct npa_lf *lf, char *name, int pool_id, size_t size)\n+{\n+\tconst char *mz_name = npa_stack_memzone_name(lf, pool_id, name);\n+\n+\treturn plt_memzone_reserve_cache_align(mz_name, size);\n+}\n+\n+static inline int\n+npa_stack_dma_free(struct npa_lf *lf, char *name, int pool_id)\n+{\n+\tconst struct plt_memzone *mz;\n+\n+\tmz = plt_memzone_lookup(npa_stack_memzone_name(lf, pool_id, name));\n+\tif (mz == NULL)\n+\t\treturn NPA_ERR_PARAM;\n+\n+\treturn plt_memzone_free(mz);\n+}\n+\n+static inline int\n+bitmap_ctzll(uint64_t slab)\n+{\n+\tif (slab == 0)\n+\t\treturn 0;\n+\n+\treturn __builtin_ctzll(slab);\n+}\n+\n+static int\n+npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,\n+\t\t\t const uint32_t block_count, struct npa_aura_s *aura,\n+\t\t\t struct npa_pool_s *pool, uint64_t *aura_handle)\n+{\n+\tint rc, aura_id, pool_id, stack_size, alloc_size;\n+\tchar name[PLT_MEMZONE_NAMESIZE];\n+\tconst struct plt_memzone *mz;\n+\tuint64_t slab;\n+\tuint32_t pos;\n+\n+\t/* Sanity check */\n+\tif (!lf || !block_size || !block_count || !pool || !aura ||\n+\t    !aura_handle)\n+\t\treturn NPA_ERR_PARAM;\n+\n+\t/* Block size should be cache line aligned and in range of 128B-128KB */\n+\tif (block_size % ROC_ALIGN || block_size < 128 ||\n+\t    block_size > 128 * 1024)\n+\t\treturn NPA_ERR_INVALID_BLOCK_SZ;\n+\n+\tpos = 0;\n+\tslab = 0;\n+\t/* Scan from the beginning */\n+\tplt_bitmap_scan_init(lf->npa_bmp);\n+\t/* Scan bitmap to get the free pool */\n+\trc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);\n+\t/* Empty bitmap */\n+\tif (rc == 0) {\n+\t\tplt_err(\"Mempools exhausted\");\n+\t\treturn NPA_ERR_AURA_ID_ALLOC;\n+\t}\n+\n+\t/* Get aura_id from resource bitmap */\n+\taura_id = pos + bitmap_ctzll(slab);\n+\t/* Mark pool as reserved */\n+\tplt_bitmap_clear(lf->npa_bmp, aura_id);\n+\n+\t/* Configuration based on each aura has separate pool(aura-pool pair) */\n+\tpool_id = aura_id;\n+\trc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||\n+\t      aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?\n+\t\t\t   NPA_ERR_AURA_ID_ALLOC :\n+\t\t\t   0;\n+\tif (rc)\n+\t\tgoto exit;\n+\n+\t/* Allocate stack memory */\n+\tstack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;\n+\talloc_size = stack_size * lf->stack_pg_bytes;\n+\n+\tmz = npa_stack_dma_alloc(lf, name, pool_id, alloc_size);\n+\tif (mz == NULL) {\n+\t\trc = NPA_ERR_ALLOC;\n+\t\tgoto aura_res_put;\n+\t}\n+\n+\t/* Update aura fields */\n+\taura->pool_addr = pool_id; /* AF will translate to 
associated poolctx */\n+\taura->ena = 1;\n+\taura->shift = __builtin_clz(block_count) - 8;\n+\taura->limit = block_count;\n+\taura->pool_caching = 1;\n+\taura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);\n+\taura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);\n+\taura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);\n+\taura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);\n+\t/* Many to one reduction */\n+\taura->err_qint_idx = aura_id % lf->qints;\n+\n+\t/* Update pool fields */\n+\tpool->stack_base = mz->iova;\n+\tpool->ena = 1;\n+\tpool->buf_size = block_size / ROC_ALIGN;\n+\tpool->stack_max_pages = stack_size;\n+\tpool->shift = __builtin_clz(block_count) - 8;\n+\tpool->ptr_start = 0;\n+\tpool->ptr_end = ~0;\n+\tpool->stack_caching = 1;\n+\tpool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);\n+\tpool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);\n+\tpool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);\n+\n+\t/* Many to one reduction */\n+\tpool->err_qint_idx = pool_id % lf->qints;\n+\n+\t/* Issue AURA_INIT and POOL_INIT op */\n+\trc = npa_aura_pool_init(lf->mbox, aura_id, aura, pool);\n+\tif (rc)\n+\t\tgoto stack_mem_free;\n+\n+\t*aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);\n+\t/* Update aura count */\n+\troc_npa_aura_op_cnt_set(*aura_handle, 0, block_count);\n+\t/* Read it back to make sure aura count is updated */\n+\troc_npa_aura_op_cnt_get(*aura_handle);\n+\n+\treturn 0;\n+\n+stack_mem_free:\n+\tplt_memzone_free(mz);\n+aura_res_put:\n+\tplt_bitmap_set(lf->npa_bmp, aura_id);\n+exit:\n+\treturn rc;\n+}\n+\n+int\n+roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,\n+\t\t    uint32_t block_count, struct npa_aura_s *aura,\n+\t\t    struct npa_pool_s *pool)\n+{\n+\tstruct npa_aura_s defaura;\n+\tstruct npa_pool_s defpool;\n+\tstruct idev_cfg *idev;\n+\tstruct npa_lf *lf;\n+\tint rc;\n+\n+\tlf = idev_npa_obj_get();\n+\tif (lf == NULL) {\n+\t\trc = NPA_ERR_DEVICE_NOT_BOUNDED;\n+\t\tgoto error;\n+\t}\n+\n+\tidev = idev_get_cfg();\n+\tif (idev == NULL) {\n+\t\trc = NPA_ERR_ALLOC;\n+\t\tgoto error;\n+\t}\n+\n+\tif (aura == NULL) {\n+\t\tmemset(&defaura, 0, sizeof(struct npa_aura_s));\n+\t\taura = &defaura;\n+\t}\n+\tif (pool == NULL) {\n+\t\tmemset(&defpool, 0, sizeof(struct npa_pool_s));\n+\t\tdefpool.nat_align = 1;\n+\t\tdefpool.buf_offset = 1;\n+\t\tpool = &defpool;\n+\t}\n+\n+\trc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,\n+\t\t\t\t      aura_handle);\n+\tif (rc) {\n+\t\tplt_err(\"Failed to alloc pool or aura rc=%d\", rc);\n+\t\tgoto error;\n+\t}\n+\n+\tplt_npa_dbg(\"lf=%p block_sz=%d block_count=%d aura_handle=0x%\" PRIx64,\n+\t\t    lf, block_size, block_count, *aura_handle);\n+\n+\t/* Just hold the reference of the object */\n+\t__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);\n+error:\n+\treturn rc;\n+}\n+\n+int\n+roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)\n+{\n+\tstruct npa_aq_enq_req *aura_req;\n+\tstruct npa_lf *lf;\n+\tint rc;\n+\n+\tlf = idev_npa_obj_get();\n+\tif (lf == NULL)\n+\t\treturn NPA_ERR_DEVICE_NOT_BOUNDED;\n+\n+\taura_req = mbox_alloc_msg_npa_aq_enq(lf->mbox);\n+\tif (aura_req == NULL)\n+\t\treturn -ENOMEM;\n+\taura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);\n+\taura_req->ctype = NPA_AQ_CTYPE_AURA;\n+\taura_req->op = NPA_AQ_INSTOP_WRITE;\n+\n+\taura_req->aura.limit = aura_limit;\n+\taura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);\n+\trc = mbox_process(lf->mbox);\n+\n+\treturn rc;\n+}\n+\n+static int\n+npa_aura_pool_pair_free(struct npa_lf *lf, 
uint64_t aura_handle)\n+{\n+\tchar name[PLT_MEMZONE_NAMESIZE];\n+\tint aura_id, pool_id, rc;\n+\n+\tif (!lf || !aura_handle)\n+\t\treturn NPA_ERR_PARAM;\n+\n+\taura_id = roc_npa_aura_handle_to_aura(aura_handle);\n+\tpool_id = aura_id;\n+\trc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);\n+\trc |= npa_stack_dma_free(lf, name, pool_id);\n+\n+\tplt_bitmap_set(lf->npa_bmp, aura_id);\n+\n+\treturn rc;\n+}\n+\n+int\n+roc_npa_pool_destroy(uint64_t aura_handle)\n+{\n+\tstruct npa_lf *lf = idev_npa_obj_get();\n+\tint rc = 0;\n+\n+\tplt_npa_dbg(\"lf=%p aura_handle=0x%\" PRIx64, lf, aura_handle);\n+\trc = npa_aura_pool_pair_free(lf, aura_handle);\n+\tif (rc)\n+\t\tplt_err(\"Failed to destroy pool or aura rc=%d\", rc);\n+\n+\t/* Release the reference of npa */\n+\trc |= npa_lf_fini();\n+\treturn rc;\n+}\n+\n+int\n+roc_npa_pool_range_update_check(uint64_t aura_handle)\n+{\n+\tuint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);\n+\tstruct npa_lf *lf;\n+\tstruct npa_aura_lim *lim;\n+\t__io struct npa_pool_s *pool;\n+\tstruct npa_aq_enq_req *req;\n+\tstruct npa_aq_enq_rsp *rsp;\n+\tint rc;\n+\n+\tlf = idev_npa_obj_get();\n+\tif (lf == NULL)\n+\t\treturn NPA_ERR_PARAM;\n+\n+\tlim = lf->aura_lim;\n+\n+\treq = mbox_alloc_msg_npa_aq_enq(lf->mbox);\n+\tif (req == NULL)\n+\t\treturn -ENOSPC;\n+\n+\treq->aura_id = aura_id;\n+\treq->ctype = NPA_AQ_CTYPE_POOL;\n+\treq->op = NPA_AQ_INSTOP_READ;\n+\n+\trc = mbox_process_msg(lf->mbox, (void *)&rsp);\n+\tif (rc) {\n+\t\tplt_err(\"Failed to get pool(0x%\" PRIx64 \") context\", aura_id);\n+\t\treturn rc;\n+\t}\n+\n+\tpool = &rsp->pool;\n+\tif (lim[aura_id].ptr_start != pool->ptr_start ||\n+\t    lim[aura_id].ptr_end != pool->ptr_end) {\n+\t\tplt_err(\"Range update failed on pool(0x%\" PRIx64 \")\", aura_id);\n+\t\treturn NPA_ERR_PARAM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n static inline int\n npa_attach(struct mbox *mbox)\n {\ndiff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h\nindex 029f966..6983849 100644\n--- a/drivers/common/cnxk/roc_npa.h\n+++ b/drivers/common/cnxk/roc_npa.h\n@@ -6,6 +6,140 @@\n #define _ROC_NPA_H_\n \n #define ROC_AURA_ID_MASK       (BIT_ULL(16) - 1)\n+#define ROC_AURA_OP_LIMIT_MASK (BIT_ULL(36) - 1)\n+\n+/*\n+ * Generate 64bit handle to have optimized alloc and free aura operation.\n+ * 0 - ROC_AURA_ID_MASK for storing the aura_id.\n+ * [ROC_AURA_ID_MASK+1, (2^64 - 1)] for storing the lf base address.\n+ * This scheme is valid when OS can give ROC_AURA_ID_MASK\n+ * aligned address for lf base address.\n+ */\n+static inline uint64_t\n+roc_npa_aura_handle_gen(uint32_t aura_id, uintptr_t addr)\n+{\n+\tuint64_t val;\n+\n+\tval = aura_id & ROC_AURA_ID_MASK;\n+\treturn (uint64_t)addr | val;\n+}\n+\n+static inline uint64_t\n+roc_npa_aura_handle_to_aura(uint64_t aura_handle)\n+{\n+\treturn aura_handle & ROC_AURA_ID_MASK;\n+}\n+\n+static inline uintptr_t\n+roc_npa_aura_handle_to_base(uint64_t aura_handle)\n+{\n+\treturn (uintptr_t)(aura_handle & ~ROC_AURA_ID_MASK);\n+}\n+\n+static inline uint64_t\n+roc_npa_aura_op_alloc(uint64_t aura_handle, const int drop)\n+{\n+\tuint64_t wdata = roc_npa_aura_handle_to_aura(aura_handle);\n+\tint64_t *addr;\n+\n+\tif (drop)\n+\t\twdata |= BIT_ULL(63); /* DROP */\n+\n+\taddr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t   NPA_LF_AURA_OP_ALLOCX(0));\n+\treturn roc_atomic64_add_nosync(wdata, addr);\n+}\n+\n+static inline void\n+roc_npa_aura_op_free(uint64_t aura_handle, const int fabs, uint64_t iova)\n+{\n+\tuint64_t reg = 
roc_npa_aura_handle_to_aura(aura_handle);\n+\tconst uint64_t addr =\n+\t\troc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_FREE0;\n+\tif (fabs)\n+\t\treg |= BIT_ULL(63); /* FABS */\n+\n+\troc_store_pair(iova, reg, addr);\n+}\n+\n+static inline uint64_t\n+roc_npa_aura_op_cnt_get(uint64_t aura_handle)\n+{\n+\tuint64_t wdata;\n+\tint64_t *addr;\n+\tuint64_t reg;\n+\n+\twdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;\n+\taddr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t   NPA_LF_AURA_OP_CNT);\n+\treg = roc_atomic64_add_nosync(wdata, addr);\n+\n+\tif (reg & BIT_ULL(42) /* OP_ERR */)\n+\t\treturn 0;\n+\telse\n+\t\treturn reg & 0xFFFFFFFFF;\n+}\n+\n+static inline void\n+roc_npa_aura_op_cnt_set(uint64_t aura_handle, const int sign, uint64_t count)\n+{\n+\tuint64_t reg = count & (BIT_ULL(36) - 1);\n+\n+\tif (sign)\n+\t\treg |= BIT_ULL(43); /* CNT_ADD */\n+\n+\treg |= (roc_npa_aura_handle_to_aura(aura_handle) << 44);\n+\n+\tplt_write64(reg, roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t\t NPA_LF_AURA_OP_CNT);\n+}\n+\n+static inline uint64_t\n+roc_npa_aura_op_limit_get(uint64_t aura_handle)\n+{\n+\tuint64_t wdata;\n+\tint64_t *addr;\n+\tuint64_t reg;\n+\n+\twdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;\n+\taddr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t   NPA_LF_AURA_OP_LIMIT);\n+\treg = roc_atomic64_add_nosync(wdata, addr);\n+\n+\tif (reg & BIT_ULL(42) /* OP_ERR */)\n+\t\treturn 0;\n+\telse\n+\t\treturn reg & ROC_AURA_OP_LIMIT_MASK;\n+}\n+\n+static inline void\n+roc_npa_aura_op_limit_set(uint64_t aura_handle, uint64_t limit)\n+{\n+\tuint64_t reg = limit & ROC_AURA_OP_LIMIT_MASK;\n+\n+\treg |= (roc_npa_aura_handle_to_aura(aura_handle) << 44);\n+\n+\tplt_write64(reg, roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t\t NPA_LF_AURA_OP_LIMIT);\n+}\n+\n+static inline uint64_t\n+roc_npa_aura_op_available(uint64_t aura_handle)\n+{\n+\tuint64_t wdata;\n+\tuint64_t reg;\n+\tint64_t *addr;\n+\n+\twdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;\n+\taddr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +\n+\t\t\t   NPA_LF_POOL_OP_AVAILABLE);\n+\treg = roc_atomic64_add_nosync(wdata, addr);\n+\n+\tif (reg & BIT_ULL(42) /* OP_ERR */)\n+\t\treturn 0;\n+\telse\n+\t\treturn reg & 0xFFFFFFFFF;\n+}\n \n struct roc_npa {\n \tstruct plt_pci_device *pci_dev;\n@@ -17,6 +151,18 @@ struct roc_npa {\n int __roc_api roc_npa_dev_init(struct roc_npa *roc_npa);\n int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);\n \n+/* NPA pool */\n+int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,\n+\t\t\t\t  uint32_t block_count, struct npa_aura_s *aura,\n+\t\t\t\t  struct npa_pool_s *pool);\n+int __roc_api roc_npa_aura_limit_modify(uint64_t aura_handle,\n+\t\t\t\t\tuint16_t aura_limit);\n+int __roc_api roc_npa_pool_destroy(uint64_t aura_handle);\n+int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);\n+void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,\n+\t\t\t\t\t uint64_t start_iova,\n+\t\t\t\t\t uint64_t end_iova);\n+\n /* Debug */\n int __roc_api roc_npa_ctx_dump(void);\n int __roc_api roc_npa_dump(void);\ndiff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map\nindex 41752d9..1250767 100644\n--- a/drivers/common/cnxk/version.map\n+++ b/drivers/common/cnxk/version.map\n@@ -11,10 +11,15 @@ INTERNAL {\n \troc_idev_npa_maxpools_set;\n \troc_idev_num_lmtlines_get;\n \troc_model;\n+\troc_npa_aura_limit_modify;\n+\troc_npa_aura_op_range_set;\n \troc_npa_ctx_dump;\n 
\troc_npa_dev_fini;\n \troc_npa_dev_init;\n \troc_npa_dump;\n+\troc_npa_pool_create;\n+\troc_npa_pool_destroy;\n+\troc_npa_pool_range_update_check;\n \troc_plt_init;\n \troc_plt_init_cb_register;\n \n",
    "prefixes": [
        "v4",
        "13/52"
    ]
}
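
The hyperlinked fields in the response (comments, checks, mbox, and the per-series mbox) can be fetched directly. A minimal sketch, again assuming the requests library:

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/90626/").json()

# Follow the comments and checks URLs embedded in the patch resource
comments = requests.get(patch["comments"]).json()
checks = requests.get(patch["checks"]).json()
print(len(comments), "comments,", len(checks), "checks")

# Download the raw mbox of the patch (plain text, suitable for git am)
with open("patch-90626.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)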