get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
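
For quick experiments against this endpoint, a minimal sketch using Python's requests library is shown below. Reads need no authentication; PUT/PATCH require an API token with maintainer rights on the project, and the token value and the "accepted" state used here are illustrative assumptions, not taken from this page.

import requests

BASE = "http://patches.dpdk.org/api"

# Read a single patch as JSON; no authentication is needed for GET.
patch = requests.get(f"{BASE}/patches/130868/", params={"format": "json"}).json()
print(patch["name"], patch["state"])

# Partially update a patch (PATCH). "Token <api-token>" is a placeholder, and
# changing the state assumes the account is a maintainer of the project.
resp = requests.patch(
    f"{BASE}/patches/130868/",
    headers={"Authorization": "Token <api-token>"},
    json={"state": "accepted"},
)
resp.raise_for_status()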

GET /api/patches/130868/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 130868,
    "url": "http://patches.dpdk.org/api/patches/130868/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230830075655.8004-1-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230830075655.8004-1-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230830075655.8004-1-pbhagavatula@marvell.com",
    "date": "2023-08-30T07:56:54",
    "name": "[1/2] dma/cnxk: use mempool for DMA chunk pool",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "203944bfe6eb40f000629d503698e4f914e48272",
    "submitter": {
        "id": 1183,
        "url": "http://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230830075655.8004-1-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 29366,
            "url": "http://patches.dpdk.org/api/series/29366/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29366",
            "date": "2023-08-30T07:56:54",
            "name": "[1/2] dma/cnxk: use mempool for DMA chunk pool",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/29366/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/130868/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/130868/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 111F741F63;\n\tWed, 30 Aug 2023 09:57:05 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 9B2CC40279;\n\tWed, 30 Aug 2023 09:57:04 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 59F5640277\n for <dev@dpdk.org>; Wed, 30 Aug 2023 09:57:03 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 37U5u8Sb027898 for <dev@dpdk.org>; Wed, 30 Aug 2023 00:57:02 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3sqgwkmwf5-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Wed, 30 Aug 2023 00:57:02 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Wed, 30 Aug 2023 00:57:00 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Wed, 30 Aug 2023 00:57:00 -0700",
            "from MININT-80QBFE8.corp.innovium.com (MININT-80QBFE8.marvell.com\n [10.28.164.106])\n by maili.marvell.com (Postfix) with ESMTP id 802EA3F7065;\n Wed, 30 Aug 2023 00:56:57 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : mime-version : content-transfer-encoding :\n content-type; s=pfpt0220; bh=UFBBsQ9181xxmU5V5uRHagAnbIKneqA3/lvs97z+T3g=;\n b=D4XFdg4GZm0vt2XsM9eKRNXoAm3kMYQPYW4U8/93aKY1YUqLPnaJp2IX6sGDxr7r/4Fe\n wdK9LypeV4CSXuQzTVl4Bm7e+VN1XbdREpTFtNDXB0Ol2RhWhWfLtl9WndI+kvFdVVch\n rLiaOTbPRBjU7jGH/uaZHqYSVLrZMoUB6bqztIAU7k0w/H1LXMi5fuytkEkKLGmSLPv+\n z33PmfnqosO9acDYaufeNKcNL7IAOOW1U7HjRsv6VvZU2tjglvyZVVLvwCeFGDyke+5R\n MzE0b/aYZPnY7+OsuFqU+wfTGKT/v0rJg1OcUaXFppDmwD/BFG+HfAEPGDm3gSidplZx 0Q==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>, \"Kiran\n Kumar K\" <kirankumark@marvell.com>, Sunil Kumar Kori <skori@marvell.com>,\n Satha Rao <skoteshwar@marvell.com>, Vamsi Attunuru <vattunuru@marvell.com>",
        "CC": "<dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>",
        "Subject": "[PATCH 1/2] dma/cnxk: use mempool for DMA chunk pool",
        "Date": "Wed, 30 Aug 2023 13:26:54 +0530",
        "Message-ID": "<20230830075655.8004-1-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "YLPJGKtx5_QvVqhEhskYiquLFh2XFf4F",
        "X-Proofpoint-ORIG-GUID": "YLPJGKtx5_QvVqhEhskYiquLFh2XFf4F",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.267,Aquarius:18.0.957,Hydra:6.0.601,FMLib:17.11.176.26\n definitions=2023-08-29_16,2023-08-29_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nUse rte_mempool for DMA chunk pool to allow using mempool cache.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\n---\n drivers/common/cnxk/roc_dpi.c      |  95 +++++--------------------\n drivers/common/cnxk/roc_dpi.h      |  28 +-------\n drivers/common/cnxk/roc_dpi_priv.h |   3 -\n drivers/common/cnxk/roc_platform.c |   1 +\n drivers/common/cnxk/roc_platform.h |   2 +\n drivers/common/cnxk/version.map    |   1 +\n drivers/dma/cnxk/cnxk_dmadev.c     | 108 +++++++++++++++++++++--------\n drivers/dma/cnxk/cnxk_dmadev.h     |  10 ++-\n 8 files changed, 110 insertions(+), 138 deletions(-)",
    "diff": "diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c\nindex 0e2f803077..9cb479371a 100644\n--- a/drivers/common/cnxk/roc_dpi.c\n+++ b/drivers/common/cnxk/roc_dpi.c\n@@ -1,14 +1,14 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n  * Copyright(C) 2021 Marvell.\n  */\n+\n+#include \"roc_api.h\"\n+#include \"roc_priv.h\"\n #include <fcntl.h>\n #include <sys/stat.h>\n #include <sys/types.h>\n #include <unistd.h>\n \n-#include \"roc_api.h\"\n-#include \"roc_priv.h\"\n-\n #define DPI_PF_MBOX_SYSFS_ENTRY \"dpi_device_config\"\n \n static inline int\n@@ -52,17 +52,12 @@ roc_dpi_disable(struct roc_dpi *dpi)\n }\n \n int\n-roc_dpi_configure(struct roc_dpi *roc_dpi)\n+roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uint64_t chunk_base)\n {\n \tstruct plt_pci_device *pci_dev;\n-\tconst struct plt_memzone *dpi_mz;\n \tdpi_mbox_msg_t mbox_msg;\n-\tstruct npa_pool_s pool;\n-\tstruct npa_aura_s aura;\n-\tint rc, count, buflen;\n-\tuint64_t aura_handle;\n-\tplt_iova_t iova;\n-\tchar name[32];\n+\tuint64_t reg;\n+\tint rc;\n \n \tif (!roc_dpi) {\n \t\tplt_err(\"roc_dpi is NULL\");\n@@ -70,80 +65,31 @@ roc_dpi_configure(struct roc_dpi *roc_dpi)\n \t}\n \n \tpci_dev = roc_dpi->pci_dev;\n-\tmemset(&pool, 0, sizeof(struct npa_pool_s));\n-\tpool.nat_align = 1;\n-\n-\tmemset(&aura, 0, sizeof(aura));\n-\trc = roc_npa_pool_create(&aura_handle, DPI_CMD_QUEUE_SIZE,\n-\t\t\t\t DPI_CMD_QUEUE_BUFS, &aura, &pool, 0);\n-\tif (rc) {\n-\t\tplt_err(\"Failed to create NPA pool, err %d\\n\", rc);\n-\t\treturn rc;\n-\t}\n-\n-\tsnprintf(name, sizeof(name), \"dpimem%d:%d:%d:%d\", pci_dev->addr.domain, pci_dev->addr.bus,\n-\t\t pci_dev->addr.devid, pci_dev->addr.function);\n-\tbuflen = DPI_CMD_QUEUE_SIZE * DPI_CMD_QUEUE_BUFS;\n-\tdpi_mz = plt_memzone_reserve_aligned(name, buflen, 0, DPI_CMD_QUEUE_SIZE);\n-\tif (dpi_mz == NULL) {\n-\t\tplt_err(\"dpi memzone reserve failed\");\n-\t\trc = -ENOMEM;\n-\t\tgoto err1;\n-\t}\n-\n-\troc_dpi->mz = dpi_mz;\n-\tiova = dpi_mz->iova;\n-\tfor (count = 0; count < DPI_CMD_QUEUE_BUFS; count++) {\n-\t\troc_npa_aura_op_free(aura_handle, 0, iova);\n-\t\tiova += DPI_CMD_QUEUE_SIZE;\n-\t}\n-\n-\troc_dpi->chunk_base = (void *)roc_npa_aura_op_alloc(aura_handle, 0);\n-\tif (!roc_dpi->chunk_base) {\n-\t\tplt_err(\"Failed to alloc buffer from NPA aura\");\n-\t\trc = -ENOMEM;\n-\t\tgoto err2;\n-\t}\n \n-\troc_dpi->chunk_next = (void *)roc_npa_aura_op_alloc(aura_handle, 0);\n-\tif (!roc_dpi->chunk_next) {\n-\t\tplt_err(\"Failed to alloc buffer from NPA aura\");\n-\t\trc = -ENOMEM;\n-\t\tgoto err2;\n-\t}\n-\n-\troc_dpi->aura_handle = aura_handle;\n-\t/* subtract 2 as they have already been alloc'ed above */\n-\troc_dpi->pool_size_m1 = (DPI_CMD_QUEUE_SIZE >> 3) - 2;\n+\troc_dpi_disable(roc_dpi);\n+\treg = plt_read64(roc_dpi->rbase + DPI_VDMA_SADDR);\n+\twhile (!(reg & BIT_ULL(63)))\n+\t\treg = plt_read64(roc_dpi->rbase + DPI_VDMA_SADDR);\n \n \tplt_write64(0x0, roc_dpi->rbase + DPI_VDMA_REQQ_CTL);\n-\tplt_write64(((uint64_t)(roc_dpi->chunk_base) >> 7) << 7,\n-\t\t    roc_dpi->rbase + DPI_VDMA_SADDR);\n+\tplt_write64(chunk_base, roc_dpi->rbase + DPI_VDMA_SADDR);\n \tmbox_msg.u[0] = 0;\n \tmbox_msg.u[1] = 0;\n \t/* DPI PF driver expects vfid starts from index 0 */\n \tmbox_msg.s.vfid = roc_dpi->vfid;\n \tmbox_msg.s.cmd = DPI_QUEUE_OPEN;\n-\tmbox_msg.s.csize = DPI_CMD_QUEUE_SIZE;\n-\tmbox_msg.s.aura = roc_npa_aura_handle_to_aura(aura_handle);\n+\tmbox_msg.s.csize = chunk_sz;\n+\tmbox_msg.s.aura = aura;\n \tmbox_msg.s.sso_pf_func = 
idev_sso_pffunc_get();\n \tmbox_msg.s.npa_pf_func = idev_npa_pffunc_get();\n \n \trc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,\n \t\t\t    sizeof(dpi_mbox_msg_t));\n-\tif (rc < 0) {\n+\tif (rc < 0)\n \t\tplt_err(\"Failed to send mbox message %d to DPI PF, err %d\",\n \t\t\tmbox_msg.s.cmd, rc);\n-\t\tgoto err2;\n-\t}\n \n \treturn rc;\n-\n-err2:\n-\tplt_memzone_free(dpi_mz);\n-err1:\n-\troc_npa_pool_destroy(aura_handle);\n-\treturn rc;\n }\n \n int\n@@ -153,11 +99,9 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)\n \tuint16_t vfid;\n \n \troc_dpi->rbase = pci_dev->mem_resource[0].addr;\n-\tvfid = ((pci_dev->addr.devid & 0x1F) << 3) |\n-\t       (pci_dev->addr.function & 0x7);\n+\tvfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7);\n \tvfid -= 1;\n \troc_dpi->vfid = vfid;\n-\tplt_spinlock_init(&roc_dpi->chunk_lock);\n \n \treturn 0;\n }\n@@ -180,14 +124,9 @@ roc_dpi_dev_fini(struct roc_dpi *roc_dpi)\n \tmbox_msg.s.vfid = roc_dpi->vfid;\n \tmbox_msg.s.cmd = DPI_QUEUE_CLOSE;\n \n-\trc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,\n-\t\t\t    sizeof(dpi_mbox_msg_t));\n+\trc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg, sizeof(dpi_mbox_msg_t));\n \tif (rc < 0)\n-\t\tplt_err(\"Failed to send mbox message %d to DPI PF, err %d\",\n-\t\t\tmbox_msg.s.cmd, rc);\n-\n-\troc_npa_pool_destroy(roc_dpi->aura_handle);\n-\tplt_memzone_free(roc_dpi->mz);\n+\t\tplt_err(\"Failed to send mbox message %d to DPI PF, err %d\", mbox_msg.s.cmd, rc);\n \n \treturn rc;\n }\ndiff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h\nindex 2f061b07c5..4ebde5b8a6 100644\n--- a/drivers/common/cnxk/roc_dpi.h\n+++ b/drivers/common/cnxk/roc_dpi.h\n@@ -5,41 +5,17 @@\n #ifndef _ROC_DPI_H_\n #define _ROC_DPI_H_\n \n-struct roc_dpi_args {\n-\tuint8_t num_ssegs;\n-\tuint8_t num_dsegs;\n-\tuint8_t comp_type;\n-\tuint8_t direction;\n-\tuint8_t sdevice;\n-\tuint8_t ddevice;\n-\tuint8_t swap;\n-\tuint8_t use_lock : 1;\n-\tuint8_t tt : 7;\n-\tuint16_t func;\n-\tuint16_t grp;\n-\tuint32_t tag;\n-\tuint64_t comp_ptr;\n-};\n-\n struct roc_dpi {\n-\t/* Input parameters */\n \tstruct plt_pci_device *pci_dev;\n-\t/* End of Input parameters */\n-\tconst struct plt_memzone *mz;\n \tuint8_t *rbase;\n \tuint16_t vfid;\n-\tuint16_t pool_size_m1;\n-\tuint16_t chunk_head;\n-\tuint64_t *chunk_base;\n-\tuint64_t *chunk_next;\n-\tuint64_t aura_handle;\n-\tplt_spinlock_t chunk_lock;\n } __plt_cache_aligned;\n \n int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi);\n int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi);\n \n-int __roc_api roc_dpi_configure(struct roc_dpi *dpi);\n+int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, uint64_t aura,\n+\t\t\t\tuint64_t chunk_base);\n int __roc_api roc_dpi_enable(struct roc_dpi *dpi);\n int __roc_api roc_dpi_disable(struct roc_dpi *dpi);\n \ndiff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h\nindex 1fa1a715d3..518a3e7351 100644\n--- a/drivers/common/cnxk/roc_dpi_priv.h\n+++ b/drivers/common/cnxk/roc_dpi_priv.h\n@@ -16,9 +16,6 @@\n #define DPI_REG_DUMP\t0x3\n #define DPI_GET_REG_CFG 0x4\n \n-#define DPI_CMD_QUEUE_SIZE 4096\n-#define DPI_CMD_QUEUE_BUFS 1024\n-\n typedef union dpi_mbox_msg_t {\n \tuint64_t u[2];\n \tstruct dpi_mbox_message_s {\ndiff --git a/drivers/common/cnxk/roc_platform.c b/drivers/common/cnxk/roc_platform.c\nindex f91b95ceab..f8287bcf6b 100644\n--- a/drivers/common/cnxk/roc_platform.c\n+++ b/drivers/common/cnxk/roc_platform.c\n@@ -70,4 +70,5 @@ 
RTE_LOG_REGISTER(cnxk_logtype_npc, pmd.net.cnxk.flow, NOTICE);\n RTE_LOG_REGISTER(cnxk_logtype_sso, pmd.event.cnxk, NOTICE);\n RTE_LOG_REGISTER(cnxk_logtype_tim, pmd.event.cnxk.timer, NOTICE);\n RTE_LOG_REGISTER(cnxk_logtype_tm, pmd.net.cnxk.tm, NOTICE);\n+RTE_LOG_REGISTER(cnxk_logtype_dpi, pmd.dma.cnxk.dpi, NOTICE);\n RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ree, NOTICE);\ndiff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h\nindex 08f83aba12..dfd4da21b6 100644\n--- a/drivers/common/cnxk/roc_platform.h\n+++ b/drivers/common/cnxk/roc_platform.h\n@@ -242,6 +242,7 @@ extern int cnxk_logtype_sso;\n extern int cnxk_logtype_tim;\n extern int cnxk_logtype_tm;\n extern int cnxk_logtype_ree;\n+extern int cnxk_logtype_dpi;\n \n #define plt_err(fmt, args...)                                                  \\\n \tRTE_LOG(ERR, PMD, \"%s():%u \" fmt \"\\n\", __func__, __LINE__, ##args)\n@@ -270,6 +271,7 @@ extern int cnxk_logtype_ree;\n #define plt_tim_dbg(fmt, ...)\tplt_dbg(tim, fmt, ##__VA_ARGS__)\n #define plt_tm_dbg(fmt, ...)\tplt_dbg(tm, fmt, ##__VA_ARGS__)\n #define plt_ree_dbg(fmt, ...)\tplt_dbg(ree, fmt, ##__VA_ARGS__)\n+#define plt_dpi_dbg(fmt, ...)\tplt_dbg(dpi, fmt, ##__VA_ARGS__)\n \n /* Datapath logs */\n #define plt_dp_err(fmt, args...)                                               \\\ndiff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map\nindex 8c71497df8..1540dfadf9 100644\n--- a/drivers/common/cnxk/version.map\n+++ b/drivers/common/cnxk/version.map\n@@ -7,6 +7,7 @@ INTERNAL {\n \tcnxk_ipsec_outb_roundup_byte;\n \tcnxk_logtype_base;\n \tcnxk_logtype_cpt;\n+\tcnxk_logtype_dpi;\n \tcnxk_logtype_mbox;\n \tcnxk_logtype_ml;\n \tcnxk_logtype_nix;\ndiff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c\nindex eec6a897e2..35c2b79156 100644\n--- a/drivers/dma/cnxk/cnxk_dmadev.c\n+++ b/drivers/dma/cnxk/cnxk_dmadev.c\n@@ -11,6 +11,7 @@\n #include <rte_dmadev_pmd.h>\n #include <rte_eal.h>\n #include <rte_lcore.h>\n+#include <rte_mbuf_pool_ops.h>\n #include <rte_mempool.h>\n #include <rte_pci.h>\n \n@@ -70,10 +71,54 @@ cnxk_dmadev_vchan_free(struct cnxk_dpi_vf_s *dpivf, uint16_t vchan)\n \treturn 0;\n }\n \n+static int\n+cnxk_dmadev_chunk_pool_create(struct rte_dma_dev *dev)\n+{\n+\tchar pool_name[RTE_MEMPOOL_NAMESIZE];\n+\tstruct cnxk_dpi_vf_s *dpivf = NULL;\n+\tuint64_t nb_chunks;\n+\tint rc;\n+\n+\tdpivf = dev->fp_obj->dev_private;\n+\t/* Create chunk pool. 
*/\n+\tsnprintf(pool_name, sizeof(pool_name), \"cnxk_dma_chunk_pool%d\", dev->data->dev_id);\n+\n+\tnb_chunks = DPI_CMD_QUEUE_BUFS;\n+\tnb_chunks += (CNXK_DMA_POOL_MAX_CACHE_SZ * rte_lcore_count());\n+\tdpivf->chunk_pool =\n+\t\trte_mempool_create_empty(pool_name, nb_chunks, DPI_CMD_QUEUE_BUF_SIZE,\n+\t\t\t\t\t CNXK_DMA_POOL_MAX_CACHE_SZ, 0, rte_socket_id(), 0);\n+\n+\tif (dpivf->chunk_pool == NULL) {\n+\t\tplt_err(\"Unable to create chunkpool.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trc = rte_mempool_set_ops_byname(dpivf->chunk_pool, rte_mbuf_platform_mempool_ops(), NULL);\n+\tif (rc < 0) {\n+\t\tplt_err(\"Unable to set chunkpool ops\");\n+\t\tgoto free;\n+\t}\n+\n+\trc = rte_mempool_populate_default(dpivf->chunk_pool);\n+\tif (rc < 0) {\n+\t\tplt_err(\"Unable to set populate chunkpool.\");\n+\t\tgoto free;\n+\t}\n+\tdpivf->aura = roc_npa_aura_handle_to_aura(dpivf->chunk_pool->pool_id);\n+\n+\treturn 0;\n+\n+free:\n+\trte_mempool_free(dpivf->chunk_pool);\n+\treturn rc;\n+}\n+\n static int\n cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf, uint32_t conf_sz)\n {\n \tstruct cnxk_dpi_vf_s *dpivf = NULL;\n+\tvoid *chunk;\n \tint rc = 0;\n \n \tRTE_SET_USED(conf_sz);\n@@ -92,12 +137,29 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,\n \tif (dpivf->flag & CNXK_DPI_DEV_CONFIG)\n \t\treturn rc;\n \n-\trc = roc_dpi_configure(&dpivf->rdpi);\n+\trc = cnxk_dmadev_chunk_pool_create(dev);\n+\tif (rc < 0) {\n+\t\tplt_err(\"DMA pool configure failed err = %d\", rc);\n+\t\tgoto done;\n+\t}\n+\n+\trc = rte_mempool_get(dpivf->chunk_pool, &chunk);\n+\tif (rc < 0) {\n+\t\tplt_err(\"DMA failed to get chunk pointer err = %d\", rc);\n+\t\trte_mempool_free(dpivf->chunk_pool);\n+\t\tgoto done;\n+\t}\n+\n+\trc = roc_dpi_configure(&dpivf->rdpi, DPI_CMD_QUEUE_BUF_SIZE, dpivf->aura, (uint64_t)chunk);\n \tif (rc < 0) {\n \t\tplt_err(\"DMA configure failed err = %d\", rc);\n+\t\trte_mempool_free(dpivf->chunk_pool);\n \t\tgoto done;\n \t}\n \n+\tdpivf->chunk_base = chunk;\n+\tdpivf->chunk_head = 0;\n+\tdpivf->chunk_size_m1 = (DPI_CMD_QUEUE_BUF_SIZE >> 3) - 2;\n \tdpivf->flag |= CNXK_DPI_DEV_CONFIG;\n \n done:\n@@ -335,7 +397,7 @@ cnxk_dmadev_close(struct rte_dma_dev *dev)\n }\n \n static inline int\n-__dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count)\n+__dpi_queue_write(struct cnxk_dpi_vf_s *dpi, uint64_t *cmds, int cmd_count)\n {\n \tuint64_t *ptr = dpi->chunk_base;\n \n@@ -346,31 +408,25 @@ __dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count)\n \t * Normally there is plenty of room in the current buffer for the\n \t * command\n \t */\n-\tif (dpi->chunk_head + cmd_count < dpi->pool_size_m1) {\n+\tif (dpi->chunk_head + cmd_count < dpi->chunk_size_m1) {\n \t\tptr += dpi->chunk_head;\n \t\tdpi->chunk_head += cmd_count;\n \t\twhile (cmd_count--)\n \t\t\t*ptr++ = *cmds++;\n \t} else {\n+\t\tuint64_t *new_buff = NULL;\n \t\tint count;\n-\t\tuint64_t *new_buff = dpi->chunk_next;\n-\n-\t\tdpi->chunk_next = (void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0);\n-\t\tif (!dpi->chunk_next) {\n-\t\t\tplt_dp_dbg(\"Failed to alloc next buffer from NPA\");\n \n-\t\t\t/* NPA failed to allocate a buffer. 
Restoring chunk_next\n-\t\t\t * to its original address.\n-\t\t\t */\n-\t\t\tdpi->chunk_next = new_buff;\n-\t\t\treturn -ENOSPC;\n+\t\tif (rte_mempool_get(dpi->chunk_pool, (void **)&new_buff) < 0) {\n+\t\t\tplt_dpi_dbg(\"Failed to alloc next buffer from NPA\");\n+\t\t\treturn -ENOMEM;\n \t\t}\n \n \t\t/*\n \t\t * Figure out how many cmd words will fit in this buffer.\n \t\t * One location will be needed for the next buffer pointer.\n \t\t */\n-\t\tcount = dpi->pool_size_m1 - dpi->chunk_head;\n+\t\tcount = dpi->chunk_size_m1 - dpi->chunk_head;\n \t\tptr += dpi->chunk_head;\n \t\tcmd_count -= count;\n \t\twhile (count--)\n@@ -395,19 +451,11 @@ __dpi_queue_write(struct roc_dpi *dpi, uint64_t *cmds, int cmd_count)\n \t\t\t*ptr++ = *cmds++;\n \n \t\t/* queue index may be greater than pool size */\n-\t\tif (dpi->chunk_head >= dpi->pool_size_m1) {\n-\t\t\tnew_buff = dpi->chunk_next;\n-\t\t\tdpi->chunk_next = (void *)roc_npa_aura_op_alloc(dpi->aura_handle, 0);\n-\t\t\tif (!dpi->chunk_next) {\n-\t\t\t\tplt_dp_dbg(\"Failed to alloc next buffer from NPA\");\n-\n-\t\t\t\t/* NPA failed to allocate a buffer. Restoring chunk_next\n-\t\t\t\t * to its original address.\n-\t\t\t\t */\n-\t\t\t\tdpi->chunk_next = new_buff;\n-\t\t\t\treturn -ENOSPC;\n+\t\tif (dpi->chunk_head == dpi->chunk_size_m1) {\n+\t\t\tif (rte_mempool_get(dpi->chunk_pool, (void **)&new_buff) < 0) {\n+\t\t\t\tplt_dpi_dbg(\"Failed to alloc next buffer from NPA\");\n+\t\t\t\treturn -ENOMEM;\n \t\t\t}\n-\n \t\t\t/* Write next buffer address */\n \t\t\t*ptr = (uint64_t)new_buff;\n \t\t\tdpi->chunk_base = new_buff;\n@@ -465,7 +513,7 @@ cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t d\n \tcmd[num_words++] = length;\n \tcmd[num_words++] = lptr;\n \n-\trc = __dpi_queue_write(&dpivf->rdpi, cmd, num_words);\n+\trc = __dpi_queue_write(dpivf, cmd, num_words);\n \tif (unlikely(rc)) {\n \t\tSTRM_DEC(dpi_conf->c_desc, tail);\n \t\treturn rc;\n@@ -537,7 +585,7 @@ cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge\n \t\tlptr++;\n \t}\n \n-\trc = __dpi_queue_write(&dpivf->rdpi, cmd, num_words);\n+\trc = __dpi_queue_write(dpivf, cmd, num_words);\n \tif (unlikely(rc)) {\n \t\tSTRM_DEC(dpi_conf->c_desc, tail);\n \t\treturn rc;\n@@ -593,7 +641,7 @@ cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t\n \tcmd[num_words++] = length;\n \tcmd[num_words++] = lptr;\n \n-\trc = __dpi_queue_write(&dpivf->rdpi, cmd, num_words);\n+\trc = __dpi_queue_write(dpivf, cmd, num_words);\n \tif (unlikely(rc)) {\n \t\tSTRM_DEC(dpi_conf->c_desc, tail);\n \t\treturn rc;\n@@ -656,7 +704,7 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge\n \t\tlptr++;\n \t}\n \n-\trc = __dpi_queue_write(&dpivf->rdpi, cmd, num_words);\n+\trc = __dpi_queue_write(dpivf, cmd, num_words);\n \tif (unlikely(rc)) {\n \t\tSTRM_DEC(dpi_conf->c_desc, tail);\n \t\treturn rc;\ndiff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h\nindex 254e7fea20..65f12d844d 100644\n--- a/drivers/dma/cnxk/cnxk_dmadev.h\n+++ b/drivers/dma/cnxk/cnxk_dmadev.h\n@@ -12,12 +12,15 @@\n #define DPI_MAX_DESC\t     2048\n #define DPI_MIN_DESC\t     2\n #define MAX_VCHANS_PER_QUEUE 4\n+#define DPI_CMD_QUEUE_BUF_SIZE 4096\n+#define DPI_CMD_QUEUE_BUFS     1024\n \n /* Set Completion data to 0xFF when request submitted,\n  * upon successful request completion engine reset to completion status\n  */\n #define DPI_REQ_CDATA 0xFF\n \n+#define CNXK_DMA_POOL_MAX_CACHE_SZ (16)\n #define CNXK_DPI_DEV_CONFIG 
(1ULL << 0)\n #define CNXK_DPI_DEV_START  (1ULL << 1)\n \n@@ -45,8 +48,13 @@ struct cnxk_dpi_conf {\n };\n \n struct cnxk_dpi_vf_s {\n-\tstruct roc_dpi rdpi;\n+\tuint64_t *chunk_base;\n+\tuint16_t chunk_head;\n+\tuint16_t chunk_size_m1;\n+\tstruct rte_mempool *chunk_pool;\n \tstruct cnxk_dpi_conf conf[MAX_VCHANS_PER_QUEUE];\n+\tstruct roc_dpi rdpi;\n+\tuint32_t aura;\n \tuint16_t num_vchans;\n \tuint16_t flag;\n } __plt_cache_aligned;\n",
    "prefixes": [
        "1/2"
    ]
}
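
The hyperlinked fields in the response ("mbox", "checks", "comments", "series") can be fetched directly. A small sketch follows, assuming the same endpoint as above and that the per-patch checks listing returns a JSON list of objects carrying "context" and "state" fields:

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/130868/", params={"format": "json"}).json()

# Save the raw patch mbox so it can be applied locally with `git am`.
with open("130868.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# Walk the per-patch CI results; the aggregate verdict is patch["check"] ("warning" above).
for check in requests.get(patch["checks"], params={"format": "json"}).json():
    print(check.get("context"), check.get("state"))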