get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
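A minimal sketch of driving these methods from a client, assuming the Python requests package; the API token and the choice of "state" as the updated field are illustrative assumptions (writes require an authenticated account with suitable rights on the Patchwork instance), while a plain GET needs no credentials:

import requests

URL = "http://patches.dpdk.org/api/patches/127419/"
TOKEN = "..."  # hypothetical Patchwork API token; needed for PUT/PATCH only

# GET: read the patch as JSON.
patch = requests.get(URL, timeout=30).json()
print(patch["name"], patch["state"])

# PATCH: partial update, changing only the supplied fields.
resp = requests.patch(
    URL,
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},  # assumed writable for maintainers
    timeout=30,
)
resp.raise_for_status()

The raw exchange for a GET on this resource is reproduced below.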

GET /api/patches/127419/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 127419,
    "url": "http://patches.dpdk.org/api/patches/127419/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230525095904.3967080-30-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230525095904.3967080-30-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230525095904.3967080-30-ndabilpuram@marvell.com",
    "date": "2023-05-25T09:59:02",
    "name": "[v3,30/32] net/cnxk: handle extbuf completion on ethdev stop",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "ccb479a41aa6e017f61368a8978f3b31619b8227",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230525095904.3967080-30-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 28180,
            "url": "http://patches.dpdk.org/api/series/28180/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=28180",
            "date": "2023-05-25T09:58:36",
            "name": "[v3,01/32] common/cnxk: allocate dynamic BPIDs",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/28180/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/127419/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/127419/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9CEDD42B9A;\n\tThu, 25 May 2023 12:11:40 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 4E2D942DBC;\n\tThu, 25 May 2023 12:10:21 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id E90AB42D38\n for <dev@dpdk.org>; Thu, 25 May 2023 12:10:15 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 34PA2W0Q020284 for <dev@dpdk.org>; Thu, 25 May 2023 03:10:15 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3qt5jng0pa-15\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 25 May 2023 03:10:15 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Thu, 25 May 2023 03:09:49 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Thu, 25 May 2023 03:09:49 -0700",
            "from hyd1588t430.caveonetworks.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id D6E115B6EA5;\n Thu, 25 May 2023 03:00:45 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=N4rURLMmA4O8P//Rjcyp7WWHUfXTYBxTp7z5KUCWcns=;\n b=dGd8n16xEfcM5AngDJzL5H9SUximOHtE8p9IfqCyOug+GIZOXMOwQngC9K0EcVlqn2il\n jEbVOF0NnUPKuLQewhmWkYdvFJESjIvwpJY3NBxD1GAbw+9Fjntvwb9IYxySdgIHnCh1\n srVvXle5gTjrmg0bmCUJE/uOp/y1+ZEWaaE19jzM2/lpjxBOlM9JeXmnlDHuNYaufvqh\n AzROMG2dxFZOsMpENoiomHO3d7z0xmdszbpFSYYU2Iv7fmW2RWtLZDXauwk+qhL6Ml1f\n 3TgQCRGiV2LhM4U03JO2XGyG4ZiQsu0ElHLuBVyj0AhRZ520JdMJ3QdzdOtT0KJOa9Wh qQ==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "Pavan Nikhilesh <pbhagavatula@marvell.com>, Shijith Thotton\n <sthotton@marvell.com>, Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>,\n Kiran Kumar K <kirankumark@marvell.com>, Sunil Kumar Kori\n <skori@marvell.com>, Satha Rao <skoteshwar@marvell.com>",
        "CC": "<jerinj@marvell.com>, <dev@dpdk.org>, Rakesh Kudurumalla\n <rkudurumalla@marvell.com>",
        "Subject": "[PATCH v3 30/32] net/cnxk: handle extbuf completion on ethdev stop",
        "Date": "Thu, 25 May 2023 15:29:02 +0530",
        "Message-ID": "<20230525095904.3967080-30-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230525095904.3967080-1-ndabilpuram@marvell.com>",
        "References": "<20230411091144.1087887-1-ndabilpuram@marvell.com>\n <20230525095904.3967080-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "Q6gQgSIFxDJb1hrhenAcaHPjB_lmryV_",
        "X-Proofpoint-GUID": "Q6gQgSIFxDJb1hrhenAcaHPjB_lmryV_",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.254,Aquarius:18.0.957,Hydra:6.0.573,FMLib:17.11.176.26\n definitions=2023-05-25_06,2023-05-24_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Rakesh Kudurumalla <rkudurumalla@marvell.com>\n\nDuring tranmissoin of packets, CQ corresponding to\nSQ is polled for transmit completion packets in\ntransmit function, when last burst is transmitted\ncorresponding transmit completion packets are left\nin CQ.This patch reads leftover packets in CQ on\nethdev stop.Moved transmit completion code to cn10k_rxtx.h\nand cn9k_ethdev.h to avoid code duplication\n\nSigned-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>\n---\n drivers/event/cnxk/cn10k_tx_worker.h |  2 +-\n drivers/event/cnxk/cn9k_worker.h     |  2 +-\n drivers/net/cnxk/cn10k_ethdev.c      | 13 +++++\n drivers/net/cnxk/cn10k_rxtx.h        | 76 +++++++++++++++++++++++++\n drivers/net/cnxk/cn10k_tx.h          | 83 +---------------------------\n drivers/net/cnxk/cn9k_ethdev.c       | 14 +++++\n drivers/net/cnxk/cn9k_ethdev.h       | 77 ++++++++++++++++++++++++++\n drivers/net/cnxk/cn9k_tx.h           | 83 +---------------------------\n 8 files changed, 188 insertions(+), 162 deletions(-)",
    "diff": "diff --git a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h\nindex c18786a14c..7f170ac5f0 100644\n--- a/drivers/event/cnxk/cn10k_tx_worker.h\n+++ b/drivers/event/cnxk/cn10k_tx_worker.h\n@@ -55,7 +55,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,\n \t\treturn 0;\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, 1, 1);\n+\t\thandle_tx_completion_pkts(txq, 1);\n \n \tcn10k_nix_tx_skeleton(txq, cmd, flags, 0);\n \t/* Perform header writes before barrier\ndiff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h\nindex 1ce4b044e8..fcb82987e5 100644\n--- a/drivers/event/cnxk/cn9k_worker.h\n+++ b/drivers/event/cnxk/cn9k_worker.h\n@@ -784,7 +784,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,\n \ttxq = cn9k_sso_hws_xtract_meta(m, txq_data);\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, 1, 1);\n+\t\thandle_tx_completion_pkts(txq, 1);\n \n \tif (((txq->nb_sqb_bufs_adj -\n \t      __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))\ndiff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c\nindex 2b4ab8b772..792c1b1970 100644\n--- a/drivers/net/cnxk/cn10k_ethdev.c\n+++ b/drivers/net/cnxk/cn10k_ethdev.c\n@@ -367,6 +367,10 @@ static int\n cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)\n {\n \tstruct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];\n+\tstruct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);\n+\tuint16_t flags = dev->tx_offload_flags;\n+\tstruct roc_nix *nix = &dev->nix;\n+\tuint32_t head = 0, tail = 0;\n \tint rc;\n \n \trc = cnxk_nix_tx_queue_stop(eth_dev, qidx);\n@@ -375,6 +379,15 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)\n \n \t/* Clear fc cache pkts to trigger worker stop */\n \ttxq->fc_cache_pkts = 0;\n+\n+\tif ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {\n+\t\tstruct roc_nix_sq *sq = &dev->sqs[qidx];\n+\t\tdo {\n+\t\t\thandle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);\n+\t\t\troc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);\n+\t\t} while (head != tail);\n+\t}\n+\n \treturn 0;\n }\n \ndiff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h\nindex c256d54307..65dd57494a 100644\n--- a/drivers/net/cnxk/cn10k_rxtx.h\n+++ b/drivers/net/cnxk/cn10k_rxtx.h\n@@ -113,4 +113,80 @@ struct cn10k_sec_sess_priv {\n \t(void *)((uintptr_t)(lmt_addr) +                                       \\\n \t\t ((uint64_t)(lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))\n \n+static inline uint16_t\n+nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,\n+\t\tconst uint32_t qmask)\n+{\n+\tuint16_t available = txq->tx_compl.available;\n+\n+\t/* Update the available count if cached value is not enough */\n+\tif (!unlikely(available)) {\n+\t\tuint64_t reg, head, tail;\n+\n+\t\t/* Use LDADDA version to avoid reorder */\n+\t\treg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);\n+\t\t/* CQ_OP_STATUS operation error */\n+\t\tif (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||\n+\t\t\t\treg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))\n+\t\t\treturn 0;\n+\n+\t\ttail = reg & 0xFFFFF;\n+\t\thead = (reg >> 20) & 0xFFFFF;\n+\t\tif (tail < head)\n+\t\t\tavailable = tail - head + qmask + 1;\n+\t\telse\n+\t\t\tavailable = tail - head;\n+\n+\t\ttxq->tx_compl.available = available;\n+\t}\n+\treturn available;\n+}\n+\n+static inline 
void\n+handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)\n+{\n+#define CNXK_NIX_CQ_ENTRY_SZ 128\n+#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)\n+\n+\tuint16_t tx_pkts = 0, nb_pkts;\n+\tconst uintptr_t desc = txq->tx_compl.desc_base;\n+\tconst uint64_t wdata = txq->tx_compl.wdata;\n+\tconst uint32_t qmask = txq->tx_compl.qmask;\n+\tuint32_t head = txq->tx_compl.head;\n+\tstruct nix_cqe_hdr_s *tx_compl_cq;\n+\tstruct nix_send_comp_s *tx_compl_s0;\n+\tstruct rte_mbuf *m_next, *m;\n+\n+\tif (mt_safe)\n+\t\trte_spinlock_lock(&txq->tx_compl.ext_buf_lock);\n+\n+\tnb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);\n+\twhile (tx_pkts < nb_pkts) {\n+\t\trte_prefetch_non_temporal((void *)(desc +\n+\t\t\t\t\t(CQE_SZ((head + 2) & qmask))));\n+\t\ttx_compl_cq = (struct nix_cqe_hdr_s *)\n+\t\t\t(desc + CQE_SZ(head));\n+\t\ttx_compl_s0 = (struct nix_send_comp_s *)\n+\t\t\t((uint64_t *)tx_compl_cq + 1);\n+\t\tm = txq->tx_compl.ptr[tx_compl_s0->sqe_id];\n+\t\twhile (m->next != NULL) {\n+\t\t\tm_next = m->next;\n+\t\t\trte_pktmbuf_free_seg(m);\n+\t\t\tm = m_next;\n+\t\t}\n+\t\trte_pktmbuf_free_seg(m);\n+\n+\t\thead++;\n+\t\thead &= qmask;\n+\t\ttx_pkts++;\n+\t}\n+\ttxq->tx_compl.head = head;\n+\ttxq->tx_compl.available -= nb_pkts;\n+\n+\tplt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);\n+\n+\tif (mt_safe)\n+\t\trte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);\n+}\n+\n #endif /* __CN10K_RXTX_H__ */\ndiff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h\nindex c9ec01cd9d..4f23a8dfc3 100644\n--- a/drivers/net/cnxk/cn10k_tx.h\n+++ b/drivers/net/cnxk/cn10k_tx.h\n@@ -1151,83 +1151,6 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,\n \treturn segdw;\n }\n \n-static inline uint16_t\n-nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,\n-\t\tconst uint16_t pkts, const uint32_t qmask)\n-{\n-\tuint32_t available = txq->tx_compl.available;\n-\n-\t/* Update the available count if cached value is not enough */\n-\tif (unlikely(available < pkts)) {\n-\t\tuint64_t reg, head, tail;\n-\n-\t\t/* Use LDADDA version to avoid reorder */\n-\t\treg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);\n-\t\t/* CQ_OP_STATUS operation error */\n-\t\tif (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||\n-\t\t\t\treg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))\n-\t\t\treturn 0;\n-\n-\t\ttail = reg & 0xFFFFF;\n-\t\thead = (reg >> 20) & 0xFFFFF;\n-\t\tif (tail < head)\n-\t\t\tavailable = tail - head + qmask + 1;\n-\t\telse\n-\t\t\tavailable = tail - head;\n-\n-\t\ttxq->tx_compl.available = available;\n-\t}\n-\treturn RTE_MIN(pkts, available);\n-}\n-\n-static inline void\n-handle_tx_completion_pkts(struct cn10k_eth_txq *txq, const uint16_t pkts,\n-\t\t\t  uint8_t mt_safe)\n-{\n-#define CNXK_NIX_CQ_ENTRY_SZ 128\n-#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)\n-\n-\tuint16_t tx_pkts = 0, nb_pkts;\n-\tconst uintptr_t desc = txq->tx_compl.desc_base;\n-\tconst uint64_t wdata = txq->tx_compl.wdata;\n-\tconst uint32_t qmask = txq->tx_compl.qmask;\n-\tuint32_t head = txq->tx_compl.head;\n-\tstruct nix_cqe_hdr_s *tx_compl_cq;\n-\tstruct nix_send_comp_s *tx_compl_s0;\n-\tstruct rte_mbuf *m_next, *m;\n-\n-\tif (mt_safe)\n-\t\trte_spinlock_lock(&txq->tx_compl.ext_buf_lock);\n-\n-\tnb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);\n-\twhile (tx_pkts < nb_pkts) {\n-\t\trte_prefetch_non_temporal((void *)(desc +\n-\t\t\t\t\t(CQE_SZ((head + 2) & qmask))));\n-\t\ttx_compl_cq = (struct nix_cqe_hdr_s *)\n-\t\t\t(desc + CQE_SZ(head));\n-\t\ttx_compl_s0 = (struct nix_send_comp_s 
*)\n-\t\t\t((uint64_t *)tx_compl_cq + 1);\n-\t\tm = txq->tx_compl.ptr[tx_compl_s0->sqe_id];\n-\t\twhile (m->next != NULL) {\n-\t\t\tm_next = m->next;\n-\t\t\trte_pktmbuf_free_seg(m);\n-\t\t\tm = m_next;\n-\t\t}\n-\t\trte_pktmbuf_free_seg(m);\n-\n-\t\thead++;\n-\t\thead &= qmask;\n-\t\ttx_pkts++;\n-\t}\n-\ttxq->tx_compl.head = head;\n-\ttxq->tx_compl.available -= nb_pkts;\n-\n-\tplt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);\n-\n-\tif (mt_safe)\n-\t\trte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);\n-}\n-\n static __rte_always_inline uint16_t\n cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,\n \t\t    uint16_t pkts, uint64_t *cmd, const uint16_t flags)\n@@ -1249,7 +1172,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,\n \tbool sec;\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);\n+\t\thandle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);\n \n \tif (!(flags & NIX_TX_VWQE_F)) {\n \t\tNIX_XMIT_FC_OR_RETURN(txq, pkts);\n@@ -1398,7 +1321,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,\n \tbool sec;\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);\n+\t\thandle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);\n \n \tif (!(flags & NIX_TX_VWQE_F)) {\n \t\tNIX_XMIT_FC_OR_RETURN(txq, pkts);\n@@ -1953,7 +1876,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,\n \t} wd;\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);\n+\t\thandle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);\n \n \tif (!(flags & NIX_TX_VWQE_F)) {\n \t\tNIX_XMIT_FC_OR_RETURN(txq, pkts);\ndiff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c\nindex e55a2aa133..bae4dda5e2 100644\n--- a/drivers/net/cnxk/cn9k_ethdev.c\n+++ b/drivers/net/cnxk/cn9k_ethdev.c\n@@ -329,14 +329,28 @@ static int\n cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)\n {\n \tstruct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];\n+\tstruct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);\n+\tuint16_t flags = dev->tx_offload_flags;\n+\tstruct roc_nix *nix = &dev->nix;\n+\tuint32_t head = 0, tail = 0;\n \tint rc;\n \n+\n \trc = cnxk_nix_tx_queue_stop(eth_dev, qidx);\n \tif (rc)\n \t\treturn rc;\n \n \t/* Clear fc cache pkts to trigger worker stop */\n \ttxq->fc_cache_pkts = 0;\n+\n+\tif ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {\n+\t\tstruct roc_nix_sq *sq = &dev->sqs[qidx];\n+\t\tdo {\n+\t\t\thandle_tx_completion_pkts(txq, 0);\n+\t\t\troc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);\n+\t\t} while (head != tail);\n+\t}\n+\n \treturn 0;\n }\n \ndiff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h\nindex a82dcb3d19..9e0a3c5bb2 100644\n--- a/drivers/net/cnxk/cn9k_ethdev.h\n+++ b/drivers/net/cnxk/cn9k_ethdev.h\n@@ -107,4 +107,81 @@ void cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev);\n /* Security context setup */\n void cn9k_eth_sec_ops_override(void);\n \n+static inline uint16_t\n+nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,\n+\t\tconst uint32_t qmask)\n+{\n+\tuint16_t available = txq->tx_compl.available;\n+\n+\t/* Update the available count if cached value is not enough */\n+\tif (!unlikely(available)) {\n+\t\tuint64_t reg, head, tail;\n+\n+\t\t/* Use LDADDA version to avoid reorder */\n+\t\treg = 
roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);\n+\t\t/* CQ_OP_STATUS operation error */\n+\t\tif (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||\n+\t\t\t\treg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))\n+\t\t\treturn 0;\n+\n+\t\ttail = reg & 0xFFFFF;\n+\t\thead = (reg >> 20) & 0xFFFFF;\n+\t\tif (tail < head)\n+\t\t\tavailable = tail - head + qmask + 1;\n+\t\telse\n+\t\t\tavailable = tail - head;\n+\n+\t\ttxq->tx_compl.available = available;\n+\t}\n+\treturn available;\n+}\n+\n+static inline void\n+handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)\n+{\n+#define CNXK_NIX_CQ_ENTRY_SZ 128\n+#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)\n+\n+\tuint16_t tx_pkts = 0, nb_pkts;\n+\tconst uintptr_t desc = txq->tx_compl.desc_base;\n+\tconst uint64_t wdata = txq->tx_compl.wdata;\n+\tconst uint32_t qmask = txq->tx_compl.qmask;\n+\tuint32_t head = txq->tx_compl.head;\n+\tstruct nix_cqe_hdr_s *tx_compl_cq;\n+\tstruct nix_send_comp_s *tx_compl_s0;\n+\tstruct rte_mbuf *m_next, *m;\n+\n+\tif (mt_safe)\n+\t\trte_spinlock_lock(&txq->tx_compl.ext_buf_lock);\n+\n+\tnb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);\n+\twhile (tx_pkts < nb_pkts) {\n+\t\trte_prefetch_non_temporal((void *)(desc +\n+\t\t\t\t\t(CQE_SZ((head + 2) & qmask))));\n+\t\ttx_compl_cq = (struct nix_cqe_hdr_s *)\n+\t\t\t(desc + CQE_SZ(head));\n+\t\ttx_compl_s0 = (struct nix_send_comp_s *)\n+\t\t\t((uint64_t *)tx_compl_cq + 1);\n+\t\tm = txq->tx_compl.ptr[tx_compl_s0->sqe_id];\n+\t\twhile (m->next != NULL) {\n+\t\t\tm_next = m->next;\n+\t\t\trte_pktmbuf_free_seg(m);\n+\t\t\tm = m_next;\n+\t\t}\n+\t\trte_pktmbuf_free_seg(m);\n+\n+\t\thead++;\n+\t\thead &= qmask;\n+\t\ttx_pkts++;\n+\t}\n+\ttxq->tx_compl.head = head;\n+\ttxq->tx_compl.available -= nb_pkts;\n+\n+\tplt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);\n+\n+\tif (mt_safe)\n+\t\trte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);\n+}\n+\n+\n #endif /* __CN9K_ETHDEV_H__ */\ndiff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h\nindex e956c1ad2a..8f1e05a461 100644\n--- a/drivers/net/cnxk/cn9k_tx.h\n+++ b/drivers/net/cnxk/cn9k_tx.h\n@@ -559,83 +559,6 @@ cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,\n \t} while (lmt_status == 0);\n }\n \n-static inline uint16_t\n-nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,\n-\t\tconst uint16_t pkts, const uint32_t qmask)\n-{\n-\tuint32_t available = txq->tx_compl.available;\n-\n-\t/* Update the available count if cached value is not enough */\n-\tif (unlikely(available < pkts)) {\n-\t\tuint64_t reg, head, tail;\n-\n-\t\t/* Use LDADDA version to avoid reorder */\n-\t\treg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);\n-\t\t/* CQ_OP_STATUS operation error */\n-\t\tif (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||\n-\t\t\t\treg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))\n-\t\t\treturn 0;\n-\n-\t\ttail = reg & 0xFFFFF;\n-\t\thead = (reg >> 20) & 0xFFFFF;\n-\t\tif (tail < head)\n-\t\t\tavailable = tail - head + qmask + 1;\n-\t\telse\n-\t\t\tavailable = tail - head;\n-\n-\t\ttxq->tx_compl.available = available;\n-\t}\n-\treturn RTE_MIN(pkts, available);\n-}\n-\n-static inline void\n-handle_tx_completion_pkts(struct cn9k_eth_txq *txq, const uint16_t pkts,\n-\t\t\t  uint8_t mt_safe)\n-{\n-#define CNXK_NIX_CQ_ENTRY_SZ 128\n-#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)\n-\n-\tuint16_t tx_pkts = 0, nb_pkts;\n-\tconst uintptr_t desc = txq->tx_compl.desc_base;\n-\tconst uint64_t wdata = txq->tx_compl.wdata;\n-\tconst uint32_t qmask = txq->tx_compl.qmask;\n-\tuint32_t head = 
txq->tx_compl.head;\n-\tstruct nix_cqe_hdr_s *tx_compl_cq;\n-\tstruct nix_send_comp_s *tx_compl_s0;\n-\tstruct rte_mbuf *m_next, *m;\n-\n-\tif (mt_safe)\n-\t\trte_spinlock_lock(&txq->tx_compl.ext_buf_lock);\n-\n-\tnb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);\n-\twhile (tx_pkts < nb_pkts) {\n-\t\trte_prefetch_non_temporal((void *)(desc +\n-\t\t\t\t\t(CQE_SZ((head + 2) & qmask))));\n-\t\ttx_compl_cq = (struct nix_cqe_hdr_s *)\n-\t\t\t(desc + CQE_SZ(head));\n-\t\ttx_compl_s0 = (struct nix_send_comp_s *)\n-\t\t\t((uint64_t *)tx_compl_cq + 1);\n-\t\tm = txq->tx_compl.ptr[tx_compl_s0->sqe_id];\n-\t\twhile (m->next != NULL) {\n-\t\t\tm_next = m->next;\n-\t\t\trte_pktmbuf_free_seg(m);\n-\t\t\tm = m_next;\n-\t\t}\n-\t\trte_pktmbuf_free_seg(m);\n-\n-\t\thead++;\n-\t\thead &= qmask;\n-\t\ttx_pkts++;\n-\t}\n-\ttxq->tx_compl.head = head;\n-\ttxq->tx_compl.available -= nb_pkts;\n-\n-\tplt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);\n-\n-\tif (mt_safe)\n-\t\trte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);\n-}\n-\n static __rte_always_inline uint16_t\n cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,\n \t\t   uint64_t *cmd, const uint16_t flags)\n@@ -648,7 +571,7 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,\n \tuint16_t i;\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, pkts, 0);\n+\t\thandle_tx_completion_pkts(txq, 0);\n \n \tNIX_XMIT_FC_OR_RETURN(txq, pkts);\n \n@@ -700,7 +623,7 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tuint64_t i;\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, pkts, 0);\n+\t\thandle_tx_completion_pkts(txq, 0);\n \n \tNIX_XMIT_FC_OR_RETURN(txq, pkts);\n \n@@ -1049,7 +972,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tuint16_t pkts_left;\n \n \tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)\n-\t\thandle_tx_completion_pkts(txq, pkts, 0);\n+\t\thandle_tx_completion_pkts(txq, 0);\n \n \tNIX_XMIT_FC_OR_RETURN(txq, pkts);\n \n",
    "prefixes": [
        "v3",
        "30/32"
    ]
}
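
The "mbox" field above points to the raw patch email, which can be fed straight to git am. A short sketch under the same assumptions (Python requests; the local DPDK checkout in ./dpdk is hypothetical):

import subprocess

import requests

MBOX = "http://patches.dpdk.org/project/dpdk/patch/20230525095904.3967080-30-ndabilpuram@marvell.com/mbox/"

# Download the patch as an mbox file.
resp = requests.get(MBOX, timeout=30)
resp.raise_for_status()
with open("patch.mbox", "wb") as f:
    f.write(resp.content)

# Apply it in a local DPDK working tree (path is an assumption).
subprocess.run(["git", "am", "patch.mbox"], cwd="dpdk", check=True)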