get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/94718/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94718,
    "url": "https://patches.dpdk.org/api/patches/94718/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210623044702.4240-23-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210623044702.4240-23-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210623044702.4240-23-ndabilpuram@marvell.com",
    "date": "2021-06-23T04:46:22",
    "name": "[v4,22/62] net/cnxk: add Tx burst for cn9k",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "512b29f41bd3e4f2d3ef29e97e133aa47dba5456",
    "submitter": {
        "id": 1202,
        "url": "https://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210623044702.4240-23-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 17449,
            "url": "https://patches.dpdk.org/api/series/17449/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17449",
            "date": "2021-06-23T04:46:00",
            "name": "Marvell CNXK Ethdev Driver",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/17449/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94718/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/94718/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A75D2A0C41;\n\tWed, 23 Jun 2021 06:49:56 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 173B3410F8;\n\tWed, 23 Jun 2021 06:48:35 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id A432241139\n for <dev@dpdk.org>; Wed, 23 Jun 2021 06:48:33 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 15N4k7nr025542 for <dev@dpdk.org>; Tue, 22 Jun 2021 21:48:33 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com with ESMTP id 39bptj1gmk-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Tue, 22 Jun 2021 21:48:32 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Tue, 22 Jun 2021 21:48:29 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.18 via Frontend\n Transport; Tue, 22 Jun 2021 21:48:29 -0700",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id E55A85B693A;\n Tue, 22 Jun 2021 21:48:26 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=8FZWMIwOjWojr8FHrD8eha8sU0SL0tH9NjrWBEehUtI=;\n b=EiF1ZHgxaCdLJVIYdVyP+5nAKkohphgsdWYcGn19nWLXxRnGatMVpzz1Xvbf+cGrdHOC\n UVMDcR4OtE3JWxQ9UotkEhHomplTCXtjT9m1vgG9z9Jzt43wzbc+UyTJnuGvdOVLELy5\n 0aARlPBBYYp3GxZDh+9tX5ocvPU0WgVF87OkIK2zWRHuiEkvPji3zR/8gKctfvjnXuaT\n lux3aDri3FyBl3tJCabzHF6vi4SjvsEiOp/OeXIsPvP8jMJeTDogeLSHxT+4TogKaiGZ\n w4HnRRC+pJAwZXrsMMhzTF8O37Nq2Au7tZPGtmQz9bquYA71gsJLl/2fbC614aBVwpJg vQ==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<jerinj@marvell.com>, <skori@marvell.com>, <skoteshwar@marvell.com>,\n <pbhagavatula@marvell.com>, <kirankumark@marvell.com>,\n <psatheesh@marvell.com>, <asekhar@marvell.com>, <hkalra@marvell.com>,\n \"Nithin Dabilpuram\" <ndabilpuram@marvell.com>",
        "Date": "Wed, 23 Jun 2021 10:16:22 +0530",
        "Message-ID": "<20210623044702.4240-23-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20210623044702.4240-1-ndabilpuram@marvell.com>",
        "References": "<20210306153404.10781-1-ndabilpuram@marvell.com>\n <20210623044702.4240-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "SigN6hTymgaNXpkZXMTPQYQemj_lt2bn",
        "X-Proofpoint-GUID": "SigN6hTymgaNXpkZXMTPQYQemj_lt2bn",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.391, 18.0.790\n definitions=2021-06-23_01:2021-06-22,\n 2021-06-23 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v4 22/62] net/cnxk: add Tx burst for cn9k",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Jerin Jacob <jerinj@marvell.com>\n\nAdd Tx burst scalar version for CN9K.\n\nSigned-off-by: Jerin Jacob <jerinj@marvell.com>\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\nSigned-off-by: Harman Kalra <hkalra@marvell.com>\n---\n drivers/net/cnxk/cn9k_ethdev.h |   1 +\n drivers/net/cnxk/cn9k_tx.c     |  53 ++++++\n drivers/net/cnxk/cn9k_tx.h     | 419 +++++++++++++++++++++++++++++++++++++++++\n drivers/net/cnxk/cnxk_ethdev.h |  71 +++++++\n drivers/net/cnxk/meson.build   |   3 +-\n 5 files changed, 546 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/cnxk/cn9k_tx.c",
    "diff": "diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h\nindex bab5540..f8344e3 100644\n--- a/drivers/net/cnxk/cn9k_ethdev.h\n+++ b/drivers/net/cnxk/cn9k_ethdev.h\n@@ -33,5 +33,6 @@ struct cn9k_eth_rxq {\n \n /* Rx and Tx routines */\n void cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev);\n+void cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev);\n \n #endif /* __CN9K_ETHDEV_H__ */\ndiff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c\nnew file mode 100644\nindex 0000000..a0b022a\n--- /dev/null\n+++ b/drivers/net/cnxk/cn9k_tx.c\n@@ -0,0 +1,53 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2021 Marvell.\n+ */\n+\n+#include \"cn9k_ethdev.h\"\n+#include \"cn9k_tx.h\"\n+\n+#define T(name, f4, f3, f2, f1, f0, sz, flags)\t\t\t\t       \\\n+\tuint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name(\t       \\\n+\t\tvoid *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts)      \\\n+\t{                                                                      \\\n+\t\tuint64_t cmd[sz];                                              \\\n+\t\t\t\t\t\t\t\t\t       \\\n+\t\t/* For TSO inner checksum is a must */                         \\\n+\t\tif (((flags) & NIX_TX_OFFLOAD_TSO_F) &&\t\t\t       \\\n+\t\t    !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))\t\t       \\\n+\t\t\treturn 0;                                              \\\n+\t\treturn cn9k_nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, flags);\\\n+\t}\n+\n+NIX_TX_FASTPATH_MODES\n+#undef T\n+\n+static inline void\n+pick_tx_func(struct rte_eth_dev *eth_dev,\n+\t     const eth_tx_burst_t tx_burst[2][2][2][2][2])\n+{\n+\tstruct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);\n+\n+\t/* [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */\n+\teth_dev->tx_pkt_burst = tx_burst\n+\t\t[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]\n+\t\t[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]\n+\t\t[!!(dev->tx_offload_flags & 
NIX_TX_OFFLOAD_VLAN_QINQ_F)]\n+\t\t[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]\n+\t\t[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];\n+}\n+\n+void\n+cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)\n+{\n+\tconst eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2] = {\n+#define T(name, f4, f3, f2, f1, f0, sz, flags)\t\t\t\t       \\\n+\t[f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_##name,\n+\n+\t\tNIX_TX_FASTPATH_MODES\n+#undef T\n+\t};\n+\n+\tpick_tx_func(eth_dev, nix_eth_tx_burst);\n+\n+\trte_mb();\n+}\ndiff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h\nindex bb6379b..7acecc6 100644\n--- a/drivers/net/cnxk/cn9k_tx.h\n+++ b/drivers/net/cnxk/cn9k_tx.h\n@@ -4,10 +4,429 @@\n #ifndef __CN9K_TX_H__\n #define __CN9K_TX_H__\n \n+#define NIX_TX_OFFLOAD_NONE\t      (0)\n+#define NIX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(0)\n+#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)\n #define NIX_TX_OFFLOAD_VLAN_QINQ_F    BIT(2)\n+#define NIX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)\n #define NIX_TX_OFFLOAD_TSO_F\t      BIT(4)\n \n+/* Flags to control xmit_prepare function.\n+ * Defining it from backwards to denote its been\n+ * not used as offload flags to pick function\n+ */\n+#define NIX_TX_MULTI_SEG_F BIT(15)\n+\n+#define NIX_TX_NEED_SEND_HDR_W1                                                \\\n+\t(NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |         \\\n+\t NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)\n+\n #define NIX_TX_NEED_EXT_HDR                                                    \\\n \t(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)\n \n+#define NIX_XMIT_FC_OR_RETURN(txq, pkts)                                       \\\n+\tdo {                                                                   \\\n+\t\t/* Cached value is low, Update the fc_cache_pkts */            \\\n+\t\tif (unlikely((txq)->fc_cache_pkts < (pkts))) {                 \\\n+\t\t\t/* Multiply with sqe_per_sqb to express in pkts */     
\\\n+\t\t\t(txq)->fc_cache_pkts =                                 \\\n+\t\t\t\t((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem)      \\\n+\t\t\t\t<< (txq)->sqes_per_sqb_log2;                   \\\n+\t\t\t/* Check it again for the room */                      \\\n+\t\t\tif (unlikely((txq)->fc_cache_pkts < (pkts)))           \\\n+\t\t\t\treturn 0;                                      \\\n+\t\t}                                                              \\\n+\t} while (0)\n+\n+/* Function to determine no of tx subdesc required in case ext\n+ * sub desc is enabled.\n+ */\n+static __rte_always_inline int\n+cn9k_nix_tx_ext_subs(const uint16_t flags)\n+{\n+\treturn (flags &\n+\t\t(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ? 1 : 0;\n+}\n+\n+static __rte_always_inline void\n+cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)\n+{\n+\tuint64_t mask, ol_flags = m->ol_flags;\n+\n+\tif (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {\n+\t\tuintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);\n+\t\tuint16_t *iplen, *oiplen, *oudplen;\n+\t\tuint16_t lso_sb, paylen;\n+\n+\t\tmask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));\n+\t\tlso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +\n+\t\t\t m->l2_len + m->l3_len + m->l4_len;\n+\n+\t\t/* Reduce payload len from base headers */\n+\t\tpaylen = m->pkt_len - lso_sb;\n+\n+\t\t/* Get iplen position assuming no tunnel hdr */\n+\t\tiplen = (uint16_t *)(mdata + m->l2_len +\n+\t\t\t\t     (2 << !!(ol_flags & PKT_TX_IPV6)));\n+\t\t/* Handle tunnel tso */\n+\t\tif ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&\n+\t\t    (ol_flags & PKT_TX_TUNNEL_MASK)) {\n+\t\t\tconst uint8_t is_udp_tun =\n+\t\t\t\t(CNXK_NIX_UDP_TUN_BITMASK >>\n+\t\t\t\t ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &\n+\t\t\t\t0x1;\n+\n+\t\t\toiplen = (uint16_t *)(mdata + m->outer_l2_len +\n+\t\t\t\t\t      (2 << !!(ol_flags &\n+\t\t\t\t\t\t       PKT_TX_OUTER_IPV6)));\n+\t\t\t*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) 
-\n+\t\t\t\t\t\t   paylen);\n+\n+\t\t\t/* Update format for UDP tunneled packet */\n+\t\t\tif (is_udp_tun) {\n+\t\t\t\toudplen = (uint16_t *)(mdata + m->outer_l2_len +\n+\t\t\t\t\t\t       m->outer_l3_len + 4);\n+\t\t\t\t*oudplen = rte_cpu_to_be_16(\n+\t\t\t\t\trte_be_to_cpu_16(*oudplen) - paylen);\n+\t\t\t}\n+\n+\t\t\t/* Update iplen position to inner ip hdr */\n+\t\t\tiplen = (uint16_t *)(mdata + lso_sb - m->l3_len -\n+\t\t\t\t\t     m->l4_len +\n+\t\t\t\t\t     (2 << !!(ol_flags & PKT_TX_IPV6)));\n+\t\t}\n+\n+\t\t*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);\n+\t}\n+}\n+\n+static __rte_always_inline void\n+cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,\n+\t\t      const uint64_t lso_tun_fmt)\n+{\n+\tstruct nix_send_ext_s *send_hdr_ext;\n+\tstruct nix_send_hdr_s *send_hdr;\n+\tuint64_t ol_flags = 0, mask;\n+\tunion nix_send_hdr_w1_u w1;\n+\tunion nix_send_sg_s *sg;\n+\n+\tsend_hdr = (struct nix_send_hdr_s *)cmd;\n+\tif (flags & NIX_TX_NEED_EXT_HDR) {\n+\t\tsend_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);\n+\t\tsg = (union nix_send_sg_s *)(cmd + 4);\n+\t\t/* Clear previous markings */\n+\t\tsend_hdr_ext->w0.lso = 0;\n+\t\tsend_hdr_ext->w1.u = 0;\n+\t} else {\n+\t\tsg = (union nix_send_sg_s *)(cmd + 2);\n+\t}\n+\n+\tif (flags & NIX_TX_NEED_SEND_HDR_W1) {\n+\t\tol_flags = m->ol_flags;\n+\t\tw1.u = 0;\n+\t}\n+\n+\tif (!(flags & NIX_TX_MULTI_SEG_F)) {\n+\t\tsend_hdr->w0.total = m->data_len;\n+\t\tsend_hdr->w0.aura =\n+\t\t\troc_npa_aura_handle_to_aura(m->pool->pool_id);\n+\t}\n+\n+\t/*\n+\t * L3type:  2 => IPV4\n+\t *          3 => IPV4 with csum\n+\t *          4 => IPV6\n+\t * L3type and L3ptr needs to be set for either\n+\t * L3 csum or L4 csum or LSO\n+\t *\n+\t */\n+\n+\tif ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&\n+\t    (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {\n+\t\tconst uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);\n+\t\tconst uint8_t ol3type =\n+\t\t\t((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) 
+\n+\t\t\t((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +\n+\t\t\t!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);\n+\n+\t\t/* Outer L3 */\n+\t\tw1.ol3type = ol3type;\n+\t\tmask = 0xffffull << ((!!ol3type) << 4);\n+\t\tw1.ol3ptr = ~mask & m->outer_l2_len;\n+\t\tw1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len);\n+\n+\t\t/* Outer L4 */\n+\t\tw1.ol4type = csum + (csum << 1);\n+\n+\t\t/* Inner L3 */\n+\t\tw1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +\n+\t\t\t     ((!!(ol_flags & PKT_TX_IPV6)) << 2);\n+\t\tw1.il3ptr = w1.ol4ptr + m->l2_len;\n+\t\tw1.il4ptr = w1.il3ptr + m->l3_len;\n+\t\t/* Increment it by 1 if it is IPV4 as 3 is with csum */\n+\t\tw1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);\n+\n+\t\t/* Inner L4 */\n+\t\tw1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;\n+\n+\t\t/* In case of no tunnel header use only\n+\t\t * shift IL3/IL4 fields a bit to use\n+\t\t * OL3/OL4 for header checksum\n+\t\t */\n+\t\tmask = !ol3type;\n+\t\tw1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) |\n+\t\t       ((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));\n+\n+\t} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {\n+\t\tconst uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);\n+\t\tconst uint8_t outer_l2_len = m->outer_l2_len;\n+\n+\t\t/* Outer L3 */\n+\t\tw1.ol3ptr = outer_l2_len;\n+\t\tw1.ol4ptr = outer_l2_len + m->outer_l3_len;\n+\t\t/* Increment it by 1 if it is IPV4 as 3 is with csum */\n+\t\tw1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +\n+\t\t\t     ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +\n+\t\t\t     !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);\n+\n+\t\t/* Outer L4 */\n+\t\tw1.ol4type = csum + (csum << 1);\n+\n+\t} else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) {\n+\t\tconst uint8_t l2_len = m->l2_len;\n+\n+\t\t/* Always use OLXPTR and OLXTYPE when only\n+\t\t * when one header is present\n+\t\t */\n+\n+\t\t/* Inner L3 */\n+\t\tw1.ol3ptr = l2_len;\n+\t\tw1.ol4ptr = l2_len + m->l3_len;\n+\t\t/* Increment it by 1 if it is IPV4 as 3 is with csum */\n+\t\tw1.ol3type 
= ((!!(ol_flags & PKT_TX_IPV4)) << 1) +\n+\t\t\t     ((!!(ol_flags & PKT_TX_IPV6)) << 2) +\n+\t\t\t     !!(ol_flags & PKT_TX_IP_CKSUM);\n+\n+\t\t/* Inner L4 */\n+\t\tw1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;\n+\t}\n+\n+\tif (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {\n+\t\tsend_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);\n+\t\t/* HW will update ptr after vlan0 update */\n+\t\tsend_hdr_ext->w1.vlan1_ins_ptr = 12;\n+\t\tsend_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;\n+\n+\t\tsend_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);\n+\t\t/* 2B before end of l2 header */\n+\t\tsend_hdr_ext->w1.vlan0_ins_ptr = 12;\n+\t\tsend_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;\n+\t}\n+\n+\tif (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {\n+\t\tuint16_t lso_sb;\n+\t\tuint64_t mask;\n+\n+\t\tmask = -(!w1.il3type);\n+\t\tlso_sb = (mask & w1.ol4ptr) + (~mask & w1.il4ptr) + m->l4_len;\n+\n+\t\tsend_hdr_ext->w0.lso_sb = lso_sb;\n+\t\tsend_hdr_ext->w0.lso = 1;\n+\t\tsend_hdr_ext->w0.lso_mps = m->tso_segsz;\n+\t\tsend_hdr_ext->w0.lso_format =\n+\t\t\tNIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);\n+\t\tw1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;\n+\n+\t\t/* Handle tunnel tso */\n+\t\tif ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&\n+\t\t    (ol_flags & PKT_TX_TUNNEL_MASK)) {\n+\t\t\tconst uint8_t is_udp_tun =\n+\t\t\t\t(CNXK_NIX_UDP_TUN_BITMASK >>\n+\t\t\t\t ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &\n+\t\t\t\t0x1;\n+\t\t\tuint8_t shift = is_udp_tun ? 32 : 0;\n+\n+\t\t\tshift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);\n+\t\t\tshift += (!!(ol_flags & PKT_TX_IPV6) << 3);\n+\n+\t\t\tw1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;\n+\t\t\tw1.ol4type = is_udp_tun ? 
NIX_SENDL4TYPE_UDP_CKSUM : 0;\n+\t\t\t/* Update format for UDP tunneled packet */\n+\t\t\tsend_hdr_ext->w0.lso_format = (lso_tun_fmt >> shift);\n+\t\t}\n+\t}\n+\n+\tif (flags & NIX_TX_NEED_SEND_HDR_W1)\n+\t\tsend_hdr->w1.u = w1.u;\n+\n+\tif (!(flags & NIX_TX_MULTI_SEG_F)) {\n+\t\tsg->seg1_size = m->data_len;\n+\t\t*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);\n+\n+\t\tif (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {\n+\t\t\t/* DF bit = 1 if refcount of current mbuf or parent mbuf\n+\t\t\t *\t\tis greater than 1\n+\t\t\t * DF bit = 0 otherwise\n+\t\t\t */\n+\t\t\tsend_hdr->w0.df = cnxk_nix_prefree_seg(m);\n+\t\t\t/* Ensuring mbuf fields which got updated in\n+\t\t\t * cnxk_nix_prefree_seg are written before LMTST.\n+\t\t\t */\n+\t\t\trte_io_wmb();\n+\t\t}\n+\t\t/* Mark mempool object as \"put\" since it is freed by NIX */\n+\t\tif (!send_hdr->w0.df)\n+\t\t\t__mempool_check_cookies(m->pool, (void **)&m, 1, 0);\n+\t}\n+}\n+\n+static __rte_always_inline void\n+cn9k_nix_xmit_one(uint64_t *cmd, void *lmt_addr, const rte_iova_t io_addr,\n+\t\t  const uint32_t flags)\n+{\n+\tuint64_t lmt_status;\n+\n+\tdo {\n+\t\troc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));\n+\t\tlmt_status = roc_lmt_submit_ldeor(io_addr);\n+\t} while (lmt_status == 0);\n+}\n+\n+static __rte_always_inline void\n+cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags)\n+{\n+\troc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));\n+}\n+\n+static __rte_always_inline uint64_t\n+cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr)\n+{\n+\treturn roc_lmt_submit_ldeor(io_addr);\n+}\n+\n+static __rte_always_inline uint64_t\n+cn9k_nix_xmit_submit_lmt_release(const rte_iova_t io_addr)\n+{\n+\treturn roc_lmt_submit_ldeorl(io_addr);\n+}\n+\n+static __rte_always_inline uint16_t\n+cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,\n+\t\t   uint64_t *cmd, const uint16_t flags)\n+{\n+\tstruct cn9k_eth_txq *txq = tx_queue;\n+\tconst rte_iova_t io_addr = 
txq->io_addr;\n+\tvoid *lmt_addr = txq->lmt_addr;\n+\tuint64_t lso_tun_fmt;\n+\tuint16_t i;\n+\n+\tNIX_XMIT_FC_OR_RETURN(txq, pkts);\n+\n+\troc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));\n+\n+\t/* Perform header writes before barrier for TSO */\n+\tif (flags & NIX_TX_OFFLOAD_TSO_F) {\n+\t\tlso_tun_fmt = txq->lso_tun_fmt;\n+\n+\t\tfor (i = 0; i < pkts; i++)\n+\t\t\tcn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);\n+\t}\n+\n+\t/* Lets commit any changes in the packet here as no further changes\n+\t * to the packet will be done unless no fast free is enabled.\n+\t */\n+\tif (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))\n+\t\trte_io_wmb();\n+\n+\tfor (i = 0; i < pkts; i++) {\n+\t\tcn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);\n+\t\tcn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);\n+\t}\n+\n+\t/* Reduce the cached count */\n+\ttxq->fc_cache_pkts -= pkts;\n+\n+\treturn pkts;\n+}\n+\n+#define L3L4CSUM_F   NIX_TX_OFFLOAD_L3_L4_CSUM_F\n+#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F\n+#define VLAN_F\t     NIX_TX_OFFLOAD_VLAN_QINQ_F\n+#define NOFF_F\t     NIX_TX_OFFLOAD_MBUF_NOFF_F\n+#define TSO_F\t     NIX_TX_OFFLOAD_TSO_F\n+\n+/* [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */\n+#define NIX_TX_FASTPATH_MODES\t\t\t\t\t\t\\\n+T(no_offload,\t\t\t\t0, 0, 0, 0, 0,\t4,\t\t\\\n+\t\tNIX_TX_OFFLOAD_NONE)\t\t\t\t\t\\\n+T(l3l4csum,\t\t\t\t0, 0, 0, 0, 1,\t4,\t\t\\\n+\t\tL3L4CSUM_F)\t\t\t\t\t\t\\\n+T(ol3ol4csum,\t\t\t\t0, 0, 0, 1, 0,\t4,\t\t\\\n+\t\tOL3OL4CSUM_F)\t\t\t\t\t\t\\\n+T(ol3ol4csum_l3l4csum,\t\t\t0, 0, 0, 1, 1,\t4,\t\t\\\n+\t\tOL3OL4CSUM_F | L3L4CSUM_F)\t\t\t\t\\\n+T(vlan,\t\t\t\t\t0, 0, 1, 0, 0,\t6,\t\t\\\n+\t\tVLAN_F)\t\t\t\t\t\t\t\\\n+T(vlan_l3l4csum,\t\t\t0, 0, 1, 0, 1,\t6,\t\t\\\n+\t\tVLAN_F | L3L4CSUM_F)\t\t\t\t\t\\\n+T(vlan_ol3ol4csum,\t\t\t0, 0, 1, 1, 0,\t6,\t\t\\\n+\t\tVLAN_F | OL3OL4CSUM_F)\t\t\t\t\t\\\n+T(vlan_ol3ol4csum_l3l4csum,\t\t0, 0, 1, 1, 1,\t6,\t\t\\\n+\t\tVLAN_F | OL3OL4CSUM_F |\tL3L4CSUM_F)\t\t\t\\\n+T(noff,\t\t\t\t\t0, 1, 0, 0, 
0,\t4,\t\t\\\n+\t\tNOFF_F)\t\t\t\t\t\t\t\\\n+T(noff_l3l4csum,\t\t\t0, 1, 0, 0, 1,\t4,\t\t\\\n+\t\tNOFF_F | L3L4CSUM_F)\t\t\t\t\t\\\n+T(noff_ol3ol4csum,\t\t\t0, 1, 0, 1, 0,\t4,\t\t\\\n+\t\tNOFF_F | OL3OL4CSUM_F)\t\t\t\t\t\\\n+T(noff_ol3ol4csum_l3l4csum,\t\t0, 1, 0, 1, 1,\t4,\t\t\\\n+\t\tNOFF_F | OL3OL4CSUM_F |\tL3L4CSUM_F)\t\t\t\\\n+T(noff_vlan,\t\t\t\t0, 1, 1, 0, 0,\t6,\t\t\\\n+\t\tNOFF_F | VLAN_F)\t\t\t\t\t\\\n+T(noff_vlan_l3l4csum,\t\t\t0, 1, 1, 0, 1,\t6,\t\t\\\n+\t\tNOFF_F | VLAN_F | L3L4CSUM_F)\t\t\t\t\\\n+T(noff_vlan_ol3ol4csum,\t\t\t0, 1, 1, 1, 0,\t6,\t\t\\\n+\t\tNOFF_F | VLAN_F | OL3OL4CSUM_F)\t\t\t\t\\\n+T(noff_vlan_ol3ol4csum_l3l4csum,\t0, 1, 1, 1, 1,\t6,\t\t\\\n+\t\tNOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\t\t\\\n+T(tso,\t\t\t\t\t1, 0, 0, 0, 0,\t6,\t\t\\\n+\t\tTSO_F)\t\t\t\t\t\t\t\\\n+T(tso_l3l4csum,\t\t\t\t1, 0, 0, 0, 1,\t6,\t\t\\\n+\t\tTSO_F | L3L4CSUM_F)\t\t\t\t\t\\\n+T(tso_ol3ol4csum,\t\t\t1, 0, 0, 1, 0,\t6,\t\t\\\n+\t\tTSO_F | OL3OL4CSUM_F)\t\t\t\t\t\\\n+T(tso_ol3ol4csum_l3l4csum,\t\t1, 0, 0, 1, 1,\t6,\t\t\\\n+\t\tTSO_F | OL3OL4CSUM_F | L3L4CSUM_F)\t\t\t\\\n+T(tso_vlan,\t\t\t\t1, 0, 1, 0, 0,\t6,\t\t\\\n+\t\tTSO_F | VLAN_F)\t\t\t\t\t\t\\\n+T(tso_vlan_l3l4csum,\t\t\t1, 0, 1, 0, 1,\t6,\t\t\\\n+\t\tTSO_F | VLAN_F | L3L4CSUM_F)\t\t\t\t\\\n+T(tso_vlan_ol3ol4csum,\t\t\t1, 0, 1, 1, 0,\t6,\t\t\\\n+\t\tTSO_F | VLAN_F | OL3OL4CSUM_F)\t\t\t\t\\\n+T(tso_vlan_ol3ol4csum_l3l4csum,\t\t1, 0, 1, 1, 1,\t6,\t\t\\\n+\t\tTSO_F | VLAN_F | OL3OL4CSUM_F |\tL3L4CSUM_F)\t\t\\\n+T(tso_noff,\t\t\t\t1, 1, 0, 0, 0,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F)\t\t\t\t\t\t\\\n+T(tso_noff_l3l4csum,\t\t\t1, 1, 0, 0, 1,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F | L3L4CSUM_F)\t\t\t\t\\\n+T(tso_noff_ol3ol4csum,\t\t\t1, 1, 0, 1, 0,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F | OL3OL4CSUM_F)\t\t\t\t\\\n+T(tso_noff_ol3ol4csum_l3l4csum,\t\t1, 1, 0, 1, 1,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F | OL3OL4CSUM_F |\tL3L4CSUM_F)\t\t\\\n+T(tso_noff_vlan,\t\t\t1, 1, 1, 0, 0,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F | 
VLAN_F)\t\t\t\t\\\n+T(tso_noff_vlan_l3l4csum,\t\t1, 1, 1, 0, 1,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)\t\t\t\\\n+T(tso_noff_vlan_ol3ol4csum,\t\t1, 1, 1, 1, 0,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)\t\t\t\\\n+T(tso_noff_vlan_ol3ol4csum_l3l4csum,\t1, 1, 1, 1, 1,\t6,\t\t\\\n+\t\tTSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\n+\n+#define T(name, f4, f3, f2, f1, f0, sz, flags)                                 \\\n+\tuint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name(           \\\n+\t\tvoid *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);\n+\n+NIX_TX_FASTPATH_MODES\n+#undef T\n+\n #endif /* __CN9K_TX_H__ */\ndiff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h\nindex 333a54c..58cc6b7 100644\n--- a/drivers/net/cnxk/cnxk_ethdev.h\n+++ b/drivers/net/cnxk/cnxk_ethdev.h\n@@ -103,6 +103,10 @@\n /* Fastpath lookup */\n #define CNXK_NIX_FASTPATH_LOOKUP_MEM \"cnxk_nix_fastpath_lookup_mem\"\n \n+#define CNXK_NIX_UDP_TUN_BITMASK                                               \\\n+\t((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \\\n+\t (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))\n+\n struct cnxk_eth_qconf {\n \tunion {\n \t\tstruct rte_eth_txconf tx;\n@@ -241,4 +245,71 @@ void *cnxk_nix_fastpath_lookup_mem_get(void);\n int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,\n \t\t\t      struct cnxk_eth_dev *dev);\n \n+/* Inlines */\n+static __rte_always_inline uint64_t\n+cnxk_pktmbuf_detach(struct rte_mbuf *m)\n+{\n+\tstruct rte_mempool *mp = m->pool;\n+\tuint32_t mbuf_size, buf_len;\n+\tstruct rte_mbuf *md;\n+\tuint16_t priv_size;\n+\tuint16_t refcount;\n+\n+\t/* Update refcount of direct mbuf */\n+\tmd = rte_mbuf_from_indirect(m);\n+\trefcount = rte_mbuf_refcnt_update(md, -1);\n+\n+\tpriv_size = rte_pktmbuf_priv_size(mp);\n+\tmbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);\n+\tbuf_len = rte_pktmbuf_data_room_size(mp);\n+\n+\tm->priv_size = priv_size;\n+\tm->buf_addr = 
(char *)m + mbuf_size;\n+\tm->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;\n+\tm->buf_len = (uint16_t)buf_len;\n+\trte_pktmbuf_reset_headroom(m);\n+\tm->data_len = 0;\n+\tm->ol_flags = 0;\n+\tm->next = NULL;\n+\tm->nb_segs = 1;\n+\n+\t/* Now indirect mbuf is safe to free */\n+\trte_pktmbuf_free(m);\n+\n+\tif (refcount == 0) {\n+\t\trte_mbuf_refcnt_set(md, 1);\n+\t\tmd->data_len = 0;\n+\t\tmd->ol_flags = 0;\n+\t\tmd->next = NULL;\n+\t\tmd->nb_segs = 1;\n+\t\treturn 0;\n+\t} else {\n+\t\treturn 1;\n+\t}\n+}\n+\n+static __rte_always_inline uint64_t\n+cnxk_nix_prefree_seg(struct rte_mbuf *m)\n+{\n+\tif (likely(rte_mbuf_refcnt_read(m) == 1)) {\n+\t\tif (!RTE_MBUF_DIRECT(m))\n+\t\t\treturn cnxk_pktmbuf_detach(m);\n+\n+\t\tm->next = NULL;\n+\t\tm->nb_segs = 1;\n+\t\treturn 0;\n+\t} else if (rte_mbuf_refcnt_update(m, -1) == 0) {\n+\t\tif (!RTE_MBUF_DIRECT(m))\n+\t\t\treturn cnxk_pktmbuf_detach(m);\n+\n+\t\trte_mbuf_refcnt_set(m, 1);\n+\t\tm->next = NULL;\n+\t\tm->nb_segs = 1;\n+\t\treturn 0;\n+\t}\n+\n+\t/* Mbuf is having refcount more than 1 so need not to be freed */\n+\treturn 1;\n+}\n+\n #endif /* __CNXK_ETHDEV_H__ */\ndiff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build\nindex 9aba7d4..6c2cd13 100644\n--- a/drivers/net/cnxk/meson.build\n+++ b/drivers/net/cnxk/meson.build\n@@ -18,7 +18,8 @@ sources = files('cnxk_ethdev.c',\n sources += files('cn9k_ethdev.c',\n \t\t 'cn9k_rx.c',\n \t\t 'cn9k_rx_mseg.c',\n-\t\t 'cn9k_rx_vec.c')\n+\t\t 'cn9k_rx_vec.c',\n+\t\t 'cn9k_tx.c')\n # CN10K\n sources += files('cn10k_ethdev.c')\n \n",
    "prefixes": [
        "v4",
        "22/62"
    ]
}