get: Show a patch.
patch: Update a patch (partial update).
put: Update a patch (full update).
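These methods describe the REST endpoint whose response is recorded below. As a rough illustration, the same record can be fetched programmatically; the sketch below is an assumption-laden example (Python with the third-party requests library, the public patches.dpdk.org instance, and the patch ID and mbox URL taken from the response that follows), not part of the recorded exchange.

import requests

# Assumed: public Patchwork instance and the third-party "requests" library;
# patch 120568 is the record shown below.
url = "https://patches.dpdk.org/api/patches/120568/"

# Ask explicitly for JSON (the page below was rendered with ?format=api).
resp = requests.get(url, headers={"Accept": "application/json"}, timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[13/15] common/idpf: add scalar data path"
print(patch["state"])  # "superseded"

# The "mbox" field points at the raw patch email, which can be applied with `git am`.
mbox = requests.get(patch["mbox"], timeout=30)
mbox.raise_for_status()
with open("0013-common-idpf-add-scalar-data-path.mbox", "wb") as fh:
    fh.write(mbox.content)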

GET /api/patches/120568/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 120568,
    "url": "http://patches.dpdk.org/api/patches/120568/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221208075309.37852-14-beilei.xing@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221208075309.37852-14-beilei.xing@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221208075309.37852-14-beilei.xing@intel.com",
    "date": "2022-12-08T07:53:07",
    "name": "[13/15] common/idpf: add scalar data path",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b0b3ef7243322738868aad206d4e39e6e0375ef0",
    "submitter": {
        "id": 410,
        "url": "http://patches.dpdk.org/api/people/410/?format=api",
        "name": "Xing, Beilei",
        "email": "beilei.xing@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221208075309.37852-14-beilei.xing@intel.com/mbox/",
    "series": [
        {
            "id": 26041,
            "url": "http://patches.dpdk.org/api/series/26041/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=26041",
            "date": "2022-12-08T07:52:54",
            "name": "net/idpf: refactor idpf pmd",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/26041/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/120568/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/120568/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A82BCA00C2;\n\tThu,  8 Dec 2022 08:54:59 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 167E442D72;\n\tThu,  8 Dec 2022 08:53:50 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n by mails.dpdk.org (Postfix) with ESMTP id F392442D63\n for <dev@dpdk.org>; Thu,  8 Dec 2022 08:53:47 +0100 (CET)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 07 Dec 2022 23:53:47 -0800",
            "from dpdk-beileix-3.sh.intel.com ([10.67.110.253])\n by fmsmga004.fm.intel.com with ESMTP; 07 Dec 2022 23:53:44 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1670486028; x=1702022028;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=IdjxhPFs/sGKkV915S9ipUYEBJjruHwNHpbaXRj/YHw=;\n b=mrJRgetD9BCl2Z0DgfvT8zvUNS7c+XTeB/XwoYCm74pTVYuxailRBC7v\n rpL0go/rOg36OK2FaJw1ImgzmE323j3RXY54etzkxeTR0mjy0MgCSm4gB\n Jf4pnRSgDKqHEn+IEJjFEegJlC/NMg6aPOl48uGjPJ0Hb4DU61gJGZz4o\n SzFBu1GtyjhxG7SUy8gEuUHjG6U4hEuy8nzWFj1hbiZAOaanJb6rPyAly\n MdTQ2nQze9N1c4cMiNPD6Xwf1eguk7xWrVYH+oDMs5qNAqvQK6rCaHpga\n uLZ7n/CfZ5CMY5NDHXqjXci9HYF9DIhgJIu92tQfFUjJuy5fJt6vU4OXW Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10554\"; a=\"318244225\"",
            "E=Sophos;i=\"5.96,227,1665471600\"; d=\"scan'208\";a=\"318244225\"",
            "E=McAfee;i=\"6500,9779,10554\"; a=\"715499376\"",
            "E=Sophos;i=\"5.96,227,1665471600\"; d=\"scan'208\";a=\"715499376\""
        ],
        "X-ExtLoop1": "1",
        "From": "beilei.xing@intel.com",
        "To": "jingjing.wu@intel.com,\n\tqi.z.zhang@intel.com",
        "Cc": "dev@dpdk.org,\n\tBeilei Xing <beilei.xing@intel.com>",
        "Subject": "[PATCH 13/15] common/idpf: add scalar data path",
        "Date": "Thu,  8 Dec 2022 07:53:07 +0000",
        "Message-Id": "<20221208075309.37852-14-beilei.xing@intel.com>",
        "X-Mailer": "git-send-email 2.26.2",
        "In-Reply-To": "<20221208075309.37852-1-beilei.xing@intel.com>",
        "References": "<20221208075309.37852-1-beilei.xing@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Beilei Xing <beilei.xing@intel.com>\n\nAdd timestamp filed to idpf_adapter structure.\nAdd scalar data path for single queue model and split queue model.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\n---\n drivers/common/idpf/idpf_common_device.h |   5 +\n drivers/common/idpf/idpf_common_logs.h   |  24 +\n drivers/common/idpf/idpf_common_rxtx.c   | 985 +++++++++++++++++++++++\n drivers/common/idpf/idpf_common_rxtx.h   |  87 ++\n drivers/common/idpf/version.map          |   6 +\n drivers/net/idpf/idpf_ethdev.c           |   2 -\n drivers/net/idpf/idpf_ethdev.h           |   4 -\n drivers/net/idpf/idpf_logs.h             |  24 -\n drivers/net/idpf/idpf_rxtx.c             | 936 +--------------------\n drivers/net/idpf/idpf_rxtx.h             | 132 ---\n drivers/net/idpf/idpf_rxtx_vec_avx512.c  |   6 +-\n 11 files changed, 1111 insertions(+), 1100 deletions(-)",
    "diff": "diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h\nindex c007c0b705..6c9a65ae3b 100644\n--- a/drivers/common/idpf/idpf_common_device.h\n+++ b/drivers/common/idpf/idpf_common_device.h\n@@ -23,6 +23,8 @@\n #define IDPF_TX_COMPLQ_PER_GRP\t1\n #define IDPF_TXQ_PER_GRP\t1\n \n+#define IDPF_MIN_FRAME_SIZE\t14\n+\n #define IDPF_MAX_PKT_TYPE\t1024\n \n #define IDPF_DFLT_INTERVAL\t16\n@@ -43,6 +45,9 @@ struct idpf_adapter {\n \n \tuint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */\n \tuint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */\n+\n+\t/* For timestamp */\n+\tuint64_t time_hw;\n };\n \n struct idpf_chunks_info {\ndiff --git a/drivers/common/idpf/idpf_common_logs.h b/drivers/common/idpf/idpf_common_logs.h\nindex fe36562769..63ad2195be 100644\n--- a/drivers/common/idpf/idpf_common_logs.h\n+++ b/drivers/common/idpf/idpf_common_logs.h\n@@ -20,4 +20,28 @@ extern int idpf_common_logtype;\n #define DRV_LOG(level, fmt, args...)\t\t\\\n \tDRV_LOG_RAW(level, fmt \"\\n\", ## args)\n \n+#ifdef RTE_LIBRTE_IDPF_DEBUG_RX\n+#define RX_LOG(level, ...) \\\n+\tRTE_LOG(level, \\\n+\t\tPMD, \\\n+\t\tRTE_FMT(\"%s(): \" \\\n+\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n+\t\t\t__func__, \\\n+\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n+#else\n+#define RX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_IDPF_DEBUG_TX\n+#define TX_LOG(level, ...) \\\n+\tRTE_LOG(level, \\\n+\t\tPMD, \\\n+\t\tRTE_FMT(\"%s(): \" \\\n+\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n+\t\t\t__func__, \\\n+\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n+#else\n+#define TX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n #endif /* _IDPF_COMMON_LOGS_H_ */\ndiff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c\nindex 440acc55a6..3030f89bf1 100644\n--- a/drivers/common/idpf/idpf_common_rxtx.c\n+++ b/drivers/common/idpf/idpf_common_rxtx.c\n@@ -3,8 +3,13 @@\n  */\n \n #include <rte_mbuf_dyn.h>\n+#include <rte_errno.h>\n+\n #include \"idpf_common_rxtx.h\"\n \n+int idpf_timestamp_dynfield_offset = -1;\n+uint64_t idpf_timestamp_dynflag;\n+\n int\n check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n {\n@@ -337,6 +342,23 @@ idpf_tx_queue_release(void *txq)\n \trte_free(q);\n }\n \n+int\n+idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)\n+{\n+\tint err;\n+\tif ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {\n+\t\t/* Register mbuf field and flag for Rx timestamp */\n+\t\terr = rte_mbuf_dyn_rx_timestamp_register(&idpf_timestamp_dynfield_offset,\n+\t\t\t\t\t\t\t &idpf_timestamp_dynflag);\n+\t\tif (err != 0) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Cannot register mbuf field/flag for timestamp\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n int\n idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)\n {\n@@ -412,3 +434,966 @@ idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)\n \n \treturn 0;\n }\n+\n+#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000\n+/* Helper function to convert a 32b nanoseconds timestamp to 64b. 
*/\n+static inline uint64_t\n+idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,\n+\t\t\t    uint32_t in_timestamp)\n+{\n+#ifdef RTE_ARCH_X86_64\n+\tstruct idpf_hw *hw = &ad->hw;\n+\tconst uint64_t mask = 0xFFFFFFFF;\n+\tuint32_t hi, lo, lo2, delta;\n+\tuint64_t ns;\n+\n+\tif (flag != 0) {\n+\t\tIDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);\n+\t\tIDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |\n+\t\t\t       PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);\n+\t\tlo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);\n+\t\thi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);\n+\t\t/*\n+\t\t * On typical system, the delta between lo and lo2 is ~1000ns,\n+\t\t * so 10000 seems a large-enough but not overly-big guard band.\n+\t\t */\n+\t\tif (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))\n+\t\t\tlo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);\n+\t\telse\n+\t\t\tlo2 = lo;\n+\n+\t\tif (lo2 < lo) {\n+\t\t\tlo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);\n+\t\t\thi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);\n+\t\t}\n+\n+\t\tad->time_hw = ((uint64_t)hi << 32) | lo;\n+\t}\n+\n+\tdelta = (in_timestamp - (uint32_t)(ad->time_hw & mask));\n+\tif (delta > (mask / 2)) {\n+\t\tdelta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);\n+\t\tns = ad->time_hw - delta;\n+\t} else {\n+\t\tns = ad->time_hw + delta;\n+\t}\n+\n+\treturn ns;\n+#else /* !RTE_ARCH_X86_64 */\n+\tRTE_SET_USED(ad);\n+\tRTE_SET_USED(flag);\n+\tRTE_SET_USED(in_timestamp);\n+\treturn 0;\n+#endif /* RTE_ARCH_X86_64 */\n+}\n+\n+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S\t\t\t\t\\\n+\t(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |     \\\n+\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |     \\\n+\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |    \\\n+\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))\n+\n+static inline uint64_t\n+idpf_splitq_rx_csum_offload(uint8_t err)\n+{\n+\tuint64_t flags = 0;\n+\n+\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))\n+\t\treturn flags;\n+\n+\tif (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {\n+\t\tflags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |\n+\t\t\t  RTE_MBUF_F_RX_L4_CKSUM_GOOD);\n+\t\treturn flags;\n+\t}\n+\n+\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;\n+\n+\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;\n+\n+\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;\n+\n+\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;\n+\n+\treturn flags;\n+}\n+\n+#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0\n+#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16\n+#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24\n+\n+static inline uint64_t\n+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,\n+\t\t\t   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)\n+{\n+\tuint8_t status_err0_qw0;\n+\tuint64_t flags = 0;\n+\n+\tstatus_err0_qw0 = rx_desc->status_err0_qw0;\n+\n+\tif ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {\n+\t\tflags |= 
RTE_MBUF_F_RX_RSS_HASH;\n+\t\tmb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<\n+\t\t\t\tIDPF_RX_FLEX_DESC_ADV_HASH1_S) |\n+\t\t\t((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<\n+\t\t\t IDPF_RX_FLEX_DESC_ADV_HASH2_S) |\n+\t\t\t((uint32_t)(rx_desc->hash3) <<\n+\t\t\t IDPF_RX_FLEX_DESC_ADV_HASH3_S);\n+\t}\n+\n+\treturn flags;\n+}\n+\n+static void\n+idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)\n+{\n+\tvolatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;\n+\tvolatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;\n+\tuint16_t nb_refill = rx_bufq->rx_free_thresh;\n+\tuint16_t nb_desc = rx_bufq->nb_rx_desc;\n+\tuint16_t next_avail = rx_bufq->rx_tail;\n+\tstruct rte_mbuf *nmb[rx_bufq->rx_free_thresh];\n+\tuint64_t dma_addr;\n+\tuint16_t delta;\n+\tint i;\n+\n+\tif (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)\n+\t\treturn;\n+\n+\trx_buf_ring = rx_bufq->rx_ring;\n+\tdelta = nb_desc - next_avail;\n+\tif (unlikely(delta < nb_refill)) {\n+\t\tif (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {\n+\t\t\tfor (i = 0; i < delta; i++) {\n+\t\t\t\trx_buf_desc = &rx_buf_ring[next_avail + i];\n+\t\t\t\trx_bufq->sw_ring[next_avail + i] = nmb[i];\n+\t\t\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));\n+\t\t\t\trx_buf_desc->hdr_addr = 0;\n+\t\t\t\trx_buf_desc->pkt_addr = dma_addr;\n+\t\t\t}\n+\t\t\tnb_refill -= delta;\n+\t\t\tnext_avail = 0;\n+\t\t\trx_bufq->nb_rx_hold -= delta;\n+\t\t} else {\n+\t\t\trx_bufq->rx_stats.mbuf_alloc_failed += nb_desc - next_avail;\n+\t\t\tRX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u queue_id=%u\",\n+\t\t\t       rx_bufq->port_id, rx_bufq->queue_id);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+\n+\tif (nb_desc - next_avail >= nb_refill) {\n+\t\tif (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {\n+\t\t\tfor (i = 0; i < nb_refill; i++) {\n+\t\t\t\trx_buf_desc = &rx_buf_ring[next_avail + i];\n+\t\t\t\trx_bufq->sw_ring[next_avail + i] = nmb[i];\n+\t\t\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));\n+\t\t\t\trx_buf_desc->hdr_addr = 0;\n+\t\t\t\trx_buf_desc->pkt_addr = dma_addr;\n+\t\t\t}\n+\t\t\tnext_avail += nb_refill;\n+\t\t\trx_bufq->nb_rx_hold -= nb_refill;\n+\t\t} else {\n+\t\t\trx_bufq->rx_stats.mbuf_alloc_failed += nb_desc - next_avail;\n+\t\t\tRX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u queue_id=%u\",\n+\t\t\t       rx_bufq->port_id, rx_bufq->queue_id);\n+\t\t}\n+\t}\n+\n+\tIDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);\n+\n+\trx_bufq->rx_tail = next_avail;\n+}\n+\n+uint16_t\n+idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t      uint16_t nb_pkts)\n+{\n+\tvolatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;\n+\tvolatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;\n+\tuint16_t pktlen_gen_bufq_id;\n+\tstruct idpf_rx_queue *rxq;\n+\tconst uint32_t *ptype_tbl;\n+\tuint8_t status_err0_qw1;\n+\tstruct idpf_adapter *ad;\n+\tstruct rte_mbuf *rxm;\n+\tuint16_t rx_id_bufq1;\n+\tuint16_t rx_id_bufq2;\n+\tuint64_t pkt_flags;\n+\tuint16_t pkt_len;\n+\tuint16_t bufq_id;\n+\tuint16_t gen_id;\n+\tuint16_t rx_id;\n+\tuint16_t nb_rx;\n+\tuint64_t ts_ns;\n+\n+\tnb_rx = 0;\n+\trxq = rx_queue;\n+\tad = rxq->adapter;\n+\n+\tif (unlikely(rxq == NULL) || unlikely(!rxq->q_started))\n+\t\treturn nb_rx;\n+\n+\trx_id = rxq->rx_tail;\n+\trx_id_bufq1 = rxq->bufq1->rx_next_avail;\n+\trx_id_bufq2 = rxq->bufq2->rx_next_avail;\n+\trx_desc_ring = rxq->rx_ring;\n+\tptype_tbl = rxq->adapter->ptype_tbl;\n+\n+\tif ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 
0)\n+\t\trxq->hw_register_set = 1;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\trx_desc = &rx_desc_ring[rx_id];\n+\n+\t\tpktlen_gen_bufq_id =\n+\t\t\trte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);\n+\t\tgen_id = (pktlen_gen_bufq_id &\n+\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>\n+\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;\n+\t\tif (gen_id != rxq->expected_gen_id)\n+\t\t\tbreak;\n+\n+\t\tpkt_len = (pktlen_gen_bufq_id &\n+\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>\n+\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;\n+\t\tif (pkt_len == 0)\n+\t\t\tRX_LOG(ERR, \"Packet length is 0\");\n+\n+\t\trx_id++;\n+\t\tif (unlikely(rx_id == rxq->nb_rx_desc)) {\n+\t\t\trx_id = 0;\n+\t\t\trxq->expected_gen_id ^= 1;\n+\t\t}\n+\n+\t\tbufq_id = (pktlen_gen_bufq_id &\n+\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>\n+\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;\n+\t\tif (bufq_id == 0) {\n+\t\t\trxm = rxq->bufq1->sw_ring[rx_id_bufq1];\n+\t\t\trx_id_bufq1++;\n+\t\t\tif (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))\n+\t\t\t\trx_id_bufq1 = 0;\n+\t\t\trxq->bufq1->nb_rx_hold++;\n+\t\t} else {\n+\t\t\trxm = rxq->bufq2->sw_ring[rx_id_bufq2];\n+\t\t\trx_id_bufq2++;\n+\t\t\tif (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))\n+\t\t\t\trx_id_bufq2 = 0;\n+\t\t\trxq->bufq2->nb_rx_hold++;\n+\t\t}\n+\n+\t\trxm->pkt_len = pkt_len;\n+\t\trxm->data_len = pkt_len;\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trxm->next = NULL;\n+\t\trxm->nb_segs = 1;\n+\t\trxm->port = rxq->port_id;\n+\t\trxm->ol_flags = 0;\n+\t\trxm->packet_type =\n+\t\t\tptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &\n+\t\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>\n+\t\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];\n+\n+\t\tstatus_err0_qw1 = rx_desc->status_err0_qw1;\n+\t\tpkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);\n+\t\tpkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);\n+\t\tif (idpf_timestamp_dynflag > 0 &&\n+\t\t    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP)) {\n+\t\t\t/* timestamp */\n+\t\t\tts_ns = idpf_tstamp_convert_32b_64b(ad,\n+\t\t\t\t\t\t\t    rxq->hw_register_set,\n+\t\t\t\t\t\t\t    rte_le_to_cpu_32(rx_desc->ts_high));\n+\t\t\trxq->hw_register_set = 0;\n+\t\t\t*RTE_MBUF_DYNFIELD(rxm,\n+\t\t\t\t\t   idpf_timestamp_dynfield_offset,\n+\t\t\t\t\t   rte_mbuf_timestamp_t *) = ts_ns;\n+\t\t\trxm->ol_flags |= idpf_timestamp_dynflag;\n+\t\t}\n+\n+\t\trxm->ol_flags |= pkt_flags;\n+\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\n+\tif (nb_rx > 0) {\n+\t\trxq->rx_tail = rx_id;\n+\t\tif (rx_id_bufq1 != rxq->bufq1->rx_next_avail)\n+\t\t\trxq->bufq1->rx_next_avail = rx_id_bufq1;\n+\t\tif (rx_id_bufq2 != rxq->bufq2->rx_next_avail)\n+\t\t\trxq->bufq2->rx_next_avail = rx_id_bufq2;\n+\n+\t\tidpf_split_rx_bufq_refill(rxq->bufq1);\n+\t\tidpf_split_rx_bufq_refill(rxq->bufq2);\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n+static inline void\n+idpf_split_tx_free(struct idpf_tx_queue *cq)\n+{\n+\tvolatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;\n+\tvolatile struct idpf_splitq_tx_compl_desc *txd;\n+\tuint16_t next = cq->tx_tail;\n+\tstruct idpf_tx_entry *txe;\n+\tstruct idpf_tx_queue *txq;\n+\tuint16_t gen, qid, q_head;\n+\tuint16_t nb_desc_clean;\n+\tuint8_t ctype;\n+\n+\ttxd = &compl_ring[next];\n+\tgen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n+\t       IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;\n+\tif (gen != cq->expected_gen_id)\n+\t\treturn;\n+\n+\tctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n+\t\t IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;\n+\tqid = 
(rte_le_to_cpu_16(txd->qid_comptype_gen) &\n+\t       IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;\n+\tq_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);\n+\ttxq = cq->txqs[qid - cq->tx_start_qid];\n+\n+\tswitch (ctype) {\n+\tcase IDPF_TXD_COMPLT_RE:\n+\t\t/* clean to q_head which indicates be fetched txq desc id + 1.\n+\t\t * TODO: need to refine and remove the if condition.\n+\t\t */\n+\t\tif (unlikely(q_head % 32)) {\n+\t\t\tTX_LOG(ERR, \"unexpected desc (head = %u) completion.\",\n+\t\t\t       q_head);\n+\t\t\treturn;\n+\t\t}\n+\t\tif (txq->last_desc_cleaned > q_head)\n+\t\t\tnb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +\n+\t\t\t\tq_head;\n+\t\telse\n+\t\t\tnb_desc_clean = q_head - txq->last_desc_cleaned;\n+\t\ttxq->nb_free += nb_desc_clean;\n+\t\ttxq->last_desc_cleaned = q_head;\n+\t\tbreak;\n+\tcase IDPF_TXD_COMPLT_RS:\n+\t\t/* q_head indicates sw_id when ctype is 2 */\n+\t\ttxe = &txq->sw_ring[q_head];\n+\t\tif (txe->mbuf != NULL) {\n+\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\ttxe->mbuf = NULL;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tTX_LOG(ERR, \"unknown completion type.\");\n+\t\treturn;\n+\t}\n+\n+\tif (++next == cq->nb_tx_desc) {\n+\t\tnext = 0;\n+\t\tcq->expected_gen_id ^= 1;\n+\t}\n+\n+\tcq->tx_tail = next;\n+}\n+\n+/* Check if the context descriptor is needed for TX offloading */\n+static inline uint16_t\n+idpf_calc_context_desc(uint64_t flags)\n+{\n+\tif ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n+\t\treturn 1;\n+\n+\treturn 0;\n+}\n+\n+/* set TSO context descriptor\n+ */\n+static inline void\n+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,\n+\t\t\tunion idpf_tx_offload tx_offload,\n+\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_desc)\n+{\n+\tuint16_t cmd_dtype;\n+\tuint32_t tso_len;\n+\tuint8_t hdr_len;\n+\n+\tif (tx_offload.l4_len == 0) {\n+\t\tTX_LOG(DEBUG, \"L4 length set to 0\");\n+\t\treturn;\n+\t}\n+\n+\thdr_len = tx_offload.l2_len +\n+\t\ttx_offload.l3_len +\n+\t\ttx_offload.l4_len;\n+\tcmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |\n+\t\tIDPF_TX_FLEX_CTX_DESC_CMD_TSO;\n+\ttso_len = mbuf->pkt_len - hdr_len;\n+\n+\tctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);\n+\tctx_desc->tso.qw0.hdr_len = hdr_len;\n+\tctx_desc->tso.qw0.mss_rt =\n+\t\trte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &\n+\t\t\t\t IDPF_TXD_FLEX_CTX_MSS_RT_M);\n+\tctx_desc->tso.qw0.flex_tlen =\n+\t\trte_cpu_to_le_32(tso_len &\n+\t\t\t\t IDPF_TXD_FLEX_CTX_MSS_RT_M);\n+}\n+\n+uint16_t\n+idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t      uint16_t nb_pkts)\n+{\n+\tstruct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;\n+\tvolatile struct idpf_flex_tx_sched_desc *txr;\n+\tvolatile struct idpf_flex_tx_sched_desc *txd;\n+\tstruct idpf_tx_entry *sw_ring;\n+\tunion idpf_tx_offload tx_offload = {0};\n+\tstruct idpf_tx_entry *txe, *txn;\n+\tuint16_t nb_used, tx_id, sw_id;\n+\tstruct rte_mbuf *tx_pkt;\n+\tuint16_t nb_to_clean;\n+\tuint16_t nb_tx = 0;\n+\tuint64_t ol_flags;\n+\tuint16_t nb_ctx;\n+\n+\tif (unlikely(txq == NULL) || unlikely(!txq->q_started))\n+\t\treturn nb_tx;\n+\n+\ttxr = txq->desc_ring;\n+\tsw_ring = txq->sw_ring;\n+\ttx_id = txq->tx_tail;\n+\tsw_id = txq->sw_tail;\n+\ttxe = &sw_ring[sw_id];\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\ttx_pkt = tx_pkts[nb_tx];\n+\n+\t\tif (txq->nb_free <= txq->free_thresh) {\n+\t\t\t/* TODO: Need to refine\n+\t\t\t * 1. free and clean: Better to decide a clean destination instead of\n+\t\t\t * loop times. 
And don't free mbuf when RS got immediately, free when\n+\t\t\t * transmit or according to the clean destination.\n+\t\t\t * Now, just ignore the RE write back, free mbuf when get RS\n+\t\t\t * 2. out-of-order rewrite back haven't be supported, SW head and HW head\n+\t\t\t * need to be separated.\n+\t\t\t **/\n+\t\t\tnb_to_clean = 2 * txq->rs_thresh;\n+\t\t\twhile (nb_to_clean--)\n+\t\t\t\tidpf_split_tx_free(txq->complq);\n+\t\t}\n+\n+\t\tif (txq->nb_free < tx_pkt->nb_segs)\n+\t\t\tbreak;\n+\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t/* Calculate the number of context descriptors needed. */\n+\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n+\t\tnb_used = tx_pkt->nb_segs + nb_ctx;\n+\n+\t\t/* context descriptor */\n+\t\tif (nb_ctx != 0) {\n+\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_desc =\n+\t\t\t\t(volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];\n+\n+\t\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n+\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n+\t\t\t\t\t\t\tctx_desc);\n+\n+\t\t\ttx_id++;\n+\t\t\tif (tx_id == txq->nb_tx_desc)\n+\t\t\t\ttx_id = 0;\n+\t\t}\n+\n+\t\tdo {\n+\t\t\ttxd = &txr[tx_id];\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\ttxe->mbuf = tx_pkt;\n+\n+\t\t\t/* Setup TX descriptor */\n+\t\t\ttxd->buf_addr =\n+\t\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));\n+\t\t\ttxd->qw1.cmd_dtype =\n+\t\t\t\trte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE);\n+\t\t\ttxd->qw1.rxr_bufsize = tx_pkt->data_len;\n+\t\t\ttxd->qw1.compl_tag = sw_id;\n+\t\t\ttx_id++;\n+\t\t\tif (tx_id == txq->nb_tx_desc)\n+\t\t\t\ttx_id = 0;\n+\t\t\tsw_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t\ttx_pkt = tx_pkt->next;\n+\t\t} while (tx_pkt);\n+\n+\t\t/* fill the last descriptor with End of Packet (EOP) bit */\n+\t\ttxd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;\n+\n+\t\tif (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)\n+\t\t\ttxd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;\n+\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n+\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n+\n+\t\tif (txq->nb_used >= 32) {\n+\t\t\ttxd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;\n+\t\t\t/* Update txq RE bit counters */\n+\t\t\ttxq->nb_used = 0;\n+\t\t}\n+\t}\n+\n+\t/* update the tail pointer if any packets were processed */\n+\tif (likely(nb_tx > 0)) {\n+\t\tIDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);\n+\t\ttxq->tx_tail = tx_id;\n+\t\ttxq->sw_tail = sw_id;\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n+#define IDPF_RX_FLEX_DESC_STATUS0_XSUM_S\t\t\t\t\\\n+\t(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |\t\t\\\n+\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |\t\t\\\n+\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |\t\\\n+\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))\n+\n+/* Translate the rx descriptor status and error fields to pkt flags */\n+static inline uint64_t\n+idpf_rxd_to_pkt_flags(uint16_t status_error)\n+{\n+\tuint64_t flags = 0;\n+\n+\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S)) == 0))\n+\t\treturn flags;\n+\n+\tif (likely((status_error & IDPF_RX_FLEX_DESC_STATUS0_XSUM_S) == 0)) {\n+\t\tflags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |\n+\t\t\t  RTE_MBUF_F_RX_L4_CKSUM_GOOD);\n+\t\treturn flags;\n+\t}\n+\n+\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;\n+\telse\n+\t\tflags 
|= RTE_MBUF_F_RX_IP_CKSUM_GOOD;\n+\n+\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;\n+\n+\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;\n+\n+\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)) != 0))\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;\n+\n+\treturn flags;\n+}\n+\n+static inline void\n+idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,\n+\t\t    uint16_t rx_id)\n+{\n+\tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n+\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tRX_LOG(DEBUG,\n+\t\t       \"port_id=%u queue_id=%u rx_tail=%u nb_hold=%u\",\n+\t\t       rxq->port_id, rxq->queue_id, rx_id, nb_hold);\n+\t\trx_id = (uint16_t)((rx_id == 0) ?\n+\t\t\t\t   (rxq->nb_rx_desc - 1) : (rx_id - 1));\n+\t\tIDPF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+}\n+\n+uint16_t\n+idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t       uint16_t nb_pkts)\n+{\n+\tvolatile union virtchnl2_rx_desc *rx_ring;\n+\tvolatile union virtchnl2_rx_desc *rxdp;\n+\tunion virtchnl2_rx_desc rxd;\n+\tstruct idpf_rx_queue *rxq;\n+\tconst uint32_t *ptype_tbl;\n+\tuint16_t rx_id, nb_hold;\n+\tstruct idpf_adapter *ad;\n+\tuint16_t rx_packet_len;\n+\tstruct rte_mbuf *rxm;\n+\tstruct rte_mbuf *nmb;\n+\tuint16_t rx_status0;\n+\tuint64_t pkt_flags;\n+\tuint64_t dma_addr;\n+\tuint64_t ts_ns;\n+\tuint16_t nb_rx;\n+\n+\tnb_rx = 0;\n+\tnb_hold = 0;\n+\trxq = rx_queue;\n+\n+\tad = rxq->adapter;\n+\n+\tif (unlikely(rxq == NULL) || unlikely(!rxq->q_started))\n+\t\treturn nb_rx;\n+\n+\trx_id = rxq->rx_tail;\n+\trx_ring = rxq->rx_ring;\n+\tptype_tbl = rxq->adapter->ptype_tbl;\n+\n+\tif ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)\n+\t\trxq->hw_register_set = 1;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\trxdp = &rx_ring[rx_id];\n+\t\trx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);\n+\n+\t\t/* Check the DD bit first */\n+\t\tif ((rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S)) == 0)\n+\t\t\tbreak;\n+\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(nmb == NULL)) {\n+\t\t\trxq->rx_stats.mbuf_alloc_failed++;\n+\t\t\tRX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u \"\n+\t\t\t       \"queue_id=%u\", rxq->port_id, rxq->queue_id);\n+\t\t\tbreak;\n+\t\t}\n+\t\trxd = *rxdp; /* copy descriptor in ring to temp variable*/\n+\n+\t\tnb_hold++;\n+\t\trxm = rxq->sw_ring[rx_id];\n+\t\trxq->sw_ring[rx_id] = nmb;\n+\t\trx_id++;\n+\t\tif (unlikely(rx_id == rxq->nb_rx_desc))\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf */\n+\t\trte_prefetch0(rxq->sw_ring[rx_id]);\n+\n+\t\t/* When next RX descriptor is on a cache line boundary,\n+\t\t * prefetch the next 4 RX descriptors and next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_prefetch0(&rx_ring[rx_id]);\n+\t\t\trte_prefetch0(rxq->sw_ring[rx_id]);\n+\t\t}\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr = dma_addr;\n+\n+\t\trx_packet_len = (rte_cpu_to_le_16(rxd.flex_nic_wb.pkt_len) &\n+\t\t\t\t VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M);\n+\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, 
RTE_PKTMBUF_HEADROOM));\n+\t\trxm->nb_segs = 1;\n+\t\trxm->next = NULL;\n+\t\trxm->pkt_len = rx_packet_len;\n+\t\trxm->data_len = rx_packet_len;\n+\t\trxm->port = rxq->port_id;\n+\t\trxm->ol_flags = 0;\n+\t\tpkt_flags = idpf_rxd_to_pkt_flags(rx_status0);\n+\t\trxm->packet_type =\n+\t\t\tptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) &\n+\t\t\t\t\t    VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];\n+\n+\t\trxm->ol_flags |= pkt_flags;\n+\n+\t\tif (idpf_timestamp_dynflag > 0 &&\n+\t\t    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {\n+\t\t\t/* timestamp */\n+\t\t\tts_ns = idpf_tstamp_convert_32b_64b(ad,\n+\t\t\t\t\t    rxq->hw_register_set,\n+\t\t\t\t\t    rte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));\n+\t\t\trxq->hw_register_set = 0;\n+\t\t\t*RTE_MBUF_DYNFIELD(rxm,\n+\t\t\t\t\t   idpf_timestamp_dynfield_offset,\n+\t\t\t\t\t   rte_mbuf_timestamp_t *) = ts_ns;\n+\t\t\trxm->ol_flags |= idpf_timestamp_dynflag;\n+\t\t}\n+\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\n+\tidpf_update_rx_tail(rxq, nb_hold, rx_id);\n+\n+\treturn nb_rx;\n+}\n+\n+static inline int\n+idpf_xmit_cleanup(struct idpf_tx_queue *txq)\n+{\n+\tuint16_t last_desc_cleaned = txq->last_desc_cleaned;\n+\tstruct idpf_tx_entry *sw_ring = txq->sw_ring;\n+\tuint16_t nb_tx_desc = txq->nb_tx_desc;\n+\tuint16_t desc_to_clean_to;\n+\tuint16_t nb_tx_to_clean;\n+\tuint16_t i;\n+\n+\tvolatile struct idpf_flex_tx_desc *txd = txq->tx_ring;\n+\n+\tdesc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);\n+\tif (desc_to_clean_to >= nb_tx_desc)\n+\t\tdesc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);\n+\n+\tdesc_to_clean_to = sw_ring[desc_to_clean_to].last_id;\n+\t/* In the writeback Tx desccriptor, the only significant fields are the 4-bit DTYPE */\n+\tif ((txd[desc_to_clean_to].qw1.cmd_dtype &\n+\t     rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=\n+\t    rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {\n+\t\tTX_LOG(DEBUG, \"TX descriptor %4u is not done \"\n+\t\t       \"(port=%d queue=%d)\", desc_to_clean_to,\n+\t\t       txq->port_id, txq->queue_id);\n+\t\treturn -1;\n+\t}\n+\n+\tif (last_desc_cleaned > desc_to_clean_to)\n+\t\tnb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +\n+\t\t\t\t\t    desc_to_clean_to);\n+\telse\n+\t\tnb_tx_to_clean = (uint16_t)(desc_to_clean_to -\n+\t\t\t\t\t    last_desc_cleaned);\n+\n+\ttxd[desc_to_clean_to].qw1.cmd_dtype = 0;\n+\ttxd[desc_to_clean_to].qw1.buf_size = 0;\n+\tfor (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)\n+\t\ttxd[desc_to_clean_to].qw1.flex.raw[i] = 0;\n+\n+\ttxq->last_desc_cleaned = desc_to_clean_to;\n+\ttxq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);\n+\n+\treturn 0;\n+}\n+\n+/* TX function */\n+uint16_t\n+idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t       uint16_t nb_pkts)\n+{\n+\tvolatile struct idpf_flex_tx_desc *txd;\n+\tvolatile struct idpf_flex_tx_desc *txr;\n+\tunion idpf_tx_offload tx_offload = {0};\n+\tstruct idpf_tx_entry *txe, *txn;\n+\tstruct idpf_tx_entry *sw_ring;\n+\tstruct idpf_tx_queue *txq;\n+\tstruct rte_mbuf *tx_pkt;\n+\tstruct rte_mbuf *m_seg;\n+\tuint64_t buf_dma_addr;\n+\tuint64_t ol_flags;\n+\tuint16_t tx_last;\n+\tuint16_t nb_used;\n+\tuint16_t nb_ctx;\n+\tuint16_t td_cmd;\n+\tuint16_t tx_id;\n+\tuint16_t nb_tx;\n+\tuint16_t slen;\n+\n+\tnb_tx = 0;\n+\ttxq = tx_queue;\n+\n+\tif (unlikely(txq == NULL) || unlikely(!txq->q_started))\n+\t\treturn nb_tx;\n+\n+\tsw_ring = txq->sw_ring;\n+\ttxr = txq->tx_ring;\n+\ttx_id = txq->tx_tail;\n+\ttxe = 
&sw_ring[tx_id];\n+\n+\t/* Check if the descriptor ring needs to be cleaned. */\n+\tif (txq->nb_free < txq->free_thresh)\n+\t\t(void)idpf_xmit_cleanup(txq);\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\ttd_cmd = 0;\n+\n+\t\ttx_pkt = *tx_pkts++;\n+\t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n+\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t/* Calculate the number of context descriptors needed. */\n+\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n+\n+\t\t/* The number of descriptors that must be allocated for\n+\t\t * a packet equals to the number of the segments of that\n+\t\t * packet plus 1 context descriptor if needed.\n+\t\t */\n+\t\tnb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);\n+\t\ttx_last = (uint16_t)(tx_id + nb_used - 1);\n+\n+\t\t/* Circular ring */\n+\t\tif (tx_last >= txq->nb_tx_desc)\n+\t\t\ttx_last = (uint16_t)(tx_last - txq->nb_tx_desc);\n+\n+\t\tTX_LOG(DEBUG, \"port_id=%u queue_id=%u\"\n+\t\t       \" tx_first=%u tx_last=%u\",\n+\t\t       txq->port_id, txq->queue_id, tx_id, tx_last);\n+\n+\t\tif (nb_used > txq->nb_free) {\n+\t\t\tif (idpf_xmit_cleanup(txq) != 0) {\n+\t\t\t\tif (nb_tx == 0)\n+\t\t\t\t\treturn 0;\n+\t\t\t\tgoto end_of_tx;\n+\t\t\t}\n+\t\t\tif (unlikely(nb_used > txq->rs_thresh)) {\n+\t\t\t\twhile (nb_used > txq->nb_free) {\n+\t\t\t\t\tif (idpf_xmit_cleanup(txq) != 0) {\n+\t\t\t\t\t\tif (nb_tx == 0)\n+\t\t\t\t\t\t\treturn 0;\n+\t\t\t\t\t\tgoto end_of_tx;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (nb_ctx != 0) {\n+\t\t\t/* Setup TX context descriptor if required */\n+\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_txd =\n+\t\t\t\t(volatile union idpf_flex_tx_ctx_desc *)\n+\t\t\t\t&txr[tx_id];\n+\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\t\t\tif (txe->mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\t\ttxe->mbuf = NULL;\n+\t\t\t}\n+\n+\t\t\t/* TSO enabled */\n+\t\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n+\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n+\t\t\t\t\t\t\tctx_txd);\n+\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t}\n+\n+\t\tm_seg = tx_pkt;\n+\t\tdo {\n+\t\t\ttxd = &txr[tx_id];\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\n+\t\t\tif (txe->mbuf != NULL)\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\ttxe->mbuf = m_seg;\n+\n+\t\t\t/* Setup TX Descriptor */\n+\t\t\tslen = m_seg->data_len;\n+\t\t\tbuf_dma_addr = rte_mbuf_data_iova(m_seg);\n+\t\t\ttxd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);\n+\t\t\ttxd->qw1.buf_size = slen;\n+\t\t\ttxd->qw1.cmd_dtype = rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_DATA <<\n+\t\t\t\t\t\t\t      IDPF_FLEX_TXD_QW1_DTYPE_S);\n+\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t\tm_seg = m_seg->next;\n+\t\t} while (m_seg);\n+\n+\t\t/* The last packet data descriptor needs End Of Packet (EOP) */\n+\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_EOP;\n+\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n+\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n+\n+\t\tif (txq->nb_used >= txq->rs_thresh) {\n+\t\t\tTX_LOG(DEBUG, \"Setting RS bit on TXD id=\"\n+\t\t\t       \"%4u (port=%d queue=%d)\",\n+\t\t\t       tx_last, txq->port_id, txq->queue_id);\n+\n+\t\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_RS;\n+\n+\t\t\t/* Update txq RS bit counters */\n+\t\t\ttxq->nb_used = 0;\n+\t\t}\n+\n+\t\tif (ol_flags & 
IDPF_TX_CKSUM_OFFLOAD_MASK)\n+\t\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;\n+\n+\t\ttxd->qw1.cmd_dtype |= rte_cpu_to_le_16(td_cmd << IDPF_FLEX_TXD_QW1_CMD_S);\n+\t}\n+\n+end_of_tx:\n+\trte_wmb();\n+\n+\tTX_LOG(DEBUG, \"port_id=%u queue_id=%u tx_tail=%u nb_tx=%u\",\n+\t       txq->port_id, txq->queue_id, tx_id, nb_tx);\n+\n+\tIDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);\n+\ttxq->tx_tail = tx_id;\n+\n+\treturn nb_tx;\n+}\n+\n+/* TX prep functions */\n+uint16_t\n+idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t       uint16_t nb_pkts)\n+{\n+#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n+\tint ret;\n+#endif\n+\tint i;\n+\tuint64_t ol_flags;\n+\tstruct rte_mbuf *m;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tm = tx_pkts[i];\n+\t\tol_flags = m->ol_flags;\n+\n+\t\t/* Check condition for nb_segs > IDPF_TX_MAX_MTU_SEG. */\n+\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {\n+\t\t\tif (m->nb_segs > IDPF_TX_MAX_MTU_SEG) {\n+\t\t\t\trte_errno = EINVAL;\n+\t\t\t\treturn i;\n+\t\t\t}\n+\t\t} else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||\n+\t\t\t   (m->tso_segsz > IDPF_MAX_TSO_MSS) ||\n+\t\t\t   (m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {\n+\t\t\t/* MSS outside the range are considered malicious */\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn i;\n+\t\t}\n+\n+\t\tif ((ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) != 0) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn i;\n+\t\t}\n+\n+\t\tif (m->pkt_len < IDPF_MIN_FRAME_SIZE) {\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn i;\n+\t\t}\n+\n+#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n+\t\tret = rte_validate_tx_offload(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+#endif\n+\t}\n+\n+\treturn i;\n+}\ndiff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h\nindex 9abf321519..2da2a6dc49 100644\n--- a/drivers/common/idpf/idpf_common_rxtx.h\n+++ b/drivers/common/idpf/idpf_common_rxtx.h\n@@ -27,6 +27,61 @@\n #define IDPF_TX_OFFLOAD_MULTI_SEGS       RTE_BIT64(15)\n #define IDPF_TX_OFFLOAD_MBUF_FAST_FREE   RTE_BIT64(16)\n \n+#define IDPF_TX_MAX_MTU_SEG\t10\n+\n+#define IDPF_MIN_TSO_MSS\t88\n+#define IDPF_MAX_TSO_MSS\t9728\n+#define IDPF_MAX_TSO_FRAME_SIZE\t262143\n+#define IDPF_TX_MAX_MTU_SEG     10\n+\n+#define IDPF_TX_CKSUM_OFFLOAD_MASK (\t\t\\\n+\t\tRTE_MBUF_F_TX_IP_CKSUM |\t\\\n+\t\tRTE_MBUF_F_TX_L4_MASK |\t\t\\\n+\t\tRTE_MBUF_F_TX_TCP_SEG)\n+\n+#define IDPF_TX_OFFLOAD_MASK (\t\t\t\\\n+\t\tIDPF_TX_CKSUM_OFFLOAD_MASK |\t\\\n+\t\tRTE_MBUF_F_TX_IPV4 |\t\t\\\n+\t\tRTE_MBUF_F_TX_IPV6)\n+\n+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \\\n+\t\t(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)\n+\n+/* MTS */\n+#define GLTSYN_CMD_SYNC_0_0\t(PF_TIMESYNC_BASE + 0x0)\n+#define PF_GLTSYN_SHTIME_0_0\t(PF_TIMESYNC_BASE + 0x4)\n+#define PF_GLTSYN_SHTIME_L_0\t(PF_TIMESYNC_BASE + 0x8)\n+#define PF_GLTSYN_SHTIME_H_0\t(PF_TIMESYNC_BASE + 0xC)\n+#define GLTSYN_ART_L_0\t\t(PF_TIMESYNC_BASE + 0x10)\n+#define GLTSYN_ART_H_0\t\t(PF_TIMESYNC_BASE + 0x14)\n+#define PF_GLTSYN_SHTIME_0_1\t(PF_TIMESYNC_BASE + 0x24)\n+#define PF_GLTSYN_SHTIME_L_1\t(PF_TIMESYNC_BASE + 0x28)\n+#define PF_GLTSYN_SHTIME_H_1\t(PF_TIMESYNC_BASE + 0x2C)\n+#define PF_GLTSYN_SHTIME_0_2\t(PF_TIMESYNC_BASE + 0x44)\n+#define PF_GLTSYN_SHTIME_L_2\t(PF_TIMESYNC_BASE + 0x48)\n+#define PF_GLTSYN_SHTIME_H_2\t(PF_TIMESYNC_BASE + 0x4C)\n+#define PF_GLTSYN_SHTIME_0_3\t(PF_TIMESYNC_BASE + 0x64)\n+#define PF_GLTSYN_SHTIME_L_3\t(PF_TIMESYNC_BASE + 0x68)\n+#define PF_GLTSYN_SHTIME_H_3\t(PF_TIMESYNC_BASE + 0x6C)\n+\n+#define PF_TIMESYNC_BAR4_BASE\t0x0E400000\n+#define 
GLTSYN_ENA\t\t(PF_TIMESYNC_BAR4_BASE + 0x90)\n+#define GLTSYN_CMD\t\t(PF_TIMESYNC_BAR4_BASE + 0x94)\n+#define GLTSYC_TIME_L\t\t(PF_TIMESYNC_BAR4_BASE + 0x104)\n+#define GLTSYC_TIME_H\t\t(PF_TIMESYNC_BAR4_BASE + 0x108)\n+\n+#define GLTSYN_CMD_SYNC_0_4\t(PF_TIMESYNC_BAR4_BASE + 0x110)\n+#define PF_GLTSYN_SHTIME_L_4\t(PF_TIMESYNC_BAR4_BASE + 0x118)\n+#define PF_GLTSYN_SHTIME_H_4\t(PF_TIMESYNC_BAR4_BASE + 0x11C)\n+#define GLTSYN_INCVAL_L\t\t(PF_TIMESYNC_BAR4_BASE + 0x150)\n+#define GLTSYN_INCVAL_H\t\t(PF_TIMESYNC_BAR4_BASE + 0x154)\n+#define GLTSYN_SHADJ_L\t\t(PF_TIMESYNC_BAR4_BASE + 0x158)\n+#define GLTSYN_SHADJ_H\t\t(PF_TIMESYNC_BAR4_BASE + 0x15C)\n+\n+#define GLTSYN_CMD_SYNC_0_5\t(PF_TIMESYNC_BAR4_BASE + 0x130)\n+#define PF_GLTSYN_SHTIME_L_5\t(PF_TIMESYNC_BAR4_BASE + 0x138)\n+#define PF_GLTSYN_SHTIME_H_5\t(PF_TIMESYNC_BAR4_BASE + 0x13C)\n+\n struct idpf_rx_stats {\n \tuint64_t mbuf_alloc_failed;\n };\n@@ -126,6 +181,18 @@ struct idpf_tx_queue {\n \tstruct idpf_tx_queue *complq;\n };\n \n+/* Offload features */\n+union idpf_tx_offload {\n+\tuint64_t data;\n+\tstruct {\n+\t\tuint64_t l2_len:7; /* L2 (MAC) Header Length. */\n+\t\tuint64_t l3_len:9; /* L3 (IP) Header Length. */\n+\t\tuint64_t l4_len:8; /* L4 Header Length. */\n+\t\tuint64_t tso_segsz:16; /* TCP TSO segment size */\n+\t\t/* uint64_t unused : 24; */\n+\t};\n+};\n+\n struct idpf_rxq_ops {\n \tvoid (*release_mbufs)(struct idpf_rx_queue *rxq);\n };\n@@ -134,6 +201,9 @@ struct idpf_txq_ops {\n \tvoid (*release_mbufs)(struct idpf_tx_queue *txq);\n };\n \n+extern int idpf_timestamp_dynfield_offset;\n+extern uint64_t idpf_timestamp_dynflag;\n+\n __rte_internal\n int check_rx_thresh(uint16_t nb_desc, uint16_t thresh);\n __rte_internal\n@@ -162,8 +232,25 @@ void idpf_rx_queue_release(void *rxq);\n __rte_internal\n void idpf_tx_queue_release(void *txq);\n __rte_internal\n+int idpf_register_ts_mbuf(struct idpf_rx_queue *rxq);\n+__rte_internal\n int idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq);\n __rte_internal\n int idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq);\n+__rte_internal\n+uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t       uint16_t nb_pkts);\n+__rte_internal\n+uint16_t idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t       uint16_t nb_pkts);\n+__rte_internal\n+uint16_t idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t\tuint16_t nb_pkts);\n+__rte_internal\n+uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t\tuint16_t nb_pkts);\n+__rte_internal\n+uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\tuint16_t nb_pkts);\n \n #endif /* _IDPF_COMMON_RXTX_H_ */\ndiff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map\nindex 648f94bf16..bc2a069735 100644\n--- a/drivers/common/idpf/version.map\n+++ b/drivers/common/idpf/version.map\n@@ -35,8 +35,14 @@ INTERNAL {\n \treset_single_tx_queue;\n \tidpf_rx_queue_release;\n \tidpf_tx_queue_release;\n+\tidpf_register_ts_mbuf;\n \tidpf_alloc_single_rxq_mbufs;\n \tidpf_alloc_split_rxq_mbufs;\n+\tidpf_splitq_recv_pkts;\n+\tidpf_splitq_xmit_pkts;\n+\tidpf_singleq_recv_pkts;\n+\tidpf_singleq_xmit_pkts;\n+\tidpf_prep_pkts;\n \n \tlocal: *;\n };\ndiff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex 06625252b6..a70ae65558 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -22,8 +22,6 @@ rte_spinlock_t idpf_adapter_lock;\n struct idpf_adapter_list 
idpf_adapter_list;\n bool idpf_adapter_list_init;\n \n-uint64_t idpf_timestamp_dynflag;\n-\n static const char * const idpf_valid_args[] = {\n \tIDPF_TX_SINGLE_Q,\n \tIDPF_RX_SINGLE_Q,\ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nindex 6d4738f6fe..133589cf98 100644\n--- a/drivers/net/idpf/idpf_ethdev.h\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -28,7 +28,6 @@\n \n #define IDPF_MIN_BUF_SIZE\t1024\n #define IDPF_MAX_FRAME_SIZE\t9728\n-#define IDPF_MIN_FRAME_SIZE\t14\n \n #define IDPF_NUM_MACADDR_MAX\t64\n \n@@ -77,9 +76,6 @@ struct idpf_adapter_ext {\n \tuint16_t cur_vport_nb;\n \n \tuint16_t used_vecs_num;\n-\n-\t/* For PTP */\n-\tuint64_t time_hw;\n };\n \n TAILQ_HEAD(idpf_adapter_list, idpf_adapter_ext);\ndiff --git a/drivers/net/idpf/idpf_logs.h b/drivers/net/idpf/idpf_logs.h\nindex d5f778fefe..bf0774b8e4 100644\n--- a/drivers/net/idpf/idpf_logs.h\n+++ b/drivers/net/idpf/idpf_logs.h\n@@ -29,28 +29,4 @@ extern int idpf_logtype_driver;\n #define PMD_DRV_LOG(level, fmt, args...) \\\n \tPMD_DRV_LOG_RAW(level, fmt \"\\n\", ## args)\n \n-#ifdef RTE_LIBRTE_IDPF_DEBUG_RX\n-#define PMD_RX_LOG(level, ...) \\\n-\tRTE_LOG(level, \\\n-\t\tPMD, \\\n-\t\tRTE_FMT(\"%s(): \" \\\n-\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n-\t\t\t__func__, \\\n-\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n-#else\n-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)\n-#endif\n-\n-#ifdef RTE_LIBRTE_IDPF_DEBUG_TX\n-#define PMD_TX_LOG(level, ...) \\\n-\tRTE_LOG(level, \\\n-\t\tPMD, \\\n-\t\tRTE_FMT(\"%s(): \" \\\n-\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n-\t\t\t__func__, \\\n-\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n-#else\n-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)\n-#endif\n-\n #endif /* _IDPF_LOGS_H_ */\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex 6c693f4c3a..fbf2a8f0cd 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -10,7 +10,8 @@\n #include \"idpf_rxtx.h\"\n #include \"idpf_rxtx_vec_common.h\"\n \n-static int idpf_timestamp_dynfield_offset = -1;\n+int idpf_timestamp_dynfield_offset;\n+uint64_t idpf_timestamp_dynflag;\n \n static uint64_t\n idpf_rx_offload_convert(uint64_t offload)\n@@ -501,23 +502,6 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \treturn ret;\n }\n \n-static int\n-idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)\n-{\n-\tint err;\n-\tif ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0) {\n-\t\t/* Register mbuf field and flag for Rx timestamp */\n-\t\terr = rte_mbuf_dyn_rx_timestamp_register(&idpf_timestamp_dynfield_offset,\n-\t\t\t\t\t\t\t &idpf_timestamp_dynflag);\n-\t\tif (err != 0) {\n-\t\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\t    \"Cannot register mbuf field/flag for timestamp\");\n-\t\t\treturn -EINVAL;\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-\n int\n idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n@@ -762,922 +746,6 @@ idpf_stop_queues(struct rte_eth_dev *dev)\n \t}\n }\n \n-#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S\t\t\t\t\\\n-\t(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |     \\\n-\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |     \\\n-\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |    \\\n-\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))\n-\n-static inline uint64_t\n-idpf_splitq_rx_csum_offload(uint8_t err)\n-{\n-\tuint64_t flags = 0;\n-\n-\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))\n-\t\treturn flags;\n-\n-\tif (likely((err & 
IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {\n-\t\tflags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |\n-\t\t\t  RTE_MBUF_F_RX_L4_CKSUM_GOOD);\n-\t\treturn flags;\n-\t}\n-\n-\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;\n-\telse\n-\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;\n-\n-\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;\n-\telse\n-\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;\n-\n-\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;\n-\n-\tif (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;\n-\telse\n-\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;\n-\n-\treturn flags;\n-}\n-\n-#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0\n-#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16\n-#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24\n-\n-static inline uint64_t\n-idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,\n-\t\t\t   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)\n-{\n-\tuint8_t status_err0_qw0;\n-\tuint64_t flags = 0;\n-\n-\tstatus_err0_qw0 = rx_desc->status_err0_qw0;\n-\n-\tif ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {\n-\t\tflags |= RTE_MBUF_F_RX_RSS_HASH;\n-\t\tmb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<\n-\t\t\t\tIDPF_RX_FLEX_DESC_ADV_HASH1_S) |\n-\t\t\t((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<\n-\t\t\t IDPF_RX_FLEX_DESC_ADV_HASH2_S) |\n-\t\t\t((uint32_t)(rx_desc->hash3) <<\n-\t\t\t IDPF_RX_FLEX_DESC_ADV_HASH3_S);\n-\t}\n-\n-\treturn flags;\n-}\n-\n-static void\n-idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)\n-{\n-\tvolatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;\n-\tvolatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;\n-\tuint16_t nb_refill = rx_bufq->rx_free_thresh;\n-\tuint16_t nb_desc = rx_bufq->nb_rx_desc;\n-\tuint16_t next_avail = rx_bufq->rx_tail;\n-\tstruct rte_mbuf *nmb[rx_bufq->rx_free_thresh];\n-\tstruct rte_eth_dev *dev;\n-\tuint64_t dma_addr;\n-\tuint16_t delta;\n-\tint i;\n-\n-\tif (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)\n-\t\treturn;\n-\n-\trx_buf_ring = rx_bufq->rx_ring;\n-\tdelta = nb_desc - next_avail;\n-\tif (unlikely(delta < nb_refill)) {\n-\t\tif (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {\n-\t\t\tfor (i = 0; i < delta; i++) {\n-\t\t\t\trx_buf_desc = &rx_buf_ring[next_avail + i];\n-\t\t\t\trx_bufq->sw_ring[next_avail + i] = nmb[i];\n-\t\t\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));\n-\t\t\t\trx_buf_desc->hdr_addr = 0;\n-\t\t\t\trx_buf_desc->pkt_addr = dma_addr;\n-\t\t\t}\n-\t\t\tnb_refill -= delta;\n-\t\t\tnext_avail = 0;\n-\t\t\trx_bufq->nb_rx_hold -= delta;\n-\t\t} else {\n-\t\t\tdev = &rte_eth_devices[rx_bufq->port_id];\n-\t\t\tdev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;\n-\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u queue_id=%u\",\n-\t\t\t\t   rx_bufq->port_id, rx_bufq->queue_id);\n-\t\t\treturn;\n-\t\t}\n-\t}\n-\n-\tif (nb_desc - next_avail >= nb_refill) {\n-\t\tif (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {\n-\t\t\tfor (i = 0; i < nb_refill; i++) {\n-\t\t\t\trx_buf_desc = &rx_buf_ring[next_avail + i];\n-\t\t\t\trx_bufq->sw_ring[next_avail + i] = nmb[i];\n-\t\t\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));\n-\t\t\t\trx_buf_desc->hdr_addr = 
0;\n-\t\t\t\trx_buf_desc->pkt_addr = dma_addr;\n-\t\t\t}\n-\t\t\tnext_avail += nb_refill;\n-\t\t\trx_bufq->nb_rx_hold -= nb_refill;\n-\t\t} else {\n-\t\t\tdev = &rte_eth_devices[rx_bufq->port_id];\n-\t\t\tdev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;\n-\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u queue_id=%u\",\n-\t\t\t\t   rx_bufq->port_id, rx_bufq->queue_id);\n-\t\t}\n-\t}\n-\n-\tIDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);\n-\n-\trx_bufq->rx_tail = next_avail;\n-}\n-\n-uint16_t\n-idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n-\t\t      uint16_t nb_pkts)\n-{\n-\tvolatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;\n-\tvolatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;\n-\tuint16_t pktlen_gen_bufq_id;\n-\tstruct idpf_rx_queue *rxq;\n-\tconst uint32_t *ptype_tbl;\n-\tuint8_t status_err0_qw1;\n-\tstruct idpf_adapter_ext *ad;\n-\tstruct rte_mbuf *rxm;\n-\tuint16_t rx_id_bufq1;\n-\tuint16_t rx_id_bufq2;\n-\tuint64_t pkt_flags;\n-\tuint16_t pkt_len;\n-\tuint16_t bufq_id;\n-\tuint16_t gen_id;\n-\tuint16_t rx_id;\n-\tuint16_t nb_rx;\n-\tuint64_t ts_ns;\n-\n-\tnb_rx = 0;\n-\trxq = rx_queue;\n-\tad = IDPF_ADAPTER_TO_EXT(rxq->adapter);\n-\n-\tif (unlikely(rxq == NULL) || unlikely(!rxq->q_started))\n-\t\treturn nb_rx;\n-\n-\trx_id = rxq->rx_tail;\n-\trx_id_bufq1 = rxq->bufq1->rx_next_avail;\n-\trx_id_bufq2 = rxq->bufq2->rx_next_avail;\n-\trx_desc_ring = rxq->rx_ring;\n-\tptype_tbl = rxq->adapter->ptype_tbl;\n-\n-\tif ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)\n-\t\trxq->hw_register_set = 1;\n-\n-\twhile (nb_rx < nb_pkts) {\n-\t\trx_desc = &rx_desc_ring[rx_id];\n-\n-\t\tpktlen_gen_bufq_id =\n-\t\t\trte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);\n-\t\tgen_id = (pktlen_gen_bufq_id &\n-\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>\n-\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;\n-\t\tif (gen_id != rxq->expected_gen_id)\n-\t\t\tbreak;\n-\n-\t\tpkt_len = (pktlen_gen_bufq_id &\n-\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>\n-\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;\n-\t\tif (pkt_len == 0)\n-\t\t\tPMD_RX_LOG(ERR, \"Packet length is 0\");\n-\n-\t\trx_id++;\n-\t\tif (unlikely(rx_id == rxq->nb_rx_desc)) {\n-\t\t\trx_id = 0;\n-\t\t\trxq->expected_gen_id ^= 1;\n-\t\t}\n-\n-\t\tbufq_id = (pktlen_gen_bufq_id &\n-\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>\n-\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;\n-\t\tif (bufq_id == 0) {\n-\t\t\trxm = rxq->bufq1->sw_ring[rx_id_bufq1];\n-\t\t\trx_id_bufq1++;\n-\t\t\tif (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))\n-\t\t\t\trx_id_bufq1 = 0;\n-\t\t\trxq->bufq1->nb_rx_hold++;\n-\t\t} else {\n-\t\t\trxm = rxq->bufq2->sw_ring[rx_id_bufq2];\n-\t\t\trx_id_bufq2++;\n-\t\t\tif (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))\n-\t\t\t\trx_id_bufq2 = 0;\n-\t\t\trxq->bufq2->nb_rx_hold++;\n-\t\t}\n-\n-\t\trxm->pkt_len = pkt_len;\n-\t\trxm->data_len = pkt_len;\n-\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n-\t\trxm->next = NULL;\n-\t\trxm->nb_segs = 1;\n-\t\trxm->port = rxq->port_id;\n-\t\trxm->ol_flags = 0;\n-\t\trxm->packet_type =\n-\t\t\tptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &\n-\t\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>\n-\t\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];\n-\n-\t\tstatus_err0_qw1 = rx_desc->status_err0_qw1;\n-\t\tpkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);\n-\t\tpkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);\n-\t\tif (idpf_timestamp_dynflag > 0 &&\n-\t\t    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {\n-\t\t\t/* timestamp 
*/\n-\t\t\tts_ns = idpf_tstamp_convert_32b_64b(ad,\n-\t\t\t\trxq->hw_register_set,\n-\t\t\t\trte_le_to_cpu_32(rx_desc->ts_high));\n-\t\t\trxq->hw_register_set = 0;\n-\t\t\t*RTE_MBUF_DYNFIELD(rxm,\n-\t\t\t\t\t   idpf_timestamp_dynfield_offset,\n-\t\t\t\t\t   rte_mbuf_timestamp_t *) = ts_ns;\n-\t\t\trxm->ol_flags |= idpf_timestamp_dynflag;\n-\t\t}\n-\n-\t\trxm->ol_flags |= pkt_flags;\n-\n-\t\trx_pkts[nb_rx++] = rxm;\n-\t}\n-\n-\tif (nb_rx > 0) {\n-\t\trxq->rx_tail = rx_id;\n-\t\tif (rx_id_bufq1 != rxq->bufq1->rx_next_avail)\n-\t\t\trxq->bufq1->rx_next_avail = rx_id_bufq1;\n-\t\tif (rx_id_bufq2 != rxq->bufq2->rx_next_avail)\n-\t\t\trxq->bufq2->rx_next_avail = rx_id_bufq2;\n-\n-\t\tidpf_split_rx_bufq_refill(rxq->bufq1);\n-\t\tidpf_split_rx_bufq_refill(rxq->bufq2);\n-\t}\n-\n-\treturn nb_rx;\n-}\n-\n-static inline void\n-idpf_split_tx_free(struct idpf_tx_queue *cq)\n-{\n-\tvolatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;\n-\tvolatile struct idpf_splitq_tx_compl_desc *txd;\n-\tuint16_t next = cq->tx_tail;\n-\tstruct idpf_tx_entry *txe;\n-\tstruct idpf_tx_queue *txq;\n-\tuint16_t gen, qid, q_head;\n-\tuint16_t nb_desc_clean;\n-\tuint8_t ctype;\n-\n-\ttxd = &compl_ring[next];\n-\tgen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n-\t\tIDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;\n-\tif (gen != cq->expected_gen_id)\n-\t\treturn;\n-\n-\tctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n-\t\tIDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;\n-\tqid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n-\t\tIDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;\n-\tq_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);\n-\ttxq = cq->txqs[qid - cq->tx_start_qid];\n-\n-\tswitch (ctype) {\n-\tcase IDPF_TXD_COMPLT_RE:\n-\t\t/* clean to q_head which indicates be fetched txq desc id + 1.\n-\t\t * TODO: need to refine and remove the if condition.\n-\t\t */\n-\t\tif (unlikely(q_head % 32)) {\n-\t\t\tPMD_DRV_LOG(ERR, \"unexpected desc (head = %u) completion.\",\n-\t\t\t\t\t\tq_head);\n-\t\t\treturn;\n-\t\t}\n-\t\tif (txq->last_desc_cleaned > q_head)\n-\t\t\tnb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +\n-\t\t\t\tq_head;\n-\t\telse\n-\t\t\tnb_desc_clean = q_head - txq->last_desc_cleaned;\n-\t\ttxq->nb_free += nb_desc_clean;\n-\t\ttxq->last_desc_cleaned = q_head;\n-\t\tbreak;\n-\tcase IDPF_TXD_COMPLT_RS:\n-\t\t/* q_head indicates sw_id when ctype is 2 */\n-\t\ttxe = &txq->sw_ring[q_head];\n-\t\tif (txe->mbuf != NULL) {\n-\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n-\t\t\ttxe->mbuf = NULL;\n-\t\t}\n-\t\tbreak;\n-\tdefault:\n-\t\tPMD_DRV_LOG(ERR, \"unknown completion type.\");\n-\t\treturn;\n-\t}\n-\n-\tif (++next == cq->nb_tx_desc) {\n-\t\tnext = 0;\n-\t\tcq->expected_gen_id ^= 1;\n-\t}\n-\n-\tcq->tx_tail = next;\n-}\n-\n-/* Check if the context descriptor is needed for TX offloading */\n-static inline uint16_t\n-idpf_calc_context_desc(uint64_t flags)\n-{\n-\tif ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n-\t\treturn 1;\n-\n-\treturn 0;\n-}\n-\n-/* set TSO context descriptor\n- */\n-static inline void\n-idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,\n-\t\t\tunion idpf_tx_offload tx_offload,\n-\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_desc)\n-{\n-\tuint16_t cmd_dtype;\n-\tuint32_t tso_len;\n-\tuint8_t hdr_len;\n-\n-\tif (tx_offload.l4_len == 0) {\n-\t\tPMD_TX_LOG(DEBUG, \"L4 length set to 0\");\n-\t\treturn;\n-\t}\n-\n-\thdr_len = tx_offload.l2_len +\n-\t\ttx_offload.l3_len +\n-\t\ttx_offload.l4_len;\n-\tcmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX 
|\n-\t\tIDPF_TX_FLEX_CTX_DESC_CMD_TSO;\n-\ttso_len = mbuf->pkt_len - hdr_len;\n-\n-\tctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);\n-\tctx_desc->tso.qw0.hdr_len = hdr_len;\n-\tctx_desc->tso.qw0.mss_rt =\n-\t\trte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &\n-\t\t\t\t IDPF_TXD_FLEX_CTX_MSS_RT_M);\n-\tctx_desc->tso.qw0.flex_tlen =\n-\t\trte_cpu_to_le_32(tso_len &\n-\t\t\t\t IDPF_TXD_FLEX_CTX_MSS_RT_M);\n-}\n-\n-uint16_t\n-idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t\t      uint16_t nb_pkts)\n-{\n-\tstruct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;\n-\tvolatile struct idpf_flex_tx_sched_desc *txr;\n-\tvolatile struct idpf_flex_tx_sched_desc *txd;\n-\tstruct idpf_tx_entry *sw_ring;\n-\tunion idpf_tx_offload tx_offload = {0};\n-\tstruct idpf_tx_entry *txe, *txn;\n-\tuint16_t nb_used, tx_id, sw_id;\n-\tstruct rte_mbuf *tx_pkt;\n-\tuint16_t nb_to_clean;\n-\tuint16_t nb_tx = 0;\n-\tuint64_t ol_flags;\n-\tuint16_t nb_ctx;\n-\n-\tif (unlikely(txq == NULL) || unlikely(!txq->q_started))\n-\t\treturn nb_tx;\n-\n-\ttxr = txq->desc_ring;\n-\tsw_ring = txq->sw_ring;\n-\ttx_id = txq->tx_tail;\n-\tsw_id = txq->sw_tail;\n-\ttxe = &sw_ring[sw_id];\n-\n-\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n-\t\ttx_pkt = tx_pkts[nb_tx];\n-\n-\t\tif (txq->nb_free <= txq->free_thresh) {\n-\t\t\t/* TODO: Need to refine\n-\t\t\t * 1. free and clean: Better to decide a clean destination instead of\n-\t\t\t * loop times. And don't free mbuf when RS got immediately, free when\n-\t\t\t * transmit or according to the clean destination.\n-\t\t\t * Now, just ignore the RE write back, free mbuf when get RS\n-\t\t\t * 2. out-of-order rewrite back haven't be supported, SW head and HW head\n-\t\t\t * need to be separated.\n-\t\t\t **/\n-\t\t\tnb_to_clean = 2 * txq->rs_thresh;\n-\t\t\twhile (nb_to_clean--)\n-\t\t\t\tidpf_split_tx_free(txq->complq);\n-\t\t}\n-\n-\t\tif (txq->nb_free < tx_pkt->nb_segs)\n-\t\t\tbreak;\n-\n-\t\tol_flags = tx_pkt->ol_flags;\n-\t\ttx_offload.l2_len = tx_pkt->l2_len;\n-\t\ttx_offload.l3_len = tx_pkt->l3_len;\n-\t\ttx_offload.l4_len = tx_pkt->l4_len;\n-\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n-\t\t/* Calculate the number of context descriptors needed. 
*/\n-\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n-\t\tnb_used = tx_pkt->nb_segs + nb_ctx;\n-\n-\t\t/* context descriptor */\n-\t\tif (nb_ctx != 0) {\n-\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_desc =\n-\t\t\t(volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];\n-\n-\t\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n-\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n-\t\t\t\t\t\t\tctx_desc);\n-\n-\t\t\ttx_id++;\n-\t\t\tif (tx_id == txq->nb_tx_desc)\n-\t\t\t\ttx_id = 0;\n-\t\t}\n-\n-\t\tdo {\n-\t\t\ttxd = &txr[tx_id];\n-\t\t\ttxn = &sw_ring[txe->next_id];\n-\t\t\ttxe->mbuf = tx_pkt;\n-\n-\t\t\t/* Setup TX descriptor */\n-\t\t\ttxd->buf_addr =\n-\t\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));\n-\t\t\ttxd->qw1.cmd_dtype =\n-\t\t\t\trte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE);\n-\t\t\ttxd->qw1.rxr_bufsize = tx_pkt->data_len;\n-\t\t\ttxd->qw1.compl_tag = sw_id;\n-\t\t\ttx_id++;\n-\t\t\tif (tx_id == txq->nb_tx_desc)\n-\t\t\t\ttx_id = 0;\n-\t\t\tsw_id = txe->next_id;\n-\t\t\ttxe = txn;\n-\t\t\ttx_pkt = tx_pkt->next;\n-\t\t} while (tx_pkt);\n-\n-\t\t/* fill the last descriptor with End of Packet (EOP) bit */\n-\t\ttxd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;\n-\n-\t\tif (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)\n-\t\t\ttxd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;\n-\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n-\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n-\n-\t\tif (txq->nb_used >= 32) {\n-\t\t\ttxd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;\n-\t\t\t/* Update txq RE bit counters */\n-\t\t\ttxq->nb_used = 0;\n-\t\t}\n-\t}\n-\n-\t/* update the tail pointer if any packets were processed */\n-\tif (likely(nb_tx > 0)) {\n-\t\tIDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);\n-\t\ttxq->tx_tail = tx_id;\n-\t\ttxq->sw_tail = sw_id;\n-\t}\n-\n-\treturn nb_tx;\n-}\n-\n-#define IDPF_RX_FLEX_DESC_STATUS0_XSUM_S\t\t\t\t\\\n-\t(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |\t\t\\\n-\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |\t\t\\\n-\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |\t\\\n-\t RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))\n-\n-/* Translate the rx descriptor status and error fields to pkt flags */\n-static inline uint64_t\n-idpf_rxd_to_pkt_flags(uint16_t status_error)\n-{\n-\tuint64_t flags = 0;\n-\n-\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S)) == 0))\n-\t\treturn flags;\n-\n-\tif (likely((status_error & IDPF_RX_FLEX_DESC_STATUS0_XSUM_S) == 0)) {\n-\t\tflags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |\n-\t\t\t  RTE_MBUF_F_RX_L4_CKSUM_GOOD);\n-\t\treturn flags;\n-\t}\n-\n-\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;\n-\telse\n-\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;\n-\n-\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;\n-\telse\n-\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;\n-\n-\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;\n-\n-\tif (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)) != 0))\n-\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;\n-\telse\n-\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;\n-\n-\treturn flags;\n-}\n-\n-static inline void\n-idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,\n-\t\t    uint16_t rx_id)\n-{\n-\tnb_hold = (uint16_t)(nb_hold + 
rxq->nb_rx_hold);\n-\n-\tif (nb_hold > rxq->rx_free_thresh) {\n-\t\tPMD_RX_LOG(DEBUG,\n-\t\t\t   \"port_id=%u queue_id=%u rx_tail=%u nb_hold=%u\",\n-\t\t\t   rxq->port_id, rxq->queue_id, rx_id, nb_hold);\n-\t\trx_id = (uint16_t)((rx_id == 0) ?\n-\t\t\t\t   (rxq->nb_rx_desc - 1) : (rx_id - 1));\n-\t\tIDPF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);\n-\t\tnb_hold = 0;\n-\t}\n-\trxq->nb_rx_hold = nb_hold;\n-}\n-\n-uint16_t\n-idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n-\t\t       uint16_t nb_pkts)\n-{\n-\tvolatile union virtchnl2_rx_desc *rx_ring;\n-\tvolatile union virtchnl2_rx_desc *rxdp;\n-\tunion virtchnl2_rx_desc rxd;\n-\tstruct idpf_rx_queue *rxq;\n-\tconst uint32_t *ptype_tbl;\n-\tuint16_t rx_id, nb_hold;\n-\tstruct rte_eth_dev *dev;\n-\tstruct idpf_adapter_ext *ad;\n-\tuint16_t rx_packet_len;\n-\tstruct rte_mbuf *rxm;\n-\tstruct rte_mbuf *nmb;\n-\tuint16_t rx_status0;\n-\tuint64_t pkt_flags;\n-\tuint64_t dma_addr;\n-\tuint64_t ts_ns;\n-\tuint16_t nb_rx;\n-\n-\tnb_rx = 0;\n-\tnb_hold = 0;\n-\trxq = rx_queue;\n-\n-\tad = IDPF_ADAPTER_TO_EXT(rxq->adapter);\n-\n-\tif (unlikely(rxq == NULL) || unlikely(!rxq->q_started))\n-\t\treturn nb_rx;\n-\n-\trx_id = rxq->rx_tail;\n-\trx_ring = rxq->rx_ring;\n-\tptype_tbl = rxq->adapter->ptype_tbl;\n-\n-\tif ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)\n-\t\trxq->hw_register_set = 1;\n-\n-\twhile (nb_rx < nb_pkts) {\n-\t\trxdp = &rx_ring[rx_id];\n-\t\trx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);\n-\n-\t\t/* Check the DD bit first */\n-\t\tif ((rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S)) == 0)\n-\t\t\tbreak;\n-\n-\t\tnmb = rte_mbuf_raw_alloc(rxq->mp);\n-\t\tif (unlikely(nmb == NULL)) {\n-\t\t\tdev = &rte_eth_devices[rxq->port_id];\n-\t\t\tdev->data->rx_mbuf_alloc_failed++;\n-\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u \"\n-\t\t\t\t   \"queue_id=%u\", rxq->port_id, rxq->queue_id);\n-\t\t\tbreak;\n-\t\t}\n-\t\trxd = *rxdp; /* copy descriptor in ring to temp variable*/\n-\n-\t\tnb_hold++;\n-\t\trxm = rxq->sw_ring[rx_id];\n-\t\trxq->sw_ring[rx_id] = nmb;\n-\t\trx_id++;\n-\t\tif (unlikely(rx_id == rxq->nb_rx_desc))\n-\t\t\trx_id = 0;\n-\n-\t\t/* Prefetch next mbuf */\n-\t\trte_prefetch0(rxq->sw_ring[rx_id]);\n-\n-\t\t/* When next RX descriptor is on a cache line boundary,\n-\t\t * prefetch the next 4 RX descriptors and next 8 pointers\n-\t\t * to mbufs.\n-\t\t */\n-\t\tif ((rx_id & 0x3) == 0) {\n-\t\t\trte_prefetch0(&rx_ring[rx_id]);\n-\t\t\trte_prefetch0(rxq->sw_ring[rx_id]);\n-\t\t}\n-\t\tdma_addr =\n-\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n-\t\trxdp->read.hdr_addr = 0;\n-\t\trxdp->read.pkt_addr = dma_addr;\n-\n-\t\trx_packet_len = (rte_cpu_to_le_16(rxd.flex_nic_wb.pkt_len) &\n-\t\t\t\t VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M);\n-\n-\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n-\t\trte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));\n-\t\trxm->nb_segs = 1;\n-\t\trxm->next = NULL;\n-\t\trxm->pkt_len = rx_packet_len;\n-\t\trxm->data_len = rx_packet_len;\n-\t\trxm->port = rxq->port_id;\n-\t\trxm->ol_flags = 0;\n-\t\tpkt_flags = idpf_rxd_to_pkt_flags(rx_status0);\n-\t\trxm->packet_type =\n-\t\t\tptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) &\n-\t\t\t\t\t    VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];\n-\n-\t\trxm->ol_flags |= pkt_flags;\n-\n-\t\tif (idpf_timestamp_dynflag > 0 &&\n-\t\t   (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0) {\n-\t\t\t/* timestamp */\n-\t\t\tts_ns = 
idpf_tstamp_convert_32b_64b(ad,\n-\t\t\t\trxq->hw_register_set,\n-\t\t\t\trte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));\n-\t\t\trxq->hw_register_set = 0;\n-\t\t\t*RTE_MBUF_DYNFIELD(rxm,\n-\t\t\t\t\t   idpf_timestamp_dynfield_offset,\n-\t\t\t\t\t   rte_mbuf_timestamp_t *) = ts_ns;\n-\t\t\trxm->ol_flags |= idpf_timestamp_dynflag;\n-\t\t}\n-\n-\t\trx_pkts[nb_rx++] = rxm;\n-\t}\n-\trxq->rx_tail = rx_id;\n-\n-\tidpf_update_rx_tail(rxq, nb_hold, rx_id);\n-\n-\treturn nb_rx;\n-}\n-\n-static inline int\n-idpf_xmit_cleanup(struct idpf_tx_queue *txq)\n-{\n-\tuint16_t last_desc_cleaned = txq->last_desc_cleaned;\n-\tstruct idpf_tx_entry *sw_ring = txq->sw_ring;\n-\tuint16_t nb_tx_desc = txq->nb_tx_desc;\n-\tuint16_t desc_to_clean_to;\n-\tuint16_t nb_tx_to_clean;\n-\tuint16_t i;\n-\n-\tvolatile struct idpf_flex_tx_desc *txd = txq->tx_ring;\n-\n-\tdesc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);\n-\tif (desc_to_clean_to >= nb_tx_desc)\n-\t\tdesc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);\n-\n-\tdesc_to_clean_to = sw_ring[desc_to_clean_to].last_id;\n-\t/* In the writeback Tx desccriptor, the only significant fields are the 4-bit DTYPE */\n-\tif ((txd[desc_to_clean_to].qw1.cmd_dtype &\n-\t\t\trte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=\n-\t\t\trte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {\n-\t\tPMD_TX_LOG(DEBUG, \"TX descriptor %4u is not done \"\n-\t\t\t   \"(port=%d queue=%d)\", desc_to_clean_to,\n-\t\t\t   txq->port_id, txq->queue_id);\n-\t\treturn -1;\n-\t}\n-\n-\tif (last_desc_cleaned > desc_to_clean_to)\n-\t\tnb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +\n-\t\t\t\t\t    desc_to_clean_to);\n-\telse\n-\t\tnb_tx_to_clean = (uint16_t)(desc_to_clean_to -\n-\t\t\t\t\tlast_desc_cleaned);\n-\n-\ttxd[desc_to_clean_to].qw1.cmd_dtype = 0;\n-\ttxd[desc_to_clean_to].qw1.buf_size = 0;\n-\tfor (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)\n-\t\ttxd[desc_to_clean_to].qw1.flex.raw[i] = 0;\n-\n-\ttxq->last_desc_cleaned = desc_to_clean_to;\n-\ttxq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);\n-\n-\treturn 0;\n-}\n-\n-/* TX function */\n-uint16_t\n-idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t\t       uint16_t nb_pkts)\n-{\n-\tvolatile struct idpf_flex_tx_desc *txd;\n-\tvolatile struct idpf_flex_tx_desc *txr;\n-\tunion idpf_tx_offload tx_offload = {0};\n-\tstruct idpf_tx_entry *txe, *txn;\n-\tstruct idpf_tx_entry *sw_ring;\n-\tstruct idpf_tx_queue *txq;\n-\tstruct rte_mbuf *tx_pkt;\n-\tstruct rte_mbuf *m_seg;\n-\tuint64_t buf_dma_addr;\n-\tuint64_t ol_flags;\n-\tuint16_t tx_last;\n-\tuint16_t nb_used;\n-\tuint16_t nb_ctx;\n-\tuint16_t td_cmd;\n-\tuint16_t tx_id;\n-\tuint16_t nb_tx;\n-\tuint16_t slen;\n-\n-\tnb_tx = 0;\n-\ttxq = tx_queue;\n-\n-\tif (unlikely(txq == NULL) || unlikely(!txq->q_started))\n-\t\treturn nb_tx;\n-\n-\tsw_ring = txq->sw_ring;\n-\ttxr = txq->tx_ring;\n-\ttx_id = txq->tx_tail;\n-\ttxe = &sw_ring[tx_id];\n-\n-\t/* Check if the descriptor ring needs to be cleaned. */\n-\tif (txq->nb_free < txq->free_thresh)\n-\t\t(void)idpf_xmit_cleanup(txq);\n-\n-\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n-\t\ttd_cmd = 0;\n-\n-\t\ttx_pkt = *tx_pkts++;\n-\t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n-\n-\t\tol_flags = tx_pkt->ol_flags;\n-\t\ttx_offload.l2_len = tx_pkt->l2_len;\n-\t\ttx_offload.l3_len = tx_pkt->l3_len;\n-\t\ttx_offload.l4_len = tx_pkt->l4_len;\n-\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n-\t\t/* Calculate the number of context descriptors needed. 
*/\n-\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n-\n-\t\t/* The number of descriptors that must be allocated for\n-\t\t * a packet equals to the number of the segments of that\n-\t\t * packet plus 1 context descriptor if needed.\n-\t\t */\n-\t\tnb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);\n-\t\ttx_last = (uint16_t)(tx_id + nb_used - 1);\n-\n-\t\t/* Circular ring */\n-\t\tif (tx_last >= txq->nb_tx_desc)\n-\t\t\ttx_last = (uint16_t)(tx_last - txq->nb_tx_desc);\n-\n-\t\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u\"\n-\t\t\t   \" tx_first=%u tx_last=%u\",\n-\t\t\t   txq->port_id, txq->queue_id, tx_id, tx_last);\n-\n-\t\tif (nb_used > txq->nb_free) {\n-\t\t\tif (idpf_xmit_cleanup(txq) != 0) {\n-\t\t\t\tif (nb_tx == 0)\n-\t\t\t\t\treturn 0;\n-\t\t\t\tgoto end_of_tx;\n-\t\t\t}\n-\t\t\tif (unlikely(nb_used > txq->rs_thresh)) {\n-\t\t\t\twhile (nb_used > txq->nb_free) {\n-\t\t\t\t\tif (idpf_xmit_cleanup(txq) != 0) {\n-\t\t\t\t\t\tif (nb_tx == 0)\n-\t\t\t\t\t\t\treturn 0;\n-\t\t\t\t\t\tgoto end_of_tx;\n-\t\t\t\t\t}\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\n-\t\tif (nb_ctx != 0) {\n-\t\t\t/* Setup TX context descriptor if required */\n-\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_txd =\n-\t\t\t\t(volatile union idpf_flex_tx_ctx_desc *)\n-\t\t\t\t\t\t\t&txr[tx_id];\n-\n-\t\t\ttxn = &sw_ring[txe->next_id];\n-\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n-\t\t\tif (txe->mbuf != NULL) {\n-\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n-\t\t\t\ttxe->mbuf = NULL;\n-\t\t\t}\n-\n-\t\t\t/* TSO enabled */\n-\t\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n-\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n-\t\t\t\t\t\t\tctx_txd);\n-\n-\t\t\ttxe->last_id = tx_last;\n-\t\t\ttx_id = txe->next_id;\n-\t\t\ttxe = txn;\n-\t\t}\n-\n-\t\tm_seg = tx_pkt;\n-\t\tdo {\n-\t\t\ttxd = &txr[tx_id];\n-\t\t\ttxn = &sw_ring[txe->next_id];\n-\n-\t\t\tif (txe->mbuf != NULL)\n-\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n-\t\t\ttxe->mbuf = m_seg;\n-\n-\t\t\t/* Setup TX Descriptor */\n-\t\t\tslen = m_seg->data_len;\n-\t\t\tbuf_dma_addr = rte_mbuf_data_iova(m_seg);\n-\t\t\ttxd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);\n-\t\t\ttxd->qw1.buf_size = slen;\n-\t\t\ttxd->qw1.cmd_dtype = rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_DATA <<\n-\t\t\t\t\t\t\t      IDPF_FLEX_TXD_QW1_DTYPE_S);\n-\n-\t\t\ttxe->last_id = tx_last;\n-\t\t\ttx_id = txe->next_id;\n-\t\t\ttxe = txn;\n-\t\t\tm_seg = m_seg->next;\n-\t\t} while (m_seg);\n-\n-\t\t/* The last packet data descriptor needs End Of Packet (EOP) */\n-\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_EOP;\n-\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n-\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n-\n-\t\tif (txq->nb_used >= txq->rs_thresh) {\n-\t\t\tPMD_TX_LOG(DEBUG, \"Setting RS bit on TXD id=\"\n-\t\t\t\t   \"%4u (port=%d queue=%d)\",\n-\t\t\t\t   tx_last, txq->port_id, txq->queue_id);\n-\n-\t\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_RS;\n-\n-\t\t\t/* Update txq RS bit counters */\n-\t\t\ttxq->nb_used = 0;\n-\t\t}\n-\n-\t\tif (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)\n-\t\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;\n-\n-\t\ttxd->qw1.cmd_dtype |= rte_cpu_to_le_16(td_cmd << IDPF_FLEX_TXD_QW1_CMD_S);\n-\t}\n-\n-end_of_tx:\n-\trte_wmb();\n-\n-\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u tx_tail=%u nb_tx=%u\",\n-\t\t   txq->port_id, txq->queue_id, tx_id, nb_tx);\n-\n-\tIDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);\n-\ttxq->tx_tail = tx_id;\n-\n-\treturn nb_tx;\n-}\n-\n-/* TX prep functions */\n-uint16_t\n-idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t       uint16_t 
nb_pkts)\n-{\n-#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n-\tint ret;\n-#endif\n-\tint i;\n-\tuint64_t ol_flags;\n-\tstruct rte_mbuf *m;\n-\n-\tfor (i = 0; i < nb_pkts; i++) {\n-\t\tm = tx_pkts[i];\n-\t\tol_flags = m->ol_flags;\n-\n-\t\t/* Check condition for nb_segs > IDPF_TX_MAX_MTU_SEG. */\n-\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {\n-\t\t\tif (m->nb_segs > IDPF_TX_MAX_MTU_SEG) {\n-\t\t\t\trte_errno = EINVAL;\n-\t\t\t\treturn i;\n-\t\t\t}\n-\t\t} else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||\n-\t\t\t   (m->tso_segsz > IDPF_MAX_TSO_MSS) ||\n-\t\t\t   (m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {\n-\t\t\t/* MSS outside the range are considered malicious */\n-\t\t\trte_errno = EINVAL;\n-\t\t\treturn i;\n-\t\t}\n-\n-\t\tif ((ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) != 0) {\n-\t\t\trte_errno = ENOTSUP;\n-\t\t\treturn i;\n-\t\t}\n-\n-\t\tif (m->pkt_len < IDPF_MIN_FRAME_SIZE) {\n-\t\t\trte_errno = EINVAL;\n-\t\t\treturn i;\n-\t\t}\n-\n-#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n-\t\tret = rte_validate_tx_offload(m);\n-\t\tif (ret != 0) {\n-\t\t\trte_errno = -ret;\n-\t\t\treturn i;\n-\t\t}\n-#endif\n-\t}\n-\n-\treturn i;\n-}\n-\n static void __rte_cold\n release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)\n {\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex 4efbf10295..eab363c3e7 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -8,41 +8,6 @@\n #include <idpf_common_rxtx.h>\n #include \"idpf_ethdev.h\"\n \n-/* MTS */\n-#define GLTSYN_CMD_SYNC_0_0\t(PF_TIMESYNC_BASE + 0x0)\n-#define PF_GLTSYN_SHTIME_0_0\t(PF_TIMESYNC_BASE + 0x4)\n-#define PF_GLTSYN_SHTIME_L_0\t(PF_TIMESYNC_BASE + 0x8)\n-#define PF_GLTSYN_SHTIME_H_0\t(PF_TIMESYNC_BASE + 0xC)\n-#define GLTSYN_ART_L_0\t\t(PF_TIMESYNC_BASE + 0x10)\n-#define GLTSYN_ART_H_0\t\t(PF_TIMESYNC_BASE + 0x14)\n-#define PF_GLTSYN_SHTIME_0_1\t(PF_TIMESYNC_BASE + 0x24)\n-#define PF_GLTSYN_SHTIME_L_1\t(PF_TIMESYNC_BASE + 0x28)\n-#define PF_GLTSYN_SHTIME_H_1\t(PF_TIMESYNC_BASE + 0x2C)\n-#define PF_GLTSYN_SHTIME_0_2\t(PF_TIMESYNC_BASE + 0x44)\n-#define PF_GLTSYN_SHTIME_L_2\t(PF_TIMESYNC_BASE + 0x48)\n-#define PF_GLTSYN_SHTIME_H_2\t(PF_TIMESYNC_BASE + 0x4C)\n-#define PF_GLTSYN_SHTIME_0_3\t(PF_TIMESYNC_BASE + 0x64)\n-#define PF_GLTSYN_SHTIME_L_3\t(PF_TIMESYNC_BASE + 0x68)\n-#define PF_GLTSYN_SHTIME_H_3\t(PF_TIMESYNC_BASE + 0x6C)\n-\n-#define PF_TIMESYNC_BAR4_BASE\t0x0E400000\n-#define GLTSYN_ENA\t\t(PF_TIMESYNC_BAR4_BASE + 0x90)\n-#define GLTSYN_CMD\t\t(PF_TIMESYNC_BAR4_BASE + 0x94)\n-#define GLTSYC_TIME_L\t\t(PF_TIMESYNC_BAR4_BASE + 0x104)\n-#define GLTSYC_TIME_H\t\t(PF_TIMESYNC_BAR4_BASE + 0x108)\n-\n-#define GLTSYN_CMD_SYNC_0_4\t(PF_TIMESYNC_BAR4_BASE + 0x110)\n-#define PF_GLTSYN_SHTIME_L_4\t(PF_TIMESYNC_BAR4_BASE + 0x118)\n-#define PF_GLTSYN_SHTIME_H_4\t(PF_TIMESYNC_BAR4_BASE + 0x11C)\n-#define GLTSYN_INCVAL_L\t\t(PF_TIMESYNC_BAR4_BASE + 0x150)\n-#define GLTSYN_INCVAL_H\t\t(PF_TIMESYNC_BAR4_BASE + 0x154)\n-#define GLTSYN_SHADJ_L\t\t(PF_TIMESYNC_BAR4_BASE + 0x158)\n-#define GLTSYN_SHADJ_H\t\t(PF_TIMESYNC_BAR4_BASE + 0x15C)\n-\n-#define GLTSYN_CMD_SYNC_0_5\t(PF_TIMESYNC_BAR4_BASE + 0x130)\n-#define PF_GLTSYN_SHTIME_L_5\t(PF_TIMESYNC_BAR4_BASE + 0x138)\n-#define PF_GLTSYN_SHTIME_H_5\t(PF_TIMESYNC_BAR4_BASE + 0x13C)\n-\n /* In QLEN must be whole number of 32 descriptors. 
*/\n #define IDPF_ALIGN_RING_DESC\t32\n #define IDPF_MIN_RING_DESC\t32\n@@ -62,44 +27,10 @@\n #define IDPF_DEFAULT_TX_RS_THRESH\t32\n #define IDPF_DEFAULT_TX_FREE_THRESH\t32\n \n-#define IDPF_TX_MAX_MTU_SEG\t10\n-\n-#define IDPF_MIN_TSO_MSS\t88\n-#define IDPF_MAX_TSO_MSS\t9728\n-#define IDPF_MAX_TSO_FRAME_SIZE\t262143\n-#define IDPF_TX_MAX_MTU_SEG     10\n-\n-#define IDPF_TX_CKSUM_OFFLOAD_MASK (\t\t\\\n-\t\tRTE_MBUF_F_TX_IP_CKSUM |\t\\\n-\t\tRTE_MBUF_F_TX_L4_MASK |\t\t\\\n-\t\tRTE_MBUF_F_TX_TCP_SEG)\n-\n-#define IDPF_TX_OFFLOAD_MASK (\t\t\t\\\n-\t\tIDPF_TX_CKSUM_OFFLOAD_MASK |\t\\\n-\t\tRTE_MBUF_F_TX_IPV4 |\t\t\\\n-\t\tRTE_MBUF_F_TX_IPV6)\n-\n-#define IDPF_TX_OFFLOAD_NOTSUP_MASK \\\n-\t\t(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)\n-\n-extern uint64_t idpf_timestamp_dynflag;\n-\n struct idpf_tx_vec_entry {\n \tstruct rte_mbuf *mbuf;\n };\n \n-/* Offload features */\n-union idpf_tx_offload {\n-\tuint64_t data;\n-\tstruct {\n-\t\tuint64_t l2_len:7; /* L2 (MAC) Header Length. */\n-\t\tuint64_t l3_len:9; /* L3 (IP) Header Length. */\n-\t\tuint64_t l4_len:8; /* L4 Header Length. */\n-\t\tuint64_t tso_segsz:16; /* TCP TSO segment size */\n-\t\t/* uint64_t unused : 24; */\n-\t};\n-};\n-\n int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_rxconf *rx_conf,\n@@ -118,77 +49,14 @@ int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n-uint16_t idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n-\t\t\t\tuint16_t nb_pkts);\n uint16_t idpf_singleq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t\t\t       uint16_t nb_pkts);\n-uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n-\t\t\t       uint16_t nb_pkts);\n-uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t\t\t\tuint16_t nb_pkts);\n uint16_t idpf_singleq_xmit_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\t       uint16_t nb_pkts);\n-uint16_t idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t\t\t       uint16_t nb_pkts);\n-uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t\t\tuint16_t nb_pkts);\n \n void idpf_stop_queues(struct rte_eth_dev *dev);\n \n void idpf_set_rx_function(struct rte_eth_dev *dev);\n void idpf_set_tx_function(struct rte_eth_dev *dev);\n \n-#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000\n-/* Helper function to convert a 32b nanoseconds timestamp to 64b. 
*/\n-static inline uint64_t\n-\n-idpf_tstamp_convert_32b_64b(struct idpf_adapter_ext *ad, uint32_t flag,\n-\t\t\t    uint32_t in_timestamp)\n-{\n-#ifdef RTE_ARCH_X86_64\n-\tstruct idpf_hw *hw = &ad->base.hw;\n-\tconst uint64_t mask = 0xFFFFFFFF;\n-\tuint32_t hi, lo, lo2, delta;\n-\tuint64_t ns;\n-\n-\tif (flag != 0) {\n-\t\tIDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);\n-\t\tIDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |\n-\t\t\t       PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);\n-\t\tlo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);\n-\t\thi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);\n-\t\t/*\n-\t\t * On typical system, the delta between lo and lo2 is ~1000ns,\n-\t\t * so 10000 seems a large-enough but not overly-big guard band.\n-\t\t */\n-\t\tif (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))\n-\t\t\tlo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);\n-\t\telse\n-\t\t\tlo2 = lo;\n-\n-\t\tif (lo2 < lo) {\n-\t\t\tlo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);\n-\t\t\thi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);\n-\t\t}\n-\n-\t\tad->time_hw = ((uint64_t)hi << 32) | lo;\n-\t}\n-\n-\tdelta = (in_timestamp - (uint32_t)(ad->time_hw & mask));\n-\tif (delta > (mask / 2)) {\n-\t\tdelta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);\n-\t\tns = ad->time_hw - delta;\n-\t} else {\n-\t\tns = ad->time_hw + delta;\n-\t}\n-\n-\treturn ns;\n-#else /* !RTE_ARCH_X86_64 */\n-\tRTE_SET_USED(ad);\n-\tRTE_SET_USED(flag);\n-\tRTE_SET_USED(in_timestamp);\n-\treturn 0;\n-#endif /* RTE_ARCH_X86_64 */\n-}\n-\n #endif /* _IDPF_RXTX_H_ */\ndiff --git a/drivers/net/idpf/idpf_rxtx_vec_avx512.c b/drivers/net/idpf/idpf_rxtx_vec_avx512.c\nindex 71a6c59823..19047d23c1 100644\n--- a/drivers/net/idpf/idpf_rxtx_vec_avx512.c\n+++ b/drivers/net/idpf/idpf_rxtx_vec_avx512.c\n@@ -38,8 +38,7 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)\n \t\t\t\t\t\tdma_addr0);\n \t\t\t}\n \t\t}\n-\t\trte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=\n-\t\t\tIDPF_RXQ_REARM_THRESH;\n+\t\trxq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;\n \t\treturn;\n \t}\n \tstruct rte_mbuf *mb0, *mb1, *mb2, *mb3;\n@@ -168,8 +167,7 @@ idpf_singleq_rearm(struct idpf_rx_queue *rxq)\n \t\t\t\t\t\t\t dma_addr0);\n \t\t\t\t}\n \t\t\t}\n-\t\t\trte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=\n-\t\t\t\t\tIDPF_RXQ_REARM_THRESH;\n+\t\t\trxq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;\n \t\t\treturn;\n \t\t}\n \t}\n",
    "prefixes": [
        "13/15"
    ]
}