get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
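
The endpoints above can be exercised with a short script — a minimal sketch using Python's requests library, not the canonical client. The token value is a placeholder, and write access assumes a Patchwork account with maintainer rights on the project:

    # Read is anonymous; PATCH/PUT require an API token
    # ("Authorization: Token ..."). The token below is a placeholder.
    import requests

    URL = "http://patches.dpdk.org/api/patches/114574/"
    headers = {"Authorization": "Token <your-api-token>"}

    # get: show the patch (no authentication needed)
    patch = requests.get(URL).json()
    print(patch["name"], patch["state"])

    # patch: partially update the patch, e.g. change its state
    resp = requests.patch(URL, headers=headers, json={"state": "superseded"})
    resp.raise_for_status()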

GET /api/patches/114574/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 114574,
    "url": "http://patches.dpdk.org/api/patches/114574/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220803113104.1184059-9-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220803113104.1184059-9-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220803113104.1184059-9-junfeng.guo@intel.com",
    "date": "2022-08-03T11:30:59",
    "name": "[08/13] net/idpf: add basic Rx/Tx datapath",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c1b57fcd3bd23836955581391bc4f33c3f59c44d",
    "submitter": {
        "id": 1785,
        "url": "http://patches.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220803113104.1184059-9-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 24188,
            "url": "http://patches.dpdk.org/api/series/24188/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24188",
            "date": "2022-08-03T11:30:51",
            "name": "add support for idpf PMD in DPDK",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/24188/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/114574/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/114574/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 50A47A00C5;\n\tWed,  3 Aug 2022 13:32:24 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1994642BD0;\n\tWed,  3 Aug 2022 13:31:36 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id A8B2442BCD\n for <dev@dpdk.org>; Wed,  3 Aug 2022 13:31:33 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 03 Aug 2022 04:31:33 -0700",
            "from dpdk-jf-ntb-v2.sh.intel.com ([10.67.118.246])\n by FMSMGA003.fm.intel.com with ESMTP; 03 Aug 2022 04:31:31 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1659526293; x=1691062293;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=EDyjwYquzYxieXWvuJvvQ4hw59/Lw+n1bF3PPKKqXvc=;\n b=DW+k0xttioZXoJm++rvh7MmD8KjVoPY4RizAesjx3/c+UnRKqPiB/5SX\n +GLOLvLlMIRzxiMBFpksVwjSjr+9uQuny/02zkyqMWigyiMIpcKTXoT9h\n mAgOvemwXDm3ylZxuSs0smPumoKechgFMitW1V5QSMkH1KP1iQzUfO9/m\n mvsdl/d4mgrJk3oNNwm1lkMlnqCja4yWPgS5pSs65YvKYgNrtCOQDesoB\n R12roP8VlrDvR9cYjILL4k6U4ssR7Gref7g1rfi618TSEu3BKrB4DIl/y\n FUiMHtZmdjIjfKre7VKcABb7OnFq2zUQwDeQxNAk3MsUcikgi0KruinQ+ A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6400,9594,10427\"; a=\"375948527\"",
            "E=Sophos;i=\"5.93,214,1654585200\"; d=\"scan'208\";a=\"375948527\"",
            "E=Sophos;i=\"5.93,214,1654585200\"; d=\"scan'208\";a=\"692211088\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "qi.z.zhang@intel.com,\n\tjingjing.wu@intel.com,\n\tbeilei.xing@intel.com",
        "Cc": "dev@dpdk.org,\n\tjunfeng.guo@intel.com,\n\tXiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH 08/13] net/idpf: add basic Rx/Tx datapath",
        "Date": "Wed,  3 Aug 2022 19:30:59 +0800",
        "Message-Id": "<20220803113104.1184059-9-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20220803113104.1184059-1-junfeng.guo@intel.com>",
        "References": "<20220803113104.1184059-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add basic RX & TX support in split queue mode and single queue mode.\nSplit queue mode is selected by default.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c |  69 ++-\n drivers/net/idpf/idpf_rxtx.c   | 896 +++++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_rxtx.h   |  35 ++\n 3 files changed, 998 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex 668e488843..55ec24872e 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -14,12 +14,16 @@\n #include \"idpf_ethdev.h\"\n #include \"idpf_rxtx.h\"\n \n+#define IDPF_TX_SINGLE_Q\t\"tx_single\"\n+#define IDPF_RX_SINGLE_Q\t\"rx_single\"\n #define REPRESENTOR\t\t\"representor\"\n \n struct idpf_adapter *adapter;\n uint16_t used_vecs_num;\n \n static const char * const idpf_valid_args[] = {\n+\tIDPF_TX_SINGLE_Q,\n+\tIDPF_RX_SINGLE_Q,\n \tREPRESENTOR,\n \tNULL\n };\n@@ -157,6 +161,30 @@ idpf_init_vport_req_info(__rte_unused struct rte_eth_dev *dev)\n \t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n \n \tvport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);\n+\tif (!adapter->txq_model) {\n+\t\tvport_info->txq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\t\tvport_info->num_tx_q = dev->data->nb_tx_queues;\n+\t\tvport_info->num_tx_complq =\n+\t\t\tdev->data->nb_tx_queues * IDPF_TX_COMPLQ_PER_GRP;\n+\t} else {\n+\t\tvport_info->txq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\t\tvport_info->num_tx_q = dev->data->nb_tx_queues;\n+\t\tvport_info->num_tx_complq = 0;\n+\t}\n+\tif (!adapter->rxq_model) {\n+\t\tvport_info->rxq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\t\tvport_info->num_rx_q = dev->data->nb_rx_queues;\n+\t\tvport_info->num_rx_bufq =\n+\t\t\tdev->data->nb_rx_queues * IDPF_RX_BUFQ_PER_GRP;\n+\t} else {\n+\t\tvport_info->rxq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\t\tvport_info->num_rx_q = dev->data->nb_rx_queues;\n+\t\tvport_info->num_rx_bufq = 0;\n+\t}\n \n \treturn 0;\n }\n@@ -344,6 +372,9 @@ idpf_dev_start(struct rte_eth_dev *dev)\n \t\tgoto err_mtu;\n \t}\n \n+\tidpf_set_rx_function(dev);\n+\tidpf_set_tx_function(dev);\n+\n \tif (idpf_ena_dis_vport(vport, true)) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to enable vport\");\n \t\tgoto err_vport;\n@@ -394,11 +425,31 @@ idpf_dev_close(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+static int\n+parse_bool(const char *key, const char *value, void *args)\n+{\n+\tint *i = (int *)args;\n+\tchar *end;\n+\tint num;\n+\n+\tnum = strtoul(value, &end, 10);\n+\n+\tif (num != 0 && num != 1) {\n+\t\tPMD_DRV_LOG(WARNING, \"invalid value:\\\"%s\\\" for key:\\\"%s\\\", \"\n+\t\t\t\"value must be 0 or 1\",\n+\t\t\tvalue, key);\n+\t\treturn -1;\n+\t}\n+\n+\t*i = num;\n+\treturn 0;\n+}\n+\n static int idpf_parse_devargs(struct rte_eth_dev *dev)\n {\n \tstruct rte_devargs *devargs = dev->device->devargs;\n \tstruct rte_kvargs *kvlist;\n-\tint ret = 0;\n+\tint ret;\n \n \tif (!devargs)\n \t\treturn 0;\n@@ -409,6 +460,17 @@ static int idpf_parse_devargs(struct rte_eth_dev *dev)\n \t\treturn -EINVAL;\n \t}\n \n+\tret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,\n+\t\t\t\t &adapter->txq_model);\n+\tif (ret)\n+\t\tgoto bail;\n+\n+\tret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,\n+\t\t\t\t &adapter->rxq_model);\n+\tif (ret)\n+\t\tgoto bail;\n+\n+bail:\n \trte_kvargs_free(kvlist);\n \treturn ret;\n }\n@@ -637,8 +699,11 @@ idpf_dev_init(struct rte_eth_dev *dev, __rte_unused void *init_params)\n \t/* for secondary processes, we don't initialise any further as primary\n \t * has already done this work.\n \t */\n-\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY) {\n+\t\tidpf_set_rx_function(dev);\n+\t\tidpf_set_tx_function(dev);\n 
\t\treturn ret;\n+\t}\n \n \tret = idpf_adapter_init(dev);\n \tif (ret) {\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex eae762ced3..e6040cece1 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -1312,4 +1312,900 @@ idpf_stop_queues(struct rte_eth_dev *dev)\n \t}\n }\n \n+#define IDPF_RX_ERR0_QW1\t\t\t\t\t\\\n+\t(BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |\t\\\n+\t BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |\t\\\n+\t BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |\t\\\n+\t BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))\n+\n+static inline uint64_t\n+idpf_splitq_rx_csum_offload(uint8_t err)\n+{\n+\tuint64_t flags = 0;\n+\n+\tif (unlikely(!(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S))))\n+\t\treturn flags;\n+\n+\tif (likely((err & IDPF_RX_ERR0_QW1) == 0)) {\n+\t\tflags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |\n+\t\t\t  RTE_MBUF_F_RX_L4_CKSUM_GOOD);\n+\t\treturn flags;\n+\t}\n+\n+\tif (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)))\n+\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;\n+\n+\tif (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)))\n+\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;\n+\n+\tif (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)))\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;\n+\n+\tif (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)))\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;\n+\n+\treturn flags;\n+}\n+\n+#define IDPF_RX_FLEX_DESC_HASH1_S\t0\n+#define IDPF_RX_FLEX_DESC_HASH2_S\t16\n+#define IDPF_RX_FLEX_DESC_HASH3_S\t24\n+\n+static inline uint64_t\n+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,\n+\t\t\t   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)\n+{\n+\tuint8_t status_err0_qw0;\n+\tuint64_t flags = 0;\n+\n+\tstatus_err0_qw0 = rx_desc->status_err0_qw0;\n+\n+\tif (status_err0_qw0 & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) {\n+\t\tflags |= RTE_MBUF_F_RX_RSS_HASH;\n+\t\tmb->hash.rss = rte_le_to_cpu_16(rx_desc->hash1) |\n+\t\t\t((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<\n+\t\t\t IDPF_RX_FLEX_DESC_HASH2_S) |\n+\t\t\t((uint32_t)(rx_desc->hash3) <<\n+\t\t\t IDPF_RX_FLEX_DESC_HASH3_S);\n+\t}\n+\n+\treturn flags;\n+}\n+\n+static void\n+idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)\n+{\n+\tvolatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;\n+\tvolatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;\n+\tuint16_t nb_refill = rx_bufq->nb_rx_hold;\n+\tuint16_t nb_desc = rx_bufq->nb_rx_desc;\n+\tuint16_t next_avail = rx_bufq->rx_tail;\n+\tstruct rte_mbuf *nmb[nb_refill];\n+\tstruct rte_eth_dev *dev;\n+\tuint64_t dma_addr;\n+\tuint16_t delta;\n+\n+\tif (nb_refill <= rx_bufq->rx_free_thresh)\n+\t\treturn;\n+\n+\tif (nb_refill >= nb_desc)\n+\t\tnb_refill = nb_desc - 1;\n+\n+\trx_buf_ring =\n+\t       (volatile struct virtchnl2_splitq_rx_buf_desc *)rx_bufq->rx_ring;\n+\tdelta = nb_desc - next_avail;\n+\tif (delta < nb_refill) {\n+\t\tif (likely(!rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta))) {\n+\t\t\tfor (int i = 0; i < delta; i++) {\n+\t\t\t\trx_buf_desc = &rx_buf_ring[next_avail + i];\n+\t\t\t\trx_bufq->sw_ring[next_avail + i] = nmb[i];\n+\t\t\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));\n+\t\t\t\trx_buf_desc->hdr_addr = 0;\n+\t\t\t\trx_buf_desc->pkt_addr = 
dma_addr;\n+\t\t\t}\n+\t\t\tnb_refill -= delta;\n+\t\t\tnext_avail = 0;\n+\t\t\trx_bufq->nb_rx_hold -= delta;\n+\t\t} else {\n+\t\t\tdev = &rte_eth_devices[rx_bufq->port_id];\n+\t\t\tdev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;\n+\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u queue_id=%u\",\n+\t\t\t\t   rx_bufq->port_id, rx_bufq->queue_id);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+\n+\tif (nb_desc - next_avail >= nb_refill) {\n+\t\tif (likely(!rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill))) {\n+\t\t\tfor (int i = 0; i < nb_refill; i++) {\n+\t\t\t\trx_buf_desc = &rx_buf_ring[next_avail + i];\n+\t\t\t\trx_bufq->sw_ring[next_avail + i] = nmb[i];\n+\t\t\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));\n+\t\t\t\trx_buf_desc->hdr_addr = 0;\n+\t\t\t\trx_buf_desc->pkt_addr = dma_addr;\n+\t\t\t}\n+\t\t\tnext_avail += nb_refill;\n+\t\t\trx_bufq->nb_rx_hold -= nb_refill;\n+\t\t} else {\n+\t\t\tdev = &rte_eth_devices[rx_bufq->port_id];\n+\t\t\tdev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;\n+\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u queue_id=%u\",\n+\t\t\t\t   rx_bufq->port_id, rx_bufq->queue_id);\n+\t\t}\n+\t}\n+\n+\tIECM_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);\n+\n+\trx_bufq->rx_tail = next_avail;\n+}\n+\n+uint16_t\n+idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t      uint16_t nb_pkts)\n+{\n+\tvolatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;\n+\tvolatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;\n+\tuint16_t pktlen_gen_bufq_id;\n+\tstruct idpf_rx_queue *rxq;\n+\tconst uint32_t *ptype_tbl;\n+\tuint8_t status_err0_qw1;\n+\tstruct rte_mbuf *rxm;\n+\tuint16_t rx_id_bufq1;\n+\tuint16_t rx_id_bufq2;\n+\tuint64_t pkt_flags;\n+\tuint16_t pkt_len;\n+\tuint16_t bufq_id;\n+\tuint16_t gen_id;\n+\tuint16_t rx_id;\n+\tuint16_t nb_rx;\n+\n+\tnb_rx = 0;\n+\trxq = (struct idpf_rx_queue *)rx_queue;\n+\n+\tif (unlikely(!rxq) || unlikely(!rxq->q_started))\n+\t\treturn nb_rx;\n+\n+\trx_id = rxq->rx_tail;\n+\trx_id_bufq1 = rxq->bufq1->rx_next_avail;\n+\trx_id_bufq2 = rxq->bufq2->rx_next_avail;\n+\trx_desc_ring =\n+\t       (volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *)rxq->rx_ring;\n+\tptype_tbl = rxq->adapter->ptype_tbl;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\trx_desc = &rx_desc_ring[rx_id];\n+\n+\t\tpktlen_gen_bufq_id =\n+\t\t\trte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);\n+\t\tgen_id = (pktlen_gen_bufq_id &\n+\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>\n+\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;\n+\t\tif (gen_id != rxq->expected_gen_id)\n+\t\t\tbreak;\n+\n+\t\tpkt_len = (pktlen_gen_bufq_id &\n+\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>\n+\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;\n+\t\tif (!pkt_len)\n+\t\t\tPMD_RX_LOG(ERR, \"Packet length is 0\");\n+\n+\t\trx_id++;\n+\t\tif (unlikely(rx_id == rxq->nb_rx_desc)) {\n+\t\t\trx_id = 0;\n+\t\t\trxq->expected_gen_id ^= 1;\n+\t\t}\n+\n+\t\tbufq_id = (pktlen_gen_bufq_id &\n+\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>\n+\t\t\tVIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;\n+\t\tif (!bufq_id) {\n+\t\t\trxm = rxq->bufq1->sw_ring[rx_id_bufq1];\n+\t\t\trx_id_bufq1++;\n+\t\t\tif (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))\n+\t\t\t\trx_id_bufq1 = 0;\n+\t\t\trxq->bufq1->nb_rx_hold++;\n+\t\t} else {\n+\t\t\trxm = rxq->bufq2->sw_ring[rx_id_bufq2];\n+\t\t\trx_id_bufq2++;\n+\t\t\tif (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))\n+\t\t\t\trx_id_bufq2 = 0;\n+\t\t\trxq->bufq2->nb_rx_hold++;\n+\t\t}\n+\n+\t\tpkt_len -= rxq->crc_len;\n+\t\trxm->pkt_len 
= pkt_len;\n+\t\trxm->data_len = pkt_len;\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trxm->next = NULL;\n+\t\trxm->nb_segs = 1;\n+\t\trxm->port = rxq->port_id;\n+\t\trxm->ol_flags = 0;\n+\t\trxm->packet_type =\n+\t\t\tptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &\n+\t\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>\n+\t\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];\n+\n+\t\tstatus_err0_qw1 = rx_desc->status_err0_qw1;\n+\t\tpkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);\n+\t\tpkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);\n+\t\trxm->ol_flags |= pkt_flags;\n+\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\n+\tif (nb_rx) {\n+\t\trxq->rx_tail = rx_id;\n+\t\tif (rx_id_bufq1 != rxq->bufq1->rx_next_avail)\n+\t\t\trxq->bufq1->rx_next_avail = rx_id_bufq1;\n+\t\tif (rx_id_bufq2 != rxq->bufq2->rx_next_avail)\n+\t\t\trxq->bufq2->rx_next_avail = rx_id_bufq2;\n+\n+\t\tidpf_split_rx_bufq_refill(rxq->bufq1);\n+\t\tidpf_split_rx_bufq_refill(rxq->bufq2);\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n+static inline void\n+idpf_split_tx_free(struct idpf_tx_queue *cq)\n+{\n+\tvolatile struct iecm_splitq_tx_compl_desc *compl_ring = cq->compl_ring;\n+\tvolatile struct iecm_splitq_tx_compl_desc *txd;\n+\tuint16_t next = cq->tx_tail;\n+\tstruct idpf_tx_entry *txe;\n+\tstruct idpf_tx_queue *txq;\n+\tuint16_t gen, qid, q_head;\n+\tuint8_t ctype;\n+\n+\ttxd = &compl_ring[next];\n+\tgen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n+\t\tIECM_TXD_COMPLQ_GEN_M) >> IECM_TXD_COMPLQ_GEN_S;\n+\tif (gen != cq->expected_gen_id)\n+\t\treturn;\n+\n+\tctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n+\t\tIECM_TXD_COMPLQ_COMPL_TYPE_M) >> IECM_TXD_COMPLQ_COMPL_TYPE_S;\n+\tqid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &\n+\t\tIECM_TXD_COMPLQ_QID_M) >> IECM_TXD_COMPLQ_QID_S;\n+\tq_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);\n+\ttxq = cq->txqs[qid - cq->tx_start_qid];\n+\n+\tswitch (ctype) {\n+\tcase IECM_TXD_COMPLT_RE:\n+\t\tif (q_head == 0)\n+\t\t\ttxq->last_desc_cleaned = txq->nb_tx_desc - 1;\n+\t\telse\n+\t\t\ttxq->last_desc_cleaned = q_head - 1;\n+\t\tif (unlikely(!(txq->last_desc_cleaned % 32))) {\n+\t\t\tPMD_DRV_LOG(ERR, \"unexpected desc (head = %u) completion.\",\n+\t\t\t\t\t\tq_head);\n+\t\t\treturn;\n+\t\t}\n+\n+\t\tbreak;\n+\tcase IECM_TXD_COMPLT_RS:\n+\t\ttxq->nb_free++;\n+\t\ttxq->nb_used--;\n+\t\ttxe = &txq->sw_ring[q_head];\n+\t\tif (txe->mbuf) {\n+\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\ttxe->mbuf = NULL;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_DRV_LOG(ERR, \"unknown completion type.\");\n+\t\treturn;\n+\t}\n+\n+\tif (++next == cq->nb_tx_desc) {\n+\t\tnext = 0;\n+\t\tcq->expected_gen_id ^= 1;\n+\t}\n+\n+\tcq->tx_tail = next;\n+}\n+\n+/* Check if the context descriptor is needed for TX offloading */\n+static inline uint16_t\n+idpf_calc_context_desc(uint64_t flags)\n+{\n+\tif (flags & RTE_MBUF_F_TX_TCP_SEG)\n+\t\treturn 1;\n+\n+\treturn 0;\n+}\n+\n+/* set TSO context descriptor\n+ */\n+static inline void\n+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,\n+\t\t\tunion idpf_tx_offload tx_offload,\n+\t\t\tvolatile union iecm_flex_tx_ctx_desc *ctx_desc)\n+{\n+\tuint16_t cmd_dtype;\n+\tuint32_t tso_len;\n+\tuint8_t hdr_len;\n+\n+\tif (!tx_offload.l4_len) {\n+\t\tPMD_TX_LOG(DEBUG, \"L4 length set to 0\");\n+\t\treturn;\n+\t}\n+\n+\thdr_len = tx_offload.l2_len +\n+\t\ttx_offload.l3_len +\n+\t\ttx_offload.l4_len;\n+\tcmd_dtype = IECM_TX_DESC_DTYPE_FLEX_TSO_CTX |\n+\t\tIECM_TX_FLEX_CTX_DESC_CMD_TSO;\n+\ttso_len = mbuf->pkt_len - hdr_len;\n+\n+\tctx_desc->tso.qw1.cmd_dtype = 
rte_cpu_to_le_16(cmd_dtype);\n+\tctx_desc->tso.qw0.hdr_len = hdr_len;\n+\tctx_desc->tso.qw0.mss_rt =\n+\t\trte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &\n+\t\t\t\t IECM_TXD_FLEX_CTX_MSS_RT_M);\n+\tctx_desc->tso.qw0.flex_tlen =\n+\t\trte_cpu_to_le_32(tso_len &\n+\t\t\t\t IECM_TXD_FLEX_CTX_MSS_RT_M);\n+}\n+\n+uint16_t\n+idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t      uint16_t nb_pkts)\n+{\n+\tstruct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;\n+\tvolatile struct iecm_flex_tx_sched_desc *txr;\n+\tvolatile struct iecm_flex_tx_sched_desc *txd;\n+\tstruct idpf_tx_entry *sw_ring;\n+\tunion idpf_tx_offload tx_offload = {0};\n+\tstruct idpf_tx_entry *txe, *txn;\n+\tuint16_t nb_used, tx_id, sw_id;\n+\tstruct rte_mbuf *tx_pkt;\n+\tuint16_t nb_to_clean;\n+\tuint16_t nb_tx = 0;\n+\tuint64_t ol_flags;\n+\tuint16_t nb_ctx;\n+\n+\tif (unlikely(!txq) || unlikely(!txq->q_started))\n+\t\treturn nb_tx;\n+\n+\ttxr = txq->desc_ring;\n+\tsw_ring = txq->sw_ring;\n+\ttx_id = txq->tx_tail;\n+\tsw_id = txq->sw_tail;\n+\ttxe = &sw_ring[sw_id];\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\ttx_pkt = tx_pkts[nb_tx];\n+\n+\t\tif (txq->nb_free <= txq->free_thresh) {\n+\t\t\t/* TODO: Need to refine\n+\t\t\t * 1. free and clean: Better to decide a clean destination instead of\n+\t\t\t * loop times. And don't free mbuf when RS got immediately, free when\n+\t\t\t * transmit or according to the clean destination.\n+\t\t\t * Now, just ingnore the RE write back, free mbuf when get RS\n+\t\t\t * 2. out-of-order rewrite back haven't be supported, SW head and HW head\n+\t\t\t * need to be separated.\n+\t\t\t **/\n+\t\t\tnb_to_clean = 2 * txq->rs_thresh;\n+\t\t\twhile (nb_to_clean--)\n+\t\t\t\tidpf_split_tx_free(txq->complq);\n+\t\t}\n+\n+\t\tif (txq->nb_free < tx_pkt->nb_segs)\n+\t\t\tbreak;\n+\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t/* Calculate the number of context descriptors needed. 
*/\n+\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n+\t\tnb_used = tx_pkt->nb_segs + nb_ctx;\n+\n+\t\t/* context descriptor */\n+\t\tif (nb_ctx) {\n+\t\t\tvolatile union iecm_flex_tx_ctx_desc *ctx_desc =\n+\t\t\t(volatile union iecm_flex_tx_ctx_desc *)&txr[tx_id];\n+\n+\t\t\tif (ol_flags & RTE_MBUF_F_TX_TCP_SEG)\n+\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n+\t\t\t\t\t\t\tctx_desc);\n+\n+\t\t\ttx_id++;\n+\t\t\tif (tx_id == txq->nb_tx_desc)\n+\t\t\t\ttx_id = 0;\n+\t\t}\n+\n+\t\tdo {\n+\t\t\ttxd = &txr[tx_id];\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\ttxe->mbuf = tx_pkt;\n+\n+\t\t\t/* Setup TX descriptor */\n+\t\t\ttxd->buf_addr =\n+\t\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));\n+\t\t\ttxd->qw1.cmd_dtype =\n+\t\t\t\trte_cpu_to_le_16(IECM_TX_DESC_DTYPE_FLEX_FLOW_SCHE);\n+\t\t\ttxd->qw1.rxr_bufsize = tx_pkt->data_len;\n+\t\t\ttxd->qw1.compl_tag = sw_id;\n+\t\t\ttx_id++;\n+\t\t\tif (tx_id == txq->nb_tx_desc)\n+\t\t\t\ttx_id = 0;\n+\t\t\tsw_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t\ttx_pkt = tx_pkt->next;\n+\t\t} while (tx_pkt);\n+\n+\t\t/* fill the last descriptor with End of Packet (EOP) bit */\n+\t\ttxd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_EOP;\n+\n+\t\tif (unlikely(!(tx_id % 32)))\n+\t\t\ttxd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_RE;\n+\t\tif (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)\n+\t\t\ttxd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_CS_EN;\n+\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n+\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n+\t}\n+\n+\t/* update the tail pointer if any packets were processed */\n+\tif (likely(nb_tx)) {\n+\t\tIECM_PCI_REG_WRITE(txq->qtx_tail, tx_id);\n+\t\ttxq->tx_tail = tx_id;\n+\t\ttxq->sw_tail = sw_id;\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n+static inline void\n+idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,\n+\t\t    uint16_t rx_id)\n+{\n+\tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n+\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(DEBUG,\n+\t\t\t   \"port_id=%u queue_id=%u rx_tail=%u nb_hold=%u\",\n+\t\t\t   rxq->port_id, rxq->queue_id, rx_id, nb_hold);\n+\t\trx_id = (uint16_t)((rx_id == 0) ?\n+\t\t\t(rxq->nb_rx_desc - 1) : (rx_id - 1));\n+\t\tIECM_PCI_REG_WRITE(rxq->qrx_tail, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+}\n+\n+uint16_t\n+idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t       uint16_t nb_pkts)\n+{\n+\tvolatile union virtchnl2_rx_desc *rx_ring;\n+\tvolatile union virtchnl2_rx_desc *rxdp;\n+\tstruct idpf_rx_queue *rxq;\n+\tconst uint32_t *ptype_tbl;\n+\tuint16_t rx_id, nb_hold;\n+\tstruct rte_eth_dev *dev;\n+\tuint16_t rx_packet_len;\n+\tstruct rte_mbuf *rxe;\n+\tstruct rte_mbuf *rxm;\n+\tstruct rte_mbuf *nmb;\n+\tuint16_t rx_status0;\n+\tuint64_t dma_addr;\n+\tuint16_t nb_rx;\n+\n+\tnb_rx = 0;\n+\tnb_hold = 0;\n+\trxq = rx_queue;\n+\n+\tif (unlikely(!rxq) || unlikely(!rxq->q_started))\n+\t\treturn nb_rx;\n+\n+\trx_id = rxq->rx_tail;\n+\trx_ring = rxq->rx_ring;\n+\tptype_tbl = rxq->adapter->ptype_tbl;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\trxdp = &rx_ring[rx_id];\n+\t\trx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);\n+\n+\t\t/* Check the DD bit first */\n+\t\tif (!(rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S)))\n+\t\t\tbreak;\n+\n+\t\trx_packet_len = (rte_cpu_to_le_16(rxdp->flex_nic_wb.pkt_len)) -\n+\t\t\t\trxq->crc_len;\n+\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!nmb)) {\n+\t\t\tdev = 
&rte_eth_devices[rxq->port_id];\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u \"\n+\t\t\t\t   \"queue_id=%u\", rxq->port_id, rxq->queue_id);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tnb_hold++;\n+\t\trxe = rxq->sw_ring[rx_id];\n+\t\trxq->sw_ring[rx_id] = nmb;\n+\t\trx_id++;\n+\t\tif (unlikely(rx_id == rxq->nb_rx_desc))\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf */\n+\t\trte_prefetch0(rxq->sw_ring[rx_id]);\n+\n+\t\t/* When next RX descriptor is on a cache line boundary,\n+\t\t * prefetch the next 4 RX descriptors and next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_prefetch0(&rx_ring[rx_id]);\n+\t\t\trte_prefetch0(rxq->sw_ring[rx_id]);\n+\t\t}\n+\t\trxm = rxe;\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr = dma_addr;\n+\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));\n+\t\trxm->nb_segs = 1;\n+\t\trxm->next = NULL;\n+\t\trxm->pkt_len = rx_packet_len;\n+\t\trxm->data_len = rx_packet_len;\n+\t\trxm->port = rxq->port_id;\n+\t\trxm->ol_flags = 0;\n+\t\trxm->packet_type =\n+\t\t\tptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxdp->flex_nic_wb.ptype_flex_flags0) &\n+\t\t\t\tVIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];\n+\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\n+\tidpf_update_rx_tail(rxq, nb_hold, rx_id);\n+\n+\treturn nb_rx;\n+}\n+\n+static inline int\n+idpf_xmit_cleanup(struct idpf_tx_queue *txq)\n+{\n+\tuint16_t last_desc_cleaned = txq->last_desc_cleaned;\n+\tstruct idpf_tx_entry *sw_ring = txq->sw_ring;\n+\tuint16_t nb_tx_desc = txq->nb_tx_desc;\n+\tuint16_t desc_to_clean_to;\n+\tuint16_t nb_tx_to_clean;\n+\n+\tvolatile struct iecm_base_tx_desc *txd = txq->tx_ring;\n+\n+\tdesc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);\n+\tif (desc_to_clean_to >= nb_tx_desc)\n+\t\tdesc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);\n+\n+\tdesc_to_clean_to = sw_ring[desc_to_clean_to].last_id;\n+\tif ((txd[desc_to_clean_to].qw1 &\n+\t\t\trte_cpu_to_le_64(IECM_TXD_QW1_DTYPE_M)) !=\n+\t\t\trte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DESC_DONE)) {\n+\t\tPMD_TX_LOG(DEBUG, \"TX descriptor %4u is not done \"\n+\t\t\t   \"(port=%d queue=%d)\", desc_to_clean_to,\n+\t\t\t   txq->port_id, txq->queue_id);\n+\t\treturn -1;\n+\t}\n+\n+\tif (last_desc_cleaned > desc_to_clean_to)\n+\t\tnb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +\n+\t\t\t\t\t    desc_to_clean_to);\n+\telse\n+\t\tnb_tx_to_clean = (uint16_t)(desc_to_clean_to -\n+\t\t\t\t\tlast_desc_cleaned);\n+\n+\ttxd[desc_to_clean_to].qw1 = 0;\n+\n+\ttxq->last_desc_cleaned = desc_to_clean_to;\n+\ttxq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);\n+\n+\treturn 0;\n+}\n+\n+/* set TSO context descriptor\n+ * support IP -> L4 and IP -> IP -> L4\n+ */\n+static inline uint64_t\n+idpf_set_tso_ctx(struct rte_mbuf *mbuf, union idpf_tx_offload tx_offload)\n+{\n+\tuint64_t ctx_desc = 0;\n+\tuint32_t cd_cmd, hdr_len, cd_tso_len;\n+\n+\tif (!tx_offload.l4_len) {\n+\t\tPMD_TX_LOG(DEBUG, \"L4 length set to 0\");\n+\t\treturn ctx_desc;\n+\t}\n+\n+\thdr_len = tx_offload.l2_len +\n+\t\t  tx_offload.l3_len +\n+\t\t  tx_offload.l4_len;\n+\n+\tcd_cmd = IECM_TX_CTX_DESC_TSO;\n+\tcd_tso_len = mbuf->pkt_len - hdr_len;\n+\tctx_desc |= ((uint64_t)cd_cmd << IECM_TXD_CTX_QW1_CMD_S) |\n+\t\t     ((uint64_t)cd_tso_len << IECM_TXD_CTX_QW1_TSO_LEN_S) |\n+\t\t     ((uint64_t)mbuf->tso_segsz << 
IECM_TXD_CTX_QW1_MSS_S);\n+\n+\treturn ctx_desc;\n+}\n+\n+/* Construct the tx flags */\n+static inline uint64_t\n+idpf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size)\n+{\n+\treturn rte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DATA |\n+\t\t\t\t((uint64_t)td_cmd  << IECM_TXD_QW1_CMD_S) |\n+\t\t\t\t((uint64_t)td_offset <<\n+\t\t\t\t IECM_TXD_QW1_OFFSET_S) |\n+\t\t\t\t((uint64_t)size  <<\n+\t\t\t\t IECM_TXD_QW1_TX_BUF_SZ_S));\n+}\n+\n+/* TX function */\n+uint16_t\n+idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t       uint16_t nb_pkts)\n+{\n+\tvolatile struct iecm_base_tx_desc *txd;\n+\tvolatile struct iecm_base_tx_desc *txr;\n+\tunion idpf_tx_offload tx_offload = {0};\n+\tstruct idpf_tx_entry *txe, *txn;\n+\tstruct idpf_tx_entry *sw_ring;\n+\tstruct idpf_tx_queue *txq;\n+\tstruct rte_mbuf *tx_pkt;\n+\tstruct rte_mbuf *m_seg;\n+\tuint64_t buf_dma_addr;\n+\tuint32_t td_offset;\n+\tuint64_t ol_flags;\n+\tuint16_t tx_last;\n+\tuint16_t nb_used;\n+\tuint16_t nb_ctx;\n+\tuint32_t td_cmd;\n+\tuint16_t tx_id;\n+\tuint16_t nb_tx;\n+\tuint16_t slen;\n+\n+\tnb_tx = 0;\n+\ttxq = tx_queue;\n+\n+\tif (unlikely(!txq) || unlikely(!txq->q_started))\n+\t\treturn nb_tx;\n+\n+\tsw_ring = txq->sw_ring;\n+\ttxr = txq->tx_ring;\n+\ttx_id = txq->tx_tail;\n+\ttxe = &sw_ring[tx_id];\n+\n+\t/* Check if the descriptor ring needs to be cleaned. */\n+\tif (txq->nb_free < txq->free_thresh)\n+\t\t(void)idpf_xmit_cleanup(txq);\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\ttd_cmd = 0;\n+\t\ttd_offset = 0;\n+\n+\t\ttx_pkt = *tx_pkts++;\n+\t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n+\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t/* Calculate the number of context descriptors needed. 
*/\n+\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n+\n+\t\t/* The number of descriptors that must be allocated for\n+\t\t * a packet equals to the number of the segments of that\n+\t\t * packet plus 1 context descriptor if needed.\n+\t\t */\n+\t\tnb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);\n+\t\ttx_last = (uint16_t)(tx_id + nb_used - 1);\n+\n+\t\t/* Circular ring */\n+\t\tif (tx_last >= txq->nb_tx_desc)\n+\t\t\ttx_last = (uint16_t)(tx_last - txq->nb_tx_desc);\n+\n+\t\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u\"\n+\t\t\t   \" tx_first=%u tx_last=%u\",\n+\t\t\t   txq->port_id, txq->queue_id, tx_id, tx_last);\n+\n+\t\tif (nb_used > txq->nb_free) {\n+\t\t\tif (idpf_xmit_cleanup(txq)) {\n+\t\t\t\tif (nb_tx == 0)\n+\t\t\t\t\treturn 0;\n+\t\t\t\tgoto end_of_tx;\n+\t\t\t}\n+\t\t\tif (unlikely(nb_used > txq->rs_thresh)) {\n+\t\t\t\twhile (nb_used > txq->nb_free) {\n+\t\t\t\t\tif (idpf_xmit_cleanup(txq)) {\n+\t\t\t\t\t\tif (nb_tx == 0)\n+\t\t\t\t\t\t\treturn 0;\n+\t\t\t\t\t\tgoto end_of_tx;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* According to datasheet, the bit2 is reserved and must be\n+\t\t * set to 1.\n+\t\t */\n+\t\ttd_cmd |= 0x04;\n+\n+\t\tif (nb_ctx) {\n+\t\t\t/* Setup TX context descriptor if required */\n+\t\t\tvolatile union iecm_flex_tx_ctx_desc *ctx_txd =\n+\t\t\t\t(volatile union iecm_flex_tx_ctx_desc *)\n+\t\t\t\t\t\t\t&txr[tx_id];\n+\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\t\t\tif (txe->mbuf) {\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\t\ttxe->mbuf = NULL;\n+\t\t\t}\n+\n+\t\t\t/* TSO enabled */\n+\t\t\tif (ol_flags & RTE_MBUF_F_TX_TCP_SEG)\n+\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n+\t\t\t\t\t\t\tctx_txd);\n+\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t}\n+\n+\t\tm_seg = tx_pkt;\n+\t\tdo {\n+\t\t\ttxd = &txr[tx_id];\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\n+\t\t\tif (txe->mbuf)\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\ttxe->mbuf = m_seg;\n+\n+\t\t\t/* Setup TX Descriptor */\n+\t\t\tslen = m_seg->data_len;\n+\t\t\tbuf_dma_addr = rte_mbuf_data_iova(m_seg);\n+\t\t\ttxd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);\n+\t\t\ttxd->qw1 = idpf_build_ctob(td_cmd, td_offset, slen);\n+\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t\tm_seg = m_seg->next;\n+\t\t} while (m_seg);\n+\n+\t\t/* The last packet data descriptor needs End Of Packet (EOP) */\n+\t\ttd_cmd |= IECM_TX_DESC_CMD_EOP;\n+\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n+\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n+\n+\t\tif (txq->nb_used >= txq->rs_thresh) {\n+\t\t\tPMD_TX_LOG(DEBUG, \"Setting RS bit on TXD id=\"\n+\t\t\t\t   \"%4u (port=%d queue=%d)\",\n+\t\t\t\t   tx_last, txq->port_id, txq->queue_id);\n+\n+\t\t\ttd_cmd |= IECM_TX_DESC_CMD_RS;\n+\n+\t\t\t/* Update txq RS bit counters */\n+\t\t\ttxq->nb_used = 0;\n+\t\t}\n+\n+\t\ttxd->qw1 |=\n+\t\t\trte_cpu_to_le_64(((uint64_t)td_cmd) <<\n+\t\t\t\t\t IECM_TXD_QW1_CMD_S);\n+\t}\n+\n+end_of_tx:\n+\trte_wmb();\n+\n+\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u tx_tail=%u nb_tx=%u\",\n+\t\t   txq->port_id, txq->queue_id, tx_id, nb_tx);\n+\n+\tIECM_PCI_REG_WRITE(txq->qtx_tail, tx_id);\n+\ttxq->tx_tail = tx_id;\n+\n+\treturn nb_tx;\n+}\n+\n+/* TX prep functions */\n+uint16_t\n+idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t       uint16_t nb_pkts)\n+{\n+\tint i, ret;\n+\tuint64_t ol_flags;\n+\tstruct rte_mbuf *m;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tm = 
tx_pkts[i];\n+\t\tol_flags = m->ol_flags;\n+\n+\t\t/* Check condition for nb_segs > IDPF_TX_MAX_MTU_SEG. */\n+\t\tif (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {\n+\t\t\tif (m->nb_segs > IDPF_TX_MAX_MTU_SEG) {\n+\t\t\t\trte_errno = EINVAL;\n+\t\t\t\treturn i;\n+\t\t\t}\n+\t\t} else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||\n+\t\t\t   (m->tso_segsz > IDPF_MAX_TSO_MSS)) {\n+\t\t\t/* MSS outside the range are considered malicious */\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn i;\n+\t\t}\n+\n+\t\tif (ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn i;\n+\t\t}\n+\n+\t\tif (!m->pkt_len) {\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn i;\n+\t\t}\n+\n+#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n+\t\tret = rte_validate_tx_offload(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+#endif\n+\t\tret = rte_net_intel_cksum_prepare(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+\t}\n+\n+\treturn i;\n+}\n+\n+void\n+idpf_set_rx_function(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\tdev->rx_pkt_burst = idpf_splitq_recv_pkts;\n+\t\treturn;\n+\t} else {\n+\t\tdev->rx_pkt_burst = idpf_singleq_recv_pkts;\n+\t\treturn;\n+\t}\n+}\n+\n+void\n+idpf_set_tx_function(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\tdev->tx_pkt_burst = idpf_splitq_xmit_pkts;\n+\t\tdev->tx_pkt_prepare = idpf_prep_pkts;\n+\t\treturn;\n+\t} else {\n+\t\tdev->tx_pkt_burst = idpf_singleq_xmit_pkts;\n+\t\tdev->tx_pkt_prepare = idpf_prep_pkts;\n+\t\treturn;\n+\t}\n+}\n \ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex 5e57995f8c..3e79c0ac2a 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -42,6 +42,25 @@\n #define IDPF_TSO_MAX_SEG\tUINT8_MAX\n #define IDPF_TX_MAX_MTU_SEG     8\n \n+#define IDPF_TX_CKSUM_OFFLOAD_MASK (\t\t\\\n+\t\tRTE_MBUF_F_TX_IP_CKSUM |\t\\\n+\t\tRTE_MBUF_F_TX_L4_MASK |\t\t\\\n+\t\tRTE_MBUF_F_TX_TCP_SEG)\n+\n+#define IDPF_TX_OFFLOAD_MASK (\t\t\t\\\n+\t\tRTE_MBUF_F_TX_OUTER_IPV6 |\t\\\n+\t\tRTE_MBUF_F_TX_OUTER_IPV4 |\t\\\n+\t\tRTE_MBUF_F_TX_IPV6 |\t\t\\\n+\t\tRTE_MBUF_F_TX_IPV4 |\t\t\\\n+\t\tRTE_MBUF_F_TX_VLAN |\t\t\\\n+\t\tRTE_MBUF_F_TX_IP_CKSUM |\t\\\n+\t\tRTE_MBUF_F_TX_L4_MASK |\t\t\\\n+\t\tRTE_MBUF_F_TX_TCP_SEG |\t\t\\\n+\t\tRTE_ETH_TX_OFFLOAD_SECURITY)\n+\n+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \\\n+\t\t(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)\n+\n struct idpf_rx_queue {\n \tstruct idpf_adapter *adapter;\t/* the adapter this queue belongs to */\n \tstruct rte_mempool *mp;\t\t/* mbuf pool to populate Rx ring */\n@@ -176,8 +195,24 @@ int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n+uint16_t idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t\tuint16_t nb_pkts);\n+uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t       uint16_t nb_pkts);\n+uint16_t idpf_singleq_recv_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t\t       uint16_t nb_pkts);\n+uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t\tuint16_t nb_pkts);\n+uint16_t 
idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t       uint16_t nb_pkts);\n+uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\tuint16_t nb_pkts);\n+\n void idpf_stop_queues(struct rte_eth_dev *dev);\n \n+void idpf_set_rx_function(struct rte_eth_dev *dev);\n+void idpf_set_tx_function(struct rte_eth_dev *dev);\n+\n void idpf_set_default_ptype_table(struct rte_eth_dev *dev);\n const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);\n \n",
    "prefixes": [
        "08/13"
    ]
}
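
A consumer of this response would typically pick out a few of the fields above and follow the embedded URLs. A minimal sketch with Python's requests (the output filename is arbitrary; field names match the JSON shown above):

    import requests

    patch = requests.get(
        "http://patches.dpdk.org/api/patches/114574/",
        params={"format": "json"},
    ).json()

    # Metadata straight from the object above.
    print(patch["name"])                   # "[08/13] net/idpf: add basic Rx/Tx datapath"
    print(patch["state"], patch["check"])  # "superseded" "warning"

    # "mbox" points at a raw mbox suitable for `git am`; the entry in
    # "series" carries a series-level mbox for all 13 patches at once.
    mbox = requests.get(patch["mbox"]).text
    with open("idpf-rxtx.mbox", "w") as f:
        f.write(mbox)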