get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
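For reference, a minimal usage sketch (not part of the Patchwork output) showing how the GET endpoint above could be driven from Python with the standard "requests" library. The URL and the "name", "state" and "mbox" field names are taken from the JSON response reproduced below; the output filename and the use of format=json content negotiation are illustrative assumptions.

    # Sketch only: fetch this patch's metadata and raw mbox via the REST API.
    import requests

    API_URL = "http://patches.dpdk.org/api/patches/114570/"

    # GET: show a patch (returns the JSON document shown below)
    resp = requests.get(API_URL, params={"format": "json"})
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # e.g. "[04/13] net/idpf: add queue operations"
    print(patch["state"])  # e.g. "superseded"

    # The "mbox" field points at the raw patch, suitable for "git am".
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("idpf-queue-ops.patch", "wb") as f:  # filename is arbitrary
        f.write(mbox.content)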

GET /api/patches/114570/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 114570,
    "url": "http://patches.dpdk.org/api/patches/114570/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220803113104.1184059-5-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220803113104.1184059-5-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220803113104.1184059-5-junfeng.guo@intel.com",
    "date": "2022-08-03T11:30:55",
    "name": "[04/13] net/idpf: add queue operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "6fec546d023159ce9704d5fba23ca6524552319d",
    "submitter": {
        "id": 1785,
        "url": "http://patches.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220803113104.1184059-5-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 24188,
            "url": "http://patches.dpdk.org/api/series/24188/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24188",
            "date": "2022-08-03T11:30:51",
            "name": "add support for idpf PMD in DPDK",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/24188/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/114570/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/114570/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 24B7BA00C5;\n\tWed,  3 Aug 2022 13:31:55 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D025D42BB8;\n\tWed,  3 Aug 2022 13:31:29 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id 8C91342BA5\n for <dev@dpdk.org>; Wed,  3 Aug 2022 13:31:26 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 03 Aug 2022 04:31:26 -0700",
            "from dpdk-jf-ntb-v2.sh.intel.com ([10.67.118.246])\n by FMSMGA003.fm.intel.com with ESMTP; 03 Aug 2022 04:31:23 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1659526286; x=1691062286;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=9dtOetsT3t99jwav/qPq51qr8owYpKUUYjS2LAolh+s=;\n b=OXxZuolWk3pfvUrRl4Cccxkl/I8j18dsLJy5Ug0J8P1vP8R6UnmIiibR\n ZTFBPbHV2wO2pT5FjR9zNpS6T+d9bdySgRpGCM7HVQ8kJTlWHejbVlIxW\n ytMf1qvidSaEO4cskdJqP+TNAQxKzow2GH6di8rT2DFhgxTZtv96GAdx9\n Pgd5Y48tSEiJVEsc4RownhlhNCU/gO8Z6AD7nfRARJ8F1QiAochmL2p+e\n 5L6lOiAy/gTQXkJixQWJwG6SEq3BaLlAMJ75XK7CDvOQAESz77qShUccL\n JJ/rj+mR/UI28Qzj/KkdbfiYkBMe37yNOL4m1AyVljQdqnPgOl1KMUzU7 g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6400,9594,10427\"; a=\"375948508\"",
            "E=Sophos;i=\"5.93,214,1654585200\"; d=\"scan'208\";a=\"375948508\"",
            "E=Sophos;i=\"5.93,214,1654585200\"; d=\"scan'208\";a=\"692211056\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "qi.z.zhang@intel.com,\n\tjingjing.wu@intel.com,\n\tbeilei.xing@intel.com",
        "Cc": "dev@dpdk.org,\n\tjunfeng.guo@intel.com,\n\tXiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH 04/13] net/idpf: add queue operations",
        "Date": "Wed,  3 Aug 2022 19:30:55 +0800",
        "Message-Id": "<20220803113104.1184059-5-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20220803113104.1184059-1-junfeng.guo@intel.com>",
        "References": "<20220803113104.1184059-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for queue operations:\n\t- rx_queue_start\n\t- rx_queue_stop\n\t- tx_queue_start\n\t- tx_queue_stop\n\t- rx_queue_setup\n\t- rx_queue_release\n\t- tx_queue_setup\n\t- tx_queue_release\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c |   53 +-\n drivers/net/idpf/idpf_ethdev.h |    7 +\n drivers/net/idpf/idpf_rxtx.c   | 1264 ++++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_rxtx.h   |  182 +++++\n drivers/net/idpf/idpf_vchnl.c  |  503 +++++++++++++\n drivers/net/idpf/meson.build   |    1 +\n 6 files changed, 2009 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/idpf/idpf_rxtx.c\n create mode 100644 drivers/net/idpf/idpf_rxtx.h",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex 87c68226dd..b302e42a9c 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -12,6 +12,7 @@\n #include <rte_dev.h>\n \n #include \"idpf_ethdev.h\"\n+#include \"idpf_rxtx.h\"\n \n #define REPRESENTOR\t\t\"representor\"\n \n@@ -33,10 +34,18 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {\n \t.dev_start\t\t\t= idpf_dev_start,\n \t.dev_stop\t\t\t= idpf_dev_stop,\n \t.dev_close\t\t\t= idpf_dev_close,\n+\t.rx_queue_start\t\t\t= idpf_rx_queue_start,\n+\t.rx_queue_stop\t\t\t= idpf_rx_queue_stop,\n+\t.tx_queue_start\t\t\t= idpf_tx_queue_start,\n+\t.tx_queue_stop\t\t\t= idpf_tx_queue_stop,\n+\t.rx_queue_setup\t\t\t= idpf_rx_queue_setup,\n+\t.rx_queue_release\t\t= idpf_dev_rx_queue_release,\n+\t.tx_queue_setup\t\t\t= idpf_tx_queue_setup,\n+\t.tx_queue_release\t\t= idpf_dev_tx_queue_release,\n };\n \n static int\n-idpf_init_vport_req_info(struct rte_eth_dev *dev)\n+idpf_init_vport_req_info(__rte_unused struct rte_eth_dev *dev)\n {\n \tstruct virtchnl2_create_vport *vport_info;\n \tuint16_t idx = adapter->next_vport_idx;\n@@ -193,6 +202,39 @@ idpf_dev_configure(struct rte_eth_dev *dev)\n \treturn ret;\n }\n \n+static int\n+idpf_start_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_rx_queue *rxq;\n+\tstruct idpf_tx_queue *txq;\n+\tint err = 0;\n+\tint i;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tif (!txq || txq->tx_deferred_start)\n+\t\t\tcontinue;\n+\t\terr = idpf_tx_queue_start(dev, i);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to start Tx queue %u\", i);\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (!rxq || rxq->rx_deferred_start)\n+\t\t\tcontinue;\n+\t\terr = idpf_rx_queue_start(dev, i);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to start Rx queue %u\", i);\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n static int\n idpf_dev_start(struct rte_eth_dev *dev)\n {\n@@ -203,6 +245,11 @@ idpf_dev_start(struct rte_eth_dev *dev)\n \n \tvport->stopped = 0;\n \n+\tif (idpf_start_queues(dev)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to start queues\");\n+\t\tgoto err_mtu;\n+\t}\n+\n \tif (idpf_ena_dis_vport(vport, true)) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to enable vport\");\n \t\tgoto err_vport;\n@@ -211,6 +258,8 @@ idpf_dev_start(struct rte_eth_dev *dev)\n \treturn 0;\n \n err_vport:\n+\tidpf_stop_queues(dev);\n+err_mtu:\n \treturn -1;\n }\n \n@@ -228,6 +277,8 @@ idpf_dev_stop(struct rte_eth_dev *dev)\n \tif (idpf_ena_dis_vport(vport, false))\n \t\tPMD_DRV_LOG(ERR, \"disable vport failed\");\n \n+\tidpf_stop_queues(dev);\n+\n \tvport->stopped = 1;\n \tdev->data->dev_started = 0;\n \ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nindex 501f772fa8..25e0c5cae7 100644\n--- a/drivers/net/idpf/idpf_ethdev.h\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -200,6 +200,13 @@ int idpf_check_api_version(struct idpf_adapter *adapter);\n int idpf_get_caps(struct idpf_adapter *adapter);\n int idpf_create_vport(__rte_unused struct rte_eth_dev *dev);\n int idpf_destroy_vport(struct idpf_vport *vport);\n+int idpf_config_rxqs(struct idpf_vport *vport);\n+int idpf_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);\n+int idpf_config_txqs(struct idpf_vport *vport);\n+int idpf_config_txq(struct idpf_vport *vport, uint16_t txq_id);\n+int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,\n+\t\t 
     bool rx, bool on);\n+int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable);\n int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);\n \n #endif /* _IDPF_ETHDEV_H_ */\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nnew file mode 100644\nindex 0000000000..7d5428b750\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -0,0 +1,1264 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <ethdev_driver.h>\n+#include <rte_net.h>\n+\n+#include \"idpf_ethdev.h\"\n+#include \"idpf_rxtx.h\"\n+\n+static inline int\n+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n+{\n+\t/* The following constraints must be satisfied:\n+\t *   thresh < rxq->nb_rx_desc\n+\t */\n+\tif (thresh >= nb_desc) {\n+\t\tPMD_INIT_LOG(ERR, \"rx_free_thresh (%u) must be less than %u\",\n+\t\t\t     thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n+\t\tuint16_t tx_free_thresh)\n+{\n+\t/* TX descriptors will have their RS bit set after tx_rs_thresh\n+\t * descriptors have been used. The TX descriptor ring will be cleaned\n+\t * after tx_free_thresh descriptors are used or if the number of\n+\t * descriptors required to transmit a packet is greater than the\n+\t * number of free TX descriptors.\n+\t *\n+\t * The following constraints must be satisfied:\n+\t *  - tx_rs_thresh must be less than the size of the ring minus 2.\n+\t *  - tx_free_thresh must be less than the size of the ring minus 3.\n+\t *  - tx_rs_thresh must be less than or equal to tx_free_thresh.\n+\t *  - tx_rs_thresh must be a divisor of the ring size.\n+\t *\n+\t * One descriptor in the TX ring is used as a sentinel to avoid a H/W\n+\t * race condition, hence the maximum threshold constraints. 
When set\n+\t * to zero use default values.\n+\t */\n+\tif (tx_rs_thresh >= (nb_desc - 2)) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be less than the \"\n+\t\t\t     \"number of TX descriptors (%u) minus 2\",\n+\t\t\t     tx_rs_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_free_thresh >= (nb_desc - 3)) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_free_thresh (%u) must be less than the \"\n+\t\t\t     \"number of TX descriptors (%u) minus 3.\",\n+\t\t\t     tx_free_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_rs_thresh > tx_free_thresh) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be less than or \"\n+\t\t\t     \"equal to tx_free_thresh (%u).\",\n+\t\t\t     tx_rs_thresh, tx_free_thresh);\n+\t\treturn -EINVAL;\n+\t}\n+\tif ((nb_desc % tx_rs_thresh) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be a divisor of the \"\n+\t\t\t     \"number of TX descriptors (%u).\",\n+\t\t\t     tx_rs_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+release_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t i;\n+\n+\tif (!rxq->sw_ring)\n+\t\treturn;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tif (rxq->sw_ring[i]) {\n+\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i]);\n+\t\t\trxq->sw_ring[i] = NULL;\n+\t\t}\n+\t}\n+}\n+\n+static inline void\n+release_txq_mbufs(struct idpf_tx_queue *txq)\n+{\n+\tuint16_t nb_desc, i;\n+\n+\tif (!txq || !txq->sw_ring) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to rxq or sw_ring is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tif (txq->sw_nb_desc) {\n+\t\t/* For split queue model, descriptor ring */\n+\t\tnb_desc = txq->sw_nb_desc;\n+\t} else {\n+\t\t/* For single queue model */\n+\t\tnb_desc = txq->nb_tx_desc;\n+\t}\n+\tfor (i = 0; i < nb_desc; i++) {\n+\t\tif (txq->sw_ring[i].mbuf) {\n+\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n+\t\t\ttxq->sw_ring[i].mbuf = NULL;\n+\t\t}\n+\t}\n+}\n+\n+static const struct idpf_rxq_ops def_rxq_ops = {\n+\t.release_mbufs = release_rxq_mbufs,\n+};\n+\n+static const struct idpf_txq_ops def_txq_ops = {\n+\t.release_mbufs = release_txq_mbufs,\n+};\n+\n+static void\n+idpf_rx_queue_release(void *rxq)\n+{\n+\tstruct idpf_rx_queue *q = (struct idpf_rx_queue *)rxq;\n+\n+\tif (!q)\n+\t\treturn;\n+\n+\t/* Split queue */\n+\tif (q->bufq1 && q->bufq2) {\n+\t\tq->bufq1->ops->release_mbufs(q->bufq1);\n+\t\trte_free(q->bufq1->sw_ring);\n+\t\trte_memzone_free(q->bufq1->mz);\n+\t\trte_free(q->bufq1);\n+\t\tq->bufq2->ops->release_mbufs(q->bufq2);\n+\t\trte_free(q->bufq2->sw_ring);\n+\t\trte_memzone_free(q->bufq2->mz);\n+\t\trte_free(q->bufq2);\n+\t\trte_memzone_free(q->mz);\n+\t\trte_free(q);\n+\t\treturn;\n+\t}\n+\n+\t/* Single queue */\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(q);\n+}\n+\n+static void\n+idpf_tx_queue_release(void *txq)\n+{\n+\tstruct idpf_tx_queue *q = (struct idpf_tx_queue *)txq;\n+\n+\tif (!q)\n+\t\treturn;\n+\n+\tif (q->complq)\n+\t\trte_free(q->complq);\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(q);\n+}\n+\n+static inline void\n+reset_split_rx_descq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\trxq->rx_tail = 0;\n+\trxq->expected_gen_id = 1;\n+}\n+\n+static inline void\n+reset_split_rx_bufq(struct idpf_rx_queue 
*rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\t/* The next descriptor id which can be received. */\n+\trxq->rx_next_avail = 0;\n+\n+\t/* The next descriptor id which can be refilled. */\n+\trxq->rx_tail = 0;\n+\t/* The number of descriptors which can be refilled. */\n+\trxq->nb_rx_hold = rxq->nb_rx_desc - 1;\n+\n+\trxq->bufq1 = NULL;\n+\trxq->bufq2 = NULL;\n+}\n+\n+static inline void\n+reset_split_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\treset_split_rx_descq(rxq);\n+\treset_split_rx_bufq(rxq->bufq1);\n+\treset_split_rx_bufq(rxq->bufq2);\n+}\n+\n+static inline void\n+reset_single_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\n+\tif (rxq->pkt_first_seg != NULL)\n+\t\trte_pktmbuf_free(rxq->pkt_first_seg);\n+\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+\trxq->rxrearm_start = 0;\n+\trxq->rxrearm_nb = 0;\n+}\n+\n+static inline void\n+reset_split_tx_descq(struct idpf_tx_queue *txq)\n+{\n+\tstruct idpf_tx_entry *txe;\n+\tuint32_t i, size;\n+\tuint16_t prev;\n+\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = sizeof(struct iecm_flex_tx_sched_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->desc_ring)[i] = 0;\n+\n+\ttxe = txq->sw_ring;\n+\tprev = (uint16_t)(txq->sw_nb_desc - 1);\n+\tfor (i = 0; i < txq->sw_nb_desc; i++) {\n+\t\ttxe[i].mbuf = NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_used = 0;\n+\n+\t/* Use this as next to clean for split desc queue */\n+\ttxq->last_desc_cleaned = 0;\n+\ttxq->sw_tail = 0;\n+\ttxq->nb_free = txq->nb_tx_desc - 1;\n+}\n+\n+static inline void\n+reset_split_tx_complq(struct idpf_tx_queue *cq)\n+{\n+\tuint32_t i, size;\n+\n+\tif (!cq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to complq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = sizeof(struct iecm_splitq_tx_compl_desc) * cq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)cq->compl_ring)[i] = 0;\n+\n+\tcq->tx_tail = 0;\n+\tcq->expected_gen_id = 1;\n+}\n+\n+static inline void\n+reset_single_tx_queue(struct idpf_tx_queue *txq)\n+{\n+\tstruct idpf_tx_entry *txe;\n+\tuint32_t i, size;\n+\tuint16_t prev;\n+\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\ttxe = txq->sw_ring;\n+\tsize = sizeof(struct iecm_base_tx_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->tx_ring)[i] = 0;\n+\n+\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\ttxq->tx_ring[i].qw1 =\n+\t\t\trte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DESC_DONE);\n+\t\ttxe[i].mbuf =  NULL;\n+\t\ttxe[i].last_id = 
i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_used = 0;\n+\n+\ttxq->last_desc_cleaned = txq->nb_tx_desc - 1;\n+\ttxq->nb_free = txq->nb_tx_desc - 1;\n+\n+\ttxq->next_dd = txq->rs_thresh - 1;\n+\ttxq->next_rs = txq->rs_thresh - 1;\n+}\n+\n+static int\n+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n+\t\t\t uint16_t queue_idx, uint16_t rx_free_thresh,\n+\t\t\t uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t const struct rte_eth_rxconf *rx_conf,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t len;\n+\n+\tbufq->mp = mp;\n+\tbufq->nb_rx_desc = nb_desc;\n+\tbufq->rx_free_thresh = rx_free_thresh;\n+\tbufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;\n+\tbufq->port_id = dev->data->port_id;\n+\tbufq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\tbufq->rx_hdr_len = 0;\n+\tbufq->adapter = adapter;\n+\n+\tif (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)\n+\t\tbufq->crc_len = RTE_ETHER_CRC_LEN;\n+\telse\n+\t\tbufq->crc_len = 0;\n+\n+\tlen = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;\n+\tbufq->rx_buf_len = len;\n+\n+\t/* Allocate the software ring. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tbufq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rx bufq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!bufq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_splitq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_buf_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(bufq->sw_ring);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. 
*/\n+\tmemset(mz->addr, 0, ring_size);\n+\tbufq->rx_ring_phys_addr = mz->iova;\n+\tbufq->rx_ring = mz->addr;\n+\n+\tbufq->mz = mz;\n+\treset_split_rx_bufq(bufq);\n+\tbufq->q_set = true;\n+\tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n+\t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n+\tbufq->ops = &def_rxq_ops;\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t  const struct rte_eth_rxconf *rx_conf,\n+\t\t\t  struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct idpf_rx_queue *rxq;\n+\tstruct idpf_rx_queue *bufq1, *bufq2;\n+\tconst struct rte_memzone *mz;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint16_t qid;\n+\tuint16_t len;\n+\tint ret;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is invalid\", nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed */\n+\tif (dev->data->rx_queues[queue_idx]) {\n+\t\tidpf_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!rxq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = adapter;\n+\n+\tif (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n+\telse\n+\t\trxq->crc_len = 0;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_cpmpl_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_rxq;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. 
*/\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_split_rx_descq(rxq);\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\t/* setup Rx buffer queue */\n+\tbufq1 = rte_zmalloc_socket(\"idpf bufq1\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!bufq1) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 1.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_mz;\n+\t}\n+\tqid = 2 * queue_idx;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, rx_conf, mp);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 1\");\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq1;\n+\t}\n+\trxq->bufq1 = bufq1;\n+\n+\tbufq2 = rte_zmalloc_socket(\"idpf bufq2\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!bufq2) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 2.\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -ENOMEM;\n+\t\tgoto free_bufq1;\n+\t}\n+\tqid = 2 * queue_idx + 1;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, rx_conf, mp);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 2\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq2;\n+\t}\n+\trxq->bufq2 = bufq2;\n+\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\treturn 0;\n+\n+free_bufq2:\n+\trte_free(bufq2);\n+free_bufq1:\n+\trte_free(bufq1);\n+free_mz:\n+\trte_memzone_free(mz);\n+free_rxq:\n+\trte_free(rxq);\n+\n+\treturn ret;\n+}\n+\n+static int\n+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t   const struct rte_eth_rxconf *rx_conf,\n+\t\t\t   struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tstruct idpf_rx_queue *rxq;\n+\tconst struct rte_memzone *mz;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint16_t len;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed */\n+\tif (dev->data->rx_queues[queue_idx]) {\n+\t\tidpf_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!rxq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_deferred_start = 
rx_conf->rx_deferred_start;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = adapter;\n+\n+\tif (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n+\telse\n+\t\trxq->crc_len = 0;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\trxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rxq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!rxq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_singleq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_single_rx_queue(rxq);\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n+\trxq->ops = &def_rxq_ops;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_rxconf *rx_conf,\n+\t\t    struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\treturn idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t  socket_id, rx_conf, mp);\n+\telse\n+\t\treturn idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t socket_id, rx_conf, mp);\n+}\n+\n+static int\n+idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t  const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tstruct idpf_tx_queue *txq, *cq;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t tx_rs_thresh, tx_free_thresh;\n+\tuint64_t offloads;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of transmit descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?\n+\t\ttx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);\n+\ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?\n+\t\ttx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);\n+\tif (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed. 
*/\n+\tif (dev->data->tx_queues[queue_idx]) {\n+\t\tidpf_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(\"idpf split txq\",\n+\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!txq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->rs_thresh = tx_rs_thresh;\n+\ttxq->free_thresh = tx_free_thresh;\n+\ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->offloads = offloads;\n+\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_nb_desc = 2 * nb_desc;\n+\ttxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf split tx sw ring\",\n+\t\t\t\t   sizeof(struct idpf_tx_entry) *\n+\t\t\t\t   txq->sw_nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!txq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate TX hardware ring descriptors. */\n+\tring_size = sizeof(struct iecm_flex_tx_sched_desc) * txq->nb_tx_desc;\n+\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"split_tx_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n+\t\trte_free(txq->sw_ring);\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\ttxq->tx_ring_phys_addr = mz->iova;\n+\ttxq->desc_ring = (struct iecm_flex_tx_sched_desc *)mz->addr;\n+\n+\ttxq->mz = mz;\n+\treset_split_tx_descq(txq);\n+\ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n+\ttxq->ops = &def_txq_ops;\n+\n+\t/* Allocate the TX completion queue data structure. 
*/\n+\ttxq->complq = rte_zmalloc_socket(\"idpf splitq cq\",\n+\t\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t socket_id);\n+\tcq = txq->complq;\n+\tif (!cq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tcq->nb_tx_desc = 2 * nb_desc;\n+\tcq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;\n+\tcq->port_id = dev->data->port_id;\n+\tcq->txqs = dev->data->tx_queues;\n+\tcq->tx_start_qid = vport->chunks_info.tx_start_qid;\n+\n+\tring_size = sizeof(struct iecm_splitq_tx_compl_desc) * cq->nb_tx_desc;\n+\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"tx_split_compl_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX completion queue\");\n+\t\trte_free(txq->sw_ring);\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\tcq->tx_ring_phys_addr = mz->iova;\n+\tcq->compl_ring = (struct iecm_splitq_tx_compl_desc *)mz->addr;\n+\tcq->mz = mz;\n+\treset_split_tx_complq(cq);\n+\n+\ttxq->q_set = true;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t   const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tstruct idpf_tx_queue *txq;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t tx_rs_thresh, tx_free_thresh;\n+\tuint64_t offloads;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of transmit descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?\n+\t\ttx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);\n+\ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?\n+\t\ttx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);\n+\tif (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed. */\n+\tif (dev->data->tx_queues[queue_idx]) {\n+\t\tidpf_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Allocate the TX queue data structure. 
*/\n+\ttxq = rte_zmalloc_socket(\"idpf txq\",\n+\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!txq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* TODO: vlan offload */\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->rs_thresh = tx_rs_thresh;\n+\ttxq->free_thresh = tx_free_thresh;\n+\ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->offloads = offloads;\n+\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf tx sw ring\",\n+\t\t\t\t   sizeof(struct idpf_tx_entry) * nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!txq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate TX hardware ring descriptors. */\n+\tring_size = sizeof(struct iecm_base_tx_desc) * nb_desc;\n+\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n+\t\trte_free(txq->sw_ring);\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->tx_ring_phys_addr = mz->iova;\n+\ttxq->tx_ring = (struct iecm_base_tx_desc *)mz->addr;\n+\n+\ttxq->mz = mz;\n+\treset_single_tx_queue(txq);\n+\ttxq->q_set = true;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n+\ttxq->ops = &def_txq_ops;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\treturn idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t  socket_id, tx_conf);\n+\telse\n+\t\treturn idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t socket_id, tx_conf);\n+}\n+\n+static int\n+idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tvolatile struct virtchnl2_singleq_rx_buf_desc *rxd;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!mbuf)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];\n+\t\trxd->pkt_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+#ifndef RTE_LIBRTE_IDPF_16BYTE_RX_DESC\n+\t\trxd->rsvd1 = 0;\n+\t\trxd->rsvd2 = 0;\n+#endif\n+\n+\t\trxq->sw_ring[i] = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tvolatile struct virtchnl2_splitq_rx_buf_desc *rxd;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < 
rxq->nb_rx_desc - 1; i++) {\n+\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!mbuf)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];\n+\t\trxd->qword0.buf_id = i;\n+\t\trxd->qword0.rsvd0 = 0;\n+\t\trxd->qword0.rsvd1 = 0;\n+\t\trxd->pkt_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+\t\trxd->rsvd2 = 0;\n+\n+\t\trxq->sw_ring[i] = mbuf;\n+\t}\n+\n+\trxq->nb_rx_hold = 0;\n+\trxq->rx_tail = rxq->nb_rx_desc - 1;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct idpf_rx_queue *rxq;\n+\tint err;\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\tif (!rxq->bufq1) {\n+\t\t/* Single queue */\n+\t\terr = idpf_alloc_single_rxq_mbufs(rxq);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX queue mbuf\");\n+\t\t\treturn err;\n+\t\t}\n+\n+\t\trte_wmb();\n+\n+\t\t/* Init the RX tail register. */\n+\t\tIECM_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\t} else {\n+\t\t/* Split queue */\n+\t\terr = idpf_alloc_split_rxq_mbufs(rxq->bufq1);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX buffer queue mbuf\");\n+\t\t\treturn err;\n+\t\t}\n+\t\terr = idpf_alloc_split_rxq_mbufs(rxq->bufq2);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX buffer queue mbuf\");\n+\t\t\treturn err;\n+\t\t}\n+\n+\t\trte_wmb();\n+\n+\t\t/* Init the RX tail register. */\n+\t\tIECM_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->nb_rx_desc - 1);\n+\t\tIECM_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->nb_rx_desc - 1);\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct idpf_rx_queue *rxq =\n+\t\t(struct idpf_rx_queue *)dev->data->rx_queues[rx_queue_id];\n+\tint err = 0;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\terr = idpf_config_rxq(vport, rx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Fail to configure Rx queue %u\", rx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\terr = idpf_rx_queue_init(dev, rx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init RX queue %u\",\n+\t\t\t    rx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\t/* Ready to switch the queue on */\n+\terr = idpf_switch_queue(vport, rx_queue_id, true, true);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u on\",\n+\t\t\t    rx_queue_id);\n+\t} else {\n+\t\trxq->q_started = true;\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct idpf_tx_queue *txq;\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues)\n+\t\treturn -EINVAL;\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\n+\t/* Init the RX tail register. 
*/\n+\tIECM_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct idpf_tx_queue *txq =\n+\t\t(struct idpf_tx_queue *)dev->data->tx_queues[tx_queue_id];\n+\tint err = 0;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\terr = idpf_config_txq(vport, tx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Fail to configure Tx queue %u\", tx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\terr = idpf_tx_queue_init(dev, tx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init TX queue %u\",\n+\t\t\t    tx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\t/* Ready to switch the queue on */\n+\terr = idpf_switch_queue(vport, tx_queue_id, false, true);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u on\",\n+\t\t\t    tx_queue_id);\n+\t} else {\n+\t\ttxq->q_started = true;\n+\t\tdev->data->tx_queue_state[tx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct idpf_rx_queue *rxq;\n+\tint err;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\terr = idpf_switch_queue(vport, rx_queue_id, true, false);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u off\",\n+\t\t\t    rx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\trxq->ops->release_mbufs(rxq);\n+\t\treset_single_rx_queue(rxq);\n+\t} else {\n+\t\trxq->bufq1->ops->release_mbufs(rxq->bufq1);\n+\t\trxq->bufq2->ops->release_mbufs(rxq->bufq2);\n+\t\treset_split_rx_queue(rxq);\n+\t}\n+\tdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct idpf_vport *vport =\n+\t\t(struct idpf_vport *)dev->data->dev_private;\n+\tstruct idpf_tx_queue *txq;\n+\tint err;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues)\n+\t\treturn -EINVAL;\n+\n+\terr = idpf_switch_queue(vport, tx_queue_id, false, false);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u off\",\n+\t\t\t    tx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\ttxq->ops->release_mbufs(txq);\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\treset_single_tx_queue(txq);\n+\t} else {\n+\t\treset_split_tx_descq(txq);\n+\t\treset_split_tx_complq(txq->complq);\n+\t}\n+\tdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+void\n+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tidpf_rx_queue_release(dev->data->rx_queues[qid]);\n+}\n+\n+void\n+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tidpf_tx_queue_release(dev->data->tx_queues[qid]);\n+}\n+\n+void\n+idpf_stop_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_rx_queue *rxq;\n+\tstruct idpf_tx_queue *txq;\n+\tint i;\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (!rxq)\n+\t\t\tcontinue;\n+\n+\t\tif (idpf_rx_queue_stop(dev, i))\n+\t\t\tPMD_DRV_LOG(WARNING, \"Fail to stop Rx queue %d\", i);\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) 
{\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tif (!txq)\n+\t\t\tcontinue;\n+\n+\t\tif (idpf_tx_queue_stop(dev, i))\n+\t\t\tPMD_DRV_LOG(WARNING, \"Fail to stop Tx queue %d\", i);\n+\t}\n+}\n+\n+\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nnew file mode 100644\nindex 0000000000..3997082b21\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -0,0 +1,182 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_RXTX_H_\n+#define _IDPF_RXTX_H_\n+\n+#include \"base/iecm_osdep.h\"\n+#include \"base/iecm_type.h\"\n+#include \"base/iecm_devids.h\"\n+#include \"base/iecm_lan_txrx.h\"\n+#include \"base/iecm_lan_pf_regs.h\"\n+#include \"base/virtchnl.h\"\n+#include \"base/virtchnl2.h\"\n+#include \"base/virtchnl2_lan_desc.h\"\n+\n+/* In QLEN must be whole number of 32 descriptors. */\n+#define IDPF_ALIGN_RING_DESC\t32\n+#define IDPF_MIN_RING_DESC\t32\n+#define IDPF_MAX_RING_DESC\t4096\n+#define IDPF_DMA_MEM_ALIGN\t4096\n+/* Base address of the HW descriptor ring should be 128B aligned. */\n+#define IDPF_RING_BASE_ALIGN\t128\n+\n+/* used for Rx Bulk Allocate */\n+#define IDPF_RX_MAX_BURST\t32\n+#define IDPF_TX_MAX_BURST\t32\n+\n+#define IDPF_DEFAULT_RX_FREE_THRESH\t32\n+\n+/* used for Vector PMD */\n+#define IDPF_VPMD_RX_MAX_BURST\t32\n+#define IDPF_VPMD_TX_MAX_BURST\t32\n+#define IDPF_VPMD_DESCS_PER_LOOP\t4\n+#define IDPF_RXQ_REARM_THRESH\t64\n+\n+#define IDPF_DEFAULT_TX_RS_THRESH\t32\n+#define IDPF_DEFAULT_TX_FREE_THRESH\t32\n+\n+#define IDPF_MIN_TSO_MSS\t256\n+#define IDPF_MAX_TSO_MSS\t9668\n+#define IDPF_TSO_MAX_SEG\tUINT8_MAX\n+#define IDPF_TX_MAX_MTU_SEG     8\n+\n+struct idpf_rx_queue {\n+\tstruct idpf_adapter *adapter;\t/* the adapter this queue belongs to */\n+\tstruct rte_mempool *mp;\t\t/* mbuf pool to populate Rx ring */\n+\tconst struct rte_memzone *mz;\t/* memzone for Rx ring */\n+\tvolatile void *rx_ring;\n+\tstruct rte_mbuf **sw_ring;\t/* address of SW ring */\n+\tuint64_t rx_ring_phys_addr;\t/* Rx ring DMA address */\n+\n+\tuint16_t nb_rx_desc;\t\t/* ring length */\n+\tuint16_t rx_tail;\t\t/* current value of tail */\n+\tvolatile uint8_t *qrx_tail;\t/* register address of tail */\n+\tuint16_t rx_free_thresh;\t/* max free RX desc to hold */\n+\tuint16_t nb_rx_hold;\t\t/* number of held free RX desc */\n+\tstruct rte_mbuf *pkt_first_seg;\t/* first segment of current packet */\n+\tstruct rte_mbuf *pkt_last_seg;\t/* last segment of current packet */\n+\tstruct rte_mbuf fake_mbuf;\t/* dummy mbuf */\n+\n+\t/* used for VPMD */\n+\tuint16_t rxrearm_nb;       /* number of remaining to be re-armed */\n+\tuint16_t rxrearm_start;    /* the idx we start the re-arming from */\n+\tuint64_t mbuf_initializer; /* value to init mbufs */\n+\n+\t/* for rx bulk */\n+\tuint16_t rx_nb_avail;      /* number of staged packets ready */\n+\tuint16_t rx_next_avail;    /* index of next staged packets */\n+\tuint16_t rx_free_trigger;  /* triggers rx buffer allocation */\n+\tstruct rte_mbuf *rx_stage[IDPF_RX_MAX_BURST * 2]; /* store mbuf */\n+\n+\tuint16_t port_id;\t/* device port ID */\n+\tuint16_t queue_id;      /* Rx queue index */\n+\tuint16_t rx_buf_len;    /* The packet buffer size */\n+\tuint16_t rx_hdr_len;    /* The header buffer size */\n+\tuint16_t max_pkt_len;   /* Maximum packet length */\n+\tuint8_t crc_len;\t/* 0 if CRC stripped, 4 otherwise */\n+\tuint8_t rxdid;\n+\n+\tbool q_set;\t\t/* if rx queue has been configured */\n+\tbool q_started;\t\t/* if rx queue has been started */\n+\tbool rx_deferred_start;\t/* don't 
start this queue in dev start */\n+\tconst struct idpf_rxq_ops *ops;\n+\n+\t/* only valid for split queue mode */\n+\tuint8_t expected_gen_id;\n+\tstruct idpf_rx_queue *bufq1;\n+\tstruct idpf_rx_queue *bufq2;\n+};\n+\n+struct idpf_tx_entry {\n+\tstruct rte_mbuf *mbuf;\n+\tuint16_t next_id;\n+\tuint16_t last_id;\n+};\n+\n+/* Structure associated with each TX queue. */\n+struct idpf_tx_queue {\n+\tconst struct rte_memzone *mz;\t\t/* memzone for Tx ring */\n+\tvolatile struct iecm_base_tx_desc *tx_ring;\t/* Tx ring virtual address */\n+\tvolatile union {\n+\t\tstruct iecm_flex_tx_sched_desc *desc_ring;\n+\t\tstruct iecm_splitq_tx_compl_desc *compl_ring;\n+\t};\n+\tuint64_t tx_ring_phys_addr;\t\t/* Tx ring DMA address */\n+\tstruct idpf_tx_entry *sw_ring;\t\t/* address array of SW ring */\n+\n+\tuint16_t nb_tx_desc;\t\t/* ring length */\n+\tuint16_t tx_tail;\t\t/* current value of tail */\n+\tvolatile uint8_t *qtx_tail;\t/* register address of tail */\n+\t/* number of used desc since RS bit set */\n+\tuint16_t nb_used;\n+\tuint16_t nb_free;\n+\tuint16_t last_desc_cleaned;\t/* last desc have been cleaned*/\n+\tuint16_t free_thresh;\n+\tuint16_t rs_thresh;\n+\n+\tuint16_t port_id;\n+\tuint16_t queue_id;\n+\tuint64_t offloads;\n+\tuint16_t next_dd;\t/* next to set RS, for VPMD */\n+\tuint16_t next_rs;\t/* next to check DD,  for VPMD */\n+\n+\tbool q_set;\t\t/* if tx queue has been configured */\n+\tbool q_started;\t\t/* if tx queue has been started */\n+\tbool tx_deferred_start;\t/* don't start this queue in dev start */\n+\tconst struct idpf_txq_ops *ops;\n+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1       BIT(0)\n+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2       BIT(1)\n+\tuint8_t vlan_flag;\n+\n+\t/* only valid for split queue mode */\n+\tuint16_t sw_nb_desc;\n+\tuint16_t sw_tail;\n+\tvoid **txqs;\n+\tuint32_t tx_start_qid;\n+\tuint8_t expected_gen_id;\n+\tstruct idpf_tx_queue *complq;\n+};\n+\n+/* Offload features */\n+union idpf_tx_offload {\n+\tuint64_t data;\n+\tstruct {\n+\t\tuint64_t l2_len:7; /* L2 (MAC) Header Length. */\n+\t\tuint64_t l3_len:9; /* L3 (IP) Header Length. */\n+\t\tuint64_t l4_len:8; /* L4 Header Length. 
*/\n+\t\tuint64_t tso_segsz:16; /* TCP TSO segment size */\n+\t\t/* uint64_t unused : 24; */\n+\t};\n+};\n+\n+struct idpf_rxq_ops {\n+\tvoid (*release_mbufs)(struct idpf_rx_queue *rxq);\n+};\n+\n+struct idpf_txq_ops {\n+\tvoid (*release_mbufs)(struct idpf_tx_queue *txq);\n+};\n+\n+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mp);\n+int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+\n+int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_txconf *tx_conf);\n+int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+\n+void idpf_stop_queues(struct rte_eth_dev *dev);\n+\n+#endif /* _IDPF_RXTX_H_ */\n+\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nindex 4fc15d5b71..d78903d983 100644\n--- a/drivers/net/idpf/idpf_vchnl.c\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -21,6 +21,7 @@\n #include <rte_dev.h>\n \n #include \"idpf_ethdev.h\"\n+#include \"idpf_rxtx.h\"\n \n #include \"base/iecm_prototype.h\"\n \n@@ -450,6 +451,508 @@ idpf_destroy_vport(struct idpf_vport *vport)\n \treturn err;\n }\n \n+#define IDPF_RX_BUF_STRIDE\t\t64\n+int\n+idpf_config_rxqs(struct idpf_vport *vport)\n+{\n+\tstruct idpf_rx_queue **rxq =\n+\t\t(struct idpf_rx_queue **)vport->dev_data->rx_queues;\n+\tstruct virtchnl2_config_rx_queues *vc_rxqs = NULL;\n+\tstruct virtchnl2_rxq_info *rxq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t total_qs, num_qs;\n+\tint size, err, i, j;\n+\tint k = 0;\n+\n+\ttotal_qs = vport->num_rx_q + vport->num_rx_bufq;\n+\twhile (total_qs) {\n+\t\tif (total_qs > adapter->max_rxq_per_msg) {\n+\t\t\tnum_qs = adapter->max_rxq_per_msg;\n+\t\t\ttotal_qs -= adapter->max_rxq_per_msg;\n+\t\t} else {\n+\t\t\tnum_qs = total_qs;\n+\t\t\ttotal_qs = 0;\n+\t\t}\n+\n+\t\tsize = sizeof(*vc_rxqs) + (num_qs - 1) *\n+\t\t\tsizeof(struct virtchnl2_rxq_info);\n+\t\tvc_rxqs = rte_zmalloc(\"cfg_rxqs\", size, 0);\n+\t\tif (vc_rxqs == NULL) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_rx_queues\");\n+\t\t\terr = -ENOMEM;\n+\t\t\tbreak;\n+\t\t}\n+\t\tvc_rxqs->vport_id = vport->vport_id;\n+\t\tvc_rxqs->num_qinfo = num_qs;\n+\t\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\t\tfor (i = 0; i < num_qs; i++, k++) {\n+\t\t\t\trxq_info = &vc_rxqs->qinfo[i];\n+\t\t\t\trxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;\n+\t\t\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\t\t\trxq_info->queue_id = rxq[k]->queue_id;\n+\t\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\t\t\trxq_info->data_buffer_size = rxq[k]->rx_buf_len;\n+\t\t\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\t\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;\n+\t\t\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\t\t\trxq_info->ring_len = rxq[k]->nb_rx_desc;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tfor (i = 0; i < num_qs / 3; i++, k++) {\n+\t\t\t\t/* Rx queue 
*/\n+\t\t\t\trxq_info = &vc_rxqs->qinfo[i * 3];\n+\t\t\t\trxq_info->dma_ring_addr =\n+\t\t\t\t\trxq[k]->rx_ring_phys_addr;\n+\t\t\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\t\t\trxq_info->queue_id = rxq[k]->queue_id;\n+\t\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\trxq_info->data_buffer_size = rxq[k]->rx_buf_len;\n+\t\t\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\t\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\t\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\t\t\trxq_info->ring_len = rxq[k]->nb_rx_desc;\n+\t\t\t\trxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;\n+\t\t\t\trxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;\n+\t\t\t\trxq_info->rx_buffer_low_watermark = 64;\n+\n+\t\t\t\t/* Buffer queue */\n+\t\t\t\tfor (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {\n+\t\t\t\t\tstruct idpf_rx_queue *bufq = j == 1 ?\n+\t\t\t\t\t\trxq[k]->bufq1 : rxq[k]->bufq2;\n+\t\t\t\t\trxq_info = &vc_rxqs->qinfo[i * 3 + j];\n+\t\t\t\t\trxq_info->dma_ring_addr =\n+\t\t\t\t\t\tbufq->rx_ring_phys_addr;\n+\t\t\t\t\trxq_info->type =\n+\t\t\t\t\t\tVIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\t\t\t\trxq_info->queue_id = bufq->queue_id;\n+\t\t\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\t\trxq_info->data_buffer_size = bufq->rx_buf_len;\n+\t\t\t\t\trxq_info->desc_ids =\n+\t\t\t\t\t\tVIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\t\t\t\trxq_info->ring_len = bufq->nb_rx_desc;\n+\n+\t\t\t\t\trxq_info->buffer_notif_stride =\n+\t\t\t\t\t\tIDPF_RX_BUF_STRIDE;\n+\t\t\t\t\trxq_info->rx_buffer_low_watermark = 64;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t\tmemset(&args, 0, sizeof(args));\n+\t\targs.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;\n+\t\targs.in_args = (uint8_t *)vc_rxqs;\n+\t\targs.in_args_size = size;\n+\t\targs.out_buffer = adapter->mbx_resp;\n+\t\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\t\terr = idpf_execute_vc_cmd(adapter, &args);\n+\t\trte_free(vc_rxqs);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES\");\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)\n+{\n+\tstruct idpf_rx_queue **rxq =\n+\t\t(struct idpf_rx_queue **)vport->dev_data->rx_queues;\n+\tstruct virtchnl2_config_rx_queues *vc_rxqs = NULL;\n+\tstruct virtchnl2_rxq_info *rxq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_qs;\n+\tint size, err, i;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\tnum_qs = IDPF_RXQ_PER_GRP;\n+\telse\n+\t\tnum_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;\n+\n+\tsize = sizeof(*vc_rxqs) + (num_qs - 1) *\n+\t\tsizeof(struct virtchnl2_rxq_info);\n+\tvc_rxqs = rte_zmalloc(\"cfg_rxqs\", size, 0);\n+\tif (vc_rxqs == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_rx_queues\");\n+\t\terr = -ENOMEM;\n+\t\treturn err;\n+\t}\n+\tvc_rxqs->vport_id = vport->vport_id;\n+\tvc_rxqs->num_qinfo = num_qs;\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\trxq_info = &vc_rxqs->qinfo[0];\n+\t\trxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;\n+\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\trxq_info->queue_id = rxq[rxq_id]->queue_id;\n+\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\trxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;\n+\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;\n+\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\trxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;\n+\t}  else {\n+\t\t/* 
Rx queue */\n+\t\trxq_info = &vc_rxqs->qinfo[0];\n+\t\trxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;\n+\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\trxq_info->queue_id = rxq[rxq_id]->queue_id;\n+\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\trxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;\n+\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\trxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;\n+\t\trxq_info->rx_bufq1_id = rxq[rxq_id]->bufq1->queue_id;\n+\t\trxq_info->rx_bufq2_id = rxq[rxq_id]->bufq2->queue_id;\n+\t\trxq_info->rx_buffer_low_watermark = 64;\n+\n+\t\t/* Buffer queue */\n+\t\tfor (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {\n+\t\t\tstruct idpf_rx_queue *bufq =\n+\t\t\t\ti == 1 ? rxq[rxq_id]->bufq1 : rxq[rxq_id]->bufq2;\n+\t\t\trxq_info = &vc_rxqs->qinfo[i];\n+\t\t\trxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;\n+\t\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\t\trxq_info->queue_id = bufq->queue_id;\n+\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\trxq_info->data_buffer_size = bufq->rx_buf_len;\n+\t\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\t\trxq_info->ring_len = bufq->nb_rx_desc;\n+\n+\t\t\trxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;\n+\t\t\trxq_info->rx_buffer_low_watermark = 64;\n+\t\t}\n+\t}\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;\n+\targs.in_args = (uint8_t *)vc_rxqs;\n+\targs.in_args_size = size;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\trte_free(vc_rxqs);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES\");\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_config_txqs(struct idpf_vport *vport)\n+{\n+\tstruct idpf_tx_queue **txq =\n+\t\t(struct idpf_tx_queue **)vport->dev_data->tx_queues;\n+\tstruct virtchnl2_config_tx_queues *vc_txqs = NULL;\n+\tstruct virtchnl2_txq_info *txq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t total_qs, num_qs;\n+\tint size, err, i;\n+\tint k = 0;\n+\n+\ttotal_qs = vport->num_tx_q + vport->num_tx_complq;\n+\twhile (total_qs) {\n+\t\tif (total_qs > adapter->max_txq_per_msg) {\n+\t\t\tnum_qs = adapter->max_txq_per_msg;\n+\t\t\ttotal_qs -= adapter->max_txq_per_msg;\n+\t\t} else {\n+\t\t\tnum_qs = total_qs;\n+\t\t\ttotal_qs = 0;\n+\t\t}\n+\t\tsize = sizeof(*vc_txqs) + (num_qs - 1) *\n+\t\t\tsizeof(struct virtchnl2_txq_info);\n+\t\tvc_txqs = rte_zmalloc(\"cfg_txqs\", size, 0);\n+\t\tif (vc_txqs == NULL) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_tx_queues\");\n+\t\t\terr = -ENOMEM;\n+\t\t\tbreak;\n+\t\t}\n+\t\tvc_txqs->vport_id = vport->vport_id;\n+\t\tvc_txqs->num_qinfo = num_qs;\n+\t\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\t\tfor (i = 0; i < num_qs; i++, k++) {\n+\t\t\t\ttxq_info = &vc_txqs->qinfo[i];\n+\t\t\t\ttxq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;\n+\t\t\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\t\t\ttxq_info->queue_id = txq[k]->queue_id;\n+\t\t\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\t\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;\n+\t\t\t\ttxq_info->ring_len = txq[k]->nb_tx_desc;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tfor (i = 0; i < num_qs / 2; i++, k++) {\n+\t\t\t\t/* txq info */\n+\t\t\t\ttxq_info = &vc_txqs->qinfo[2 * i];\n+\t\t\t\ttxq_info->dma_ring_addr = 
txq[k]->tx_ring_phys_addr;\n+\t\t\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\t\t\ttxq_info->queue_id = txq[k]->queue_id;\n+\t\t\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\t\t\ttxq_info->ring_len = txq[k]->nb_tx_desc;\n+\t\t\t\ttxq_info->tx_compl_queue_id =\n+\t\t\t\t\ttxq[k]->complq->queue_id;\n+\t\t\t\ttxq_info->relative_queue_id = txq_info->queue_id;\n+\n+\t\t\t\t/* tx completion queue info */\n+\t\t\t\ttxq_info = &vc_txqs->qinfo[2 * i + 1];\n+\t\t\t\ttxq_info->dma_ring_addr =\n+\t\t\t\t\ttxq[k]->complq->tx_ring_phys_addr;\n+\t\t\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\t\t\ttxq_info->queue_id = txq[k]->complq->queue_id;\n+\t\t\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\t\t\ttxq_info->ring_len = txq[k]->complq->nb_tx_desc;\n+\t\t\t}\n+\t\t}\n+\n+\t\tmemset(&args, 0, sizeof(args));\n+\t\targs.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;\n+\t\targs.in_args = (uint8_t *)vc_txqs;\n+\t\targs.in_args_size = size;\n+\t\targs.out_buffer = adapter->mbx_resp;\n+\t\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\t\terr = idpf_execute_vc_cmd(adapter, &args);\n+\t\trte_free(vc_txqs);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES\");\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_config_txq(struct idpf_vport *vport, uint16_t txq_id)\n+{\n+\tstruct idpf_tx_queue **txq =\n+\t\t(struct idpf_tx_queue **)vport->dev_data->tx_queues;\n+\tstruct virtchnl2_config_tx_queues *vc_txqs = NULL;\n+\tstruct virtchnl2_txq_info *txq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_qs;\n+\tint size, err;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\tnum_qs = IDPF_TXQ_PER_GRP;\n+\telse\n+\t\tnum_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;\n+\n+\tsize = sizeof(*vc_txqs) + (num_qs - 1) *\n+\t\tsizeof(struct virtchnl2_txq_info);\n+\tvc_txqs = rte_zmalloc(\"cfg_txqs\", size, 0);\n+\tif (vc_txqs == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_tx_queues\");\n+\t\terr = -ENOMEM;\n+\t\treturn err;\n+\t}\n+\tvc_txqs->vport_id = vport->vport_id;\n+\tvc_txqs->num_qinfo = num_qs;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\ttxq_info = &vc_txqs->qinfo[0];\n+\t\ttxq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;\n+\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\ttxq_info->queue_id = txq[txq_id]->queue_id;\n+\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;\n+\t\ttxq_info->ring_len = txq[txq_id]->nb_tx_desc;\n+\t} else {\n+\t\t/* txq info */\n+\t\ttxq_info = &vc_txqs->qinfo[0];\n+\t\ttxq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;\n+\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\ttxq_info->queue_id = txq[txq_id]->queue_id;\n+\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\ttxq_info->ring_len = txq[txq_id]->nb_tx_desc;\n+\t\ttxq_info->tx_compl_queue_id = txq[txq_id]->complq->queue_id;\n+\t\ttxq_info->relative_queue_id = txq_info->queue_id;\n+\n+\t\t/* tx completion queue info */\n+\t\ttxq_info = &vc_txqs->qinfo[1];\n+\t\ttxq_info->dma_ring_addr = txq[txq_id]->complq->tx_ring_phys_addr;\n+\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\ttxq_info->queue_id = txq[txq_id]->complq->queue_id;\n+\t\ttxq_info->model = 
VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\ttxq_info->ring_len = txq[txq_id]->complq->nb_tx_desc;\n+\t}\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;\n+\targs.in_args = (uint8_t *)vc_txqs;\n+\targs.in_args_size = size;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\trte_free(vc_txqs);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES\");\n+\n+\treturn err;\n+}\n+\n+static int\n+idpf_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,\n+\t\t       uint32_t type, bool on)\n+{\n+\tstruct virtchnl2_del_ena_dis_queues *queue_select;\n+\tstruct virtchnl2_queue_chunk *queue_chunk;\n+\tstruct idpf_cmd_info args;\n+\tint err, len;\n+\n+\tlen = sizeof(struct virtchnl2_del_ena_dis_queues);\n+\tqueue_select = rte_zmalloc(\"queue_select\", len, 0);\n+\tif (!queue_select)\n+\t\treturn -ENOMEM;\n+\n+\tqueue_chunk = queue_select->chunks.chunks;\n+\tqueue_select->chunks.num_chunks = 1;\n+\tqueue_select->vport_id = vport->vport_id;\n+\n+\tqueue_chunk->type = type;\n+\tqueue_chunk->start_queue_id = qid;\n+\tqueue_chunk->num_queues = 1;\n+\n+\targs.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :\n+\t\tVIRTCHNL2_OP_DISABLE_QUEUES;\n+\targs.in_args = (u8 *)queue_select;\n+\targs.in_args_size = len;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_QUEUES\",\n+\t\t\t    on ? \"ENABLE\" : \"DISABLE\");\n+\n+\trte_free(queue_select);\n+\treturn err;\n+}\n+\n+int\n+idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,\n+\t\t bool rx, bool on)\n+{\n+\tuint32_t type;\n+\tint err, queue_id;\n+\n+\t/* switch txq/rxq */\n+\ttype = rx ? 
VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;\n+\n+\tif (type == VIRTCHNL2_QUEUE_TYPE_RX)\n+\t\tqueue_id = vport->chunks_info.rx_start_qid + qid;\n+\telse\n+\t\tqueue_id = vport->chunks_info.tx_start_qid + qid;\n+\terr = idpf_ena_dis_one_queue(vport, queue_id, type, on);\n+\tif (err)\n+\t\treturn err;\n+\n+\t/* switch tx completion queue */\n+\tif (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\tqueue_id = vport->chunks_info.tx_compl_start_qid + qid;\n+\t\terr = idpf_ena_dis_one_queue(vport, queue_id, type, on);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\t/* switch rx buffer queue */\n+\tif (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\tqueue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;\n+\t\terr = idpf_ena_dis_one_queue(vport, queue_id, type, on);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t\tqueue_id++;\n+\t\terr = idpf_ena_dis_one_queue(vport, queue_id, type, on);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\treturn err;\n+}\n+\n+#define IDPF_RXTX_QUEUE_CHUNKS_NUM\t2\n+int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable)\n+{\n+\tstruct virtchnl2_del_ena_dis_queues *queue_select;\n+\tstruct virtchnl2_queue_chunk *queue_chunk;\n+\tuint32_t type;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_chunks;\n+\tint err, len;\n+\n+\tnum_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)\n+\t\tnum_chunks++;\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)\n+\t\tnum_chunks++;\n+\n+\tlen = sizeof(struct virtchnl2_del_ena_dis_queues) +\n+\t\tsizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);\n+\tqueue_select = rte_zmalloc(\"queue_select\", len, 0);\n+\tif (queue_select == NULL)\n+\t\treturn -ENOMEM;\n+\n+\tqueue_chunk = queue_select->chunks.chunks;\n+\tqueue_select->chunks.num_chunks = num_chunks;\n+\tqueue_select->vport_id = vport->vport_id;\n+\n+\ttype = VIRTCHNL2_QUEUE_TYPE_RX;\n+\tqueue_chunk[type].type = type;\n+\tqueue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;\n+\tqueue_chunk[type].num_queues = vport->num_rx_q;\n+\n+\ttype = VIRTCHNL2_QUEUE_TYPE_TX;\n+\tqueue_chunk[type].type = type;\n+\tqueue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;\n+\tqueue_chunk[type].num_queues = vport->num_tx_q;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\tqueue_chunk[type].type = type;\n+\t\tqueue_chunk[type].start_queue_id =\n+\t\t\tvport->chunks_info.rx_buf_start_qid;\n+\t\tqueue_chunk[type].num_queues = vport->num_rx_bufq;\n+\t}\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\tqueue_chunk[type].type = type;\n+\t\tqueue_chunk[type].start_queue_id =\n+\t\t\tvport->chunks_info.tx_compl_start_qid;\n+\t\tqueue_chunk[type].num_queues = vport->num_tx_complq;\n+\t}\n+\n+\targs.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :\n+\t\tVIRTCHNL2_OP_DISABLE_QUEUES;\n+\targs.in_args = (u8 *)queue_select;\n+\targs.in_args_size = len;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_QUEUES\",\n
+\t\t\t    enable ? \"ENABLE\" : \"DISABLE\");\n+\n+\trte_free(queue_select);\n+\treturn err;\n+}\n+\n int\n idpf_ena_dis_vport(struct idpf_vport *vport, bool enable)\n {\ndiff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build\nindex 3a84162f93..fce53ef50c 100644\n--- a/drivers/net/idpf/meson.build\n+++ b/drivers/net/idpf/meson.build\n@@ -12,6 +12,7 @@ objs = [base_objs]\n \n sources = files(\n \t'idpf_ethdev.c',\n+\t'idpf_rxtx.c',\n \t'idpf_vchnl.c',\n )\n \n",
    "prefixes": [
        "04/13"
    ]
}