get:
Show a patch.

patch:
Partially update a patch; only the fields present in the request are changed.

put:
Update a patch, replacing the full set of writable fields.
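
As a quick illustration of these operations (ahead of the sample GET exchange below), here is a minimal Python sketch using the requests library. The server URL and patch ID are taken from this page; the API token value is a placeholder, and the assumption that write operations need token authentication ("Authorization: Token ...") while reads are anonymous reflects the usual Patchwork setup.

    import requests

    BASE = "http://patches.dpdk.org/api"
    PATCH_ID = 122155

    # GET: read a single patch (no authentication needed).
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH: partial update -- change only the state field.
    # "<api-token>" is a placeholder for a maintainer's token.
    resp = requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": "Token <api-token>"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()

    # PUT works the same way but must supply all writable fields.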

GET /api/patches/122155/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 122155,
    "url": "http://patches.dpdk.org/api/patches/122155/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230117072626.93796-16-beilei.xing@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230117072626.93796-16-beilei.xing@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230117072626.93796-16-beilei.xing@intel.com",
    "date": "2023-01-17T07:26:22",
    "name": "[v3,12/15] common/idpf: add help functions for queue setup and release",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "470a056c66f085ea254184ffaece15e8621e2fd0",
    "submitter": {
        "id": 410,
        "url": "http://patches.dpdk.org/api/people/410/?format=api",
        "name": "Xing, Beilei",
        "email": "beilei.xing@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230117072626.93796-16-beilei.xing@intel.com/mbox/",
    "series": [
        {
            "id": 26568,
            "url": "http://patches.dpdk.org/api/series/26568/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=26568",
            "date": "2023-01-17T07:26:07",
            "name": "net/idpf: introduce idpf common modle",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/26568/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/122155/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/122155/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 38EFD423FA;\n\tTue, 17 Jan 2023 08:52:51 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B789D42D5B;\n\tTue, 17 Jan 2023 08:51:24 +0100 (CET)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 3170C42D77\n for <dev@dpdk.org>; Tue, 17 Jan 2023 08:51:22 +0100 (CET)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 16 Jan 2023 23:51:21 -0800",
            "from dpdk-beileix-3.sh.intel.com ([10.67.110.253])\n by fmsmga002.fm.intel.com with ESMTP; 16 Jan 2023 23:51:18 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1673941882; x=1705477882;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=U6k6sc38sDcgXxzF+XZ0x/sGS15wlOjhs5QaxUxrXF8=;\n b=gJFhDFRBDsRV1WjNOK970Ak7VmTjeglDTo+b8XBwoilL/pd7gjY9k7bg\n Wzcqy2ZTz4MkgVb34ESPjo1O8s4URBv47mwocyvkFEre4gwNQSk23NhJB\n tMhMmOGbohqIKFUm7y1F/kaSOW7QA5CecCqVQWnct2ZokLjRjlLQbjZNV\n gsaaIsU5r9+ftRGhlmzgclax3OSJyKy2M7X20PeomuGYlrv55SDuEkMrS\n 2tIkzWoerA0txTDEE+wdcsQ0LuZDQu+4cxrUYJTCYaXkVFqXChQQo5Urn\n 4N3MXAHA9mgBztzuutaCaMSSvAv/tjuooJ4zvhV8eymh+c5ho2MOONhVA A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10592\"; a=\"312497080\"",
            "E=Sophos;i=\"5.97,222,1669104000\"; d=\"scan'208\";a=\"312497080\"",
            "E=McAfee;i=\"6500,9779,10592\"; a=\"767174560\"",
            "E=Sophos;i=\"5.97,222,1669104000\"; d=\"scan'208\";a=\"767174560\""
        ],
        "X-ExtLoop1": "1",
        "From": "beilei.xing@intel.com",
        "To": "jingjing.wu@intel.com",
        "Cc": "dev@dpdk.org,\n\tqi.z.zhang@intel.com,\n\tBeilei Xing <beilei.xing@intel.com>",
        "Subject": "[PATCH v3 12/15] common/idpf: add help functions for queue setup and\n release",
        "Date": "Tue, 17 Jan 2023 07:26:22 +0000",
        "Message-Id": "<20230117072626.93796-16-beilei.xing@intel.com>",
        "X-Mailer": "git-send-email 2.26.2",
        "In-Reply-To": "<20230117072626.93796-1-beilei.xing@intel.com>",
        "References": "\n <https://patches.dpdk.org/project/dpdk/cover/20230106091627.13530-1-beilei.xing@intel.com/>\n <20230117072626.93796-1-beilei.xing@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Beilei Xing <beilei.xing@intel.com>\n\nRefine rxq setup and txq setup.\nMove some help functions of queue setup and queue release\nto common module.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\n---\n drivers/common/idpf/idpf_common_rxtx.c  |  414 +++++++++\n drivers/common/idpf/idpf_common_rxtx.h  |   57 ++\n drivers/common/idpf/meson.build         |    1 +\n drivers/common/idpf/version.map         |   15 +\n drivers/net/idpf/idpf_rxtx.c            | 1051 ++++++-----------------\n drivers/net/idpf/idpf_rxtx.h            |    9 -\n drivers/net/idpf/idpf_rxtx_vec_avx512.c |    2 +-\n 7 files changed, 773 insertions(+), 776 deletions(-)\n create mode 100644 drivers/common/idpf/idpf_common_rxtx.c",
    "diff": "diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c\nnew file mode 100644\nindex 0000000000..eeeeedca88\n--- /dev/null\n+++ b/drivers/common/idpf/idpf_common_rxtx.c\n@@ -0,0 +1,414 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <rte_mbuf_dyn.h>\n+#include \"idpf_common_rxtx.h\"\n+\n+int\n+idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n+{\n+\t/* The following constraints must be satisfied:\n+\t * thresh < rxq->nb_rx_desc\n+\t */\n+\tif (thresh >= nb_desc) {\n+\t\tDRV_LOG(ERR, \"rx_free_thresh (%u) must be less than %u\",\n+\t\t\tthresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n+\t\t     uint16_t tx_free_thresh)\n+{\n+\t/* TX descriptors will have their RS bit set after tx_rs_thresh\n+\t * descriptors have been used. The TX descriptor ring will be cleaned\n+\t * after tx_free_thresh descriptors are used or if the number of\n+\t * descriptors required to transmit a packet is greater than the\n+\t * number of free TX descriptors.\n+\t *\n+\t * The following constraints must be satisfied:\n+\t *  - tx_rs_thresh must be less than the size of the ring minus 2.\n+\t *  - tx_free_thresh must be less than the size of the ring minus 3.\n+\t *  - tx_rs_thresh must be less than or equal to tx_free_thresh.\n+\t *  - tx_rs_thresh must be a divisor of the ring size.\n+\t *\n+\t * One descriptor in the TX ring is used as a sentinel to avoid a H/W\n+\t * race condition, hence the maximum threshold constraints. When set\n+\t * to zero use default values.\n+\t */\n+\tif (tx_rs_thresh >= (nb_desc - 2)) {\n+\t\tDRV_LOG(ERR, \"tx_rs_thresh (%u) must be less than the \"\n+\t\t\t\"number of TX descriptors (%u) minus 2\",\n+\t\t\ttx_rs_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_free_thresh >= (nb_desc - 3)) {\n+\t\tDRV_LOG(ERR, \"tx_free_thresh (%u) must be less than the \"\n+\t\t\t\"number of TX descriptors (%u) minus 3.\",\n+\t\t\ttx_free_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_rs_thresh > tx_free_thresh) {\n+\t\tDRV_LOG(ERR, \"tx_rs_thresh (%u) must be less than or \"\n+\t\t\t\"equal to tx_free_thresh (%u).\",\n+\t\t\ttx_rs_thresh, tx_free_thresh);\n+\t\treturn -EINVAL;\n+\t}\n+\tif ((nb_desc % tx_rs_thresh) != 0) {\n+\t\tDRV_LOG(ERR, \"tx_rs_thresh (%u) must be a divisor of the \"\n+\t\t\t\"number of TX descriptors (%u).\",\n+\t\t\ttx_rs_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+void\n+idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t i;\n+\n+\tif (rxq->sw_ring == NULL)\n+\t\treturn;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tif (rxq->sw_ring[i] != NULL) {\n+\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i]);\n+\t\t\trxq->sw_ring[i] = NULL;\n+\t\t}\n+\t}\n+}\n+\n+void\n+idpf_release_txq_mbufs(struct idpf_tx_queue *txq)\n+{\n+\tuint16_t nb_desc, i;\n+\n+\tif (txq == NULL || txq->sw_ring == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Pointer to rxq or sw_ring is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tif (txq->sw_nb_desc != 0) {\n+\t\t/* For split queue model, descriptor ring */\n+\t\tnb_desc = txq->sw_nb_desc;\n+\t} else {\n+\t\t/* For single queue model */\n+\t\tnb_desc = txq->nb_tx_desc;\n+\t}\n+\tfor (i = 0; i < nb_desc; i++) {\n+\t\tif (txq->sw_ring[i].mbuf != NULL) {\n+\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n+\t\t\ttxq->sw_ring[i].mbuf = NULL;\n+\t\t}\n+\t}\n+}\n+\n+void\n+idpf_reset_split_rx_descq(struct idpf_rx_queue 
*rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\trxq->rx_tail = 0;\n+\trxq->expected_gen_id = 1;\n+}\n+\n+void\n+idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\t/* The next descriptor id which can be received. */\n+\trxq->rx_next_avail = 0;\n+\n+\t/* The next descriptor id which can be refilled. */\n+\trxq->rx_tail = 0;\n+\t/* The number of descriptors which can be refilled. */\n+\trxq->nb_rx_hold = rxq->nb_rx_desc - 1;\n+\n+\trxq->bufq1 = NULL;\n+\trxq->bufq2 = NULL;\n+}\n+\n+void\n+idpf_reset_split_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\tidpf_reset_split_rx_descq(rxq);\n+\tidpf_reset_split_rx_bufq(rxq->bufq1);\n+\tidpf_reset_split_rx_bufq(rxq->bufq2);\n+}\n+\n+void\n+idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\n+\trte_pktmbuf_free(rxq->pkt_first_seg);\n+\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+\trxq->rxrearm_start = 0;\n+\trxq->rxrearm_nb = 0;\n+}\n+\n+void\n+idpf_reset_split_tx_descq(struct idpf_tx_queue *txq)\n+{\n+\tstruct idpf_tx_entry *txe;\n+\tuint32_t i, size;\n+\tuint16_t prev;\n+\n+\tif (txq == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->desc_ring)[i] = 0;\n+\n+\ttxe = txq->sw_ring;\n+\tprev = (uint16_t)(txq->sw_nb_desc - 1);\n+\tfor (i = 0; i < txq->sw_nb_desc; i++) {\n+\t\ttxe[i].mbuf = NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_used = 0;\n+\n+\t/* Use this as next to clean for split desc queue */\n+\ttxq->last_desc_cleaned = 0;\n+\ttxq->sw_tail = 0;\n+\ttxq->nb_free = txq->nb_tx_desc - 1;\n+}\n+\n+void\n+idpf_reset_split_tx_complq(struct idpf_tx_queue *cq)\n+{\n+\tuint32_t i, size;\n+\n+\tif (cq == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Pointer to complq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)cq->compl_ring)[i] = 0;\n+\n+\tcq->tx_tail = 0;\n+\tcq->expected_gen_id = 1;\n+}\n+\n+void\n+idpf_reset_single_tx_queue(struct idpf_tx_queue *txq)\n+{\n+\tstruct idpf_tx_entry *txe;\n+\tuint32_t i, size;\n+\tuint16_t prev;\n+\n+\tif (txq == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\ttxe = txq->sw_ring;\n+\tsize = sizeof(struct 
idpf_flex_tx_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->tx_ring)[i] = 0;\n+\n+\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\ttxq->tx_ring[i].qw1.cmd_dtype =\n+\t\t\trte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);\n+\t\ttxe[i].mbuf =  NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_used = 0;\n+\n+\ttxq->last_desc_cleaned = txq->nb_tx_desc - 1;\n+\ttxq->nb_free = txq->nb_tx_desc - 1;\n+\n+\ttxq->next_dd = txq->rs_thresh - 1;\n+\ttxq->next_rs = txq->rs_thresh - 1;\n+}\n+\n+void\n+idpf_rx_queue_release(void *rxq)\n+{\n+\tstruct idpf_rx_queue *q = rxq;\n+\n+\tif (q == NULL)\n+\t\treturn;\n+\n+\t/* Split queue */\n+\tif (q->bufq1 != NULL && q->bufq2 != NULL) {\n+\t\tq->bufq1->ops->release_mbufs(q->bufq1);\n+\t\trte_free(q->bufq1->sw_ring);\n+\t\trte_memzone_free(q->bufq1->mz);\n+\t\trte_free(q->bufq1);\n+\t\tq->bufq2->ops->release_mbufs(q->bufq2);\n+\t\trte_free(q->bufq2->sw_ring);\n+\t\trte_memzone_free(q->bufq2->mz);\n+\t\trte_free(q->bufq2);\n+\t\trte_memzone_free(q->mz);\n+\t\trte_free(q);\n+\t\treturn;\n+\t}\n+\n+\t/* Single queue */\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(q);\n+}\n+\n+void\n+idpf_tx_queue_release(void *txq)\n+{\n+\tstruct idpf_tx_queue *q = txq;\n+\n+\tif (q == NULL)\n+\t\treturn;\n+\n+\tif (q->complq) {\n+\t\trte_memzone_free(q->complq->mz);\n+\t\trte_free(q->complq);\n+\t}\n+\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(q);\n+}\n+\n+int\n+idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tvolatile struct virtchnl2_singleq_rx_buf_desc *rxd;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(mbuf == NULL)) {\n+\t\t\tDRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];\n+\t\trxd->pkt_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+\t\trxd->rsvd1 = 0;\n+\t\trxd->rsvd2 = 0;\n+\t\trxq->sw_ring[i] = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tvolatile struct virtchnl2_splitq_rx_buf_desc *rxd;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(mbuf == NULL)) {\n+\t\t\tDRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];\n+\t\trxd->qword0.buf_id = i;\n+\t\trxd->qword0.rsvd0 = 0;\n+\t\trxd->qword0.rsvd1 = 0;\n+\t\trxd->pkt_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+\t\trxd->rsvd2 = 0;\n+\n+\t\trxq->sw_ring[i] = mbuf;\n+\t}\n+\n+\trxq->nb_rx_hold = 0;\n+\trxq->rx_tail = 
rxq->nb_rx_desc - 1;\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h\nindex a9ed31c08a..c5bb7d48af 100644\n--- a/drivers/common/idpf/idpf_common_rxtx.h\n+++ b/drivers/common/idpf/idpf_common_rxtx.h\n@@ -5,11 +5,28 @@\n #ifndef _IDPF_COMMON_RXTX_H_\n #define _IDPF_COMMON_RXTX_H_\n \n+#include <rte_mbuf.h>\n #include <rte_mbuf_ptype.h>\n #include <rte_mbuf_core.h>\n \n #include \"idpf_common_device.h\"\n \n+#define IDPF_RX_MAX_BURST\t\t32\n+\n+#define IDPF_RX_OFFLOAD_IPV4_CKSUM\t\tRTE_BIT64(1)\n+#define IDPF_RX_OFFLOAD_UDP_CKSUM\t\tRTE_BIT64(2)\n+#define IDPF_RX_OFFLOAD_TCP_CKSUM\t\tRTE_BIT64(3)\n+#define IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM\tRTE_BIT64(6)\n+#define IDPF_RX_OFFLOAD_TIMESTAMP\t\tRTE_BIT64(14)\n+\n+#define IDPF_TX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)\n+#define IDPF_TX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)\n+#define IDPF_TX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)\n+#define IDPF_TX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(4)\n+#define IDPF_TX_OFFLOAD_TCP_TSO          RTE_BIT64(5)\n+#define IDPF_TX_OFFLOAD_MULTI_SEGS       RTE_BIT64(15)\n+#define IDPF_TX_OFFLOAD_MBUF_FAST_FREE   RTE_BIT64(16)\n+\n struct idpf_rx_stats {\n \tuint64_t mbuf_alloc_failed;\n };\n@@ -109,4 +126,44 @@ struct idpf_tx_queue {\n \tstruct idpf_tx_queue *complq;\n };\n \n+struct idpf_rxq_ops {\n+\tvoid (*release_mbufs)(struct idpf_rx_queue *rxq);\n+};\n+\n+struct idpf_txq_ops {\n+\tvoid (*release_mbufs)(struct idpf_tx_queue *txq);\n+};\n+\n+__rte_internal\n+int idpf_check_rx_thresh(uint16_t nb_desc, uint16_t thresh);\n+__rte_internal\n+int idpf_check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n+\t\t\t uint16_t tx_free_thresh);\n+__rte_internal\n+void idpf_release_rxq_mbufs(struct idpf_rx_queue *rxq);\n+__rte_internal\n+void idpf_release_txq_mbufs(struct idpf_tx_queue *txq);\n+__rte_internal\n+void idpf_reset_split_rx_descq(struct idpf_rx_queue *rxq);\n+__rte_internal\n+void idpf_reset_split_rx_bufq(struct idpf_rx_queue *rxq);\n+__rte_internal\n+void idpf_reset_split_rx_queue(struct idpf_rx_queue *rxq);\n+__rte_internal\n+void idpf_reset_single_rx_queue(struct idpf_rx_queue *rxq);\n+__rte_internal\n+void idpf_reset_split_tx_descq(struct idpf_tx_queue *txq);\n+__rte_internal\n+void idpf_reset_split_tx_complq(struct idpf_tx_queue *cq);\n+__rte_internal\n+void idpf_reset_single_tx_queue(struct idpf_tx_queue *txq);\n+__rte_internal\n+void idpf_rx_queue_release(void *rxq);\n+__rte_internal\n+void idpf_tx_queue_release(void *txq);\n+__rte_internal\n+int idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq);\n+__rte_internal\n+int idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq);\n+\n #endif /* _IDPF_COMMON_RXTX_H_ */\ndiff --git a/drivers/common/idpf/meson.build b/drivers/common/idpf/meson.build\nindex c6cc7a196b..5ee071fdb2 100644\n--- a/drivers/common/idpf/meson.build\n+++ b/drivers/common/idpf/meson.build\n@@ -5,6 +5,7 @@ deps += ['mbuf']\n \n sources = files(\n     'idpf_common_device.c',\n+    'idpf_common_rxtx.c',\n     'idpf_common_virtchnl.c',\n )\n \ndiff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map\nindex 19de5c8122..8d98635e46 100644\n--- a/drivers/common/idpf/version.map\n+++ b/drivers/common/idpf/version.map\n@@ -3,6 +3,10 @@ INTERNAL {\n \n \tidpf_adapter_deinit;\n \tidpf_adapter_init;\n+\tidpf_alloc_single_rxq_mbufs;\n+\tidpf_alloc_split_rxq_mbufs;\n+\tidpf_check_rx_thresh;\n+\tidpf_check_tx_thresh;\n \tidpf_config_irq_map;\n \tidpf_config_irq_unmap;\n \tidpf_config_rss;\n@@ -15,7 +19,18 @@ INTERNAL {\n 
\tidpf_ctlq_send;\n \tidpf_execute_vc_cmd;\n \tidpf_read_one_msg;\n+\tidpf_release_rxq_mbufs;\n+\tidpf_release_txq_mbufs;\n+\tidpf_reset_single_rx_queue;\n+\tidpf_reset_single_tx_queue;\n+\tidpf_reset_split_rx_bufq;\n+\tidpf_reset_split_rx_descq;\n+\tidpf_reset_split_rx_queue;\n+\tidpf_reset_split_tx_complq;\n+\tidpf_reset_split_tx_descq;\n+\tidpf_rx_queue_release;\n \tidpf_switch_queue;\n+\tidpf_tx_queue_release;\n \tidpf_vc_alloc_vectors;\n \tidpf_vc_check_api_version;\n \tidpf_vc_config_irq_map_unmap;\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex f0eff493f8..852076c235 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -12,358 +12,141 @@\n \n static int idpf_timestamp_dynfield_offset = -1;\n \n-static int\n-check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n-{\n-\t/* The following constraints must be satisfied:\n-\t *   thresh < rxq->nb_rx_desc\n-\t */\n-\tif (thresh >= nb_desc) {\n-\t\tPMD_INIT_LOG(ERR, \"rx_free_thresh (%u) must be less than %u\",\n-\t\t\t     thresh, nb_desc);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n-\t\tuint16_t tx_free_thresh)\n+static uint64_t\n+idpf_rx_offload_convert(uint64_t offload)\n {\n-\t/* TX descriptors will have their RS bit set after tx_rs_thresh\n-\t * descriptors have been used. The TX descriptor ring will be cleaned\n-\t * after tx_free_thresh descriptors are used or if the number of\n-\t * descriptors required to transmit a packet is greater than the\n-\t * number of free TX descriptors.\n-\t *\n-\t * The following constraints must be satisfied:\n-\t *  - tx_rs_thresh must be less than the size of the ring minus 2.\n-\t *  - tx_free_thresh must be less than the size of the ring minus 3.\n-\t *  - tx_rs_thresh must be less than or equal to tx_free_thresh.\n-\t *  - tx_rs_thresh must be a divisor of the ring size.\n-\t *\n-\t * One descriptor in the TX ring is used as a sentinel to avoid a H/W\n-\t * race condition, hence the maximum threshold constraints. 
When set\n-\t * to zero use default values.\n-\t */\n-\tif (tx_rs_thresh >= (nb_desc - 2)) {\n-\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be less than the \"\n-\t\t\t     \"number of TX descriptors (%u) minus 2\",\n-\t\t\t     tx_rs_thresh, nb_desc);\n-\t\treturn -EINVAL;\n-\t}\n-\tif (tx_free_thresh >= (nb_desc - 3)) {\n-\t\tPMD_INIT_LOG(ERR, \"tx_free_thresh (%u) must be less than the \"\n-\t\t\t     \"number of TX descriptors (%u) minus 3.\",\n-\t\t\t     tx_free_thresh, nb_desc);\n-\t\treturn -EINVAL;\n-\t}\n-\tif (tx_rs_thresh > tx_free_thresh) {\n-\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be less than or \"\n-\t\t\t     \"equal to tx_free_thresh (%u).\",\n-\t\t\t     tx_rs_thresh, tx_free_thresh);\n-\t\treturn -EINVAL;\n-\t}\n-\tif ((nb_desc % tx_rs_thresh) != 0) {\n-\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be a divisor of the \"\n-\t\t\t     \"number of TX descriptors (%u).\",\n-\t\t\t     tx_rs_thresh, nb_desc);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\treturn 0;\n+\tuint64_t ol = 0;\n+\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_IPV4_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_UDP_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_TCP_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_TIMESTAMP;\n+\n+\treturn ol;\n }\n \n-static void\n-release_rxq_mbufs(struct idpf_rx_queue *rxq)\n+static uint64_t\n+idpf_tx_offload_convert(uint64_t offload)\n {\n-\tuint16_t i;\n-\n-\tif (rxq->sw_ring == NULL)\n-\t\treturn;\n-\n-\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n-\t\tif (rxq->sw_ring[i] != NULL) {\n-\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i]);\n-\t\t\trxq->sw_ring[i] = NULL;\n-\t\t}\n-\t}\n-}\n-\n-static void\n-release_txq_mbufs(struct idpf_tx_queue *txq)\n-{\n-\tuint16_t nb_desc, i;\n-\n-\tif (txq == NULL || txq->sw_ring == NULL) {\n-\t\tPMD_DRV_LOG(DEBUG, \"Pointer to rxq or sw_ring is NULL\");\n-\t\treturn;\n-\t}\n-\n-\tif (txq->sw_nb_desc != 0) {\n-\t\t/* For split queue model, descriptor ring */\n-\t\tnb_desc = txq->sw_nb_desc;\n-\t} else {\n-\t\t/* For single queue model */\n-\t\tnb_desc = txq->nb_tx_desc;\n-\t}\n-\tfor (i = 0; i < nb_desc; i++) {\n-\t\tif (txq->sw_ring[i].mbuf != NULL) {\n-\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n-\t\t\ttxq->sw_ring[i].mbuf = NULL;\n-\t\t}\n-\t}\n+\tuint64_t ol = 0;\n+\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_IPV4_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_UDP_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_TCP_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_SCTP_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_MULTI_SEGS;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_MBUF_FAST_FREE;\n+\n+\treturn ol;\n }\n \n static const struct idpf_rxq_ops def_rxq_ops = {\n-\t.release_mbufs = release_rxq_mbufs,\n+\t.release_mbufs = idpf_release_rxq_mbufs,\n };\n \n static const struct idpf_txq_ops def_txq_ops = {\n-\t.release_mbufs = release_txq_mbufs,\n+\t.release_mbufs = idpf_release_txq_mbufs,\n };\n \n-static void\n-reset_split_rx_descq(struct idpf_rx_queue *rxq)\n-{\n-\tuint16_t len;\n-\tuint32_t i;\n-\n-\tif (rxq == 
NULL)\n-\t\treturn;\n-\n-\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n-\n-\tfor (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);\n-\t     i++)\n-\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n-\n-\trxq->rx_tail = 0;\n-\trxq->expected_gen_id = 1;\n-}\n-\n-static void\n-reset_split_rx_bufq(struct idpf_rx_queue *rxq)\n-{\n-\tuint16_t len;\n-\tuint32_t i;\n-\n-\tif (rxq == NULL)\n-\t\treturn;\n-\n-\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n-\n-\tfor (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);\n-\t     i++)\n-\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n-\n-\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n-\n-\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n-\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n-\n-\t/* The next descriptor id which can be received. */\n-\trxq->rx_next_avail = 0;\n-\n-\t/* The next descriptor id which can be refilled. */\n-\trxq->rx_tail = 0;\n-\t/* The number of descriptors which can be refilled. */\n-\trxq->nb_rx_hold = rxq->nb_rx_desc - 1;\n-\n-\trxq->bufq1 = NULL;\n-\trxq->bufq2 = NULL;\n-}\n-\n-static void\n-idpf_rx_queue_release(void *rxq)\n-{\n-\tstruct idpf_rx_queue *q = rxq;\n-\n-\tif (q == NULL)\n-\t\treturn;\n-\n-\t/* Split queue */\n-\tif (q->bufq1 != NULL && q->bufq2 != NULL) {\n-\t\tq->bufq1->ops->release_mbufs(q->bufq1);\n-\t\trte_free(q->bufq1->sw_ring);\n-\t\trte_memzone_free(q->bufq1->mz);\n-\t\trte_free(q->bufq1);\n-\t\tq->bufq2->ops->release_mbufs(q->bufq2);\n-\t\trte_free(q->bufq2->sw_ring);\n-\t\trte_memzone_free(q->bufq2->mz);\n-\t\trte_free(q->bufq2);\n-\t\trte_memzone_free(q->mz);\n-\t\trte_free(q);\n-\t\treturn;\n-\t}\n-\n-\t/* Single queue */\n-\tq->ops->release_mbufs(q);\n-\trte_free(q->sw_ring);\n-\trte_memzone_free(q->mz);\n-\trte_free(q);\n-}\n-\n-static void\n-idpf_tx_queue_release(void *txq)\n-{\n-\tstruct idpf_tx_queue *q = txq;\n-\n-\tif (q == NULL)\n-\t\treturn;\n-\n-\tif (q->complq) {\n-\t\trte_memzone_free(q->complq->mz);\n-\t\trte_free(q->complq);\n-\t}\n-\n-\tq->ops->release_mbufs(q);\n-\trte_free(q->sw_ring);\n-\trte_memzone_free(q->mz);\n-\trte_free(q);\n-}\n-\n-static inline void\n-reset_split_rx_queue(struct idpf_rx_queue *rxq)\n+static const struct rte_memzone *\n+idpf_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t      uint16_t len, uint16_t queue_type,\n+\t\t      unsigned int socket_id, bool splitq)\n {\n-\treset_split_rx_descq(rxq);\n-\treset_split_rx_bufq(rxq->bufq1);\n-\treset_split_rx_bufq(rxq->bufq2);\n-}\n-\n-static void\n-reset_single_rx_queue(struct idpf_rx_queue *rxq)\n-{\n-\tuint16_t len;\n-\tuint32_t i;\n-\n-\tif (rxq == NULL)\n-\t\treturn;\n-\n-\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n-\n-\tfor (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);\n-\t     i++)\n-\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n-\n-\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n-\n-\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n-\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n-\n-\trxq->rx_tail = 0;\n-\trxq->nb_rx_hold = 0;\n-\n-\trte_pktmbuf_free(rxq->pkt_first_seg);\n-\n-\trxq->pkt_first_seg = NULL;\n-\trxq->pkt_last_seg = NULL;\n-\trxq->rxrearm_start = 0;\n-\trxq->rxrearm_nb = 0;\n-}\n-\n-static void\n-reset_split_tx_descq(struct idpf_tx_queue *txq)\n-{\n-\tstruct idpf_tx_entry *txe;\n-\tuint32_t i, size;\n-\tuint16_t prev;\n-\n-\tif (txq == NULL) {\n-\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n-\t\treturn;\n-\t}\n+\tchar ring_name[RTE_MEMZONE_NAMESIZE];\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n 
\n-\tsize = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;\n-\tfor (i = 0; i < size; i++)\n-\t\t((volatile char *)txq->desc_ring)[i] = 0;\n-\n-\ttxe = txq->sw_ring;\n-\tprev = (uint16_t)(txq->sw_nb_desc - 1);\n-\tfor (i = 0; i < txq->sw_nb_desc; i++) {\n-\t\ttxe[i].mbuf = NULL;\n-\t\ttxe[i].last_id = i;\n-\t\ttxe[prev].next_id = i;\n-\t\tprev = i;\n+\tmemset(ring_name, 0, RTE_MEMZONE_NAMESIZE);\n+\tswitch (queue_type) {\n+\tcase VIRTCHNL2_QUEUE_TYPE_TX:\n+\t\tif (splitq)\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc),\n+\t\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\t\telse\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_desc),\n+\t\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"idpf Tx ring\", sizeof(\"idpf Tx ring\"));\n+\t\tbreak;\n+\tcase VIRTCHNL2_QUEUE_TYPE_RX:\n+\t\tif (splitq)\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),\n+\t\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\t\telse\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_singleq_rx_buf_desc),\n+\t\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"idpf Rx ring\", sizeof(\"idpf Rx ring\"));\n+\t\tbreak;\n+\tcase VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:\n+\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_splitq_tx_compl_desc),\n+\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"idpf Tx compl ring\", sizeof(\"idpf Tx compl ring\"));\n+\t\tbreak;\n+\tcase VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:\n+\t\tring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_splitq_rx_buf_desc),\n+\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"idpf Rx buf ring\", sizeof(\"idpf Rx buf ring\"));\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_INIT_LOG(ERR, \"Invalid queue type\");\n+\t\treturn NULL;\n \t}\n \n-\ttxq->tx_tail = 0;\n-\ttxq->nb_used = 0;\n-\n-\t/* Use this as next to clean for split desc queue */\n-\ttxq->last_desc_cleaned = 0;\n-\ttxq->sw_tail = 0;\n-\ttxq->nb_free = txq->nb_tx_desc - 1;\n-}\n-\n-static void\n-reset_split_tx_complq(struct idpf_tx_queue *cq)\n-{\n-\tuint32_t i, size;\n-\n-\tif (cq == NULL) {\n-\t\tPMD_DRV_LOG(DEBUG, \"Pointer to complq is NULL\");\n-\t\treturn;\n+\tmz = rte_eth_dma_zone_reserve(dev, ring_name, queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for ring\");\n+\t\treturn NULL;\n \t}\n \n-\tsize = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;\n-\tfor (i = 0; i < size; i++)\n-\t\t((volatile char *)cq->compl_ring)[i] = 0;\n+\t/* Zero all the descriptors in the ring. 
*/\n+\tmemset(mz->addr, 0, ring_size);\n \n-\tcq->tx_tail = 0;\n-\tcq->expected_gen_id = 1;\n+\treturn mz;\n }\n \n static void\n-reset_single_tx_queue(struct idpf_tx_queue *txq)\n+idpf_dma_zone_release(const struct rte_memzone *mz)\n {\n-\tstruct idpf_tx_entry *txe;\n-\tuint32_t i, size;\n-\tuint16_t prev;\n-\n-\tif (txq == NULL) {\n-\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n-\t\treturn;\n-\t}\n-\n-\ttxe = txq->sw_ring;\n-\tsize = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;\n-\tfor (i = 0; i < size; i++)\n-\t\t((volatile char *)txq->tx_ring)[i] = 0;\n-\n-\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n-\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n-\t\ttxq->tx_ring[i].qw1.cmd_dtype =\n-\t\t\trte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);\n-\t\ttxe[i].mbuf =  NULL;\n-\t\ttxe[i].last_id = i;\n-\t\ttxe[prev].next_id = i;\n-\t\tprev = i;\n-\t}\n-\n-\ttxq->tx_tail = 0;\n-\ttxq->nb_used = 0;\n-\n-\ttxq->last_desc_cleaned = txq->nb_tx_desc - 1;\n-\ttxq->nb_free = txq->nb_tx_desc - 1;\n-\n-\ttxq->next_dd = txq->rs_thresh - 1;\n-\ttxq->next_rs = txq->rs_thresh - 1;\n+\trte_memzone_free(mz);\n }\n \n static int\n-idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,\n \t\t\t uint16_t queue_idx, uint16_t rx_free_thresh,\n \t\t\t uint16_t nb_desc, unsigned int socket_id,\n-\t\t\t struct rte_mempool *mp)\n+\t\t\t struct rte_mempool *mp, uint8_t bufq_id)\n {\n \tstruct idpf_vport *vport = dev->data->dev_private;\n \tstruct idpf_adapter *adapter = vport->adapter;\n \tstruct idpf_hw *hw = &adapter->hw;\n \tconst struct rte_memzone *mz;\n-\tuint32_t ring_size;\n+\tstruct idpf_rx_queue *bufq;\n \tuint16_t len;\n+\tint ret;\n+\n+\tbufq = rte_zmalloc_socket(\"idpf bufq\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_bufq1_alloc;\n+\t}\n \n \tbufq->mp = mp;\n \tbufq->nb_rx_desc = nb_desc;\n@@ -376,8 +159,21 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n \tlen = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;\n \tbufq->rx_buf_len = len;\n \n-\t/* Allocate the software ring. */\n+\t/* Allocate a little more to support bulk allocate. */\n \tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\n+\tmz = idpf_dma_zone_reserve(dev, queue_idx, len,\n+\t\t\t\t   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,\n+\t\t\t\t   socket_id, true);\n+\tif (mz == NULL) {\n+\t\tret = -ENOMEM;\n+\t\tgoto err_mz_reserve;\n+\t}\n+\n+\tbufq->rx_ring_phys_addr = mz->iova;\n+\tbufq->rx_ring = mz->addr;\n+\tbufq->mz = mz;\n+\n \tbufq->sw_ring =\n \t\trte_zmalloc_socket(\"idpf rx bufq sw ring\",\n \t\t\t\t   sizeof(struct rte_mbuf *) * len,\n@@ -385,55 +181,60 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n \t\t\t\t   socket_id);\n \tif (bufq->sw_ring == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\t/* Allocate a liitle more to support bulk allocate. 
*/\n-\tlen = nb_desc + IDPF_RX_MAX_BURST;\n-\tring_size = RTE_ALIGN(len *\n-\t\t\t      sizeof(struct virtchnl2_splitq_rx_buf_desc),\n-\t\t\t      IDPF_DMA_MEM_ALIGN);\n-\tmz = rte_eth_dma_zone_reserve(dev, \"rx_buf_ring\", queue_idx,\n-\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n-\t\t\t\t      socket_id);\n-\tif (mz == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n-\t\trte_free(bufq->sw_ring);\n-\t\treturn -ENOMEM;\n+\t\tret = -ENOMEM;\n+\t\tgoto err_sw_ring_alloc;\n \t}\n \n-\t/* Zero all the descriptors in the ring. */\n-\tmemset(mz->addr, 0, ring_size);\n-\tbufq->rx_ring_phys_addr = mz->iova;\n-\tbufq->rx_ring = mz->addr;\n-\n-\tbufq->mz = mz;\n-\treset_split_rx_bufq(bufq);\n-\tbufq->q_set = true;\n+\tidpf_reset_split_rx_bufq(bufq);\n \tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n \t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n \tbufq->ops = &def_rxq_ops;\n+\tbufq->q_set = true;\n \n-\t/* TODO: allow bulk or vec */\n+\tif (bufq_id == 1) {\n+\t\trxq->bufq1 = bufq;\n+\t} else if (bufq_id == 2) {\n+\t\trxq->bufq2 = bufq;\n+\t} else {\n+\t\tPMD_INIT_LOG(ERR, \"Invalid buffer queue index.\");\n+\t\tret = -EINVAL;\n+\t\tgoto err_bufq_id;\n+\t}\n \n \treturn 0;\n+\n+err_bufq_id:\n+\trte_free(bufq->sw_ring);\n+err_sw_ring_alloc:\n+\tidpf_dma_zone_release(mz);\n+err_mz_reserve:\n+\trte_free(bufq);\n+err_bufq1_alloc:\n+\treturn ret;\n }\n \n-static int\n-idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n-\t\t\t  const struct rte_eth_rxconf *rx_conf,\n-\t\t\t  struct rte_mempool *mp)\n+static void\n+idpf_rx_split_bufq_release(struct idpf_rx_queue *bufq)\n+{\n+\trte_free(bufq->sw_ring);\n+\tidpf_dma_zone_release(bufq->mz);\n+\trte_free(bufq);\n+}\n+\n+int\n+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_rxconf *rx_conf,\n+\t\t    struct rte_mempool *mp)\n {\n \tstruct idpf_vport *vport = dev->data->dev_private;\n \tstruct idpf_adapter *adapter = vport->adapter;\n-\tstruct idpf_rx_queue *bufq1, *bufq2;\n+\tstruct idpf_hw *hw = &adapter->hw;\n \tconst struct rte_memzone *mz;\n \tstruct idpf_rx_queue *rxq;\n \tuint16_t rx_free_thresh;\n-\tuint32_t ring_size;\n \tuint64_t offloads;\n-\tuint16_t qid;\n+\tbool is_splitq;\n \tuint16_t len;\n \tint ret;\n \n@@ -443,7 +244,7 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n \t\tIDPF_DEFAULT_RX_FREE_THRESH :\n \t\trx_conf->rx_free_thresh;\n-\tif (check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n+\tif (idpf_check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n \t\treturn -EINVAL;\n \n \t/* Free memory if needed */\n@@ -452,16 +253,19 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\tdev->data->rx_queues[queue_idx] = NULL;\n \t}\n \n-\t/* Setup Rx description queue */\n+\t/* Setup Rx queue */\n \trxq = rte_zmalloc_socket(\"idpf rxq\",\n \t\t\t\t sizeof(struct idpf_rx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE,\n \t\t\t\t socket_id);\n \tif (rxq == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n-\t\treturn -ENOMEM;\n+\t\tret = -ENOMEM;\n+\t\tgoto err_rxq_alloc;\n \t}\n \n+\tis_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\n \trxq->mp = mp;\n \trxq->nb_rx_desc = nb_desc;\n \trxq->rx_free_thresh = rx_free_thresh;\n@@ -470,343 +274,129 @@ 
idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n \trxq->rx_hdr_len = 0;\n \trxq->adapter = adapter;\n-\trxq->offloads = offloads;\n+\trxq->offloads = idpf_rx_offload_convert(offloads);\n \n \tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n \trxq->rx_buf_len = len;\n \n-\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n-\tring_size = RTE_ALIGN(len *\n-\t\t\t      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),\n-\t\t\t      IDPF_DMA_MEM_ALIGN);\n-\tmz = rte_eth_dma_zone_reserve(dev, \"rx_cpmpl_ring\", queue_idx,\n-\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n-\t\t\t\t      socket_id);\n-\n+\t/* Allocate a little more to support bulk allocate. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tmz = idpf_dma_zone_reserve(dev, queue_idx, len, VIRTCHNL2_QUEUE_TYPE_RX,\n+\t\t\t\t   socket_id, is_splitq);\n \tif (mz == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n \t\tret = -ENOMEM;\n-\t\tgoto free_rxq;\n+\t\tgoto err_mz_reserve;\n \t}\n-\n-\t/* Zero all the descriptors in the ring. */\n-\tmemset(mz->addr, 0, ring_size);\n \trxq->rx_ring_phys_addr = mz->iova;\n \trxq->rx_ring = mz->addr;\n-\n \trxq->mz = mz;\n-\treset_split_rx_descq(rxq);\n \n-\t/* TODO: allow bulk or vec */\n+\tif (!is_splitq) {\n+\t\trxq->sw_ring = rte_zmalloc_socket(\"idpf rxq sw ring\",\n+\t\t\t\t\t\t  sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t\t\t  RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\t  socket_id);\n+\t\tif (rxq->sw_ring == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto err_sw_ring_alloc;\n+\t\t}\n \n-\t/* setup Rx buffer queue */\n-\tbufq1 = rte_zmalloc_socket(\"idpf bufq1\",\n-\t\t\t\t   sizeof(struct idpf_rx_queue),\n-\t\t\t\t   RTE_CACHE_LINE_SIZE,\n-\t\t\t\t   socket_id);\n-\tif (bufq1 == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 1.\");\n-\t\tret = -ENOMEM;\n-\t\tgoto free_mz;\n-\t}\n-\tqid = 2 * queue_idx;\n-\tret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,\n-\t\t\t\t       nb_desc, socket_id, mp);\n-\tif (ret != 0) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 1\");\n-\t\tret = -EINVAL;\n-\t\tgoto free_bufq1;\n-\t}\n-\trxq->bufq1 = bufq1;\n+\t\tidpf_reset_single_rx_queue(rxq);\n+\t\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n+\t\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n+\t\trxq->ops = &def_rxq_ops;\n+\t} else {\n+\t\tidpf_reset_split_rx_descq(rxq);\n \n-\tbufq2 = rte_zmalloc_socket(\"idpf bufq2\",\n-\t\t\t\t   sizeof(struct idpf_rx_queue),\n-\t\t\t\t   RTE_CACHE_LINE_SIZE,\n-\t\t\t\t   socket_id);\n-\tif (bufq2 == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 2.\");\n-\t\trte_free(bufq1->sw_ring);\n-\t\trte_memzone_free(bufq1->mz);\n-\t\tret = -ENOMEM;\n-\t\tgoto free_bufq1;\n-\t}\n-\tqid = 2 * queue_idx + 1;\n-\tret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,\n-\t\t\t\t       nb_desc, socket_id, mp);\n-\tif (ret != 0) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 2\");\n-\t\trte_free(bufq1->sw_ring);\n-\t\trte_memzone_free(bufq1->mz);\n-\t\tret = -EINVAL;\n-\t\tgoto free_bufq2;\n+\t\t/* Setup Rx buffer queues */\n+\t\tret = idpf_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,\n+\t\t\t\t\t       rx_free_thresh, nb_desc,\n+\t\t\t\t\t       socket_id, mp, 1);\n+\t\tif (ret != 0) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 1\");\n+\t\t\tret = 
-EINVAL;\n+\t\t\tgoto err_bufq1_setup;\n+\t\t}\n+\n+\t\tret = idpf_rx_split_bufq_setup(dev, rxq, 2 * queue_idx + 1,\n+\t\t\t\t\t       rx_free_thresh, nb_desc,\n+\t\t\t\t\t       socket_id, mp, 2);\n+\t\tif (ret != 0) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 2\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto err_bufq2_setup;\n+\t\t}\n \t}\n-\trxq->bufq2 = bufq2;\n \n \trxq->q_set = true;\n \tdev->data->rx_queues[queue_idx] = rxq;\n \n \treturn 0;\n \n-free_bufq2:\n-\trte_free(bufq2);\n-free_bufq1:\n-\trte_free(bufq1);\n-free_mz:\n-\trte_memzone_free(mz);\n-free_rxq:\n+err_bufq2_setup:\n+\tidpf_rx_split_bufq_release(rxq->bufq1);\n+err_bufq1_setup:\n+err_sw_ring_alloc:\n+\tidpf_dma_zone_release(mz);\n+err_mz_reserve:\n \trte_free(rxq);\n-\n+err_rxq_alloc:\n \treturn ret;\n }\n \n static int\n-idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n-\t\t\t   const struct rte_eth_rxconf *rx_conf,\n-\t\t\t   struct rte_mempool *mp)\n+idpf_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,\n+\t\t     uint16_t queue_idx, uint16_t nb_desc,\n+\t\t     unsigned int socket_id)\n {\n \tstruct idpf_vport *vport = dev->data->dev_private;\n-\tstruct idpf_adapter *adapter = vport->adapter;\n-\tstruct idpf_hw *hw = &adapter->hw;\n \tconst struct rte_memzone *mz;\n-\tstruct idpf_rx_queue *rxq;\n-\tuint16_t rx_free_thresh;\n-\tuint32_t ring_size;\n-\tuint64_t offloads;\n-\tuint16_t len;\n-\n-\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n-\n-\t/* Check free threshold */\n-\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n-\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n-\t\trx_conf->rx_free_thresh;\n-\tif (check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n-\t\treturn -EINVAL;\n-\n-\t/* Free memory if needed */\n-\tif (dev->data->rx_queues[queue_idx] != NULL) {\n-\t\tidpf_rx_queue_release(dev->data->rx_queues[queue_idx]);\n-\t\tdev->data->rx_queues[queue_idx] = NULL;\n-\t}\n-\n-\t/* Setup Rx description queue */\n-\trxq = rte_zmalloc_socket(\"idpf rxq\",\n-\t\t\t\t sizeof(struct idpf_rx_queue),\n-\t\t\t\t RTE_CACHE_LINE_SIZE,\n-\t\t\t\t socket_id);\n-\tif (rxq == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\trxq->mp = mp;\n-\trxq->nb_rx_desc = nb_desc;\n-\trxq->rx_free_thresh = rx_free_thresh;\n-\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n-\trxq->port_id = dev->data->port_id;\n-\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n-\trxq->rx_hdr_len = 0;\n-\trxq->adapter = adapter;\n-\trxq->offloads = offloads;\n-\n-\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n-\trxq->rx_buf_len = len;\n-\n-\tlen = nb_desc + IDPF_RX_MAX_BURST;\n-\trxq->sw_ring =\n-\t\trte_zmalloc_socket(\"idpf rxq sw ring\",\n-\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n-\t\t\t\t   RTE_CACHE_LINE_SIZE,\n-\t\t\t\t   socket_id);\n-\tif (rxq->sw_ring == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n-\t\trte_free(rxq);\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\t/* Allocate a liitle more to support bulk allocate. 
*/\n-\tlen = nb_desc + IDPF_RX_MAX_BURST;\n-\tring_size = RTE_ALIGN(len *\n-\t\t\t      sizeof(struct virtchnl2_singleq_rx_buf_desc),\n-\t\t\t      IDPF_DMA_MEM_ALIGN);\n-\tmz = rte_eth_dma_zone_reserve(dev, \"rx ring\", queue_idx,\n-\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n-\t\t\t\t      socket_id);\n-\tif (mz == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n-\t\trte_free(rxq->sw_ring);\n-\t\trte_free(rxq);\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\t/* Zero all the descriptors in the ring. */\n-\tmemset(mz->addr, 0, ring_size);\n-\trxq->rx_ring_phys_addr = mz->iova;\n-\trxq->rx_ring = mz->addr;\n-\n-\trxq->mz = mz;\n-\treset_single_rx_queue(rxq);\n-\trxq->q_set = true;\n-\tdev->data->rx_queues[queue_idx] = rxq;\n-\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n-\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n-\trxq->ops = &def_rxq_ops;\n-\n-\treturn 0;\n-}\n-\n-int\n-idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\t    uint16_t nb_desc, unsigned int socket_id,\n-\t\t    const struct rte_eth_rxconf *rx_conf,\n-\t\t    struct rte_mempool *mp)\n-{\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n-\n-\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n-\t\treturn idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,\n-\t\t\t\t\t\t  socket_id, rx_conf, mp);\n-\telse\n-\t\treturn idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,\n-\t\t\t\t\t\t socket_id, rx_conf, mp);\n-}\n-\n-static int\n-idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n-\t\t\t  const struct rte_eth_txconf *tx_conf)\n-{\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n-\tstruct idpf_adapter *adapter = vport->adapter;\n-\tuint16_t tx_rs_thresh, tx_free_thresh;\n-\tstruct idpf_hw *hw = &adapter->hw;\n-\tstruct idpf_tx_queue *txq, *cq;\n-\tconst struct rte_memzone *mz;\n-\tuint32_t ring_size;\n-\tuint64_t offloads;\n+\tstruct idpf_tx_queue *cq;\n \tint ret;\n \n-\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n-\n-\ttx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh != 0) ?\n-\t\ttx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);\n-\ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh != 0) ?\n-\t\ttx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);\n-\tif (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)\n-\t\treturn -EINVAL;\n-\n-\t/* Free memory if needed. */\n-\tif (dev->data->tx_queues[queue_idx] != NULL) {\n-\t\tidpf_tx_queue_release(dev->data->tx_queues[queue_idx]);\n-\t\tdev->data->tx_queues[queue_idx] = NULL;\n-\t}\n-\n-\t/* Allocate the TX queue data structure. 
*/\n-\ttxq = rte_zmalloc_socket(\"idpf split txq\",\n-\t\t\t\t sizeof(struct idpf_tx_queue),\n-\t\t\t\t RTE_CACHE_LINE_SIZE,\n-\t\t\t\t socket_id);\n-\tif (txq == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\ttxq->nb_tx_desc = nb_desc;\n-\ttxq->rs_thresh = tx_rs_thresh;\n-\ttxq->free_thresh = tx_free_thresh;\n-\ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n-\ttxq->port_id = dev->data->port_id;\n-\ttxq->offloads = offloads;\n-\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n-\n-\t/* Allocate software ring */\n-\ttxq->sw_nb_desc = 2 * nb_desc;\n-\ttxq->sw_ring =\n-\t\trte_zmalloc_socket(\"idpf split tx sw ring\",\n-\t\t\t\t   sizeof(struct idpf_tx_entry) *\n-\t\t\t\t   txq->sw_nb_desc,\n-\t\t\t\t   RTE_CACHE_LINE_SIZE,\n-\t\t\t\t   socket_id);\n-\tif (txq->sw_ring == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n-\t\tret = -ENOMEM;\n-\t\tgoto err_txq_sw_ring;\n-\t}\n-\n-\t/* Allocate TX hardware ring descriptors. */\n-\tring_size = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;\n-\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n-\tmz = rte_eth_dma_zone_reserve(dev, \"split_tx_ring\", queue_idx,\n-\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n-\t\t\t\t      socket_id);\n-\tif (mz == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n-\t\tret = -ENOMEM;\n-\t\tgoto err_txq_mz;\n-\t}\n-\ttxq->tx_ring_phys_addr = mz->iova;\n-\ttxq->desc_ring = mz->addr;\n-\n-\ttxq->mz = mz;\n-\treset_split_tx_descq(txq);\n-\ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n-\t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n-\ttxq->ops = &def_txq_ops;\n-\n-\t/* Allocate the TX completion queue data structure. 
*/\n-\ttxq->complq = rte_zmalloc_socket(\"idpf splitq cq\",\n-\t\t\t\t\t sizeof(struct idpf_tx_queue),\n-\t\t\t\t\t RTE_CACHE_LINE_SIZE,\n-\t\t\t\t\t socket_id);\n-\tcq = txq->complq;\n+\tcq = rte_zmalloc_socket(\"idpf splitq cq\",\n+\t\t\t\tsizeof(struct idpf_tx_queue),\n+\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\tsocket_id);\n \tif (cq == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for Tx compl queue\");\n \t\tret = -ENOMEM;\n-\t\tgoto err_cq;\n+\t\tgoto err_cq_alloc;\n \t}\n-\tcq->nb_tx_desc = 2 * nb_desc;\n+\n+\tcq->nb_tx_desc = nb_desc;\n \tcq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;\n \tcq->port_id = dev->data->port_id;\n \tcq->txqs = dev->data->tx_queues;\n \tcq->tx_start_qid = vport->chunks_info.tx_start_qid;\n \n-\tring_size = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;\n-\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n-\tmz = rte_eth_dma_zone_reserve(dev, \"tx_split_compl_ring\", queue_idx,\n-\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n-\t\t\t\t      socket_id);\n+\tmz = idpf_dma_zone_reserve(dev, queue_idx, nb_desc,\n+\t\t\t\t   VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION,\n+\t\t\t\t   socket_id, true);\n \tif (mz == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX completion queue\");\n \t\tret = -ENOMEM;\n-\t\tgoto err_cq_mz;\n+\t\tgoto err_mz_reserve;\n \t}\n \tcq->tx_ring_phys_addr = mz->iova;\n \tcq->compl_ring = mz->addr;\n \tcq->mz = mz;\n-\treset_split_tx_complq(cq);\n+\tidpf_reset_split_tx_complq(cq);\n \n-\ttxq->q_set = true;\n-\tdev->data->tx_queues[queue_idx] = txq;\n+\ttxq->complq = cq;\n \n \treturn 0;\n \n-err_cq_mz:\n+err_mz_reserve:\n \trte_free(cq);\n-err_cq:\n-\trte_memzone_free(txq->mz);\n-err_txq_mz:\n-\trte_free(txq->sw_ring);\n-err_txq_sw_ring:\n-\trte_free(txq);\n-\n+err_cq_alloc:\n \treturn ret;\n }\n \n-static int\n-idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n-\t\t\t   const struct rte_eth_txconf *tx_conf)\n+int\n+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_txconf *tx_conf)\n {\n \tstruct idpf_vport *vport = dev->data->dev_private;\n \tstruct idpf_adapter *adapter = vport->adapter;\n@@ -814,8 +404,10 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \tstruct idpf_hw *hw = &adapter->hw;\n \tconst struct rte_memzone *mz;\n \tstruct idpf_tx_queue *txq;\n-\tuint32_t ring_size;\n \tuint64_t offloads;\n+\tuint16_t len;\n+\tbool is_splitq;\n+\tint ret;\n \n \toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n \n@@ -823,7 +415,7 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\ttx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);\n \ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh > 0) ?\n \t\ttx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);\n-\tif (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)\n+\tif (idpf_check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)\n \t\treturn -EINVAL;\n \n \t/* Free memory if needed. 
*/\n@@ -839,71 +431,74 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t\t socket_id);\n \tif (txq == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n-\t\treturn -ENOMEM;\n+\t\tret = -ENOMEM;\n+\t\tgoto err_txq_alloc;\n \t}\n \n-\t/* TODO: vlan offload */\n+\tis_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n \n \ttxq->nb_tx_desc = nb_desc;\n \ttxq->rs_thresh = tx_rs_thresh;\n \ttxq->free_thresh = tx_free_thresh;\n \ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n \ttxq->port_id = dev->data->port_id;\n-\ttxq->offloads = offloads;\n+\ttxq->offloads = idpf_tx_offload_convert(offloads);\n \ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n \n-\t/* Allocate software ring */\n-\ttxq->sw_ring =\n-\t\trte_zmalloc_socket(\"idpf tx sw ring\",\n-\t\t\t\t   sizeof(struct idpf_tx_entry) * nb_desc,\n-\t\t\t\t   RTE_CACHE_LINE_SIZE,\n-\t\t\t\t   socket_id);\n-\tif (txq->sw_ring == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n-\t\trte_free(txq);\n-\t\treturn -ENOMEM;\n-\t}\n+\tif (is_splitq)\n+\t\tlen = 2 * nb_desc;\n+\telse\n+\t\tlen = nb_desc;\n+\ttxq->sw_nb_desc = len;\n \n \t/* Allocate TX hardware ring descriptors. */\n-\tring_size = sizeof(struct idpf_flex_tx_desc) * nb_desc;\n-\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n-\tmz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx,\n-\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n-\t\t\t\t      socket_id);\n+\tmz = idpf_dma_zone_reserve(dev, queue_idx, nb_desc, VIRTCHNL2_QUEUE_TYPE_TX,\n+\t\t\t\t   socket_id, is_splitq);\n \tif (mz == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n-\t\trte_free(txq->sw_ring);\n-\t\trte_free(txq);\n-\t\treturn -ENOMEM;\n+\t\tret = -ENOMEM;\n+\t\tgoto err_mz_reserve;\n \t}\n-\n \ttxq->tx_ring_phys_addr = mz->iova;\n-\ttxq->tx_ring = mz->addr;\n-\n \ttxq->mz = mz;\n-\treset_single_tx_queue(txq);\n-\ttxq->q_set = true;\n-\tdev->data->tx_queues[queue_idx] = txq;\n+\n+\ttxq->sw_ring = rte_zmalloc_socket(\"idpf tx sw ring\",\n+\t\t\t\t\t  sizeof(struct idpf_tx_entry) * len,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (txq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_sw_ring_alloc;\n+\t}\n+\n+\tif (!is_splitq) {\n+\t\ttxq->tx_ring = mz->addr;\n+\t\tidpf_reset_single_tx_queue(txq);\n+\t} else {\n+\t\ttxq->desc_ring = mz->addr;\n+\t\tidpf_reset_split_tx_descq(txq);\n+\n+\t\t/* Setup tx completion queue if split model */\n+\t\tret = idpf_tx_complq_setup(dev, txq, queue_idx,\n+\t\t\t\t\t   2 * nb_desc, socket_id);\n+\t\tif (ret != 0)\n+\t\t\tgoto err_complq_setup;\n+\t}\n+\n \ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n \t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n \ttxq->ops = &def_txq_ops;\n+\ttxq->q_set = true;\n+\tdev->data->tx_queues[queue_idx] = txq;\n \n \treturn 0;\n-}\n \n-int\n-idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\t    uint16_t nb_desc, unsigned int socket_id,\n-\t\t    const struct rte_eth_txconf *tx_conf)\n-{\n-\tstruct idpf_vport *vport = dev->data->dev_private;\n-\n-\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n-\t\treturn idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,\n-\t\t\t\t\t\t  socket_id, tx_conf);\n-\telse\n-\t\treturn idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,\n-\t\t\t\t\t\t socket_id, 
tx_conf);\n+err_complq_setup:\n+err_sw_ring_alloc:\n+\tidpf_dma_zone_release(mz);\n+err_mz_reserve:\n+\trte_free(txq);\n+err_txq_alloc:\n+\treturn ret;\n }\n \n static int\n@@ -916,89 +511,13 @@ idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)\n \t\t\t\t\t\t\t &idpf_timestamp_dynflag);\n \t\tif (err != 0) {\n \t\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\t\"Cannot register mbuf field/flag for timestamp\");\n+\t\t\t\t    \"Cannot register mbuf field/flag for timestamp\");\n \t\t\treturn -EINVAL;\n \t\t}\n \t}\n \treturn 0;\n }\n \n-static int\n-idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)\n-{\n-\tvolatile struct virtchnl2_singleq_rx_buf_desc *rxd;\n-\tstruct rte_mbuf *mbuf = NULL;\n-\tuint64_t dma_addr;\n-\tuint16_t i;\n-\n-\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n-\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n-\t\tif (unlikely(mbuf == NULL)) {\n-\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n-\t\t\treturn -ENOMEM;\n-\t\t}\n-\n-\t\trte_mbuf_refcnt_set(mbuf, 1);\n-\t\tmbuf->next = NULL;\n-\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n-\t\tmbuf->nb_segs = 1;\n-\t\tmbuf->port = rxq->port_id;\n-\n-\t\tdma_addr =\n-\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n-\n-\t\trxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];\n-\t\trxd->pkt_addr = dma_addr;\n-\t\trxd->hdr_addr = 0;\n-\t\trxd->rsvd1 = 0;\n-\t\trxd->rsvd2 = 0;\n-\t\trxq->sw_ring[i] = mbuf;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)\n-{\n-\tvolatile struct virtchnl2_splitq_rx_buf_desc *rxd;\n-\tstruct rte_mbuf *mbuf = NULL;\n-\tuint64_t dma_addr;\n-\tuint16_t i;\n-\n-\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n-\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n-\t\tif (unlikely(mbuf == NULL)) {\n-\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n-\t\t\treturn -ENOMEM;\n-\t\t}\n-\n-\t\trte_mbuf_refcnt_set(mbuf, 1);\n-\t\tmbuf->next = NULL;\n-\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n-\t\tmbuf->nb_segs = 1;\n-\t\tmbuf->port = rxq->port_id;\n-\n-\t\tdma_addr =\n-\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n-\n-\t\trxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];\n-\t\trxd->qword0.buf_id = i;\n-\t\trxd->qword0.rsvd0 = 0;\n-\t\trxd->qword0.rsvd1 = 0;\n-\t\trxd->pkt_addr = dma_addr;\n-\t\trxd->hdr_addr = 0;\n-\t\trxd->rsvd2 = 0;\n-\n-\t\trxq->sw_ring[i] = mbuf;\n-\t}\n-\n-\trxq->nb_rx_hold = 0;\n-\trxq->rx_tail = rxq->nb_rx_desc - 1;\n-\n-\treturn 0;\n-}\n-\n int\n idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n@@ -1164,11 +683,11 @@ idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \trxq = dev->data->rx_queues[rx_queue_id];\n \tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n \t\trxq->ops->release_mbufs(rxq);\n-\t\treset_single_rx_queue(rxq);\n+\t\tidpf_reset_single_rx_queue(rxq);\n \t} else {\n \t\trxq->bufq1->ops->release_mbufs(rxq->bufq1);\n \t\trxq->bufq2->ops->release_mbufs(rxq->bufq2);\n-\t\treset_split_rx_queue(rxq);\n+\t\tidpf_reset_split_rx_queue(rxq);\n \t}\n \tdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n \n@@ -1195,10 +714,10 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \ttxq = dev->data->tx_queues[tx_queue_id];\n \ttxq->ops->release_mbufs(txq);\n \tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n-\t\treset_single_tx_queue(txq);\n+\t\tidpf_reset_single_tx_queue(txq);\n \t} else 
{\n-\t\treset_split_tx_descq(txq);\n-\t\treset_split_tx_complq(txq->complq);\n+\t\tidpf_reset_split_tx_descq(txq);\n+\t\tidpf_reset_split_tx_complq(txq->complq);\n \t}\n \tdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n \ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex b8325f9b96..4efbf10295 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -51,7 +51,6 @@\n /* Base address of the HW descriptor ring should be 128B aligned. */\n #define IDPF_RING_BASE_ALIGN\t128\n \n-#define IDPF_RX_MAX_BURST\t\t32\n #define IDPF_DEFAULT_RX_FREE_THRESH\t32\n \n /* used for Vector PMD */\n@@ -101,14 +100,6 @@ union idpf_tx_offload {\n \t};\n };\n \n-struct idpf_rxq_ops {\n-\tvoid (*release_mbufs)(struct idpf_rx_queue *rxq);\n-};\n-\n-struct idpf_txq_ops {\n-\tvoid (*release_mbufs)(struct idpf_tx_queue *txq);\n-};\n-\n int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_rxconf *rx_conf,\ndiff --git a/drivers/net/idpf/idpf_rxtx_vec_avx512.c b/drivers/net/idpf/idpf_rxtx_vec_avx512.c\nindex fb2b6bb53c..71a6c59823 100644\n--- a/drivers/net/idpf/idpf_rxtx_vec_avx512.c\n+++ b/drivers/net/idpf/idpf_rxtx_vec_avx512.c\n@@ -562,7 +562,7 @@ idpf_tx_free_bufs_avx512(struct idpf_tx_queue *txq)\n \ttxep = (void *)txq->sw_ring;\n \ttxep += txq->next_dd - (n - 1);\n \n-\tif (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {\n+\tif (txq->offloads & IDPF_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {\n \t\tstruct rte_mempool *mp = txep[0].mbuf->pool;\n \t\tstruct rte_mempool_cache *cache = rte_mempool_default_cache(mp,\n \t\t\t\t\t\t\t\trte_lcore_id());\n",
    "prefixes": [
        "v3",
        "12/15"
    ]
}
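
Two fields of this response are directly actionable: "mbox" points at the raw patch in mbox form, and the series entry's "mbox" at the whole series. A minimal sketch (assuming a git checkout in the current directory) that downloads the patch mbox named in the response and applies it with git am:

    import subprocess

    import requests

    patch_url = "http://patches.dpdk.org/api/patches/122155/"
    patch = requests.get(patch_url).json()

    # Fetch the raw mbox referenced by the response and pipe it to `git am`.
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    subprocess.run(["git", "am"], input=mbox.content, check=True)

To apply the full 15-patch series instead, fetch patch["series"][0]["mbox"] the same way.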