get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
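
Reads need no credentials; updates require an API token and maintainer rights on the project. A minimal sketch in Python with the requests library (the token value is a placeholder, the "Token" authorization scheme assumes Patchwork's usual DRF token authentication, and using "state" as the writable field is illustrative):

    import requests

    BASE = "http://patches.dpdk.org/api"

    # Anonymous read: fetch the patch shown below as JSON.
    patch = requests.get(f"{BASE}/patches/118364/").json()
    print(patch["name"], "->", patch["state"])

    # Authenticated partial update (PATCH); maintainers only.
    resp = requests.patch(
        f"{BASE}/patches/118364/",
        headers={"Authorization": "Token 0123456789abcdef"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()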

GET /api/patches/118364/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118364,
    "url": "http://patches.dpdk.org/api/patches/118364/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221018111245.890651-5-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221018111245.890651-5-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221018111245.890651-5-junfeng.guo@intel.com",
    "date": "2022-10-18T11:12:34",
    "name": "[v3,04/15] net/idpf: add queue setup and release in split queue model",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "87c3e19509f43e2790baf11dd2e3a4301685b261",
    "submitter": {
        "id": 1785,
        "url": "http://patches.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221018111245.890651-5-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25280,
            "url": "http://patches.dpdk.org/api/series/25280/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25280",
            "date": "2022-10-18T11:12:30",
            "name": "add support for idpf PMD in DPDK",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/25280/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/118364/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/118364/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 54E61A0560;\n\tTue, 18 Oct 2022 13:14:48 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 58910427EA;\n\tTue, 18 Oct 2022 13:14:21 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id BB4E84021D\n for <dev@dpdk.org>; Tue, 18 Oct 2022 13:14:19 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Oct 2022 04:14:19 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by FMSMGA003.fm.intel.com with ESMTP; 18 Oct 2022 04:14:16 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666091659; x=1697627659;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=Aw3ia+uQsSxWdr0IiJD7mWyJ0yWZMjCLqXEERBsuZhc=;\n b=itOSpccL6VuqB5KZrAtmvvJVdMTQXqIS2+vjMVD4KvAvKNmgFCNzzour\n UZuUYNF33qJGH9KSTZSj8XMeStJHzMonspSNm3Kt7kmPiNuu0490BbUU/\n RGYnsuiAnv/Mldc3xlaCyndzakJ2DYKJMGmjIn3y7WYPby/pV8gwk2ad2\n dUO8cQFG8uQn7GkS07c5j+ltQN/RFY4VznlDQo5tiyMAFHHrtR2dJ9r/8\n 4QCYZVS5IutkNbKyTc31oUbY2QMrXTqRD8F9PImNl4f0LkQUjNHy/fT7S\n I+pWJbtpOvYfJCNrlnSkSk6IUm0SHPZtgyGJA0kyX+T8eSQc/aAaVMbTX A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10503\"; a=\"293441556\"",
            "E=Sophos;i=\"5.95,193,1661842800\"; d=\"scan'208\";a=\"293441556\"",
            "E=McAfee;i=\"6500,9779,10503\"; a=\"717884149\"",
            "E=Sophos;i=\"5.95,193,1661842800\"; d=\"scan'208\";a=\"717884149\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org,\n\tjunfeng.guo@intel.com,\n\tXiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH v3 04/15] net/idpf: add queue setup and release in split queue\n model",
        "Date": "Tue, 18 Oct 2022 19:12:34 +0800",
        "Message-Id": "<20221018111245.890651-5-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221018111245.890651-1-junfeng.guo@intel.com>",
        "References": "<20220905105828.3190335-1-junfeng.guo@intel.com>\n <20221018111245.890651-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for queue operations in split queue model:\n - rx_queue_setup\n - rx_queue_release\n - tx_queue_setup\n - tx_queue_release\n\nIn the split queue model, \"RX buffer queues\" are used to pass\ndescriptor buffers from SW to HW while Rx queues are used only to\npass the descriptor completions, that is, descriptors that point\nto completed buffers, from HW to SW. This is contrary to the single\nqueue model in which Rx queues are used for both purposes.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c |  34 ++-\n drivers/net/idpf/idpf_ethdev.h |  11 +\n drivers/net/idpf/idpf_rxtx.c   | 485 ++++++++++++++++++++++++++++++++-\n drivers/net/idpf/idpf_vchnl.c  | 127 ++++++++-\n 4 files changed, 644 insertions(+), 13 deletions(-)",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex c0a6b3e90d..21520fa648 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -69,13 +69,25 @@ idpf_init_vport_req_info(struct rte_eth_dev *dev)\n \t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n \n \tvport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);\n-\tif (adapter->txq_model) {\n+\tif (!adapter->txq_model) {\n+\t\tvport_info->txq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\t\tvport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;\n+\t\tvport_info->num_tx_complq =\n+\t\t\tIDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;\n+\t} else {\n \t\tvport_info->txq_model =\n \t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n \t\tvport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;\n \t\tvport_info->num_tx_complq = 0;\n \t}\n-\tif (adapter->rxq_model) {\n+\tif (!adapter->rxq_model) {\n+\t\tvport_info->rxq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\t\tvport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;\n+\t\tvport_info->num_rx_bufq =\n+\t\t\tIDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;\n+\t} else {\n \t\tvport_info->rxq_model =\n \t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n \t\tvport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;\n@@ -113,7 +125,9 @@ idpf_init_vport(struct rte_eth_dev *dev)\n \tvport->txq_model = vport_info->txq_model;\n \tvport->rxq_model = vport_info->rxq_model;\n \tvport->num_tx_q = vport_info->num_tx_q;\n+\tvport->num_tx_complq = vport_info->num_tx_complq;\n \tvport->num_rx_q = vport_info->num_rx_q;\n+\tvport->num_rx_bufq = vport_info->num_rx_bufq;\n \tvport->max_mtu = vport_info->max_mtu;\n \trte_memcpy(vport->default_mac_addr,\n \t\t   vport_info->default_mac_addr, ETH_ALEN);\n@@ -136,6 +150,22 @@ idpf_init_vport(struct rte_eth_dev *dev)\n \t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n \t\t\tvport->chunks_info.rx_qtail_spacing =\n \t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else if (vport_info->chunks.chunks[i].type ==\n+\t\t\t VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION) {\n+\t\t\tvport->chunks_info.tx_compl_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.tx_compl_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.tx_compl_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else if (vport_info->chunks.chunks[i].type ==\n+\t\t\t VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) {\n+\t\t\tvport->chunks_info.rx_buf_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.rx_buf_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.rx_buf_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n \t\t}\n \t}\n \ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nindex d90bcfa63a..9f8fcba4ca 100644\n--- a/drivers/net/idpf/idpf_ethdev.h\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -73,11 +73,18 @@ enum idpf_vc_result {\n struct idpf_chunks_info {\n \tuint32_t tx_start_qid;\n \tuint32_t rx_start_qid;\n+\t/* Valid only if split queue model */\n+\tuint32_t tx_compl_start_qid;\n+\tuint32_t rx_buf_start_qid;\n \n \tuint64_t tx_qtail_start;\n \tuint32_t tx_qtail_spacing;\n \tuint64_t rx_qtail_start;\n \tuint32_t rx_qtail_spacing;\n+\tuint64_t tx_compl_qtail_start;\n+\tuint32_t tx_compl_qtail_spacing;\n+\tuint64_t rx_buf_qtail_start;\n+\tuint32_t 
rx_buf_qtail_spacing;\n };\n \n struct idpf_vport {\n@@ -86,7 +93,11 @@ struct idpf_vport {\n \tuint32_t txq_model;\n \tuint32_t rxq_model;\n \tuint16_t num_tx_q;\n+\t/* valid only if txq_model is split Q */\n+\tuint16_t num_tx_complq;\n \tuint16_t num_rx_q;\n+\t/* valid only if rxq_model is split Q */\n+\tuint16_t num_rx_bufq;\n \n \tuint16_t max_mtu;\n \tuint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex 9fef467990..07ee8f1e14 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -97,7 +97,10 @@ release_txq_mbufs(struct idpf_tx_queue *txq)\n \t\treturn;\n \t}\n \n-\tif (!txq->sw_nb_desc) {\n+\tif (txq->sw_nb_desc) {\n+\t\t/* For split queue model, descriptor ring */\n+\t\tnb_desc = txq->sw_nb_desc;\n+\t} else {\n \t\t/* For single queue model */\n \t\tnb_desc = txq->nb_tx_desc;\n \t}\n@@ -125,6 +128,21 @@ idpf_rx_queue_release(void *rxq)\n \tif (!q)\n \t\treturn;\n \n+\t/* Split queue */\n+\tif (q->bufq1 && q->bufq2) {\n+\t\tq->bufq1->ops->release_mbufs(q->bufq1);\n+\t\trte_free(q->bufq1->sw_ring);\n+\t\trte_memzone_free(q->bufq1->mz);\n+\t\trte_free(q->bufq1);\n+\t\tq->bufq2->ops->release_mbufs(q->bufq2);\n+\t\trte_free(q->bufq2->sw_ring);\n+\t\trte_memzone_free(q->bufq2->mz);\n+\t\trte_free(q->bufq2);\n+\t\trte_memzone_free(q->mz);\n+\t\trte_free(q);\n+\t\treturn;\n+\t}\n+\n \t/* Single queue */\n \tq->ops->release_mbufs(q);\n \trte_free(q->sw_ring);\n@@ -147,6 +165,65 @@ idpf_tx_queue_release(void *txq)\n \trte_free(q);\n }\n \n+static inline void\n+reset_split_rx_descq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\trxq->rx_tail = 0;\n+\trxq->expected_gen_id = 1;\n+}\n+\n+static inline void\n+reset_split_rx_bufq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\t/* The next descriptor id which can be received. */\n+\trxq->rx_next_avail = 0;\n+\n+\t/* The next descriptor id which can be refilled. */\n+\trxq->rx_tail = 0;\n+\t/* The number of descriptors which can be refilled. 
*/\n+\trxq->nb_rx_hold = rxq->nb_rx_desc - 1;\n+\n+\trxq->bufq1 = NULL;\n+\trxq->bufq2 = NULL;\n+}\n+\n+static inline void\n+reset_split_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\treset_split_rx_descq(rxq);\n+\treset_split_rx_bufq(rxq->bufq1);\n+\treset_split_rx_bufq(rxq->bufq2);\n+}\n+\n static inline void\n reset_single_rx_queue(struct idpf_rx_queue *rxq)\n {\n@@ -179,6 +256,58 @@ reset_single_rx_queue(struct idpf_rx_queue *rxq)\n \trxq->rxrearm_nb = 0;\n }\n \n+static inline void\n+reset_split_tx_descq(struct idpf_tx_queue *txq)\n+{\n+\tstruct idpf_tx_entry *txe;\n+\tuint32_t i, size;\n+\tuint16_t prev;\n+\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->desc_ring)[i] = 0;\n+\n+\ttxe = txq->sw_ring;\n+\tprev = (uint16_t)(txq->sw_nb_desc - 1);\n+\tfor (i = 0; i < txq->sw_nb_desc; i++) {\n+\t\ttxe[i].mbuf = NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_used = 0;\n+\n+\t/* Use this as next to clean for split desc queue */\n+\ttxq->last_desc_cleaned = 0;\n+\ttxq->sw_tail = 0;\n+\ttxq->nb_free = txq->nb_tx_desc - 1;\n+}\n+\n+static inline void\n+reset_split_tx_complq(struct idpf_tx_queue *cq)\n+{\n+\tuint32_t i, size;\n+\n+\tif (!cq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to complq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)cq->compl_ring)[i] = 0;\n+\n+\tcq->tx_tail = 0;\n+\tcq->expected_gen_id = 1;\n+}\n+\n static inline void\n reset_single_tx_queue(struct idpf_tx_queue *txq)\n {\n@@ -216,6 +345,224 @@ reset_single_tx_queue(struct idpf_tx_queue *txq)\n \ttxq->next_rs = txq->rs_thresh - 1;\n }\n \n+static int\n+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n+\t\t\t uint16_t queue_idx, uint16_t rx_free_thresh,\n+\t\t\t uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t const struct rte_eth_rxconf *rx_conf,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t len;\n+\n+\tbufq->mp = mp;\n+\tbufq->nb_rx_desc = nb_desc;\n+\tbufq->rx_free_thresh = rx_free_thresh;\n+\tbufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;\n+\tbufq->port_id = dev->data->port_id;\n+\tbufq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\tbufq->rx_hdr_len = 0;\n+\tbufq->adapter = adapter;\n+\n+\tlen = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;\n+\tbufq->rx_buf_len = len;\n+\n+\t/* Allocate the software ring. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tbufq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rx bufq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!bufq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. 
*/\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_splitq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_buf_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(bufq->sw_ring);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(mz->addr, 0, ring_size);\n+\tbufq->rx_ring_phys_addr = mz->iova;\n+\tbufq->rx_ring = mz->addr;\n+\n+\tbufq->mz = mz;\n+\treset_split_rx_bufq(bufq);\n+\tbufq->q_set = true;\n+\tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n+\t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n+\tbufq->ops = &def_rxq_ops;\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t  const struct rte_eth_rxconf *rx_conf,\n+\t\t\t  struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_rx_queue *rxq;\n+\tstruct idpf_rx_queue *bufq1, *bufq2;\n+\tconst struct rte_memzone *mz;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint16_t qid;\n+\tuint16_t len;\n+\tuint64_t offloads;\n+\tint ret;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is invalid\", nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed */\n+\tif (dev->data->rx_queues[queue_idx]) {\n+\t\tidpf_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!rxq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = adapter;\n+\trxq->offloads = offloads;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_cpmpl_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_rxq;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. 
*/\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_split_rx_descq(rxq);\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\t/* setup Rx buffer queue */\n+\tbufq1 = rte_zmalloc_socket(\"idpf bufq1\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!bufq1) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 1.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_mz;\n+\t}\n+\tqid = 2 * queue_idx;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, rx_conf, mp);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 1\");\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq1;\n+\t}\n+\trxq->bufq1 = bufq1;\n+\n+\tbufq2 = rte_zmalloc_socket(\"idpf bufq2\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!bufq2) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 2.\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -ENOMEM;\n+\t\tgoto free_bufq1;\n+\t}\n+\tqid = 2 * queue_idx + 1;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, rx_conf, mp);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 2\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq2;\n+\t}\n+\trxq->bufq2 = bufq2;\n+\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\treturn 0;\n+\n+free_bufq2:\n+\trte_free(bufq2);\n+free_bufq1:\n+\trte_free(bufq1);\n+free_mz:\n+\trte_memzone_free(mz);\n+free_rxq:\n+\trte_free(rxq);\n+\n+\treturn ret;\n+}\n+\n static int\n idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t   uint16_t nb_desc, unsigned int socket_id,\n@@ -335,7 +682,138 @@ idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\treturn idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,\n \t\t\t\t\t\t  socket_id, rx_conf, mp);\n \telse\n-\t\treturn -1;\n+\t\treturn idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t socket_id, rx_conf, mp);\n+}\n+\n+static int\n+idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t  const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tstruct idpf_tx_queue *txq, *cq;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t tx_rs_thresh, tx_free_thresh;\n+\tuint64_t offloads;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of transmit descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?\n+\t\ttx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);\n+\ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?\n+\t\ttx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);\n+\tif (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed. 
*/\n+\tif (dev->data->tx_queues[queue_idx]) {\n+\t\tidpf_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(\"idpf split txq\",\n+\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!txq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->rs_thresh = tx_rs_thresh;\n+\ttxq->free_thresh = tx_free_thresh;\n+\ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->offloads = offloads;\n+\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_nb_desc = 2 * nb_desc;\n+\ttxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf split tx sw ring\",\n+\t\t\t\t   sizeof(struct idpf_tx_entry) *\n+\t\t\t\t   txq->sw_nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!txq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate TX hardware ring descriptors. */\n+\tring_size = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;\n+\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"split_tx_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n+\t\trte_free(txq->sw_ring);\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\ttxq->tx_ring_phys_addr = mz->iova;\n+\ttxq->desc_ring = (struct idpf_flex_tx_sched_desc *)mz->addr;\n+\n+\ttxq->mz = mz;\n+\treset_split_tx_descq(txq);\n+\ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n+\ttxq->ops = &def_txq_ops;\n+\n+\t/* Allocate the TX completion queue data structure. 
*/\n+\ttxq->complq = rte_zmalloc_socket(\"idpf splitq cq\",\n+\t\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t socket_id);\n+\tcq = txq->complq;\n+\tif (!cq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tcq->nb_tx_desc = 2 * nb_desc;\n+\tcq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;\n+\tcq->port_id = dev->data->port_id;\n+\tcq->txqs = dev->data->tx_queues;\n+\tcq->tx_start_qid = vport->chunks_info.tx_start_qid;\n+\n+\tring_size = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;\n+\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"tx_split_compl_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX completion queue\");\n+\t\trte_free(txq->sw_ring);\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\tcq->tx_ring_phys_addr = mz->iova;\n+\tcq->compl_ring = (struct idpf_splitq_tx_compl_desc *)mz->addr;\n+\tcq->mz = mz;\n+\treset_split_tx_complq(cq);\n+\n+\ttxq->q_set = true;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\n+\treturn 0;\n }\n \n static int\n@@ -447,7 +925,8 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\treturn idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,\n \t\t\t\t\t\t  socket_id, tx_conf);\n \telse\n-\t\treturn -1;\n+\t\treturn idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t socket_id, tx_conf);\n }\n \n void\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nindex 02a55e0658..87a024b6db 100644\n--- a/drivers/net/idpf/idpf_vchnl.c\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -481,10 +481,10 @@ idpf_vc_config_rxqs(struct idpf_vport *vport)\n \tstruct virtchnl2_rxq_info *rxq_info;\n \tstruct idpf_cmd_info args;\n \tuint16_t total_qs, num_qs;\n-\tint size, err, i;\n+\tint size, err, i, j;\n \tint k = 0;\n \n-\ttotal_qs = vport->num_rx_q;\n+\ttotal_qs = vport->num_rx_q + vport->num_rx_bufq;\n \twhile (total_qs) {\n \t\tif (total_qs > adapter->max_rxq_per_msg) {\n \t\t\tnum_qs = adapter->max_rxq_per_msg;\n@@ -520,7 +520,46 @@ idpf_vc_config_rxqs(struct idpf_vport *vport)\n \t\t\t\trxq_info->ring_len = rxq[k]->nb_rx_desc;\n \t\t\t}\n \t\t} else {\n-\t\t\treturn -1;\n+\t\t\tfor (i = 0; i < num_qs / 3; i++, k++) {\n+\t\t\t\t/* Rx queue */\n+\t\t\t\trxq_info = &vc_rxqs->qinfo[i * 3];\n+\t\t\t\trxq_info->dma_ring_addr =\n+\t\t\t\t\trxq[k]->rx_ring_phys_addr;\n+\t\t\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\t\t\trxq_info->queue_id = rxq[k]->queue_id;\n+\t\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\trxq_info->data_buffer_size = rxq[k]->rx_buf_len;\n+\t\t\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\t\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\t\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\t\t\trxq_info->ring_len = rxq[k]->nb_rx_desc;\n+\t\t\t\trxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;\n+\t\t\t\trxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;\n+\t\t\t\trxq_info->rx_buffer_low_watermark = 64;\n+\n+\t\t\t\t/* Buffer queue */\n+\t\t\t\tfor (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {\n+\t\t\t\t\tstruct idpf_rx_queue *bufq = j == 1 ?\n+\t\t\t\t\t\trxq[k]->bufq1 : rxq[k]->bufq2;\n+\t\t\t\t\trxq_info = &vc_rxqs->qinfo[i * 3 + j];\n+\t\t\t\t\trxq_info->dma_ring_addr =\n+\t\t\t\t\t\tbufq->rx_ring_phys_addr;\n+\t\t\t\t\trxq_info->type 
=\n+\t\t\t\t\t\tVIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\t\t\t\trxq_info->queue_id = bufq->queue_id;\n+\t\t\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\t\trxq_info->data_buffer_size = bufq->rx_buf_len;\n+\t\t\t\t\trxq_info->desc_ids =\n+\t\t\t\t\t\tVIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\t\t\t\trxq_info->ring_len = bufq->nb_rx_desc;\n+\n+\t\t\t\t\trxq_info->buffer_notif_stride =\n+\t\t\t\t\t\tIDPF_RX_BUF_STRIDE;\n+\t\t\t\t\trxq_info->rx_buffer_low_watermark = 64;\n+\t\t\t\t}\n+\t\t\t}\n \t\t}\n \t\tmemset(&args, 0, sizeof(args));\n \t\targs.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;\n@@ -550,7 +589,7 @@ idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)\n \tstruct virtchnl2_rxq_info *rxq_info;\n \tstruct idpf_cmd_info args;\n \tuint16_t num_qs;\n-\tint size, err;\n+\tint size, err, i;\n \n \tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n \t\tnum_qs = IDPF_RXQ_PER_GRP;\n@@ -581,7 +620,39 @@ idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)\n \n \t\trxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;\n \t}  else {\n-\t\treturn -1;\n+\t\t/* Rx queue */\n+\t\trxq_info = &vc_rxqs->qinfo[0];\n+\t\trxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;\n+\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\trxq_info->queue_id = rxq[rxq_id]->queue_id;\n+\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\trxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;\n+\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\trxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;\n+\t\trxq_info->rx_bufq1_id = rxq[rxq_id]->bufq1->queue_id;\n+\t\trxq_info->rx_bufq2_id = rxq[rxq_id]->bufq2->queue_id;\n+\t\trxq_info->rx_buffer_low_watermark = 64;\n+\n+\t\t/* Buffer queue */\n+\t\tfor (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {\n+\t\t\tstruct idpf_rx_queue *bufq =\n+\t\t\t\ti == 1 ? 
rxq[rxq_id]->bufq1 : rxq[rxq_id]->bufq2;\n+\t\t\trxq_info = &vc_rxqs->qinfo[i];\n+\t\t\trxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;\n+\t\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\t\trxq_info->queue_id = bufq->queue_id;\n+\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\trxq_info->data_buffer_size = bufq->rx_buf_len;\n+\t\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;\n+\t\t\trxq_info->ring_len = bufq->nb_rx_desc;\n+\n+\t\t\trxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;\n+\t\t\trxq_info->rx_buffer_low_watermark = 64;\n+\t\t}\n \t}\n \n \tmemset(&args, 0, sizeof(args));\n@@ -612,7 +683,7 @@ idpf_vc_config_txqs(struct idpf_vport *vport)\n \tint size, err, i;\n \tint k = 0;\n \n-\ttotal_qs = vport->num_tx_q;\n+\ttotal_qs = vport->num_tx_q + vport->num_tx_complq;\n \twhile (total_qs) {\n \t\tif (total_qs > adapter->max_txq_per_msg) {\n \t\t\tnum_qs = adapter->max_txq_per_msg;\n@@ -642,7 +713,29 @@ idpf_vc_config_txqs(struct idpf_vport *vport)\n \t\t\t\ttxq_info->ring_len = txq[k]->nb_tx_desc;\n \t\t\t}\n \t\t} else {\n-\t\t\treturn -1;\n+\t\t\tfor (i = 0; i < num_qs / 2; i++, k++) {\n+\t\t\t\t/* txq info */\n+\t\t\t\ttxq_info = &vc_txqs->qinfo[2 * i];\n+\t\t\t\ttxq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;\n+\t\t\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\t\t\ttxq_info->queue_id = txq[k]->queue_id;\n+\t\t\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\t\t\ttxq_info->ring_len = txq[k]->nb_tx_desc;\n+\t\t\t\ttxq_info->tx_compl_queue_id =\n+\t\t\t\t\ttxq[k]->complq->queue_id;\n+\t\t\t\ttxq_info->relative_queue_id = txq_info->queue_id;\n+\n+\t\t\t\t/* tx completion queue info */\n+\t\t\t\ttxq_info = &vc_txqs->qinfo[2 * i + 1];\n+\t\t\t\ttxq_info->dma_ring_addr =\n+\t\t\t\t\ttxq[k]->complq->tx_ring_phys_addr;\n+\t\t\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\t\t\ttxq_info->queue_id = txq[k]->complq->queue_id;\n+\t\t\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\t\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\t\t\ttxq_info->ring_len = txq[k]->complq->nb_tx_desc;\n+\t\t\t}\n \t\t}\n \n \t\tmemset(&args, 0, sizeof(args));\n@@ -700,7 +793,25 @@ idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)\n \t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;\n \t\ttxq_info->ring_len = txq[txq_id]->nb_tx_desc;\n \t} else {\n-\t\treturn -1;\n+\t\t/* txq info */\n+\t\ttxq_info = &vc_txqs->qinfo[0];\n+\t\ttxq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;\n+\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\ttxq_info->queue_id = txq[txq_id]->queue_id;\n+\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\ttxq_info->ring_len = txq[txq_id]->nb_tx_desc;\n+\t\ttxq_info->tx_compl_queue_id = txq[txq_id]->complq->queue_id;\n+\t\ttxq_info->relative_queue_id = txq_info->queue_id;\n+\n+\t\t/* tx completion queue info */\n+\t\ttxq_info = &vc_txqs->qinfo[1];\n+\t\ttxq_info->dma_ring_addr = txq[txq_id]->complq->tx_ring_phys_addr;\n+\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\ttxq_info->queue_id = txq[txq_id]->complq->queue_id;\n+\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;\n+\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;\n+\t\ttxq_info->ring_len = txq[txq_id]->complq->nb_tx_desc;\n \t}\n \n \tmemset(&args, 0, sizeof(args));\n",
    "prefixes": [
        "v3",
        "04/15"
    ]
}
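
Since the response is plain JSON, downstream tooling can consume it directly. A short sketch under the same assumptions as above (Python with the requests library; the output file name is illustrative) that pulls the fields a CI or backport script typically needs and downloads the mbox so the patch can be applied with git am:

    import requests

    patch = requests.get(
        "http://patches.dpdk.org/api/patches/118364/").json()

    # Fields commonly consumed by tooling.
    print("state:    ", patch["state"])              # e.g. "superseded"
    print("submitter:", patch["submitter"]["name"])
    print("series:   ", patch["series"][0]["name"],
          "v%d" % patch["series"][0]["version"])
    print("checks:   ", patch["check"])              # aggregate check result

    # Save the raw patch; apply later with: git am 118364.mbox
    with open("118364.mbox", "wb") as f:
        f.write(requests.get(patch["mbox"]).content)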