get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
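
A minimal sketch of driving all three methods with Python's requests library. The URL is the endpoint shown in the response below; the token header is a placeholder, and write operations (PUT/PATCH) typically require an authenticated maintainer account on the Patchwork instance:

    import requests

    URL = "https://patches.dpdk.org/api/patches/13160/"
    # Hypothetical token; replace with a real API token for write access.
    HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}

    # get: read the patch as JSON (no auth needed on public instances)
    patch = requests.get(URL).json()
    print(patch["name"], "->", patch["state"])

    # patch: partial update -- send only the fields being changed
    requests.patch(URL, headers=HEADERS, json={"state": "accepted"})

    # put: full update over the same writable fields
    requests.put(URL, headers=HEADERS, json={"state": "accepted", "archived": True})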

GET /api/patches/13160/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 13160,
    "url": "https://patches.dpdk.org/api/patches/13160/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1464797533-127157-1-git-send-email-huawei.xie@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1464797533-127157-1-git-send-email-huawei.xie@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1464797533-127157-1-git-send-email-huawei.xie@intel.com",
    "date": "2016-06-01T16:12:13",
    "name": "[dpdk-dev,v3] virtio: split virtio rx/tx queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "abaa945836c8b6bea1a4de70d54080e804439abe",
    "submitter": {
        "id": 16,
        "url": "https://patches.dpdk.org/api/people/16/?format=api",
        "name": "Huawei Xie",
        "email": "huawei.xie@intel.com"
    },
    "delegate": {
        "id": 355,
        "url": "https://patches.dpdk.org/api/users/355/?format=api",
        "username": "yliu",
        "first_name": "Yuanhan",
        "last_name": "Liu",
        "email": "yuanhan.liu@linux.intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1464797533-127157-1-git-send-email-huawei.xie@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/13160/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/13160/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id C47ED2E8B;\n\tThu,  2 Jun 2016 10:07:09 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n\tby dpdk.org (Postfix) with ESMTP id 4DDF12C10\n\tfor <dev@dpdk.org>; Thu,  2 Jun 2016 10:07:08 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n\tby fmsmga102.fm.intel.com with ESMTP; 02 Jun 2016 01:07:08 -0700",
            "from dpdk15.sh.intel.com ([10.239.129.25])\n\tby orsmga003.jf.intel.com with ESMTP; 02 Jun 2016 01:07:07 -0700"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.26,405,1459839600\"; d=\"scan'208\";a=\"820037008\"",
        "From": "Huawei Xie <huawei.xie@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "yuanhan.liu@intel.com,\n\tHuawei Xie <huawei.xie@intel.com>",
        "Date": "Thu,  2 Jun 2016 00:12:13 +0800",
        "Message-Id": "<1464797533-127157-1-git-send-email-huawei.xie@intel.com>",
        "X-Mailer": "git-send-email 1.8.1.4",
        "In-Reply-To": "<1464599180-76004-1-git-send-email-huawei.xie@intel.com>",
        "References": "<1464599180-76004-1-git-send-email-huawei.xie@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3] virtio: split virtio rx/tx queue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "We keep a common vq structure, containing only vq related fields,\nand then split others into RX, TX and control queue respectively.\n\nSigned-off-by: Huawei Xie <huawei.xie@intel.com>\n---\nv2:\n- don't split virtio_dev_rx/tx_queue_setup\nv3:\n- fix some 80 char warnings\n- fix other newer version checkpatch warnings\n\n- remove hdr zone allocation for RX queue\nv4:\n- remove '\\n' in PMD_RX_LOG\n- fix some conversions between vq and rx/txvq in virtio_dev_free_mbufs\n\n drivers/net/virtio/virtio_ethdev.c      | 374 ++++++++++++++++++--------------\n drivers/net/virtio/virtio_ethdev.h      |   2 +-\n drivers/net/virtio/virtio_pci.c         |   4 +-\n drivers/net/virtio/virtio_pci.h         |   3 +-\n drivers/net/virtio/virtio_rxtx.c        | 282 +++++++++++++-----------\n drivers/net/virtio/virtio_rxtx.h        |  56 ++++-\n drivers/net/virtio/virtio_rxtx_simple.c |  83 +++----\n drivers/net/virtio/virtqueue.h          |  70 +++---\n 8 files changed, 496 insertions(+), 378 deletions(-)",
    "diff": "diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c\nindex c3fb628..cba01d1 100644\n--- a/drivers/net/virtio/virtio_ethdev.c\n+++ b/drivers/net/virtio/virtio_ethdev.c\n@@ -114,40 +114,61 @@ struct rte_virtio_xstats_name_off {\n };\n \n /* [rt]x_qX_ is prepended to the name string here */\n-static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = {\n-\t{\"good_packets\",           offsetof(struct virtqueue, packets)},\n-\t{\"good_bytes\",             offsetof(struct virtqueue, bytes)},\n-\t{\"errors\",                 offsetof(struct virtqueue, errors)},\n-\t{\"multicast_packets\",      offsetof(struct virtqueue, multicast)},\n-\t{\"broadcast_packets\",      offsetof(struct virtqueue, broadcast)},\n-\t{\"undersize_packets\",      offsetof(struct virtqueue, size_bins[0])},\n-\t{\"size_64_packets\",        offsetof(struct virtqueue, size_bins[1])},\n-\t{\"size_65_127_packets\",    offsetof(struct virtqueue, size_bins[2])},\n-\t{\"size_128_255_packets\",   offsetof(struct virtqueue, size_bins[3])},\n-\t{\"size_256_511_packets\",   offsetof(struct virtqueue, size_bins[4])},\n-\t{\"size_512_1023_packets\",  offsetof(struct virtqueue, size_bins[5])},\n-\t{\"size_1024_1517_packets\", offsetof(struct virtqueue, size_bins[6])},\n-\t{\"size_1518_max_packets\",  offsetof(struct virtqueue, size_bins[7])},\n+static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {\n+\t{\"good_packets\",           offsetof(struct virtnet_rx, stats.packets)},\n+\t{\"good_bytes\",             offsetof(struct virtnet_rx, stats.bytes)},\n+\t{\"errors\",                 offsetof(struct virtnet_rx, stats.errors)},\n+\t{\"multicast_packets\",      offsetof(struct virtnet_rx, stats.multicast)},\n+\t{\"broadcast_packets\",      offsetof(struct virtnet_rx, stats.broadcast)},\n+\t{\"undersize_packets\",      offsetof(struct virtnet_rx, stats.size_bins[0])},\n+\t{\"size_64_packets\",        offsetof(struct virtnet_rx, stats.size_bins[1])},\n+\t{\"size_65_127_packets\",    offsetof(struct virtnet_rx, stats.size_bins[2])},\n+\t{\"size_128_255_packets\",   offsetof(struct virtnet_rx, stats.size_bins[3])},\n+\t{\"size_256_511_packets\",   offsetof(struct virtnet_rx, stats.size_bins[4])},\n+\t{\"size_512_1023_packets\",  offsetof(struct virtnet_rx, stats.size_bins[5])},\n+\t{\"size_1024_1517_packets\", offsetof(struct virtnet_rx, stats.size_bins[6])},\n+\t{\"size_1518_max_packets\",  offsetof(struct virtnet_rx, stats.size_bins[7])},\n };\n \n-#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \\\n-\t\t\t    sizeof(rte_virtio_q_stat_strings[0]))\n+/* [rt]x_qX_ is prepended to the name string here */\n+static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {\n+\t{\"good_packets\",           offsetof(struct virtnet_tx, stats.packets)},\n+\t{\"good_bytes\",             offsetof(struct virtnet_tx, stats.bytes)},\n+\t{\"errors\",                 offsetof(struct virtnet_tx, stats.errors)},\n+\t{\"multicast_packets\",      offsetof(struct virtnet_tx, stats.multicast)},\n+\t{\"broadcast_packets\",      offsetof(struct virtnet_tx, stats.broadcast)},\n+\t{\"undersize_packets\",      offsetof(struct virtnet_tx, stats.size_bins[0])},\n+\t{\"size_64_packets\",        offsetof(struct virtnet_tx, stats.size_bins[1])},\n+\t{\"size_65_127_packets\",    offsetof(struct virtnet_tx, stats.size_bins[2])},\n+\t{\"size_128_255_packets\",   offsetof(struct virtnet_tx, stats.size_bins[3])},\n+\t{\"size_256_511_packets\",   offsetof(struct 
virtnet_tx, stats.size_bins[4])},\n+\t{\"size_512_1023_packets\",  offsetof(struct virtnet_tx, stats.size_bins[5])},\n+\t{\"size_1024_1517_packets\", offsetof(struct virtnet_tx, stats.size_bins[6])},\n+\t{\"size_1518_max_packets\",  offsetof(struct virtnet_tx, stats.size_bins[7])},\n+};\n+\n+#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \\\n+\t\t\t    sizeof(rte_virtio_rxq_stat_strings[0]))\n+#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \\\n+\t\t\t    sizeof(rte_virtio_txq_stat_strings[0]))\n \n static int\n-virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,\n+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,\n \t\tint *dlen, int pkt_num)\n {\n \tuint32_t head, i;\n \tint k, sum = 0;\n \tvirtio_net_ctrl_ack status = ~0;\n \tstruct virtio_pmd_ctrl result;\n+\tstruct virtqueue *vq;\n \n \tctrl->status = status;\n \n-\tif (!(vq && vq->hw->cvq)) {\n+\tif (!cvq && !cvq->vq) {\n \t\tPMD_INIT_LOG(ERR, \"Control queue is not supported.\");\n \t\treturn -1;\n \t}\n+\tvq = cvq->vq;\n \thead = vq->vq_desc_head_idx;\n \n \tPMD_INIT_LOG(DEBUG, \"vq->vq_desc_head_idx = %d, status = %d, \"\n@@ -157,7 +178,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,\n \tif ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))\n \t\treturn -1;\n \n-\tmemcpy(vq->virtio_net_hdr_mz->addr, ctrl,\n+\tmemcpy(cvq->virtio_net_hdr_mz->addr, ctrl,\n \t\tsizeof(struct virtio_pmd_ctrl));\n \n \t/*\n@@ -167,14 +188,14 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,\n \t * One RX packet for ACK.\n \t */\n \tvq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;\n-\tvq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr;\n+\tvq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mz->phys_addr;\n \tvq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);\n \tvq->vq_free_cnt--;\n \ti = vq->vq_ring.desc[head].next;\n \n \tfor (k = 0; k < pkt_num; k++) {\n \t\tvq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;\n-\t\tvq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr\n+\t\tvq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mz->phys_addr\n \t\t\t+ sizeof(struct virtio_net_ctrl_hdr)\n \t\t\t+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;\n \t\tvq->vq_ring.desc[i].len = dlen[k];\n@@ -184,7 +205,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,\n \t}\n \n \tvq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;\n-\tvq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr\n+\tvq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mz->phys_addr\n \t\t\t+ sizeof(struct virtio_net_ctrl_hdr);\n \tvq->vq_ring.desc[i].len = sizeof(ctrl->status);\n \tvq->vq_free_cnt--;\n@@ -229,7 +250,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,\n \tPMD_INIT_LOG(DEBUG, \"vq->vq_free_cnt=%d\\nvq->vq_desc_head_idx=%d\",\n \t\t\tvq->vq_free_cnt, vq->vq_desc_head_idx);\n \n-\tmemcpy(&result, vq->virtio_net_hdr_mz->addr,\n+\tmemcpy(&result, cvq->virtio_net_hdr_mz->addr,\n \t\t\tsizeof(struct virtio_pmd_ctrl));\n \n \treturn result.status;\n@@ -269,10 +290,6 @@ virtio_dev_queue_release(struct virtqueue *vq)\n \t\tif (vq->configured)\n \t\t\thw->vtpci_ops->del_queue(hw, vq);\n \n-\t\trte_memzone_free(vq->mz);\n-\t\tif (vq->virtio_net_hdr_mz)\n-\t\t\trte_memzone_free(vq->virtio_net_hdr_mz);\n-\n \t\trte_free(vq->sw_ring);\n \t\trte_free(vq);\n \t}\n@@ -284,14 +301,21 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t\t\tuint16_t vtpci_queue_idx,\n \t\t\tuint16_t 
nb_desc,\n \t\t\tunsigned int socket_id,\n-\t\t\tstruct virtqueue **pvq)\n+\t\t\tvoid **pvq)\n {\n \tchar vq_name[VIRTQUEUE_MAX_NAME_SZ];\n-\tconst struct rte_memzone *mz;\n+\tchar vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];\n+\tconst struct rte_memzone *mz = NULL, *hdr_mz = NULL;\n \tunsigned int vq_size, size;\n \tstruct virtio_hw *hw = dev->data->dev_private;\n-\tstruct virtqueue *vq = NULL;\n+\tstruct virtnet_rx *rxvq;\n+\tstruct virtnet_tx *txvq;\n+\tstruct virtnet_ctl *cvq;\n+\tstruct virtqueue *vq;\n \tconst char *queue_names[] = {\"rvq\", \"txq\", \"cvq\"};\n+\tsize_t sz_vq, sz_q = 0, sz_hdr_mz = 0;\n+\tvoid *sw_ring = NULL;\n+\tint ret;\n \n \tPMD_INIT_LOG(DEBUG, \"setting up queue: %u\", vtpci_queue_idx);\n \n@@ -313,32 +337,28 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \n \tsnprintf(vq_name, sizeof(vq_name), \"port%d_%s%d\",\n \t\t dev->data->port_id, queue_names[queue_type], queue_idx);\n-\tvq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +\n-\t\t\t vq_size * sizeof(struct vq_desc_extra),\n-\t\t\t RTE_CACHE_LINE_SIZE);\n-\tif (vq == NULL) {\n-\t\tPMD_INIT_LOG(ERR, \"Can not allocate virtqueue\");\n-\t\treturn -ENOMEM;\n-\t}\n-\n+\tsz_vq = sizeof(*vq) + vq_size * sizeof(struct vq_desc_extra);\n \tif (queue_type == VTNET_RQ) {\n-\t\tsize_t sz_sw;\n-\n-\t\tsz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *\n-\t\t\tsizeof(vq->sw_ring[0]);\n-\t\tvq->sw_ring = rte_zmalloc_socket(\"rxq->sw_ring\", sz_sw,\n-\t\t\t\t\t\t RTE_CACHE_LINE_SIZE,\n-\t\t\t\t\t\t socket_id);\n-\t\tif (!vq->sw_ring) {\n-\t\t\tPMD_INIT_LOG(ERR, \"Can not allocate RX soft ring\");\n-\t\t\tvirtio_dev_queue_release(vq);\n-\t\t\treturn -ENOMEM;\n-\t\t}\n+\t\tsz_q = sz_vq + sizeof(*rxvq);\n+\t} else if (queue_type == VTNET_TQ) {\n+\t\tsz_q = sz_vq + sizeof(*txvq);\n+\t\t/*\n+\t\t * For each xmit packet, allocate a virtio_net_hdr\n+\t\t * and indirect ring elements\n+\t\t */\n+\t\tsz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);\n+\t} else if (queue_type == VTNET_CQ) {\n+\t\tsz_q = sz_vq + sizeof(*cvq);\n+\t\t/* Allocate a page for control vq command, data and status */\n+\t\tsz_hdr_mz = PAGE_SIZE;\n \t}\n \n+\tvq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (vq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"can not allocate vq\");\n+\t\treturn -ENOMEM;\n+\t}\n \tvq->hw = hw;\n-\tvq->port_id = dev->data->port_id;\n-\tvq->queue_id = queue_idx;\n \tvq->vq_queue_index = vtpci_queue_idx;\n \tvq->vq_nentries = vq_size;\n \n@@ -351,16 +371,17 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t */\n \tsize = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);\n \tvq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);\n-\tPMD_INIT_LOG(DEBUG, \"vring_size: %d, rounded_vring_size: %d\", size, vq->vq_ring_size);\n+\tPMD_INIT_LOG(DEBUG, \"vring_size: %d, rounded_vring_size: %d\",\n+\t\t     size, vq->vq_ring_size);\n \n-\tmz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,\n-\t\tsocket_id, 0, VIRTIO_PCI_VRING_ALIGN);\n+\tmz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,\n+\t\t\t\t\t 0, VIRTIO_PCI_VRING_ALIGN);\n \tif (mz == NULL) {\n \t\tif (rte_errno == EEXIST)\n \t\t\tmz = rte_memzone_lookup(vq_name);\n \t\tif (mz == NULL) {\n-\t\t\tvirtio_dev_queue_release(vq);\n-\t\t\treturn -ENOMEM;\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto fail_q_alloc;\n \t\t}\n \t}\n \n@@ -371,44 +392,65 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t */\n \tif ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {\n \t\tPMD_INIT_LOG(ERR, \"vring 
address shouldn't be above 16TB!\");\n-\t\tvirtio_dev_queue_release(vq);\n-\t\treturn -ENOMEM;\n+\t\tret = -ENOMEM;\n+\t\tgoto fail_q_alloc;\n \t}\n-\n \tmemset(mz->addr, 0, sizeof(mz->len));\n-\tvq->mz = mz;\n+\n \tvq->vq_ring_mem = mz->phys_addr;\n \tvq->vq_ring_virt_mem = mz->addr;\n-\tPMD_INIT_LOG(DEBUG, \"vq->vq_ring_mem:      0x%\"PRIx64, (uint64_t)mz->phys_addr);\n-\tPMD_INIT_LOG(DEBUG, \"vq->vq_ring_virt_mem: 0x%\"PRIx64, (uint64_t)(uintptr_t)mz->addr);\n-\tvq->virtio_net_hdr_mz  = NULL;\n-\tvq->virtio_net_hdr_mem = 0;\n-\n-\tif (queue_type == VTNET_TQ) {\n-\t\tconst struct rte_memzone *hdr_mz;\n-\t\tstruct virtio_tx_region *txr;\n-\t\tunsigned int i;\n-\n-\t\t/*\n-\t\t * For each xmit packet, allocate a virtio_net_hdr\n-\t\t * and indirect ring elements\n-\t\t */\n-\t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_tvq%d_hdrzone\",\n-\t\t\t dev->data->port_id, queue_idx);\n-\t\thdr_mz = rte_memzone_reserve_aligned(vq_name,\n-\t\t\t\t\t\t     vq_size * sizeof(*txr),\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_ring_mem:      0x%\"PRIx64,\n+\t\t     (uint64_t)mz->phys_addr);\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_ring_virt_mem: 0x%\"PRIx64,\n+\t\t     (uint64_t)(uintptr_t)mz->addr);\n+\n+\tif (sz_hdr_mz) {\n+\t\tsnprintf(vq_hdr_name, sizeof(vq_hdr_name), \"port%d_%s%d_hdr\",\n+\t\t\t dev->data->port_id, queue_names[queue_type],\n+\t\t\t queue_idx);\n+\t\thdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,\n \t\t\t\t\t\t     socket_id, 0,\n \t\t\t\t\t\t     RTE_CACHE_LINE_SIZE);\n \t\tif (hdr_mz == NULL) {\n \t\t\tif (rte_errno == EEXIST)\n-\t\t\t\thdr_mz = rte_memzone_lookup(vq_name);\n+\t\t\t\thdr_mz = rte_memzone_lookup(vq_hdr_name);\n \t\t\tif (hdr_mz == NULL) {\n-\t\t\t\tvirtio_dev_queue_release(vq);\n-\t\t\t\treturn -ENOMEM;\n+\t\t\t\tret = -ENOMEM;\n+\t\t\t\tgoto fail_q_alloc;\n \t\t\t}\n \t\t}\n-\t\tvq->virtio_net_hdr_mz = hdr_mz;\n-\t\tvq->virtio_net_hdr_mem = hdr_mz->phys_addr;\n+\t}\n+\n+\tif (queue_type == VTNET_RQ) {\n+\t\tsize_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *\n+\t\t\t       sizeof(vq->sw_ring[0]);\n+\n+\t\tsw_ring = rte_zmalloc_socket(\"sw_ring\", sz_sw,\n+\t\t\t\t\t     RTE_CACHE_LINE_SIZE, socket_id);\n+\t\tif (!sw_ring) {\n+\t\t\tPMD_INIT_LOG(ERR, \"can not allocate RX soft ring\");\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto fail_q_alloc;\n+\t\t}\n+\n+\t\tvq->sw_ring = sw_ring;\n+\t\trxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq);\n+\t\trxvq->vq = vq;\n+\t\trxvq->port_id = dev->data->port_id;\n+\t\trxvq->queue_id = queue_idx;\n+\t\trxvq->mz = mz;\n+\t\t*pvq = rxvq;\n+\t} else if (queue_type == VTNET_TQ) {\n+\t\tstruct virtio_tx_region *txr;\n+\t\tunsigned int i;\n+\n+\t\ttxvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq);\n+\t\ttxvq->vq = vq;\n+\t\ttxvq->port_id = dev->data->port_id;\n+\t\ttxvq->queue_id = queue_idx;\n+\t\ttxvq->mz = mz;\n+\t\ttxvq->virtio_net_hdr_mz = hdr_mz;\n+\t\ttxvq->virtio_net_hdr_mem = hdr_mz->phys_addr;\n \n \t\ttxr = hdr_mz->addr;\n \t\tmemset(txr, 0, vq_size * sizeof(*txr));\n@@ -418,58 +460,55 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t\t\tvring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));\n \n \t\t\t/* first indirect descriptor is always the tx header */\n-\t\t\tstart_dp->addr = vq->virtio_net_hdr_mem\n+\t\t\tstart_dp->addr = txvq->virtio_net_hdr_mem\n \t\t\t\t+ i * sizeof(*txr)\n \t\t\t\t+ offsetof(struct virtio_tx_region, tx_hdr);\n \n-\t\t\tstart_dp->len = vq->hw->vtnet_hdr_size;\n+\t\t\tstart_dp->len = hw->vtnet_hdr_size;\n \t\t\tstart_dp->flags = VRING_DESC_F_NEXT;\n \t\t}\n \n+\t\t*pvq = txvq;\n \t} else if 
(queue_type == VTNET_CQ) {\n-\t\t/* Allocate a page for control vq command, data and status */\n-\t\tsnprintf(vq_name, sizeof(vq_name), \"port%d_cvq_hdrzone\",\n-\t\t\tdev->data->port_id);\n-\t\tvq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,\n-\t\t\tPAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);\n-\t\tif (vq->virtio_net_hdr_mz == NULL) {\n-\t\t\tif (rte_errno == EEXIST)\n-\t\t\t\tvq->virtio_net_hdr_mz =\n-\t\t\t\t\trte_memzone_lookup(vq_name);\n-\t\t\tif (vq->virtio_net_hdr_mz == NULL) {\n-\t\t\t\tvirtio_dev_queue_release(vq);\n-\t\t\t\treturn -ENOMEM;\n-\t\t\t}\n-\t\t}\n-\t\tvq->virtio_net_hdr_mem =\n-\t\t\tvq->virtio_net_hdr_mz->phys_addr;\n-\t\tmemset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);\n+\t\tcvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq);\n+\t\tcvq->vq = vq;\n+\t\tcvq->mz = mz;\n+\t\tcvq->virtio_net_hdr_mz = hdr_mz;\n+\t\tcvq->virtio_net_hdr_mem = hdr_mz->phys_addr;\n+\t\tmemset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);\n+\t\t*pvq = cvq;\n \t}\n \n \thw->vtpci_ops->setup_queue(hw, vq);\n-\n \tvq->configured = 1;\n-\t*pvq = vq;\n \treturn 0;\n+\n+fail_q_alloc:\n+\trte_free(sw_ring);\n+\trte_memzone_free(hdr_mz);\n+\trte_memzone_free(mz);\n+\trte_free(vq);\n+\n+\treturn ret;\n }\n \n static int\n virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,\n \t\tuint32_t socket_id)\n {\n-\tstruct virtqueue *vq;\n+\tstruct virtnet_ctl *cvq;\n \tint ret;\n \tstruct virtio_hw *hw = dev->data->dev_private;\n \n \tPMD_INIT_FUNC_TRACE();\n \tret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,\n-\t\t\tvtpci_queue_idx, 0, socket_id, &vq);\n+\t\t\tvtpci_queue_idx, 0, socket_id, (void **)&cvq);\n \tif (ret < 0) {\n \t\tPMD_INIT_LOG(ERR, \"control vq initialization failed\");\n \t\treturn ret;\n \t}\n \n-\thw->cvq = vq;\n+\thw->cvq = cvq;\n \treturn 0;\n }\n \n@@ -676,32 +715,32 @@ virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n \tunsigned i;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tconst struct virtqueue *txvq = dev->data->tx_queues[i];\n+\t\tconst struct virtnet_tx *txvq = dev->data->tx_queues[i];\n \t\tif (txvq == NULL)\n \t\t\tcontinue;\n \n-\t\tstats->opackets += txvq->packets;\n-\t\tstats->obytes += txvq->bytes;\n-\t\tstats->oerrors += txvq->errors;\n+\t\tstats->opackets += txvq->stats.packets;\n+\t\tstats->obytes += txvq->stats.bytes;\n+\t\tstats->oerrors += txvq->stats.errors;\n \n \t\tif (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {\n-\t\t\tstats->q_opackets[i] = txvq->packets;\n-\t\t\tstats->q_obytes[i] = txvq->bytes;\n+\t\t\tstats->q_opackets[i] = txvq->stats.packets;\n+\t\t\tstats->q_obytes[i] = txvq->stats.bytes;\n \t\t}\n \t}\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tconst struct virtqueue *rxvq = dev->data->rx_queues[i];\n+\t\tconst struct virtnet_rx *rxvq = dev->data->rx_queues[i];\n \t\tif (rxvq == NULL)\n \t\t\tcontinue;\n \n-\t\tstats->ipackets += rxvq->packets;\n-\t\tstats->ibytes += rxvq->bytes;\n-\t\tstats->ierrors += rxvq->errors;\n+\t\tstats->ipackets += rxvq->stats.packets;\n+\t\tstats->ibytes += rxvq->stats.bytes;\n+\t\tstats->ierrors += rxvq->stats.errors;\n \n \t\tif (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {\n-\t\t\tstats->q_ipackets[i] = rxvq->packets;\n-\t\t\tstats->q_ibytes[i] = rxvq->bytes;\n+\t\t\tstats->q_ipackets[i] = rxvq->stats.packets;\n+\t\t\tstats->q_ibytes[i] = rxvq->stats.bytes;\n \t\t}\n \t}\n \n@@ -715,44 +754,44 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,\n \tunsigned i;\n \tunsigned count = 0;\n 
\n-\tunsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS +\n-\t\tdev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS;\n+\tunsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +\n+\t\tdev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;\n \n \tif (n < nstats)\n \t\treturn nstats;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tstruct virtqueue *rxvq = dev->data->rx_queues[i];\n+\t\tstruct virtnet_rx *rxvq = dev->data->rx_queues[i];\n \n \t\tif (rxvq == NULL)\n \t\t\tcontinue;\n \n \t\tunsigned t;\n \n-\t\tfor (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {\n+\t\tfor (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {\n \t\t\tsnprintf(xstats[count].name, sizeof(xstats[count].name),\n \t\t\t\t \"rx_q%u_%s\", i,\n-\t\t\t\t rte_virtio_q_stat_strings[t].name);\n+\t\t\t\t rte_virtio_rxq_stat_strings[t].name);\n \t\t\txstats[count].value = *(uint64_t *)(((char *)rxvq) +\n-\t\t\t\trte_virtio_q_stat_strings[t].offset);\n+\t\t\t\trte_virtio_rxq_stat_strings[t].offset);\n \t\t\tcount++;\n \t\t}\n \t}\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tstruct virtqueue *txvq = dev->data->tx_queues[i];\n+\t\tstruct virtnet_tx *txvq = dev->data->tx_queues[i];\n \n \t\tif (txvq == NULL)\n \t\t\tcontinue;\n \n \t\tunsigned t;\n \n-\t\tfor (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {\n+\t\tfor (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {\n \t\t\tsnprintf(xstats[count].name, sizeof(xstats[count].name),\n \t\t\t\t \"tx_q%u_%s\", i,\n-\t\t\t\t rte_virtio_q_stat_strings[t].name);\n+\t\t\t\t rte_virtio_txq_stat_strings[t].name);\n \t\t\txstats[count].value = *(uint64_t *)(((char *)txvq) +\n-\t\t\t\trte_virtio_q_stat_strings[t].offset);\n+\t\t\t\trte_virtio_txq_stat_strings[t].offset);\n \t\t\tcount++;\n \t\t}\n \t}\n@@ -772,29 +811,31 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)\n \tunsigned int i;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tstruct virtqueue *txvq = dev->data->tx_queues[i];\n+\t\tstruct virtnet_tx *txvq = dev->data->tx_queues[i];\n \t\tif (txvq == NULL)\n \t\t\tcontinue;\n \n-\t\ttxvq->packets = 0;\n-\t\ttxvq->bytes = 0;\n-\t\ttxvq->errors = 0;\n-\t\ttxvq->multicast = 0;\n-\t\ttxvq->broadcast = 0;\n-\t\tmemset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8);\n+\t\ttxvq->stats.packets = 0;\n+\t\ttxvq->stats.bytes = 0;\n+\t\ttxvq->stats.errors = 0;\n+\t\ttxvq->stats.multicast = 0;\n+\t\ttxvq->stats.broadcast = 0;\n+\t\tmemset(txvq->stats.size_bins, 0,\n+\t\t       sizeof(txvq->stats.size_bins[0]) * 8);\n \t}\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tstruct virtqueue *rxvq = dev->data->rx_queues[i];\n+\t\tstruct virtnet_rx *rxvq = dev->data->rx_queues[i];\n \t\tif (rxvq == NULL)\n \t\t\tcontinue;\n \n-\t\trxvq->packets = 0;\n-\t\trxvq->bytes = 0;\n-\t\trxvq->errors = 0;\n-\t\trxvq->multicast = 0;\n-\t\trxvq->broadcast = 0;\n-\t\tmemset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8);\n+\t\trxvq->stats.packets = 0;\n+\t\trxvq->stats.bytes = 0;\n+\t\trxvq->stats.errors = 0;\n+\t\trxvq->stats.multicast = 0;\n+\t\trxvq->stats.broadcast = 0;\n+\t\tmemset(rxvq->stats.size_bins, 0,\n+\t\t       sizeof(rxvq->stats.size_bins[0]) * 8);\n \t}\n }\n \n@@ -1187,7 +1228,8 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)\n \teth_dev->tx_pkt_burst = NULL;\n \teth_dev->rx_pkt_burst = NULL;\n \n-\tvirtio_dev_queue_release(hw->cvq);\n+\tif (hw->cvq)\n+\t\tvirtio_dev_queue_release(hw->cvq->vq);\n \n \trte_free(eth_dev->data->mac_addrs);\n \teth_dev->data->mac_addrs = NULL;\n@@ -1275,6 +1317,8 @@ virtio_dev_start(struct rte_eth_dev *dev)\n {\n \tuint16_t 
nb_queues, i;\n \tstruct virtio_hw *hw = dev->data->dev_private;\n+\tstruct virtnet_rx *rxvq;\n+\tstruct virtnet_tx *txvq __rte_unused;\n \n \t/* check if lsc interrupt feature is enabled */\n \tif (dev->data->dev_conf.intr_conf.lsc) {\n@@ -1314,16 +1358,22 @@ virtio_dev_start(struct rte_eth_dev *dev)\n \n \tPMD_INIT_LOG(DEBUG, \"nb_queues=%d\", nb_queues);\n \n-\tfor (i = 0; i < nb_queues; i++)\n-\t\tvirtqueue_notify(dev->data->rx_queues[i]);\n+\tfor (i = 0; i < nb_queues; i++) {\n+\t\trxvq = dev->data->rx_queues[i];\n+\t\tvirtqueue_notify(rxvq->vq);\n+\t}\n \n \tPMD_INIT_LOG(DEBUG, \"Notified backend at initialization\");\n \n-\tfor (i = 0; i < dev->data->nb_rx_queues; i++)\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxvq = dev->data->rx_queues[i];\n+\t\tVIRTQUEUE_DUMP(rxvq->vq);\n+\t}\n \n-\tfor (i = 0; i < dev->data->nb_tx_queues; i++)\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxvq = dev->data->tx_queues[i];\n+\t\tVIRTQUEUE_DUMP(txvq->vq);\n+\t}\n \n \treturn 0;\n }\n@@ -1334,14 +1384,14 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)\n \tint i, mbuf_num = 0;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tstruct virtnet_rx *rxvq = dev->data->rx_queues[i];\n+\n \t\tPMD_INIT_LOG(DEBUG,\n \t\t\t     \"Before freeing rxq[%d] used and unused buf\", i);\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);\n+\t\tVIRTQUEUE_DUMP(rxvq->vq);\n \n-\t\tPMD_INIT_LOG(DEBUG, \"rx_queues[%d]=%p\",\n-\t\t\t\ti, dev->data->rx_queues[i]);\n-\t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n-\t\t\t\t\tdev->data->rx_queues[i])) != NULL) {\n+\t\tPMD_INIT_LOG(DEBUG, \"rx_queues[%d]=%p\", i, rxvq);\n+\t\twhile ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {\n \t\t\trte_pktmbuf_free(buf);\n \t\t\tmbuf_num++;\n \t\t}\n@@ -1349,27 +1399,27 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)\n \t\tPMD_INIT_LOG(DEBUG, \"free %d mbufs\", mbuf_num);\n \t\tPMD_INIT_LOG(DEBUG,\n \t\t\t     \"After freeing rxq[%d] used and unused buf\", i);\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);\n+\t\tVIRTQUEUE_DUMP(rxvq->vq);\n \t}\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tstruct virtnet_tx *txvq = dev->data->tx_queues[i];\n+\n \t\tPMD_INIT_LOG(DEBUG,\n \t\t\t     \"Before freeing txq[%d] used and unused bufs\",\n \t\t\t     i);\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);\n+\t\tVIRTQUEUE_DUMP(txvq->vq);\n \n \t\tmbuf_num = 0;\n-\t\twhile ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(\n-\t\t\t\t\tdev->data->tx_queues[i])) != NULL) {\n+\t\twhile ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {\n \t\t\trte_pktmbuf_free(buf);\n-\n \t\t\tmbuf_num++;\n \t\t}\n \n \t\tPMD_INIT_LOG(DEBUG, \"free %d mbufs\", mbuf_num);\n \t\tPMD_INIT_LOG(DEBUG,\n \t\t\t     \"After freeing txq[%d] used and unused buf\", i);\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);\n+\t\tVIRTQUEUE_DUMP(txvq->vq);\n \t}\n }\n \ndiff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h\nindex 66423a0..7e77259 100644\n--- a/drivers/net/virtio/virtio_ethdev.h\n+++ b/drivers/net/virtio/virtio_ethdev.h\n@@ -81,7 +81,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,\n \t\t\tuint16_t vtpci_queue_idx,\n \t\t\tuint16_t nb_desc,\n \t\t\tunsigned int socket_id,\n-\t\t\tstruct virtqueue **pvq);\n+\t\t\tvoid **pvq);\n \n 
void virtio_dev_queue_release(struct virtqueue *vq);\n \ndiff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c\nindex 9cdca06..d0f2428 100644\n--- a/drivers/net/virtio/virtio_pci.c\n+++ b/drivers/net/virtio/virtio_pci.c\n@@ -150,7 +150,7 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)\n \n \trte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,\n \t\t\t VIRTIO_PCI_QUEUE_SEL);\n-\tsrc = vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;\n+\tsrc = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;\n \trte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);\n }\n \n@@ -373,7 +373,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)\n \tuint64_t desc_addr, avail_addr, used_addr;\n \tuint16_t notify_off;\n \n-\tdesc_addr = vq->mz->phys_addr;\n+\tdesc_addr = vq->vq_ring_mem;\n \tavail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);\n \tused_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,\n \t\t\t\t\t\t\t ring[vq->vq_nentries]),\ndiff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h\nindex 554efea..f20468a 100644\n--- a/drivers/net/virtio/virtio_pci.h\n+++ b/drivers/net/virtio/virtio_pci.h\n@@ -40,6 +40,7 @@\n #include <rte_ethdev.h>\n \n struct virtqueue;\n+struct virtnet_ctl;\n \n /* VirtIO PCI vendor/device ID. */\n #define VIRTIO_PCI_VENDORID     0x1AF4\n@@ -242,7 +243,7 @@ struct virtio_pci_ops {\n struct virtio_net_config;\n \n struct virtio_hw {\n-\tstruct virtqueue *cvq;\n+\tstruct virtnet_ctl *cvq;\n \tstruct rte_pci_ioport io;\n \tuint64_t    guest_features;\n \tuint32_t    max_tx_queues;\ndiff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex f326222..0d51fc0 100644\n--- a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -209,23 +209,24 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)\n }\n \n static inline void\n-virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,\n+virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \t\t       uint16_t needed, int use_indirect, int can_push)\n {\n \tstruct vq_desc_extra *dxp;\n+\tstruct virtqueue *vq = txvq->vq;\n \tstruct vring_desc *start_dp;\n \tuint16_t seg_num = cookie->nb_segs;\n \tuint16_t head_idx, idx;\n-\tuint16_t head_size = txvq->hw->vtnet_hdr_size;\n+\tuint16_t head_size = vq->hw->vtnet_hdr_size;\n \tunsigned long offs;\n \n-\thead_idx = txvq->vq_desc_head_idx;\n+\thead_idx = vq->vq_desc_head_idx;\n \tidx = head_idx;\n-\tdxp = &txvq->vq_descx[idx];\n+\tdxp = &vq->vq_descx[idx];\n \tdxp->cookie = (void *)cookie;\n \tdxp->ndescs = needed;\n \n-\tstart_dp = txvq->vq_ring.desc;\n+\tstart_dp = vq->vq_ring.desc;\n \n \tif (can_push) {\n \t\t/* put on zero'd transmit header (no offloads) */\n@@ -259,7 +260,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,\n \t\t\t+ offsetof(struct virtio_tx_region, tx_hdr);\n \n \t\tstart_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;\n-\t\tstart_dp[idx].len   = txvq->hw->vtnet_hdr_size;\n+\t\tstart_dp[idx].len   = vq->hw->vtnet_hdr_size;\n \t\tstart_dp[idx].flags = VRING_DESC_F_NEXT;\n \t\tidx = start_dp[idx].next;\n \t}\n@@ -272,13 +273,13 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,\n \t} while ((cookie = cookie->next) != NULL);\n \n \tif (use_indirect)\n-\t\tidx = txvq->vq_ring.desc[head_idx].next;\n+\t\tidx = vq->vq_ring.desc[head_idx].next;\n \n-\ttxvq->vq_desc_head_idx = idx;\n-\tif 
(txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n-\t\ttxvq->vq_desc_tail_idx = idx;\n-\ttxvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);\n-\tvq_update_avail_ring(txvq, head_idx);\n+\tvq->vq_desc_head_idx = idx;\n+\tif (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n+\t\tvq->vq_desc_tail_idx = idx;\n+\tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);\n+\tvq_update_avail_ring(vq, head_idx);\n }\n \n static inline struct rte_mbuf *\n@@ -293,10 +294,9 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)\n }\n \n static void\n-virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n+virtio_dev_vring_start(struct virtqueue *vq)\n {\n-\tstruct rte_mbuf *m;\n-\tint i, nbufs, error, size = vq->vq_nentries;\n+\tint size = vq->vq_nentries;\n \tstruct vring *vr = &vq->vq_ring;\n \tuint8_t *ring_mem = vq->vq_ring_virt_mem;\n \n@@ -320,10 +320,42 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n \t * Disable device(host) interrupting guest\n \t */\n \tvirtqueue_disable_intr(vq);\n+}\n \n-\t/* Only rx virtqueue needs mbufs to be allocated at initialization */\n-\tif (queue_type == VTNET_RQ) {\n-\t\tif (vq->mpool == NULL)\n+void\n+virtio_dev_cq_start(struct rte_eth_dev *dev)\n+{\n+\tstruct virtio_hw *hw = dev->data->dev_private;\n+\n+\tif (hw->cvq && hw->cvq->vq) {\n+\t\tvirtio_dev_vring_start(hw->cvq->vq);\n+\t\tVIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);\n+\t}\n+}\n+\n+void\n+virtio_dev_rxtx_start(struct rte_eth_dev *dev)\n+{\n+\t/*\n+\t * Start receive and transmit vrings\n+\t * -\tSetup vring structure for all queues\n+\t * -\tInitialize descriptor for the rx vring\n+\t * -\tAllocate blank mbufs for the each rx descriptor\n+\t *\n+\t */\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* Start rx vring. */\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tstruct virtnet_rx *rxvq = dev->data->rx_queues[i];\n+\t\tstruct virtqueue *vq = rxvq->vq;\n+\t\tint error, nbufs;\n+\t\tstruct rte_mbuf *m;\n+\n+\t\tvirtio_dev_vring_start(vq);\n+\t\tif (rxvq->mpool == NULL)\n \t\t\trte_exit(EXIT_FAILURE,\n \t\t\t\"Cannot allocate initial mbufs for rx virtqueue\");\n \n@@ -338,12 +370,12 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n \t\t\t\tvq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;\n \t\t\t}\n #endif\n-\t\tmemset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf));\n+\t\tmemset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));\n \t\tfor (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++)\n-\t\t\tvq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf;\n+\t\t\tvq->sw_ring[vq->vq_nentries + i] = &rxvq->fake_mbuf;\n \n \t\twhile (!virtqueue_full(vq)) {\n-\t\t\tm = rte_rxmbuf_alloc(vq->mpool);\n+\t\t\tm = rte_rxmbuf_alloc(rxvq->mpool);\n \t\t\tif (m == NULL)\n \t\t\t\tbreak;\n \n@@ -366,7 +398,16 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n \t\tvq_update_avail_idx(vq);\n \n \t\tPMD_INIT_LOG(DEBUG, \"Allocated %d bufs\", nbufs);\n-\t} else if (queue_type == VTNET_TQ) {\n+\n+\t\tVIRTQUEUE_DUMP(vq);\n+\t}\n+\n+\t/* Start tx vring. 
*/\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tstruct virtnet_tx *txvq = dev->data->tx_queues[i];\n+\t\tstruct virtqueue *vq = txvq->vq;\n+\n+\t\tvirtio_dev_vring_start(vq);\n #ifdef RTE_MACHINE_CPUFLAG_SSSE3\n \t\tif (use_simple_rxtx) {\n \t\t\tint mid_idx  = vq->vq_nentries >> 1;\n@@ -374,7 +415,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n \t\t\t\tvq->vq_ring.avail->ring[i] = i + mid_idx;\n \t\t\t\tvq->vq_ring.desc[i + mid_idx].next = i;\n \t\t\t\tvq->vq_ring.desc[i + mid_idx].addr =\n-\t\t\t\t\tvq->virtio_net_hdr_mem +\n+\t\t\t\t\ttxvq->virtio_net_hdr_mem +\n \t\t\t\t\toffsetof(struct virtio_tx_region, tx_hdr);\n \t\t\t\tvq->vq_ring.desc[i + mid_idx].len =\n \t\t\t\t\tvq->hw->vtnet_hdr_size;\n@@ -386,44 +427,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)\n \t\t\t\tvq->vq_ring.avail->ring[i] = i;\n \t\t}\n #endif\n-\t}\n-}\n-\n-void\n-virtio_dev_cq_start(struct rte_eth_dev *dev)\n-{\n-\tstruct virtio_hw *hw = dev->data->dev_private;\n-\n-\tif (hw->cvq) {\n-\t\tvirtio_dev_vring_start(hw->cvq, VTNET_CQ);\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);\n-\t}\n-}\n-\n-void\n-virtio_dev_rxtx_start(struct rte_eth_dev *dev)\n-{\n-\t/*\n-\t * Start receive and transmit vrings\n-\t * -\tSetup vring structure for all queues\n-\t * -\tInitialize descriptor for the rx vring\n-\t * -\tAllocate blank mbufs for the each rx descriptor\n-\t *\n-\t */\n-\tint i;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\t/* Start rx vring. */\n-\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tvirtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);\n-\t}\n-\n-\t/* Start tx vring. */\n-\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tvirtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);\n-\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);\n+\t\tVIRTQUEUE_DUMP(vq);\n \t}\n }\n \n@@ -436,24 +440,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\tstruct rte_mempool *mp)\n {\n \tuint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;\n-\tstruct virtqueue *vq;\n+\tstruct virtnet_rx *rxvq;\n \tint ret;\n \n \tPMD_INIT_FUNC_TRACE();\n \tret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,\n-\t\t\tnb_desc, socket_id, &vq);\n+\t\t\tnb_desc, socket_id, (void **)&rxvq);\n \tif (ret < 0) {\n \t\tPMD_INIT_LOG(ERR, \"rvq initialization failed\");\n \t\treturn ret;\n \t}\n \n \t/* Create mempool for rx mbuf allocation */\n-\tvq->mpool = mp;\n+\trxvq->mpool = mp;\n \n-\tdev->data->rx_queues[queue_idx] = vq;\n+\tdev->data->rx_queues[queue_idx] = rxvq;\n \n #ifdef RTE_MACHINE_CPUFLAG_SSSE3\n-\tvirtio_rxq_vec_setup(vq);\n+\tvirtio_rxq_vec_setup(rxvq);\n #endif\n \n \treturn 0;\n@@ -462,7 +466,16 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,\n void\n virtio_dev_rx_queue_release(void *rxq)\n {\n-\tvirtio_dev_queue_release(rxq);\n+\tstruct virtnet_rx *rxvq = rxq;\n+\tstruct virtqueue *vq = rxvq->vq;\n+\t/* rxvq is freed when vq is freed, and as mz should be freed after the\n+\t * del_queue, so we reserve the mz pointer first.\n+\t */\n+\tconst struct rte_memzone *mz = rxvq->mz;\n+\n+\t/* no need to free rxq as vq and rxq are allocated together */\n+\tvirtio_dev_queue_release(vq);\n+\trte_memzone_free(mz);\n }\n \n /*\n@@ -484,6 +497,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,\n #ifdef RTE_MACHINE_CPUFLAG_SSSE3\n \tstruct virtio_hw *hw = dev->data->dev_private;\n #endif\n+\tstruct virtnet_tx *txvq;\n \tstruct virtqueue *vq;\n 
\tuint16_t tx_free_thresh;\n \tint ret;\n@@ -508,11 +522,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,\n #endif\n \n \tret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,\n-\t\t\tnb_desc, socket_id, &vq);\n+\t\t\tnb_desc, socket_id, (void **)&txvq);\n \tif (ret < 0) {\n-\t\tPMD_INIT_LOG(ERR, \"rvq initialization failed\");\n+\t\tPMD_INIT_LOG(ERR, \"tvq initialization failed\");\n \t\treturn ret;\n \t}\n+\tvq = txvq->vq;\n \n \ttx_free_thresh = tx_conf->tx_free_thresh;\n \tif (tx_free_thresh == 0)\n@@ -530,14 +545,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \n \tvq->vq_free_thresh = tx_free_thresh;\n \n-\tdev->data->tx_queues[queue_idx] = vq;\n+\tdev->data->tx_queues[queue_idx] = txvq;\n \treturn 0;\n }\n \n void\n virtio_dev_tx_queue_release(void *txq)\n {\n-\tvirtio_dev_queue_release(txq);\n+\tstruct virtnet_tx *txvq = txq;\n+\tstruct virtqueue *vq = txvq->vq;\n+\t/* txvq is freed when vq is freed, and as mz should be freed after the\n+\t * del_queue, so we reserve the mz pointer first.\n+\t */\n+\tconst struct rte_memzone *hdr_mz = txvq->virtio_net_hdr_mz;\n+\tconst struct rte_memzone *mz = txvq->mz;\n+\n+\tvirtio_dev_queue_release(vq);\n+\trte_memzone_free(mz);\n+\trte_memzone_free(hdr_mz);\n }\n \n static void\n@@ -556,34 +581,34 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)\n }\n \n static void\n-virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)\n+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)\n {\n \tuint32_t s = mbuf->pkt_len;\n \tstruct ether_addr *ea;\n \n \tif (s == 64) {\n-\t\tvq->size_bins[1]++;\n+\t\tstats->size_bins[1]++;\n \t} else if (s > 64 && s < 1024) {\n \t\tuint32_t bin;\n \n \t\t/* count zeros, and offset into correct bin */\n \t\tbin = (sizeof(s) * 8) - __builtin_clz(s) - 5;\n-\t\tvq->size_bins[bin]++;\n+\t\tstats->size_bins[bin]++;\n \t} else {\n \t\tif (s < 64)\n-\t\t\tvq->size_bins[0]++;\n+\t\t\tstats->size_bins[0]++;\n \t\telse if (s < 1519)\n-\t\t\tvq->size_bins[6]++;\n+\t\t\tstats->size_bins[6]++;\n \t\telse if (s >= 1519)\n-\t\t\tvq->size_bins[7]++;\n+\t\t\tstats->size_bins[7]++;\n \t}\n \n \tea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);\n \tif (is_multicast_ether_addr(ea)) {\n \t\tif (is_broadcast_ether_addr(ea))\n-\t\t\tvq->broadcast++;\n+\t\t\tstats->broadcast++;\n \t\telse\n-\t\t\tvq->multicast++;\n+\t\t\tstats->multicast++;\n \t}\n }\n \n@@ -592,7 +617,8 @@ virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)\n uint16_t\n virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n {\n-\tstruct virtqueue *rxvq = rx_queue;\n+\tstruct virtnet_rx *rxvq = rx_queue;\n+\tstruct virtqueue *vq = rxvq->vq;\n \tstruct virtio_hw *hw;\n \tstruct rte_mbuf *rxm, *new_mbuf;\n \tuint16_t nb_used, num, nb_rx;\n@@ -602,19 +628,19 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \tuint32_t i, nb_enqueued;\n \tuint32_t hdr_size;\n \n-\tnb_used = VIRTQUEUE_NUSED(rxvq);\n+\tnb_used = VIRTQUEUE_NUSED(vq);\n \n \tvirtio_rmb();\n \n \tnum = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);\n \tnum = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? 
num : VIRTIO_MBUF_BURST_SZ);\n \tif (likely(num > DESC_PER_CACHELINE))\n-\t\tnum = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);\n+\t\tnum = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);\n \n-\tnum = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);\n+\tnum = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);\n \tPMD_RX_LOG(DEBUG, \"used:%d dequeue:%d\", nb_used, num);\n \n-\thw = rxvq->hw;\n+\thw = vq->hw;\n \tnb_rx = 0;\n \tnb_enqueued = 0;\n \thdr_size = hw->vtnet_hdr_size;\n@@ -627,8 +653,8 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\tif (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n \t\t\tnb_enqueued++;\n-\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n-\t\t\trxvq->errors++;\n+\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\trxvq->stats.errors++;\n \t\t\tcontinue;\n \t\t}\n \n@@ -649,15 +675,15 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \n \t\trx_pkts[nb_rx++] = rxm;\n \n-\t\trxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;\n-\t\tvirtio_update_packet_stats(rxvq, rxm);\n+\t\trxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;\n+\t\tvirtio_update_packet_stats(&rxvq->stats, rxm);\n \t}\n \n-\trxvq->packets += nb_rx;\n+\trxvq->stats.packets += nb_rx;\n \n \t/* Allocate new mbuf for the used descriptor */\n \terror = ENOSPC;\n-\twhile (likely(!virtqueue_full(rxvq))) {\n+\twhile (likely(!virtqueue_full(vq))) {\n \t\tnew_mbuf = rte_rxmbuf_alloc(rxvq->mpool);\n \t\tif (unlikely(new_mbuf == NULL)) {\n \t\t\tstruct rte_eth_dev *dev\n@@ -665,7 +691,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t\tdev->data->rx_mbuf_alloc_failed++;\n \t\t\tbreak;\n \t\t}\n-\t\terror = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n+\t\terror = virtqueue_enqueue_recv_refill(vq, new_mbuf);\n \t\tif (unlikely(error)) {\n \t\t\trte_pktmbuf_free(new_mbuf);\n \t\t\tbreak;\n@@ -674,10 +700,10 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t}\n \n \tif (likely(nb_enqueued)) {\n-\t\tvq_update_avail_idx(rxvq);\n+\t\tvq_update_avail_idx(vq);\n \n-\t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n-\t\t\tvirtqueue_notify(rxvq);\n+\t\tif (unlikely(virtqueue_kick_prepare(vq))) {\n+\t\t\tvirtqueue_notify(vq);\n \t\t\tPMD_RX_LOG(DEBUG, \"Notified\");\n \t\t}\n \t}\n@@ -690,7 +716,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\t\tstruct rte_mbuf **rx_pkts,\n \t\t\tuint16_t nb_pkts)\n {\n-\tstruct virtqueue *rxvq = rx_queue;\n+\tstruct virtnet_rx *rxvq = rx_queue;\n+\tstruct virtqueue *vq = rxvq->vq;\n \tstruct virtio_hw *hw;\n \tstruct rte_mbuf *rxm, *new_mbuf;\n \tuint16_t nb_used, num, nb_rx;\n@@ -704,13 +731,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \tuint32_t seg_res;\n \tuint32_t hdr_size;\n \n-\tnb_used = VIRTQUEUE_NUSED(rxvq);\n+\tnb_used = VIRTQUEUE_NUSED(vq);\n \n \tvirtio_rmb();\n \n \tPMD_RX_LOG(DEBUG, \"used:%d\", nb_used);\n \n-\thw = rxvq->hw;\n+\thw = vq->hw;\n \tnb_rx = 0;\n \ti = 0;\n \tnb_enqueued = 0;\n@@ -725,7 +752,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\tif (nb_rx == nb_pkts)\n \t\t\tbreak;\n \n-\t\tnum = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);\n+\t\tnum = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);\n \t\tif (num != 1)\n \t\t\tcontinue;\n \n@@ -739,8 +766,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\tif (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {\n \t\t\tPMD_RX_LOG(ERR, \"Packet drop\");\n 
\t\t\tnb_enqueued++;\n-\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n-\t\t\trxvq->errors++;\n+\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\trxvq->stats.errors++;\n \t\t\tcontinue;\n \t\t}\n \n@@ -771,9 +798,9 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\t\t */\n \t\t\tuint16_t  rcv_cnt =\n \t\t\t\tRTE_MIN(seg_res, RTE_DIM(rcv_pkts));\n-\t\t\tif (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {\n+\t\t\tif (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {\n \t\t\t\tuint32_t rx_num =\n-\t\t\t\t\tvirtqueue_dequeue_burst_rx(rxvq,\n+\t\t\t\t\tvirtqueue_dequeue_burst_rx(vq,\n \t\t\t\t\trcv_pkts, len, rcv_cnt);\n \t\t\t\ti += rx_num;\n \t\t\t\trcv_cnt = rx_num;\n@@ -781,8 +808,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\t\t\tPMD_RX_LOG(ERR,\n \t\t\t\t\t   \"No enough segments for packet.\");\n \t\t\t\tnb_enqueued++;\n-\t\t\t\tvirtio_discard_rxbuf(rxvq, rxm);\n-\t\t\t\trxvq->errors++;\n+\t\t\t\tvirtio_discard_rxbuf(vq, rxm);\n+\t\t\t\trxvq->stats.errors++;\n \t\t\t\tbreak;\n \t\t\t}\n \n@@ -812,16 +839,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\tVIRTIO_DUMP_PACKET(rx_pkts[nb_rx],\n \t\t\trx_pkts[nb_rx]->data_len);\n \n-\t\trxvq->bytes += rx_pkts[nb_rx]->pkt_len;\n-\t\tvirtio_update_packet_stats(rxvq, rx_pkts[nb_rx]);\n+\t\trxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;\n+\t\tvirtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);\n \t\tnb_rx++;\n \t}\n \n-\trxvq->packets += nb_rx;\n+\trxvq->stats.packets += nb_rx;\n \n \t/* Allocate new mbuf for the used descriptor */\n \terror = ENOSPC;\n-\twhile (likely(!virtqueue_full(rxvq))) {\n+\twhile (likely(!virtqueue_full(vq))) {\n \t\tnew_mbuf = rte_rxmbuf_alloc(rxvq->mpool);\n \t\tif (unlikely(new_mbuf == NULL)) {\n \t\t\tstruct rte_eth_dev *dev\n@@ -829,7 +856,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t\t\tdev->data->rx_mbuf_alloc_failed++;\n \t\t\tbreak;\n \t\t}\n-\t\terror = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);\n+\t\terror = virtqueue_enqueue_recv_refill(vq, new_mbuf);\n \t\tif (unlikely(error)) {\n \t\t\trte_pktmbuf_free(new_mbuf);\n \t\t\tbreak;\n@@ -838,10 +865,10 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n \t}\n \n \tif (likely(nb_enqueued)) {\n-\t\tvq_update_avail_idx(rxvq);\n+\t\tvq_update_avail_idx(vq);\n \n-\t\tif (unlikely(virtqueue_kick_prepare(rxvq))) {\n-\t\t\tvirtqueue_notify(rxvq);\n+\t\tif (unlikely(virtqueue_kick_prepare(vq))) {\n+\t\t\tvirtqueue_notify(vq);\n \t\t\tPMD_RX_LOG(DEBUG, \"Notified\");\n \t\t}\n \t}\n@@ -852,8 +879,9 @@ virtio_recv_mergeable_pkts(void *rx_queue,\n uint16_t\n virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n {\n-\tstruct virtqueue *txvq = tx_queue;\n-\tstruct virtio_hw *hw = txvq->hw;\n+\tstruct virtnet_tx *txvq = tx_queue;\n+\tstruct virtqueue *vq = txvq->vq;\n+\tstruct virtio_hw *hw = vq->hw;\n \tuint16_t hdr_size = hw->vtnet_hdr_size;\n \tuint16_t nb_used, nb_tx;\n \tint error;\n@@ -862,11 +890,11 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\treturn nb_pkts;\n \n \tPMD_TX_LOG(DEBUG, \"%d packets to xmit\", nb_pkts);\n-\tnb_used = VIRTQUEUE_NUSED(txvq);\n+\tnb_used = VIRTQUEUE_NUSED(vq);\n \n \tvirtio_rmb();\n-\tif (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh))\n-\t\tvirtio_xmit_cleanup(txvq, nb_used);\n+\tif (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))\n+\t\tvirtio_xmit_cleanup(vq, nb_used);\n \n \tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n \t\tstruct rte_mbuf *txm = tx_pkts[nb_tx];\n@@ -899,16 +927,16 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t 
nb_pkts)\n \t\t * default    => number of segments + 1\n \t\t */\n \t\tslots = use_indirect ? 1 : (txm->nb_segs + !can_push);\n-\t\tneed = slots - txvq->vq_free_cnt;\n+\t\tneed = slots - vq->vq_free_cnt;\n \n \t\t/* Positive value indicates it need free vring descriptors */\n \t\tif (unlikely(need > 0)) {\n-\t\t\tnb_used = VIRTQUEUE_NUSED(txvq);\n+\t\t\tnb_used = VIRTQUEUE_NUSED(vq);\n \t\t\tvirtio_rmb();\n \t\t\tneed = RTE_MIN(need, (int)nb_used);\n \n-\t\t\tvirtio_xmit_cleanup(txvq, need);\n-\t\t\tneed = slots - txvq->vq_free_cnt;\n+\t\t\tvirtio_xmit_cleanup(vq, need);\n+\t\t\tneed = slots - vq->vq_free_cnt;\n \t\t\tif (unlikely(need > 0)) {\n \t\t\t\tPMD_TX_LOG(ERR,\n \t\t\t\t\t   \"No free tx descriptors to transmit\");\n@@ -919,17 +947,17 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t/* Enqueue Packet buffers */\n \t\tvirtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);\n \n-\t\ttxvq->bytes += txm->pkt_len;\n-\t\tvirtio_update_packet_stats(txvq, txm);\n+\t\ttxvq->stats.bytes += txm->pkt_len;\n+\t\tvirtio_update_packet_stats(&txvq->stats, txm);\n \t}\n \n-\ttxvq->packets += nb_tx;\n+\ttxvq->stats.packets += nb_tx;\n \n \tif (likely(nb_tx)) {\n-\t\tvq_update_avail_idx(txvq);\n+\t\tvq_update_avail_idx(vq);\n \n-\t\tif (unlikely(virtqueue_kick_prepare(txvq))) {\n-\t\t\tvirtqueue_notify(txvq);\n+\t\tif (unlikely(virtqueue_kick_prepare(vq))) {\n+\t\t\tvirtqueue_notify(vq);\n \t\t\tPMD_TX_LOG(DEBUG, \"Notified backend after xmit\");\n \t\t}\n \t}\ndiff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h\nindex a76c3e5..058b56a 100644\n--- a/drivers/net/virtio/virtio_rxtx.h\n+++ b/drivers/net/virtio/virtio_rxtx.h\n@@ -31,11 +31,65 @@\n  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n  */\n \n+#ifndef _VIRTIO_RXTX_H_\n+#define _VIRTIO_RXTX_H_\n+\n #define RTE_PMD_VIRTIO_RX_MAX_BURST 64\n \n+struct virtnet_stats {\n+\tuint64_t\tpackets;\n+\tuint64_t\tbytes;\n+\tuint64_t\terrors;\n+\tuint64_t\tmulticast;\n+\tuint64_t\tbroadcast;\n+\t/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */\n+\tuint64_t\tsize_bins[8];\n+};\n+\n+struct virtnet_rx {\n+\tstruct virtqueue *vq;\n+\t/* dummy mbuf, for wraparound when processing RX ring. */\n+\tstruct rte_mbuf fake_mbuf;\n+\tuint64_t mbuf_initializer; /**< value to init mbufs. */\n+\tstruct rte_mempool *mpool; /**< mempool for mbuf allocation */\n+\n+\tuint16_t queue_id;   /**< DPDK queue index. */\n+\tuint8_t port_id;     /**< Device port identifier. */\n+\n+\t/* Statistics */\n+\tstruct virtnet_stats stats;\n+\n+\tconst struct rte_memzone *mz; /**< mem zone to populate RX ring. */\n+};\n+\n+struct virtnet_tx {\n+\tstruct virtqueue *vq;\n+\t/**< memzone to populate hdr. */\n+\tconst struct rte_memzone *virtio_net_hdr_mz;\n+\tphys_addr_t virtio_net_hdr_mem;  /**< hdr for each xmit packet */\n+\n+\tuint16_t    queue_id;            /**< DPDK queue index. */\n+\tuint8_t     port_id;             /**< Device port identifier. */\n+\n+\t/* Statistics */\n+\tstruct virtnet_stats stats;\n+\n+\tconst struct rte_memzone *mz;    /**< mem zone to populate TX ring. */\n+};\n+\n+struct virtnet_ctl {\n+\tstruct virtqueue *vq;\n+\t/**< memzone to populate hdr. */\n+\tconst struct rte_memzone *virtio_net_hdr_mz;\n+\tphys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */\n+\tuint8_t port_id;                /**< Device port identifier. */\n+\tconst struct rte_memzone *mz;   /**< mem zone to populate RX ring. 
*/\n+};\n+\n #ifdef RTE_MACHINE_CPUFLAG_SSSE3\n-int virtio_rxq_vec_setup(struct virtqueue *rxq);\n+int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);\n \n int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,\n \tstruct rte_mbuf *m);\n #endif\n+#endif /* _VIRTIO_RXTX_H_ */\ndiff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c\nindex 8f5293d..fdd655d 100644\n--- a/drivers/net/virtio/virtio_rxtx_simple.c\n+++ b/drivers/net/virtio/virtio_rxtx_simple.c\n@@ -92,17 +92,18 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,\n }\n \n static inline void\n-virtio_rxq_rearm_vec(struct virtqueue *rxvq)\n+virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)\n {\n \tint i;\n \tuint16_t desc_idx;\n \tstruct rte_mbuf **sw_ring;\n \tstruct vring_desc *start_dp;\n \tint ret;\n+\tstruct virtqueue *vq = rxvq->vq;\n \n-\tdesc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1);\n-\tsw_ring = &rxvq->sw_ring[desc_idx];\n-\tstart_dp = &rxvq->vq_ring.desc[desc_idx];\n+\tdesc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);\n+\tsw_ring = &vq->sw_ring[desc_idx];\n+\tstart_dp = &vq->vq_ring.desc[desc_idx];\n \n \tret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,\n \t\tRTE_VIRTIO_VPMD_RX_REARM_THRESH);\n@@ -120,14 +121,14 @@ virtio_rxq_rearm_vec(struct virtqueue *rxvq)\n \n \t\tstart_dp[i].addr =\n \t\t\t(uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr +\n-\t\t\tRTE_PKTMBUF_HEADROOM - rxvq->hw->vtnet_hdr_size);\n+\t\t\tRTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size);\n \t\tstart_dp[i].len = sw_ring[i]->buf_len -\n-\t\t\tRTE_PKTMBUF_HEADROOM + rxvq->hw->vtnet_hdr_size;\n+\t\t\tRTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;\n \t}\n \n-\trxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;\n-\trxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;\n-\tvq_update_avail_idx(rxvq);\n+\tvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;\n+\tvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;\n+\tvq_update_avail_idx(vq);\n }\n \n /* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)\n@@ -143,7 +144,8 @@ uint16_t\n virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \tuint16_t nb_pkts)\n {\n-\tstruct virtqueue *rxvq = rx_queue;\n+\tstruct virtnet_rx *rxvq = rx_queue;\n+\tstruct virtqueue *vq = rxvq->vq;\n \tuint16_t nb_used;\n \tuint16_t desc_idx;\n \tstruct vring_used_elem *rused;\n@@ -175,15 +177,15 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \tlen_adjust = _mm_set_epi16(\n \t\t0, 0,\n \t\t0,\n-\t\t(uint16_t)-rxvq->hw->vtnet_hdr_size,\n-\t\t0, (uint16_t)-rxvq->hw->vtnet_hdr_size,\n+\t\t(uint16_t)-vq->hw->vtnet_hdr_size,\n+\t\t0, (uint16_t)-vq->hw->vtnet_hdr_size,\n \t\t0, 0);\n \n \tif (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))\n \t\treturn 0;\n \n-\tnb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx -\n-\t\trxvq->vq_used_cons_idx;\n+\tnb_used = *(volatile uint16_t *)&vq->vq_ring.used->idx -\n+\t\tvq->vq_used_cons_idx;\n \n \trte_compiler_barrier();\n \n@@ -193,17 +195,17 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \tnb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);\n \tnb_used = RTE_MIN(nb_used, nb_pkts);\n \n-\tdesc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1));\n-\trused = &rxvq->vq_ring.used->ring[desc_idx];\n-\tsw_ring  = &rxvq->sw_ring[desc_idx];\n-\tsw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries];\n+\tdesc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));\n+\trused = &vq->vq_ring.used->ring[desc_idx];\n+\tsw_ring  = &vq->sw_ring[desc_idx];\n+\tsw_ring_end = &vq->sw_ring[vq->vq_nentries];\n \n \t_mm_prefetch((const void *)rused, _MM_HINT_T0);\n \n-\tif (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {\n+\tif (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {\n \t\tvirtio_rxq_rearm_vec(rxvq);\n-\t\tif (unlikely(virtqueue_kick_prepare(rxvq)))\n-\t\t\tvirtqueue_notify(rxvq);\n+\t\tif (unlikely(virtqueue_kick_prepare(vq)))\n+\t\t\tvirtqueue_notify(vq);\n \t}\n \n \tfor (nb_pkts_received = 0;\n@@ -286,9 +288,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t}\n \t}\n \n-\trxvq->vq_used_cons_idx += nb_pkts_received;\n-\trxvq->vq_free_cnt += nb_pkts_received;\n-\trxvq->packets += nb_pkts_received;\n+\tvq->vq_used_cons_idx += nb_pkts_received;\n+\tvq->vq_free_cnt += nb_pkts_received;\n+\trxvq->stats.packets += nb_pkts_received;\n \treturn nb_pkts_received;\n }\n \n@@ -342,28 +344,29 @@ uint16_t\n virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tuint16_t nb_pkts)\n {\n-\tstruct virtqueue *txvq = tx_queue;\n+\tstruct virtnet_tx *txvq = tx_queue;\n+\tstruct virtqueue *vq = txvq->vq;\n \tuint16_t nb_used;\n \tuint16_t desc_idx;\n \tstruct vring_desc *start_dp;\n \tuint16_t nb_tail, nb_commit;\n \tint i;\n-\tuint16_t desc_idx_max = (txvq->vq_nentries >> 1) - 1;\n+\tuint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;\n \n-\tnb_used = VIRTQUEUE_NUSED(txvq);\n+\tnb_used = VIRTQUEUE_NUSED(vq);\n \trte_compiler_barrier();\n \n \tif (nb_used >= VIRTIO_TX_FREE_THRESH)\n-\t\tvirtio_xmit_cleanup(tx_queue);\n+\t\tvirtio_xmit_cleanup(vq);\n \n-\tnb_commit = nb_pkts = RTE_MIN((txvq->vq_free_cnt >> 1), nb_pkts);\n-\tdesc_idx = (uint16_t) (txvq->vq_avail_idx & desc_idx_max);\n-\tstart_dp = txvq->vq_ring.desc;\n+\tnb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);\n+\tdesc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);\n+\tstart_dp = vq->vq_ring.desc;\n \tnb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);\n \n \tif (nb_commit >= nb_tail) {\n \t\tfor (i = 0; i < nb_tail; i++)\n-\t\t\ttxvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];\n+\t\t\tvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];\n \t\tfor (i = 0; i < nb_tail; i++) {\n \t\t\tstart_dp[desc_idx].addr =\n \t\t\t\trte_mbuf_data_dma_addr(*tx_pkts);\n@@ -375,7 +378,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tdesc_idx = 0;\n \t}\n \tfor (i = 0; i < nb_commit; i++)\n-\t\ttxvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];\n+\t\tvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];\n \tfor (i = 0; i < nb_commit; i++) {\n \t\tstart_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts);\n \t\tstart_dp[desc_idx].len = (*tx_pkts)->pkt_len;\n@@ -385,21 +388,21 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n \n \trte_compiler_barrier();\n \n-\ttxvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);\n-\ttxvq->vq_avail_idx += nb_pkts;\n-\ttxvq->vq_ring.avail->idx = txvq->vq_avail_idx;\n-\ttxvq->packets += nb_pkts;\n+\tvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);\n+\tvq->vq_avail_idx += nb_pkts;\n+\tvq->vq_ring.avail->idx = vq->vq_avail_idx;\n+\ttxvq->stats.packets += nb_pkts;\n \n \tif (likely(nb_pkts)) {\n-\t\tif (unlikely(virtqueue_kick_prepare(txvq)))\n-\t\t\tvirtqueue_notify(txvq);\n+\t\tif (unlikely(virtqueue_kick_prepare(vq)))\n+\t\t\tvirtqueue_notify(vq);\n \t}\n \n \treturn nb_pkts;\n }\n \n int __attribute__((cold))\n-virtio_rxq_vec_setup(struct virtqueue *rxq)\n+virtio_rxq_vec_setup(struct virtnet_rx *rxq)\n {\n \tuintptr_t p;\n \tstruct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */\ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex 4e543d2..3d0e443 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -153,23 +153,29 @@ struct virtio_pmd_ctrl {\n \tuint8_t data[VIRTIO_MAX_CTRL_DATA];\n };\n \n+struct vq_desc_extra {\n+\tvoid *cookie;\n+\tuint16_t ndescs;\n+};\n+\n struct virtqueue {\n-\tstruct virtio_hw         *hw;     /**< virtio_hw structure pointer. */\n-\tconst struct rte_memzone *mz;     /**< mem zone to populate RX ring. */\n-\tconst struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. */\n-\tstruct rte_mempool       *mpool;  /**< mempool for mbuf allocation */\n-\tuint16_t    queue_id;             /**< DPDK queue index. */\n-\tuint8_t     port_id;              /**< Device port identifier. */\n-\tuint16_t    vq_queue_index;       /**< PCI queue index */\n-\n-\tvoid        *vq_ring_virt_mem;    /**< linear address of vring*/\n+\tstruct virtio_hw  *hw; /**< virtio_hw structure pointer. */\n+\tstruct vring vq_ring;  /**< vring keeping desc, used and avail */\n+\t/**\n+\t * Last consumed descriptor in the used table,\n+\t * trails vq_ring.used->idx.\n+\t */\n+\tuint16_t vq_used_cons_idx;\n+\tuint16_t vq_nentries;  /**< vring desc numbers */\n+\tuint16_t vq_free_cnt;  /**< num of desc available */\n+\tuint16_t vq_avail_idx; /**< sync until needed */\n+\tuint16_t vq_free_thresh; /**< free threshold */\n+\n+\tvoid *vq_ring_virt_mem;  /**< linear address of vring*/\n \tunsigned int vq_ring_size;\n-\tphys_addr_t vq_ring_mem;          /**< physical address of vring */\n \n-\tstruct vring vq_ring;    /**< vring keeping desc, used and avail */\n-\tuint16_t    vq_free_cnt; /**< num of desc available */\n-\tuint16_t    vq_nentries; /**< vring desc numbers */\n-\tuint16_t    vq_free_thresh; /**< free threshold */\n+\tphys_addr_t vq_ring_mem; /**< physical address of vring */\n+\n \t/**\n \t * Head of the free chain in the descriptor table. If\n \t * there are no free descriptors, this will be set to\n@@ -177,38 +183,14 @@ struct virtqueue {\n \t */\n \tuint16_t  vq_desc_head_idx;\n \tuint16_t  vq_desc_tail_idx;\n-\t/**\n-\t * Last consumed descriptor in the used table,\n-\t * trails vq_ring.used->idx.\n-\t */\n-\tuint16_t vq_used_cons_idx;\n-\tuint16_t vq_avail_idx;\n-\tuint64_t mbuf_initializer; /**< value to init mbufs. */\n-\tphys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */\n-\n-\tstruct rte_mbuf **sw_ring; /**< RX software ring. */\n-\t/* dummy mbuf, for wraparound when processing RX ring. */\n-\tstruct rte_mbuf fake_mbuf;\n-\n-\t/* Statistics */\n-\tuint64_t\tpackets;\n-\tuint64_t\tbytes;\n-\tuint64_t\terrors;\n-\tuint64_t\tmulticast;\n-\tuint64_t\tbroadcast;\n-\t/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */\n-\tuint64_t\tsize_bins[8];\n-\n-\tuint16_t\t*notify_addr;\n-\n-\tint\t\tconfigured;\n-\n-\tstruct vq_desc_extra {\n-\t\tvoid              *cookie;\n-\t\tuint16_t          ndescs;\n-\t} vq_descx[0];\n+\tuint16_t  vq_queue_index;   /**< PCI queue index */\n+\tuint16_t  *notify_addr;\n+\tint configured;\n+\tstruct rte_mbuf **sw_ring;  /**< RX software ring. */\n+\tstruct vq_desc_extra vq_descx[0];\n };\n \n+\n /* If multiqueue is provided by host, then we suppport it. */\n #define VIRTIO_NET_CTRL_MQ   4\n #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0\n",
    "prefixes": [
        "dpdk-dev",
        "v3"
    ]
}