get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/114116/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 114116,
    "url": "https://patches.dpdk.org/api/patches/114116/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220722135320.109269-1-david.marchand@redhat.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220722135320.109269-1-david.marchand@redhat.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220722135320.109269-1-david.marchand@redhat.com",
    "date": "2022-07-22T13:53:19",
    "name": "[1/2] vhost: keep a reference to virtqueue index",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "75754afdadae4d8aa94504ebd539669243eb217a",
    "submitter": {
        "id": 1173,
        "url": "https://patches.dpdk.org/api/people/1173/?format=api",
        "name": "David Marchand",
        "email": "david.marchand@redhat.com"
    },
    "delegate": {
        "id": 2642,
        "url": "https://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220722135320.109269-1-david.marchand@redhat.com/mbox/",
    "series": [
        {
            "id": 24052,
            "url": "https://patches.dpdk.org/api/series/24052/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=24052",
            "date": "2022-07-22T13:53:20",
            "name": "[1/2] vhost: keep a reference to virtqueue index",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/24052/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/114116/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/114116/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id AAEEBA0032;\n\tFri, 22 Jul 2022 15:53:51 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 00775427F9;\n\tFri, 22 Jul 2022 15:53:50 +0200 (CEST)",
            "from us-smtp-delivery-124.mimecast.com\n (us-smtp-delivery-124.mimecast.com [170.10.133.124])\n by mails.dpdk.org (Postfix) with ESMTP id 3168C427F9\n for <dev@dpdk.org>; Fri, 22 Jul 2022 15:53:48 +0200 (CEST)",
            "from mimecast-mx02.redhat.com (mimecast-mx02.redhat.com\n [66.187.233.88]) by relay.mimecast.com with ESMTP with STARTTLS\n (version=TLSv1.2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n us-mta-470--FUgTvuSPwyyD_o4ElLydA-1; Fri, 22 Jul 2022 09:53:38 -0400",
            "from smtp.corp.redhat.com (int-mx06.intmail.prod.int.rdu2.redhat.com\n [10.11.54.6])\n (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n (No client certificate requested)\n by mimecast-mx02.redhat.com (Postfix) with ESMTPS id C49EF811E81;\n Fri, 22 Jul 2022 13:53:37 +0000 (UTC)",
            "from localhost.localdomain (unknown [10.40.192.6])\n by smtp.corp.redhat.com (Postfix) with ESMTP id CBE112166B26;\n Fri, 22 Jul 2022 13:53:36 +0000 (UTC)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com;\n s=mimecast20190719; t=1658498027;\n h=from:from:reply-to:subject:subject:date:date:message-id:message-id:\n to:to:cc:cc:mime-version:mime-version:content-type:content-type:\n content-transfer-encoding:content-transfer-encoding;\n bh=LM77dc1Ae4RaKHCXX5LvcPKOMudjTOcncQQa7n+W5Y8=;\n b=AoiexMbE0IKsYVmyVUzqZBrTJx4Ik7uaq90zREujk0IPtOfBdiQUj80NcSH9YSQP+mG9HC\n vTDOWg0VEwq9Qyu4pyFAfsA7LvSh+L9hezLuRBIOF4rx+T28Cf2OHz/AKh61JkITu0/2jp\n D9Nr/nlBCZz7XxJvJ3vAhj26m0cAN6U=",
        "X-MC-Unique": "-FUgTvuSPwyyD_o4ElLydA-1",
        "From": "David Marchand <david.marchand@redhat.com>",
        "To": "dev@dpdk.org",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>,\n Chenbo Xia <chenbo.xia@intel.com>",
        "Subject": "[PATCH 1/2] vhost: keep a reference to virtqueue index",
        "Date": "Fri, 22 Jul 2022 15:53:19 +0200",
        "Message-Id": "<20220722135320.109269-1-david.marchand@redhat.com>",
        "MIME-Version": "1.0",
        "X-Scanned-By": "MIMEDefang 2.78 on 10.11.54.6",
        "Authentication-Results": "relay.mimecast.com;\n auth=pass smtp.auth=CUSA124A263 smtp.mailfrom=david.marchand@redhat.com",
        "X-Mimecast-Spam-Score": "0",
        "X-Mimecast-Originator": "redhat.com",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain; charset=\"US-ASCII\"; x-default=true",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Having a back reference to the index of the vq in the dev->virtqueue[]\narray makes it possible to unify the internal API with only passing dev\nand vq.\nIt also allows displaying the vq index in log messages.\n\nRemove virtqueue index checks were unneeded (like in static helpers\ncalled from a loop on all available virtqueue).\nMove virtqueue index validity checks the sooner possible.\n\nSigned-off-by: David Marchand <david.marchand@redhat.com>\n---\n lib/vhost/iotlb.c      |  5 +--\n lib/vhost/iotlb.h      |  2 +-\n lib/vhost/vhost.c      | 74 ++++++++++++++----------------------\n lib/vhost/vhost.h      |  3 ++\n lib/vhost/vhost_user.c | 58 +++++++++++++---------------\n lib/vhost/virtio_net.c | 86 +++++++++++++++++++-----------------------\n 6 files changed, 98 insertions(+), 130 deletions(-)",
    "diff": "diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c\nindex 35b4193606..dd35338ec0 100644\n--- a/lib/vhost/iotlb.c\n+++ b/lib/vhost/iotlb.c\n@@ -293,10 +293,9 @@ vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)\n }\n \n int\n-vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)\n+vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n \tchar pool_name[RTE_MEMPOOL_NAMESIZE];\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[vq_index];\n \tint socket = 0;\n \n \tif (vq->iotlb_pool) {\n@@ -319,7 +318,7 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)\n \tTAILQ_INIT(&vq->iotlb_pending_list);\n \n \tsnprintf(pool_name, sizeof(pool_name), \"iotlb_%u_%d_%d\",\n-\t\t\tgetpid(), dev->vid, vq_index);\n+\t\t\tgetpid(), dev->vid, vq->index);\n \tVHOST_LOG_CONFIG(dev->ifname, DEBUG, \"IOTLB cache name: %s\\n\", pool_name);\n \n \t/* If already created, free it and recreate */\ndiff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h\nindex 8d0ff7473b..738e31e7b9 100644\n--- a/lib/vhost/iotlb.h\n+++ b/lib/vhost/iotlb.h\n@@ -47,6 +47,6 @@ void vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqu\n void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,\n \t\t\t\t\t\tuint64_t size, uint8_t perm);\n void vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq);\n-int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);\n+int vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq);\n \n #endif /* _VHOST_IOTLB_H_ */\ndiff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c\nindex 60cb05a0ff..97bae0de91 100644\n--- a/lib/vhost/vhost.c\n+++ b/lib/vhost/vhost.c\n@@ -575,23 +575,10 @@ vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)\n }\n \n static void\n-init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n+init_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n-\tstruct vhost_virtqueue *vq;\n \tint numa_node = 
SOCKET_ID_ANY;\n \n-\tif (vring_idx >= VHOST_MAX_VRING) {\n-\t\tVHOST_LOG_CONFIG(dev->ifname, ERR, \"failed to init vring, out of bound (%d)\\n\",\n-\t\t\tvring_idx);\n-\t\treturn;\n-\t}\n-\n-\tvq = dev->virtqueue[vring_idx];\n-\tif (!vq) {\n-\t\tVHOST_LOG_CONFIG(dev->ifname, ERR, \"virtqueue not allocated (%d)\\n\", vring_idx);\n-\t\treturn;\n-\t}\n-\n \tmemset(vq, 0, sizeof(struct vhost_virtqueue));\n \n \tvq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;\n@@ -607,32 +594,20 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n #endif\n \tvq->numa_node = numa_node;\n \n-\tvhost_user_iotlb_init(dev, vring_idx);\n+\tvhost_user_iotlb_init(dev, vq);\n }\n \n static void\n-reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n+reset_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n-\tstruct vhost_virtqueue *vq;\n+\tuint32_t vring_idx;\n \tint callfd;\n \n-\tif (vring_idx >= VHOST_MAX_VRING) {\n-\t\tVHOST_LOG_CONFIG(dev->ifname, ERR, \"failed to reset vring, out of bound (%d)\\n\",\n-\t\t\tvring_idx);\n-\t\treturn;\n-\t}\n-\n-\tvq = dev->virtqueue[vring_idx];\n-\tif (!vq) {\n-\t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n-\t\t\t\"failed to reset vring, virtqueue not allocated (%d)\\n\",\n-\t\t\tvring_idx);\n-\t\treturn;\n-\t}\n-\n \tcallfd = vq->callfd;\n-\tinit_vring_queue(dev, vring_idx);\n+\tvring_idx = vq->index;\n+\tinit_vring_queue(dev, vq);\n \tvq->callfd = callfd;\n+\tvq->index = vring_idx;\n }\n \n int\n@@ -655,8 +630,9 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n \t\t}\n \n \t\tdev->virtqueue[i] = vq;\n-\t\tinit_vring_queue(dev, i);\n+\t\tinit_vring_queue(dev, vq);\n \t\trte_spinlock_init(&vq->access_lock);\n+\t\tvq->index = vring_idx;\n \t\tvq->avail_wrap_counter = 1;\n \t\tvq->used_wrap_counter = 1;\n \t\tvq->signalled_used_valid = false;\n@@ -681,8 +657,16 @@ reset_device(struct virtio_net *dev)\n \tdev->protocol_features = 0;\n \tdev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;\n \n-\tfor (i = 0; i < 
dev->nr_vring; i++)\n-\t\treset_vring_queue(dev, i);\n+\tfor (i = 0; i < dev->nr_vring; i++) {\n+\t\tstruct vhost_virtqueue *vq = dev->virtqueue[i];\n+\n+\t\tif (!vq) {\n+\t\t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n+\t\t\t\t\"failed to reset vring, virtqueue not allocated (%d)\\n\", i);\n+\t\t\tcontinue;\n+\t\t}\n+\t\treset_vring_queue(dev, vq);\n+\t}\n }\n \n /*\n@@ -1661,17 +1645,15 @@ rte_vhost_extern_callback_register(int vid,\n }\n \n static __rte_always_inline int\n-async_channel_register(int vid, uint16_t queue_id)\n+async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n-\tstruct virtio_net *dev = get_device(vid);\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[queue_id];\n \tstruct vhost_async *async;\n \tint node = vq->numa_node;\n \n \tif (unlikely(vq->async)) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\"async register failed: already registered (qid: %d)\\n\",\n-\t\t\tqueue_id);\n+\t\t\tvq->index);\n \t\treturn -1;\n \t}\n \n@@ -1679,7 +1661,7 @@ async_channel_register(int vid, uint16_t queue_id)\n \tif (!async) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\"failed to allocate async metadata (qid: %d)\\n\",\n-\t\t\tqueue_id);\n+\t\t\tvq->index);\n \t\treturn -1;\n \t}\n \n@@ -1688,7 +1670,7 @@ async_channel_register(int vid, uint16_t queue_id)\n \tif (!async->pkts_info) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\"failed to allocate async_pkts_info (qid: %d)\\n\",\n-\t\t\tqueue_id);\n+\t\t\tvq->index);\n \t\tgoto out_free_async;\n \t}\n \n@@ -1697,7 +1679,7 @@ async_channel_register(int vid, uint16_t queue_id)\n \tif (!async->pkts_cmpl_flag) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\"failed to allocate async pkts_cmpl_flag (qid: %d)\\n\",\n-\t\t\tqueue_id);\n+\t\t\tvq->index);\n \t\tgoto out_free_async;\n \t}\n \n@@ -1708,7 +1690,7 @@ async_channel_register(int vid, uint16_t queue_id)\n \t\tif (!async->buffers_packed) {\n \t\t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\t\"failed to allocate async 
buffers (qid: %d)\\n\",\n-\t\t\t\tqueue_id);\n+\t\t\t\tvq->index);\n \t\t\tgoto out_free_inflight;\n \t\t}\n \t} else {\n@@ -1718,7 +1700,7 @@ async_channel_register(int vid, uint16_t queue_id)\n \t\tif (!async->descs_split) {\n \t\t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\t\"failed to allocate async descs (qid: %d)\\n\",\n-\t\t\t\tqueue_id);\n+\t\t\t\tvq->index);\n \t\t\tgoto out_free_inflight;\n \t\t}\n \t}\n@@ -1753,7 +1735,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id)\n \t\treturn -1;\n \n \trte_spinlock_lock(&vq->access_lock);\n-\tret = async_channel_register(vid, queue_id);\n+\tret = async_channel_register(dev, vq);\n \trte_spinlock_unlock(&vq->access_lock);\n \n \treturn ret;\n@@ -1782,7 +1764,7 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)\n \t\treturn -1;\n \t}\n \n-\treturn async_channel_register(vid, queue_id);\n+\treturn async_channel_register(dev, vq);\n }\n \n int\ndiff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h\nindex 40fac3b7c6..c6260b54cc 100644\n--- a/lib/vhost/vhost.h\n+++ b/lib/vhost/vhost.h\n@@ -309,6 +309,9 @@ struct vhost_virtqueue {\n \t/* Currently unused as polling mode is enabled */\n \tint\t\t\tkickfd;\n \n+\t/* Index of this vq in dev->virtqueue[] */\n+\tuint32_t\t\tindex;\n+\n \t/* inflight share memory info */\n \tunion {\n \t\tstruct rte_vhost_inflight_info_split *inflight_split;\ndiff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c\nindex 4ad28bac45..73e69450fd 100644\n--- a/lib/vhost/vhost_user.c\n+++ b/lib/vhost/vhost_user.c\n@@ -240,22 +240,20 @@ vhost_backend_cleanup(struct virtio_net *dev)\n }\n \n static void\n-vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,\n-\t\t\t      int enable)\n+vhost_user_notify_queue_state(struct virtio_net *dev, struct vhost_virtqueue *vq,\n+\tint enable)\n {\n \tstruct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[index];\n \n \t/* Configure guest notifications on 
enable */\n \tif (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)\n \t\tvhost_enable_guest_notification(dev, vq, vq->notif_enable);\n \n \tif (vdpa_dev && vdpa_dev->ops->set_vring_state)\n-\t\tvdpa_dev->ops->set_vring_state(dev->vid, index, enable);\n+\t\tvdpa_dev->ops->set_vring_state(dev->vid, vq->index, enable);\n \n \tif (dev->notify_ops->vring_state_changed)\n-\t\tdev->notify_ops->vring_state_changed(dev->vid,\n-\t\t\t\tindex, enable);\n+\t\tdev->notify_ops->vring_state_changed(dev->vid, vq->index, enable);\n }\n \n /*\n@@ -493,12 +491,11 @@ vhost_user_set_vring_num(struct virtio_net **pdev,\n  * make them on the same numa node as the memory of vring descriptor.\n  */\n #ifdef RTE_LIBRTE_VHOST_NUMA\n-static struct virtio_net*\n-numa_realloc(struct virtio_net *dev, int index)\n+static struct virtio_net *\n+numa_realloc(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n \tint node, dev_node;\n \tstruct virtio_net *old_dev;\n-\tstruct vhost_virtqueue *vq;\n \tstruct batch_copy_elem *bce;\n \tstruct guest_page *gp;\n \tstruct rte_vhost_memory *mem;\n@@ -506,7 +503,6 @@ numa_realloc(struct virtio_net *dev, int index)\n \tint ret;\n \n \told_dev = dev;\n-\tvq = dev->virtqueue[index];\n \n \t/*\n \t * If VQ is ready, it is too late to reallocate, it certainly already\n@@ -519,7 +515,7 @@ numa_realloc(struct virtio_net *dev, int index)\n \tif (ret) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\"unable to get virtqueue %d numa information.\\n\",\n-\t\t\tindex);\n+\t\t\tvq->index);\n \t\treturn dev;\n \t}\n \n@@ -530,14 +526,14 @@ numa_realloc(struct virtio_net *dev, int index)\n \tif (!vq) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, ERR,\n \t\t\t\"failed to realloc virtqueue %d on node %d\\n\",\n-\t\t\tindex, node);\n+\t\t\tvq->index, node);\n \t\treturn dev;\n \t}\n \n-\tif (vq != dev->virtqueue[index]) {\n+\tif (vq != dev->virtqueue[vq->index]) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, INFO, \"reallocated virtqueue on node %d\\n\", 
node);\n-\t\tdev->virtqueue[index] = vq;\n-\t\tvhost_user_iotlb_init(dev, index);\n+\t\tdev->virtqueue[vq->index] = vq;\n+\t\tvhost_user_iotlb_init(dev, vq);\n \t}\n \n \tif (vq_is_packed(dev)) {\n@@ -665,8 +661,8 @@ numa_realloc(struct virtio_net *dev, int index)\n \treturn dev;\n }\n #else\n-static struct virtio_net*\n-numa_realloc(struct virtio_net *dev, int index __rte_unused)\n+static struct virtio_net *\n+numa_realloc(struct virtio_net *dev, struct vhost_virtqueue *vq __rte_unused)\n {\n \treturn dev;\n }\n@@ -739,9 +735,8 @@ log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)\n }\n \n static struct virtio_net *\n-translate_ring_addresses(struct virtio_net *dev, int vq_index)\n+translate_ring_addresses(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[vq_index];\n \tstruct vhost_vring_addr *addr = &vq->ring_addrs;\n \tuint64_t len, expected_len;\n \n@@ -765,8 +760,8 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)\n \t\t\treturn dev;\n \t\t}\n \n-\t\tdev = numa_realloc(dev, vq_index);\n-\t\tvq = dev->virtqueue[vq_index];\n+\t\tdev = numa_realloc(dev, vq);\n+\t\tvq = dev->virtqueue[vq->index];\n \t\taddr = &vq->ring_addrs;\n \n \t\tlen = sizeof(struct vring_packed_desc_event);\n@@ -807,8 +802,8 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)\n \t\treturn dev;\n \t}\n \n-\tdev = numa_realloc(dev, vq_index);\n-\tvq = dev->virtqueue[vq_index];\n+\tdev = numa_realloc(dev, vq);\n+\tvq = dev->virtqueue[vq->index];\n \taddr = &vq->ring_addrs;\n \n \tlen = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;\n@@ -887,7 +882,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,\n \tif ((vq->enabled && (dev->features &\n \t\t\t\t(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||\n \t\t\taccess_ok) {\n-\t\tdev = translate_ring_addresses(dev, ctx->msg.payload.addr.index);\n+\t\tdev = translate_ring_addresses(dev, vq);\n \t\tif (!dev)\n \t\t\treturn 
RTE_VHOST_MSG_RESULT_ERR;\n \n@@ -1396,7 +1391,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev,\n \t\t\t */\n \t\t\tvring_invalidate(dev, vq);\n \n-\t\t\tdev = translate_ring_addresses(dev, i);\n+\t\t\tdev = translate_ring_addresses(dev, vq);\n \t\t\tif (!dev) {\n \t\t\t\tdev = *pdev;\n \t\t\t\tgoto free_mem_table;\n@@ -1781,7 +1776,7 @@ vhost_user_set_vring_call(struct virtio_net **pdev,\n \n \tif (vq->ready) {\n \t\tvq->ready = false;\n-\t\tvhost_user_notify_queue_state(dev, file.index, 0);\n+\t\tvhost_user_notify_queue_state(dev, vq, 0);\n \t}\n \n \tif (vq->callfd >= 0)\n@@ -2029,7 +2024,8 @@ vhost_user_set_vring_kick(struct virtio_net **pdev,\n \t\tfile.index, file.fd);\n \n \t/* Interpret ring addresses only when ring is started. */\n-\tdev = translate_ring_addresses(dev, file.index);\n+\tvq = dev->virtqueue[file.index];\n+\tdev = translate_ring_addresses(dev, vq);\n \tif (!dev) {\n \t\tif (file.fd != VIRTIO_INVALID_EVENTFD)\n \t\t\tclose(file.fd);\n@@ -2039,8 +2035,6 @@ vhost_user_set_vring_kick(struct virtio_net **pdev,\n \n \t*pdev = dev;\n \n-\tvq = dev->virtqueue[file.index];\n-\n \t/*\n \t * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,\n \t * the ring starts already enabled. 
Otherwise, it is enabled via\n@@ -2052,7 +2046,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev,\n \n \tif (vq->ready) {\n \t\tvq->ready = false;\n-\t\tvhost_user_notify_queue_state(dev, file.index, 0);\n+\t\tvhost_user_notify_queue_state(dev, vq, 0);\n \t}\n \n \tif (vq->kickfd >= 0)\n@@ -2595,7 +2589,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,\n \n \t\t\tif (is_vring_iotlb(dev, vq, imsg)) {\n \t\t\t\trte_spinlock_lock(&vq->access_lock);\n-\t\t\t\t*pdev = dev = translate_ring_addresses(dev, i);\n+\t\t\t\t*pdev = dev = translate_ring_addresses(dev, vq);\n \t\t\t\trte_spinlock_unlock(&vq->access_lock);\n \t\t\t}\n \t\t}\n@@ -3159,7 +3153,7 @@ vhost_user_msg_handler(int vid, int fd)\n \n \t\tif (cur_ready != (vq && vq->ready)) {\n \t\t\tvq->ready = cur_ready;\n-\t\t\tvhost_user_notify_queue_state(dev, i, cur_ready);\n+\t\t\tvhost_user_notify_queue_state(dev, vq, cur_ready);\n \t\t}\n \t}\n \ndiff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex 35fa4670fd..467dfb203f 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -1555,22 +1555,12 @@ virtio_dev_rx_packed(struct virtio_net *dev,\n }\n \n static __rte_always_inline uint32_t\n-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,\n+virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tstruct rte_mbuf **pkts, uint32_t count)\n {\n-\tstruct vhost_virtqueue *vq;\n \tuint32_t nb_tx = 0;\n \n \tVHOST_LOG_DATA(dev->ifname, DEBUG, \"%s\\n\", __func__);\n-\tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(dev->ifname, ERR,\n-\t\t\t\"%s: invalid virtqueue idx %d.\\n\",\n-\t\t\t__func__, queue_id);\n-\t\treturn 0;\n-\t}\n-\n-\tvq = dev->virtqueue[queue_id];\n-\n \trte_spinlock_lock(&vq->access_lock);\n \n \tif (unlikely(!vq->enabled))\n@@ -1620,7 +1610,14 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,\n \t\treturn 0;\n \t}\n \n-\treturn virtio_dev_rx(dev, queue_id, pkts, count);\n+\tif 
(unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n+\t\tVHOST_LOG_DATA(dev->ifname, ERR,\n+\t\t\t\"%s: invalid virtqueue idx %d.\\n\",\n+\t\t\t__func__, queue_id);\n+\t\treturn 0;\n+\t}\n+\n+\treturn virtio_dev_rx(dev, dev->virtqueue[queue_id], pkts, count);\n }\n \n static __rte_always_inline uint16_t\n@@ -1669,8 +1666,7 @@ store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,\n \n static __rte_noinline uint32_t\n virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,\n-\t\tuint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,\n-\t\tint16_t dma_id, uint16_t vchan_id)\n+\tstruct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)\n {\n \tstruct buf_vector buf_vec[BUF_VECTOR_MAX];\n \tuint32_t pkt_idx = 0;\n@@ -1732,7 +1728,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue\n \n \t\tVHOST_LOG_DATA(dev->ifname, DEBUG,\n \t\t\t\"%s: failed to transfer %u packets for queue %u.\\n\",\n-\t\t\t__func__, pkt_err, queue_id);\n+\t\t\t__func__, pkt_err, vq->index);\n \n \t\t/* update number of completed packets */\n \t\tpkt_idx = n_xfer;\n@@ -1878,8 +1874,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,\n \n static __rte_noinline uint32_t\n virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n-\t\tuint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,\n-\t\tint16_t dma_id, uint16_t vchan_id)\n+\tstruct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)\n {\n \tuint32_t pkt_idx = 0;\n \tuint32_t remained = count;\n@@ -1924,7 +1919,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue\n \tif (unlikely(pkt_err)) {\n \t\tVHOST_LOG_DATA(dev->ifname, DEBUG,\n \t\t\t\"%s: failed to transfer %u packets for queue %u.\\n\",\n-\t\t\t__func__, pkt_err, queue_id);\n+\t\t\t__func__, pkt_err, vq->index);\n \t\tdma_error_handler_packed(vq, slot_idx, pkt_err, 
&pkt_idx);\n \t}\n \n@@ -2045,11 +2040,9 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,\n }\n \n static __rte_always_inline uint16_t\n-vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,\n-\t\tstruct rte_mbuf **pkts, uint16_t count, int16_t dma_id,\n-\t\tuint16_t vchan_id)\n+vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,\n+\tstruct rte_mbuf **pkts, uint16_t count, int16_t dma_id, uint16_t vchan_id)\n {\n-\tstruct vhost_virtqueue *vq = dev->virtqueue[queue_id];\n \tstruct vhost_async *async = vq->async;\n \tstruct async_inflight_info *pkts_info = async->pkts_info;\n \tuint16_t nr_cpl_pkts = 0;\n@@ -2156,7 +2149,7 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \t\tgoto out;\n \t}\n \n-\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);\n+\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);\n \n \tvhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);\n \tvq->stats.inflight_completed += n_pkts_cpl;\n@@ -2216,12 +2209,11 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,\n \t}\n \n \tif ((queue_id & 1) == 0)\n-\t\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,\n-\t\t\t\t\tpkts, count, dma_id, vchan_id);\n-\telse {\n+\t\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count,\n+\t\t\tdma_id, vchan_id);\n+\telse\n \t\tn_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,\n-\t\t\t\t\tdma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);\n-\t}\n+\t\t\tdma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);\n \n \tvhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);\n \tvq->stats.inflight_completed += n_pkts_cpl;\n@@ -2275,12 +2267,11 @@ rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,\n \t}\n \n \tif ((queue_id & 1) == 0)\n-\t\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,\n-\t\t\t\tpkts, count, dma_id, vchan_id);\n-\telse 
{\n+\t\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count,\n+\t\t\tdma_id, vchan_id);\n+\telse\n \t\tn_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,\n-\t\t\t\t\tdma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);\n-\t}\n+\t\t\tdma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);\n \n \tvhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);\n \tvq->stats.inflight_completed += n_pkts_cpl;\n@@ -2292,19 +2283,12 @@ rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,\n }\n \n static __rte_always_inline uint32_t\n-virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,\n+virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tstruct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)\n {\n-\tstruct vhost_virtqueue *vq;\n \tuint32_t nb_tx = 0;\n \n \tVHOST_LOG_DATA(dev->ifname, DEBUG, \"%s\\n\", __func__);\n-\tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(dev->ifname, ERR,\n-\t\t\t\"%s: invalid virtqueue idx %d.\\n\",\n-\t\t\t__func__, queue_id);\n-\t\treturn 0;\n-\t}\n \n \tif (unlikely(!dma_copy_track[dma_id].vchans ||\n \t\t\t\t!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {\n@@ -2314,8 +2298,6 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,\n \t\treturn 0;\n \t}\n \n-\tvq = dev->virtqueue[queue_id];\n-\n \trte_spinlock_lock(&vq->access_lock);\n \n \tif (unlikely(!vq->enabled || !vq->async))\n@@ -2333,11 +2315,11 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,\n \t\tgoto out;\n \n \tif (vq_is_packed(dev))\n-\t\tnb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,\n-\t\t\t\tpkts, count, dma_id, vchan_id);\n+\t\tnb_tx = virtio_dev_rx_async_submit_packed(dev, vq, pkts, count,\n+\t\t\tdma_id, vchan_id);\n \telse\n-\t\tnb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,\n-\t\t\t\tpkts, count, dma_id, vchan_id);\n+\t\tnb_tx = 
virtio_dev_rx_async_submit_split(dev, vq, pkts, count,\n+\t\t\tdma_id, vchan_id);\n \n \tvq->stats.inflight_submitted += nb_tx;\n \n@@ -2368,7 +2350,15 @@ rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,\n \t\treturn 0;\n \t}\n \n-\treturn virtio_dev_rx_async_submit(dev, queue_id, pkts, count, dma_id, vchan_id);\n+\tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n+\t\tVHOST_LOG_DATA(dev->ifname, ERR,\n+\t\t\t\"%s: invalid virtqueue idx %d.\\n\",\n+\t\t\t__func__, queue_id);\n+\t\treturn 0;\n+\t}\n+\n+\treturn virtio_dev_rx_async_submit(dev, dev->virtqueue[queue_id], pkts, count,\n+\t\tdma_id, vchan_id);\n }\n \n static inline bool\n",
    "prefixes": [
        "1/2"
    ]
}