get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (full replacement of writable fields).

GET /api/patches/73360/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 73360,
    "url": "https://patches.dpdk.org/api/patches/73360/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200707050709.205480-2-patrick.fu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200707050709.205480-2-patrick.fu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200707050709.205480-2-patrick.fu@intel.com",
    "date": "2020-07-07T05:07:08",
    "name": "[v6,1/2] vhost: introduce async enqueue registration API",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "18c3fd2b2c846158d6164ff8bb622310fb2a4776",
    "submitter": {
        "id": 1781,
        "url": "https://patches.dpdk.org/api/people/1781/?format=api",
        "name": "Patrick Fu",
        "email": "patrick.fu@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200707050709.205480-2-patrick.fu@intel.com/mbox/",
    "series": [
        {
            "id": 10832,
            "url": "https://patches.dpdk.org/api/series/10832/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=10832",
            "date": "2020-07-07T05:07:07",
            "name": "introduce asynchronous data path for vhost",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/10832/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/73360/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/73360/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4609AA00BE;\n\tTue,  7 Jul 2020 07:07:16 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 6190D1DA3D;\n\tTue,  7 Jul 2020 07:07:06 +0200 (CEST)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by dpdk.org (Postfix) with ESMTP id B30F51D9B9\n for <dev@dpdk.org>; Tue,  7 Jul 2020 07:07:04 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 06 Jul 2020 22:07:04 -0700",
            "from npg-dpdk-patrickfu-casc2.sh.intel.com ([10.67.119.92])\n by orsmga004.jf.intel.com with ESMTP; 06 Jul 2020 22:07:01 -0700"
        ],
        "IronPort-SDR": [
            "\n 05YrGrvlQ7kjhXcLK3tmE1OOu9QECi4LbkgSdrYVePaE/JZn6JkAQDGVvCV0Eud+UOaxGBNLkX\n v4YP9GYh4h2A==",
            "\n a9pntyeCJWStM606ClgSBF3dHGGjdioIS5KPCUxNT1FHtUiyYjyow9LJiScrMGKsvmlZvSFM4b\n Zzow3nfzUrcg=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9674\"; a=\"135005151\"",
            "E=Sophos;i=\"5.75,321,1589266800\"; d=\"scan'208\";a=\"135005151\"",
            "E=Sophos;i=\"5.75,321,1589266800\"; d=\"scan'208\";a=\"427346657\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "patrick.fu@intel.com",
        "To": "dev@dpdk.org, maxime.coquelin@redhat.com, chenbo.xia@intel.com,\n zhihong.wang@intel.com",
        "Cc": "patrick.fu@intel.com, yinan.wang@intel.com, cheng1.jiang@intel.com,\n cunming.liang@intel.com",
        "Date": "Tue,  7 Jul 2020 13:07:08 +0800",
        "Message-Id": "<20200707050709.205480-2-patrick.fu@intel.com>",
        "X-Mailer": "git-send-email 2.18.4",
        "In-Reply-To": "<20200707050709.205480-1-patrick.fu@intel.com>",
        "References": "<1591869725-13331-1-git-send-email-patrick.fu@intel.com>\n <20200707050709.205480-1-patrick.fu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v6 1/2] vhost: introduce async enqueue\n\tregistration API",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Patrick Fu <patrick.fu@intel.com>\n\nPerforming large memory copies usually takes up a major part of CPU\ncycles and becomes the hot spot in vhost-user enqueue operation. To\noffload the large copies from CPU to the DMA devices, asynchronous\nAPIs are introduced, with which the CPU just submits copy jobs to\nthe DMA but without waiting for its copy completion. Thus, there is\nno CPU intervention during data transfer. We can save precious CPU\ncycles and improve the overall throughput for vhost-user based\napplications. This patch introduces registration/un-registration\nAPIs for vhost async data enqueue operation. Together with the\nregistration APIs implementations, data structures and the prototype\nof the async callback functions required for async enqueue data path\nare also defined.\n\nSigned-off-by: Patrick Fu <patrick.fu@intel.com>\n---\n lib/librte_vhost/Makefile              |   2 +-\n lib/librte_vhost/meson.build           |   2 +-\n lib/librte_vhost/rte_vhost.h           |   1 +\n lib/librte_vhost/rte_vhost_async.h     | 136 +++++++++++++++++++++++++\n lib/librte_vhost/rte_vhost_version.map |   4 +\n lib/librte_vhost/socket.c              |  27 +++++\n lib/librte_vhost/vhost.c               | 127 ++++++++++++++++++++++-\n lib/librte_vhost/vhost.h               |  30 +++++-\n lib/librte_vhost/vhost_user.c          |  23 ++++-\n 9 files changed, 345 insertions(+), 7 deletions(-)\n create mode 100644 lib/librte_vhost/rte_vhost_async.h",
    "diff": "diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile\nindex b7ff7dc4b..4f2f3e47d 100644\n--- a/lib/librte_vhost/Makefile\n+++ b/lib/librte_vhost/Makefile\n@@ -42,7 +42,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := fd_man.c iotlb.c socket.c vhost.c \\\n \n # install includes\n SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_vhost.h rte_vdpa.h \\\n-\t\t\t\t\t\trte_vdpa_dev.h\n+\t\t\t\t\t\trte_vdpa_dev.h rte_vhost_async.h\n \n # only compile vhost crypto when cryptodev is enabled\n ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)\ndiff --git a/lib/librte_vhost/meson.build b/lib/librte_vhost/meson.build\nindex 882a0eaf4..cc9aa65c6 100644\n--- a/lib/librte_vhost/meson.build\n+++ b/lib/librte_vhost/meson.build\n@@ -22,5 +22,5 @@ sources = files('fd_man.c', 'iotlb.c', 'socket.c', 'vdpa.c',\n \t\t'vhost.c', 'vhost_user.c',\n \t\t'virtio_net.c', 'vhost_crypto.c')\n headers = files('rte_vhost.h', 'rte_vdpa.h', 'rte_vdpa_dev.h',\n-\t\t'rte_vhost_crypto.h')\n+\t\t'rte_vhost_crypto.h', 'rte_vhost_async.h')\n deps += ['ethdev', 'cryptodev', 'hash', 'pci']\ndiff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h\nindex 8a5c332c8..f93f9595a 100644\n--- a/lib/librte_vhost/rte_vhost.h\n+++ b/lib/librte_vhost/rte_vhost.h\n@@ -35,6 +35,7 @@ extern \"C\" {\n #define RTE_VHOST_USER_EXTBUF_SUPPORT\t(1ULL << 5)\n /* support only linear buffers (no chained mbufs) */\n #define RTE_VHOST_USER_LINEARBUF_SUPPORT\t(1ULL << 6)\n+#define RTE_VHOST_USER_ASYNC_COPY\t(1ULL << 7)\n \n /* Features. 
*/\n #ifndef VIRTIO_NET_F_GUEST_ANNOUNCE\ndiff --git a/lib/librte_vhost/rte_vhost_async.h b/lib/librte_vhost/rte_vhost_async.h\nnew file mode 100644\nindex 000000000..d5a59279a\n--- /dev/null\n+++ b/lib/librte_vhost/rte_vhost_async.h\n@@ -0,0 +1,136 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#ifndef _RTE_VHOST_ASYNC_H_\n+#define _RTE_VHOST_ASYNC_H_\n+\n+#include \"rte_vhost.h\"\n+\n+/**\n+ * iovec iterator\n+ */\n+struct rte_vhost_iov_iter {\n+\t/** offset to the first byte of interesting data */\n+\tsize_t offset;\n+\t/** total bytes of data in this iterator */\n+\tsize_t count;\n+\t/** pointer to the iovec array */\n+\tstruct iovec *iov;\n+\t/** number of iovec in this iterator */\n+\tunsigned long nr_segs;\n+};\n+\n+/**\n+ * dma transfer descriptor pair\n+ */\n+struct rte_vhost_async_desc {\n+\t/** source memory iov_iter */\n+\tstruct rte_vhost_iov_iter *src;\n+\t/** destination memory iov_iter */\n+\tstruct rte_vhost_iov_iter *dst;\n+};\n+\n+/**\n+ * dma transfer status\n+ */\n+struct rte_vhost_async_status {\n+\t/** An array of application specific data for source memory */\n+\tuintptr_t *src_opaque_data;\n+\t/** An array of application specific data for destination memory */\n+\tuintptr_t *dst_opaque_data;\n+};\n+\n+/**\n+ * dma operation callbacks to be implemented by applications\n+ */\n+struct rte_vhost_async_channel_ops {\n+\t/**\n+\t * instruct async engines to perform copies for a batch of packets\n+\t *\n+\t * @param vid\n+\t *  id of vhost device to perform data copies\n+\t * @param queue_id\n+\t *  queue id to perform data copies\n+\t * @param descs\n+\t *  an array of DMA transfer memory descriptors\n+\t * @param opaque_data\n+\t *  opaque data pair sending to DMA engine\n+\t * @param count\n+\t *  number of elements in the \"descs\" array\n+\t * @return\n+\t *  -1 on failure, number of descs processed on success\n+\t */\n+\tint (*transfer_data)(int vid, uint16_t queue_id,\n+\t\tstruct 
rte_vhost_async_desc *descs,\n+\t\tstruct rte_vhost_async_status *opaque_data,\n+\t\tuint16_t count);\n+\t/**\n+\t * check copy-completed packets from the async engine\n+\t * @param vid\n+\t *  id of vhost device to check copy completion\n+\t * @param queue_id\n+\t *  queue id to check copyp completion\n+\t * @param opaque_data\n+\t *  buffer to receive the opaque data pair from DMA engine\n+\t * @param max_packets\n+\t *  max number of packets could be completed\n+\t * @return\n+\t *  -1 on failure, number of iov segments completed on success\n+\t */\n+\tint (*check_completed_copies)(int vid, uint16_t queue_id,\n+\t\tstruct rte_vhost_async_status *opaque_data,\n+\t\tuint16_t max_packets);\n+};\n+\n+/**\n+ *  dma channel feature bit definition\n+ */\n+struct rte_vhost_async_features {\n+\tunion {\n+\t\tuint32_t intval;\n+\t\tstruct {\n+\t\t\tuint32_t async_inorder:1;\n+\t\t\tuint32_t resvd_0:15;\n+\t\t\tuint32_t async_threshold:12;\n+\t\t\tuint32_t resvd_1:4;\n+\t\t};\n+\t};\n+};\n+\n+/**\n+ * register a async channel for vhost\n+ *\n+ * @param vid\n+ *  vhost device id async channel to be attached to\n+ * @param queue_id\n+ *  vhost queue id async channel to be attached to\n+ * @param features\n+ *  DMA channel feature bit\n+ *    b0       : DMA supports inorder data transfer\n+ *    b1  - b15: reserved\n+ *    b16 - b27: Packet length threshold for DMA transfer\n+ *    b28 - b31: reserved\n+ * @param ops\n+ *  DMA operation callbacks\n+ * @return\n+ *  0 on success, -1 on failures\n+ */\n+__rte_experimental\n+int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n+\tuint32_t features, struct rte_vhost_async_channel_ops *ops);\n+\n+/**\n+ * unregister a dma channel for vhost\n+ *\n+ * @param vid\n+ *  vhost device id DMA channel to be detached\n+ * @param queue_id\n+ *  vhost queue id DMA channel to be detached\n+ * @return\n+ *  0 on success, -1 on failures\n+ */\n+__rte_experimental\n+int rte_vhost_async_channel_unregister(int vid, uint16_t 
queue_id);\n+\n+#endif /* _RTE_VHOST_ASYNC_H_ */\ndiff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map\nindex 86784405a..13ec53b63 100644\n--- a/lib/librte_vhost/rte_vhost_version.map\n+++ b/lib/librte_vhost/rte_vhost_version.map\n@@ -71,4 +71,8 @@ EXPERIMENTAL {\n \trte_vdpa_get_queue_num;\n \trte_vdpa_get_features;\n \trte_vdpa_get_protocol_features;\n+\trte_vhost_async_channel_register;\n+\trte_vhost_async_channel_unregister;\n+\trte_vhost_submit_enqueue_burst;\n+\trte_vhost_poll_enqueue_completed;\n };\ndiff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c\nindex 49267cebf..c4626d2c4 100644\n--- a/lib/librte_vhost/socket.c\n+++ b/lib/librte_vhost/socket.c\n@@ -42,6 +42,7 @@ struct vhost_user_socket {\n \tbool use_builtin_virtio_net;\n \tbool extbuf;\n \tbool linearbuf;\n+\tbool async_copy;\n \n \t/*\n \t * The \"supported_features\" indicates the feature bits the\n@@ -205,6 +206,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)\n \tsize_t size;\n \tstruct vhost_user_connection *conn;\n \tint ret;\n+\tstruct virtio_net *dev;\n \n \tif (vsocket == NULL)\n \t\treturn;\n@@ -236,6 +238,13 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)\n \tif (vsocket->linearbuf)\n \t\tvhost_enable_linearbuf(vid);\n \n+\tif (vsocket->async_copy) {\n+\t\tdev = get_device(vid);\n+\n+\t\tif (dev)\n+\t\t\tdev->async_copy = 1;\n+\t}\n+\n \tVHOST_LOG_CONFIG(INFO, \"new device, handle is %d\\n\", vid);\n \n \tif (vsocket->notify_ops->new_connection) {\n@@ -881,6 +890,17 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \t\tgoto out_mutex;\n \t}\n \n+\tvsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;\n+\n+\tif (vsocket->async_copy &&\n+\t\t(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |\n+\t\tRTE_VHOST_USER_POSTCOPY_SUPPORT))) {\n+\t\tVHOST_LOG_CONFIG(ERR, \"error: enabling async copy and IOMMU \"\n+\t\t\t\"or post-copy feature simultaneously is not 
\"\n+\t\t\t\"supported\\n\");\n+\t\tgoto out_mutex;\n+\t}\n+\n \t/*\n \t * Set the supported features correctly for the builtin vhost-user\n \t * net driver.\n@@ -931,6 +951,13 @@ rte_vhost_driver_register(const char *path, uint64_t flags)\n \t\t\t~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);\n \t}\n \n+\tif (vsocket->async_copy) {\n+\t\tvsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);\n+\t\tvsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);\n+\t\tVHOST_LOG_CONFIG(INFO,\n+\t\t\t\"Logging feature is disabled in async copy mode\\n\");\n+\t}\n+\n \t/*\n \t * We'll not be able to receive a buffer from guest in linear mode\n \t * without external buffer if it will not fit in a single mbuf, which is\ndiff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c\nindex 0d822d6a3..a11385f39 100644\n--- a/lib/librte_vhost/vhost.c\n+++ b/lib/librte_vhost/vhost.c\n@@ -332,8 +332,13 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n \tif (vq_is_packed(dev))\n \t\trte_free(vq->shadow_used_packed);\n-\telse\n+\telse {\n \t\trte_free(vq->shadow_used_split);\n+\t\tif (vq->async_pkts_pending)\n+\t\t\trte_free(vq->async_pkts_pending);\n+\t\tif (vq->async_pending_info)\n+\t\t\trte_free(vq->async_pending_info);\n+\t}\n \trte_free(vq->batch_copy_elems);\n \trte_mempool_free(vq->iotlb_pool);\n \trte_free(vq);\n@@ -1522,3 +1527,123 @@ RTE_INIT(vhost_log_init)\n \tif (vhost_data_log_level >= 0)\n \t\trte_log_set_level(vhost_data_log_level, RTE_LOG_WARNING);\n }\n+\n+int rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n+\t\t\t\t\tuint32_t features,\n+\t\t\t\t\tstruct rte_vhost_async_channel_ops *ops)\n+{\n+\tstruct vhost_virtqueue *vq;\n+\tstruct virtio_net *dev = get_device(vid);\n+\tstruct rte_vhost_async_features f;\n+\n+\tif (dev == NULL || ops == NULL)\n+\t\treturn -1;\n+\n+\tf.intval = features;\n+\n+\tvq = dev->virtqueue[queue_id];\n+\n+\tif (unlikely(vq == NULL || !dev->async_copy))\n+\t\treturn -1;\n+\n+\t/* packed queue is not supported 
*/\n+\tif (unlikely(vq_is_packed(dev) || !f.async_inorder)) {\n+\t\tVHOST_LOG_CONFIG(ERR,\n+\t\t\t\"async copy is not supported on packed queue or non-inorder mode \"\n+\t\t\t\"(vid %d, qid: %d)\\n\", vid, queue_id);\n+\t\treturn -1;\n+\t}\n+\n+\tif (unlikely(ops->check_completed_copies == NULL ||\n+\t\tops->transfer_data == NULL))\n+\t\treturn -1;\n+\n+\trte_spinlock_lock(&vq->access_lock);\n+\n+\tif (unlikely(vq->async_registered)) {\n+\t\tVHOST_LOG_CONFIG(ERR,\n+\t\t\t\"async register failed: channel already registered \"\n+\t\t\t\"(vid %d, qid: %d)\\n\", vid, queue_id);\n+\t\tgoto reg_out;\n+\t}\n+\n+\tvq->async_pkts_pending = rte_malloc(NULL,\n+\t\t\tvq->size * sizeof(uintptr_t),\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\tvq->async_pending_info = rte_malloc(NULL,\n+\t\t\tvq->size * sizeof(uint64_t),\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\tif (!vq->async_pkts_pending || !vq->async_pending_info) {\n+\t\tif (vq->async_pkts_pending)\n+\t\t\trte_free(vq->async_pkts_pending);\n+\n+\t\tif (vq->async_pending_info)\n+\t\t\trte_free(vq->async_pending_info);\n+\n+\t\tVHOST_LOG_CONFIG(ERR,\n+\t\t\t\t\"async register failed: cannot allocate memory for vq data \"\n+\t\t\t\t\"(vid %d, qid: %d)\\n\", vid, queue_id);\n+\t\tgoto reg_out;\n+\t}\n+\n+\tvq->async_ops.check_completed_copies = ops->check_completed_copies;\n+\tvq->async_ops.transfer_data = ops->transfer_data;\n+\n+\tvq->async_inorder = f.async_inorder;\n+\tvq->async_threshold = f.async_threshold;\n+\n+\tvq->async_registered = true;\n+\n+reg_out:\n+\trte_spinlock_unlock(&vq->access_lock);\n+\n+\treturn 0;\n+}\n+\n+int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)\n+{\n+\tstruct vhost_virtqueue *vq;\n+\tstruct virtio_net *dev = get_device(vid);\n+\tint ret = -1;\n+\n+\tif (dev == NULL)\n+\t\treturn ret;\n+\n+\tvq = dev->virtqueue[queue_id];\n+\n+\tif (vq == NULL)\n+\t\treturn ret;\n+\n+\tret = 0;\n+\trte_spinlock_lock(&vq->access_lock);\n+\n+\tif (!vq->async_registered)\n+\t\tgoto out;\n+\n+\tif 
(vq->async_pkts_inflight_n) {\n+\t\tVHOST_LOG_CONFIG(ERR, \"Failed to unregister async channel. \"\n+\t\t\t\"async inflight packets must be completed before unregistration.\\n\");\n+\t\tret = -1;\n+\t\tgoto out;\n+\t}\n+\n+\tif (vq->async_pkts_pending) {\n+\t\trte_free(vq->async_pkts_pending);\n+\t\tvq->async_pkts_pending = NULL;\n+\t}\n+\n+\tif (vq->async_pending_info) {\n+\t\trte_free(vq->async_pending_info);\n+\t\tvq->async_pending_info = NULL;\n+\t}\n+\n+\tvq->async_ops.transfer_data = NULL;\n+\tvq->async_ops.check_completed_copies = NULL;\n+\tvq->async_registered = false;\n+\n+out:\n+\trte_spinlock_unlock(&vq->access_lock);\n+\n+\treturn ret;\n+}\n+\ndiff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h\nindex 034463699..f3731982b 100644\n--- a/lib/librte_vhost/vhost.h\n+++ b/lib/librte_vhost/vhost.h\n@@ -24,6 +24,8 @@\n #include \"rte_vdpa.h\"\n #include \"rte_vdpa_dev.h\"\n \n+#include \"rte_vhost_async.h\"\n+\n /* Used to indicate that the device is running on a data core */\n #define VIRTIO_DEV_RUNNING 1\n /* Used to indicate that the device is ready to operate */\n@@ -40,6 +42,11 @@\n \n #define VHOST_LOG_CACHE_NR 32\n \n+#define MAX_PKT_BURST 32\n+\n+#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)\n+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)\n+\n #define PACKED_DESC_ENQUEUE_USED_FLAG(w)\t\\\n \t((w) ? 
(VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \\\n \t\tVRING_DESC_F_WRITE)\n@@ -202,6 +209,25 @@ struct vhost_virtqueue {\n \tTAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;\n \tint\t\t\t\tiotlb_cache_nr;\n \tTAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;\n+\n+\t/* operation callbacks for async dma */\n+\tstruct rte_vhost_async_channel_ops\tasync_ops;\n+\n+\tstruct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];\n+\tstruct iovec vec_pool[VHOST_MAX_ASYNC_VEC];\n+\n+\t/* async data transfer status */\n+\tuintptr_t\t**async_pkts_pending;\n+\t#define\t\tASYNC_PENDING_INFO_N_MSK 0xFFFF\n+\t#define\t\tASYNC_PENDING_INFO_N_SFT 16\n+\tuint64_t\t*async_pending_info;\n+\tuint16_t\tasync_pkts_idx;\n+\tuint16_t\tasync_pkts_inflight_n;\n+\n+\t/* vq async features */\n+\tbool\t\tasync_inorder;\n+\tbool\t\tasync_registered;\n+\tuint16_t\tasync_threshold;\n } __rte_cache_aligned;\n \n #define VHOST_MAX_VRING\t\t\t0x100\n@@ -338,6 +364,7 @@ struct virtio_net {\n \tint16_t\t\t\tbroadcast_rarp;\n \tuint32_t\t\tnr_vring;\n \tint\t\t\tdequeue_zero_copy;\n+\tint\t\t\tasync_copy;\n \tint\t\t\textbuf;\n \tint\t\t\tlinearbuf;\n \tstruct vhost_virtqueue\t*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];\n@@ -683,7 +710,8 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)\n \t/* Don't kick guest if we don't reach index specified by guest. 
*/\n \tif (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {\n \t\tuint16_t old = vq->signalled_used;\n-\t\tuint16_t new = vq->last_used_idx;\n+\t\tuint16_t new = vq->async_pkts_inflight_n ?\n+\t\t\t\t\tvq->used->idx:vq->last_used_idx;\n \t\tbool signalled_used_valid = vq->signalled_used_valid;\n \n \t\tvq->signalled_used = new;\ndiff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c\nindex 6039a8fdb..aa8605523 100644\n--- a/lib/librte_vhost/vhost_user.c\n+++ b/lib/librte_vhost/vhost_user.c\n@@ -476,12 +476,14 @@ vhost_user_set_vring_num(struct virtio_net **pdev,\n \t} else {\n \t\tif (vq->shadow_used_split)\n \t\t\trte_free(vq->shadow_used_split);\n+\n \t\tvq->shadow_used_split = rte_malloc(NULL,\n \t\t\t\tvq->size * sizeof(struct vring_used_elem),\n \t\t\t\tRTE_CACHE_LINE_SIZE);\n+\n \t\tif (!vq->shadow_used_split) {\n \t\t\tVHOST_LOG_CONFIG(ERR,\n-\t\t\t\t\t\"failed to allocate memory for shadow used ring.\\n\");\n+\t\t\t\t\t\"failed to allocate memory for vq internal data.\\n\");\n \t\t\treturn RTE_VHOST_MSG_RESULT_ERR;\n \t\t}\n \t}\n@@ -1166,7 +1168,8 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \t\t\tgoto err_mmap;\n \t\t}\n \n-\t\tpopulate = (dev->dequeue_zero_copy) ? 
MAP_POPULATE : 0;\n+\t\tpopulate = (dev->dequeue_zero_copy || dev->async_copy) ?\n+\t\t\tMAP_POPULATE : 0;\n \t\tmmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,\n \t\t\t\t MAP_SHARED | populate, fd, 0);\n \n@@ -1181,7 +1184,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,\n \t\treg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +\n \t\t\t\t      mmap_offset;\n \n-\t\tif (dev->dequeue_zero_copy)\n+\t\tif (dev->dequeue_zero_copy || dev->async_copy)\n \t\t\tif (add_guest_pages(dev, reg, alignment) < 0) {\n \t\t\t\tVHOST_LOG_CONFIG(ERR,\n \t\t\t\t\t\"adding guest pages to region %u failed.\\n\",\n@@ -1979,6 +1982,12 @@ vhost_user_get_vring_base(struct virtio_net **pdev,\n \t} else {\n \t\trte_free(vq->shadow_used_split);\n \t\tvq->shadow_used_split = NULL;\n+\t\tif (vq->async_pkts_pending)\n+\t\t\trte_free(vq->async_pkts_pending);\n+\t\tif (vq->async_pending_info)\n+\t\t\trte_free(vq->async_pending_info);\n+\t\tvq->async_pkts_pending = NULL;\n+\t\tvq->async_pending_info = NULL;\n \t}\n \n \trte_free(vq->batch_copy_elems);\n@@ -2012,6 +2021,14 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,\n \t\t\"set queue enable: %d to qp idx: %d\\n\",\n \t\tenable, index);\n \n+\tif (!enable && dev->virtqueue[index]->async_registered) {\n+\t\tif (dev->virtqueue[index]->async_pkts_inflight_n) {\n+\t\t\tVHOST_LOG_CONFIG(ERR, \"failed to disable vring. \"\n+\t\t\t\"async inflight packets must be completed first\\n\");\n+\t\t\treturn RTE_VHOST_MSG_RESULT_ERR;\n+\t\t}\n+\t}\n+\n \t/* On disable, rings have to be stopped being processed. */\n \tif (!enable && dev->dequeue_zero_copy)\n \t\tdrain_zmbuf_list(dev->virtqueue[index]);\n",
    "prefixes": [
        "v6",
        "1/2"
    ]
}