Hi Jiayu,
> -----Original Message-----
> From: Hu, Jiayu <jiayu.hu@intel.com>
> Sent: Saturday, July 17, 2021 3:51 AM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>; Hu, Jiayu
> <jiayu.hu@intel.com>
> Subject: [PATCH v5 3/3] vhost: add thread unsafe async registration functions
>
> This patch adds thread unsafe version for async register and
> unregister functions.
>
> Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
> ---
> doc/guides/prog_guide/vhost_lib.rst | 14 ++++
> lib/vhost/rte_vhost_async.h | 41 ++++++++++
> lib/vhost/version.map | 4 +
> lib/vhost/vhost.c | 149 +++++++++++++++++++++++++++--------
> -
> 4 files changed, 173 insertions(+), 35 deletions(-)
>
> diff --git a/doc/guides/prog_guide/vhost_lib.rst
> b/doc/guides/prog_guide/vhost_lib.rst
> index 2a61b85..c8638db 100644
> --- a/doc/guides/prog_guide/vhost_lib.rst
> +++ b/doc/guides/prog_guide/vhost_lib.rst
> @@ -256,6 +256,13 @@ The following is an overview of some key Vhost API
> functions:
> vhost invokes this function to get the copy data completed by async
> devices.
>
> +* ``rte_vhost_async_channel_register_thread_unsafe(vid, queue_id, config,
> ops)``
> + Register an async copy device channel for a vhost queue without
> + performing any locking.
> +
> + This function is only safe to call in vhost callback functions
> + (i.e., struct vhost_device_ops).
> +
> * ``rte_vhost_async_channel_unregister(vid, queue_id)``
>
> Unregister the async copy device channel from a vhost queue.
> @@ -268,6 +275,13 @@ The following is an overview of some key Vhost API
> functions:
> devices for all vhost queues in destroy_device(), when a
> virtio device is paused or shut down.
>
> +* ``rte_vhost_async_channel_unregister_thread_unsafe(vid, queue_id)``
We should add a blank line between the API name and its description. I will
add it when applying.
With above fixed:
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
@@ -256,6 +256,13 @@ The following is an overview of some key Vhost API functions:
vhost invokes this function to get the copy data completed by async
devices.
+* ``rte_vhost_async_channel_register_thread_unsafe(vid, queue_id, config, ops)``
+ Register an async copy device channel for a vhost queue without
+ performing any locking.
+
+ This function is only safe to call in vhost callback functions
+ (i.e., struct vhost_device_ops).
+
* ``rte_vhost_async_channel_unregister(vid, queue_id)``
Unregister the async copy device channel from a vhost queue.
@@ -268,6 +275,13 @@ The following is an overview of some key Vhost API functions:
devices for all vhost queues in destroy_device(), when a
virtio device is paused or shut down.
+* ``rte_vhost_async_channel_unregister_thread_unsafe(vid, queue_id)``
+ Unregister the async copy device channel for a vhost queue without
+ performing any locking.
+
+ This function is only safe to call in vhost callback functions
+ (i.e., struct vhost_device_ops).
+
* ``rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, count, comp_pkts, comp_count)``
Submit an enqueue request to transmit ``count`` packets from host to guest
@@ -142,6 +142,47 @@ __rte_experimental
int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
/**
+ * Register an async channel for a vhost queue without performing any
+ * locking
+ *
+ * @note This function does not perform any locking, and is only safe to
+ * call in vhost callback functions.
+ *
+ * @param vid
+ * vhost device id async channel to be attached to
+ * @param queue_id
+ * vhost queue id async channel to be attached to
+ * @param config
+ * Async channel configuration
+ * @param ops
+ * Async channel operation callbacks
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops);
+
+/**
+ * Unregister an async channel for a vhost queue without performing any
+ * locking
+ *
+ * @note This function does not perform any locking, and is only safe to
+ * call in vhost callback functions.
+ *
+ * @param vid
+ * vhost device id async channel to be detached from
+ * @param queue_id
+ * vhost queue id async channel to be detached from
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int rte_vhost_async_channel_unregister_thread_unsafe(int vid,
+ uint16_t queue_id);
+
+/**
* This function submits enqueue data to async engine. Successfully
* enqueued packets can be transfer completed or being occupied by DMA
* engines, when this API returns. Transfer completed packets are returned
@@ -79,4 +79,8 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_async_channel_register_thread_unsafe;
+ rte_vhost_async_channel_unregister_thread_unsafe;
};
@@ -1619,43 +1619,19 @@ int rte_vhost_extern_callback_register(int vid,
return 0;
}
-int
-rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+static __rte_always_inline int
+async_channel_register(int vid, uint16_t queue_id,
struct rte_vhost_async_config config,
struct rte_vhost_async_channel_ops *ops)
{
- struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
-
- if (dev == NULL || ops == NULL)
- return -1;
-
- if (queue_id >= VHOST_MAX_VRING)
- return -1;
-
- vq = dev->virtqueue[queue_id];
-
- if (unlikely(vq == NULL || !dev->async_copy))
- return -1;
-
- if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
- VHOST_LOG_CONFIG(ERR,
- "async copy is not supported on non-inorder mode "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
- }
-
- if (unlikely(ops->check_completed_copies == NULL ||
- ops->transfer_data == NULL))
- return -1;
-
- rte_spinlock_lock(&vq->access_lock);
+ struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
if (unlikely(vq->async_registered)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: channel already registered "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->async_pkts_info = rte_malloc_socket(NULL,
@@ -1666,7 +1642,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async_pkts_info "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->it_pool = rte_malloc_socket(NULL,
@@ -1677,7 +1653,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for it_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->vec_pool = rte_malloc_socket(NULL,
@@ -1688,7 +1664,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vec_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
if (vq_is_packed(dev)) {
@@ -1700,7 +1676,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async buffers "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
} else {
vq->async_descs_split = rte_malloc_socket(NULL,
@@ -1711,7 +1687,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async descs "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
}
@@ -1721,10 +1697,78 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
vq->async_registered = true;
-reg_out:
+ return 0;
+}
+
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret;
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ rte_spinlock_lock(&vq->access_lock);
+ ret = async_channel_register(vid, queue_id, config, ops);
rte_spinlock_unlock(&vq->access_lock);
- return 0;
+ return ret;
+}
+
+int
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ return async_channel_register(vid, queue_id, config, ops);
}
int
@@ -1775,5 +1819,40 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
return ret;
}
+int
+rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return -1;
+
+ if (!vq->async_registered)
+ return 0;
+
+ if (vq->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "async inflight packets must be completed before unregistration.\n");
+ return -1;
+ }
+
+ vhost_free_async_mem(vq);
+
+ vq->async_ops.transfer_data = NULL;
+ vq->async_ops.check_completed_copies = NULL;
+ vq->async_registered = false;
+
+ return 0;
+}
+
RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);