@@ -253,6 +253,12 @@ The following is an overview of some key Vhost API functions:
vhost invokes this function to get the copy data completed by async
devices.
+* ``rte_vhost_async_channel_register_thread_unsafe(vid, queue_id, features, ops)``
+ Register a vhost queue with an async copy device channel without
+ performing any locking.
+
+ This function is only safe to call from within vhost callback functions.
+
* ``rte_vhost_async_channel_unregister(vid, queue_id)``
Unregister the async copy device channel from a vhost queue.
@@ -265,6 +271,12 @@ The following is an overview of some key Vhost API functions:
devices for all vhost queues in destroy_device(), when a
virtio device is paused or shut down.
+* ``rte_vhost_async_channel_unregister_thread_unsafe(vid, queue_id)``
+ Unregister the async copy device channel from a vhost queue without
+ performing any locking.
+
+ This function is only safe to call from within vhost callback functions.
+
* ``rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, count, comp_pkts, comp_count)``
Submit an enqueue request to transmit ``count`` packets from host to guest
@@ -135,6 +135,45 @@ __rte_experimental
int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
/**
+ * register an async channel for vhost without performing any locking
+ *
+ * @note This function does not perform any locking, and is only safe to call
+ * from within vhost callback functions.
+ *
+ * @param vid
+ * vhost device id async channel to be attached to
+ * @param queue_id
+ * vhost queue id async channel to be attached to
+ * @param features
+ * DMA channel feature structure
+ * @param ops
+ * DMA operation callbacks
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_vhost_async_features features,
+ struct rte_vhost_async_channel_ops *ops);
+
+/**
+ * unregister a DMA channel for vhost without performing any locking
+ *
+ * @note This function does not perform any locking, and is only safe to call
+ * from within vhost callback functions.
+ *
+ * @param vid
+ * vhost device id DMA channel to be detached
+ * @param queue_id
+ * vhost queue id DMA channel to be detached
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int rte_vhost_async_channel_unregister_thread_unsafe(int vid,
+ uint16_t queue_id);
+
+/**
* This function submits enqueue data to async engine. Successfully
* enqueued packets can be transfer completed or being occupied by DMA
* engines, when this API returns. Transfer completed packets are returned
@@ -79,4 +79,8 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_async_channel_register_thread_unsafe;
+ rte_vhost_async_channel_unregister_thread_unsafe;
};
@@ -1619,42 +1619,19 @@ int rte_vhost_extern_callback_register(int vid,
return 0;
}
-int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
- struct rte_vhost_async_features features,
+static __rte_always_inline int
+async_channel_register(int vid, uint16_t queue_id,
+ struct rte_vhost_async_features f,
struct rte_vhost_async_channel_ops *ops)
{
- struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
-
- if (dev == NULL || ops == NULL)
- return -1;
-
- if (queue_id >= VHOST_MAX_VRING)
- return -1;
-
- vq = dev->virtqueue[queue_id];
-
- if (unlikely(vq == NULL || !dev->async_copy))
- return -1;
-
- if (unlikely(!features.async_inorder)) {
- VHOST_LOG_CONFIG(ERR,
- "async copy is not supported on non-inorder mode "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
- }
-
- if (unlikely(ops->check_completed_copies == NULL ||
- ops->transfer_data == NULL))
- return -1;
-
- rte_spinlock_lock(&vq->access_lock);
+ struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
if (unlikely(vq->async_registered)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: channel already registered "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->async_pkts_info = rte_malloc_socket(NULL,
@@ -1665,7 +1642,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async_pkts_info "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->it_pool = rte_malloc_socket(NULL,
@@ -1676,7 +1653,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for it_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->vec_pool = rte_malloc_socket(NULL,
@@ -1687,7 +1664,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vec_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
if (vq_is_packed(dev)) {
@@ -1699,7 +1676,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async buffers "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
} else {
vq->async_descs_split = rte_malloc_socket(NULL,
@@ -1710,22 +1687,88 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async descs "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
}
vq->async_ops.check_completed_copies = ops->check_completed_copies;
vq->async_ops.transfer_data = ops->transfer_data;
+ vq->async_inorder = f.async_inorder;
+ vq->async_threshold = f.async_threshold;
+ vq->async_registered = true;
- vq->async_inorder = features.async_inorder;
- vq->async_threshold = features.async_threshold;
+ return 0;
+}
- vq->async_registered = true;
+int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+ struct rte_vhost_async_features features,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret;
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!features.async_inorder)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL)) {
+ return -1;
+ }
-reg_out:
+ rte_spinlock_lock(&vq->access_lock);
+ ret = async_channel_register(vid, queue_id, features, ops);
rte_spinlock_unlock(&vq->access_lock);
- return 0;
+ return ret;
+}
+
+int rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_vhost_async_features features,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!features.async_inorder)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL)) {
+ return -1;
+ }
+
+ return async_channel_register(vid, queue_id, features, ops);
}
int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
@@ -1775,5 +1818,40 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
return ret;
}
+int rte_vhost_async_channel_unregister_thread_unsafe(int vid,
+ uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return -1;
+
+ if (!vq->async_registered)
+ return 0;
+
+ if (vq->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "async inflight packets must be completed before unregistration.\n");
+ return -1;
+ }
+
+ vhost_free_async_mem(vq);
+
+ vq->async_ops.transfer_data = NULL;
+ vq->async_ops.check_completed_copies = NULL;
+ vq->async_registered = false;
+
+ return 0;
+}
+
RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);