@@ -288,7 +288,13 @@ The following is an overview of some key Vhost API functions:
* ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count, dma_id, vchan_id)``
- Clear inflight packets which are submitted to DMA engine in vhost async data
+ Clear in-flight packets which are submitted to the async channel in the
+ vhost async data path, without performing any locking. Completed packets
+ are returned to applications through ``pkts``.
+
+* ``rte_vhost_clear_queue(vid, queue_id, **pkts, count, dma_id, vchan_id)``
+
+ Clear in-flight packets which are submitted to the async channel in the vhost async data
path. Completed packets are returned to applications through ``pkts``.
* ``rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id, struct rte_vhost_stat_name *names, unsigned int size)``
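As a usage sketch for the two clear APIs above, a teardown path might drain a virtqueue as below. The helper name ``drain_vhost_async_queue``, the 64-entry burst and the application-tracked ``inflight`` counter are assumptions of this sketch; the DMA vchannel identified by ``dma_id``/``vchan_id`` is assumed to have been configured beforehand with ``rte_vhost_async_dma_configure()``.

    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_vhost_async.h>

    /* Drain every packet still owned by the async channel of one virtqueue
     * and hand the completed mbufs back to their mempool. */
    static void
    drain_vhost_async_queue(int vid, uint16_t queue_id, uint16_t inflight,
            int16_t dma_id, uint16_t vchan_id)
    {
        struct rte_mbuf *pkts[64];

        while (inflight > 0) {
            /* Thread-safe variant: returns 0 when the virtqueue lock is
             * busy or when no in-flight packet has completed yet. */
            uint16_t n = rte_vhost_clear_queue(vid, queue_id, pkts,
                    RTE_DIM(pkts), dma_id, vchan_id);

            if (n == 0)
                continue;
            rte_pktmbuf_free_bulk(pkts, n);
            inflight -= n;
        }
    }

The thread-unsafe variant would fit the same loop when the caller already holds the virtqueue lock, for example from a ``destroy_device`` callback where no data-path thread touches the queue.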
@@ -92,6 +92,11 @@ New Features
Added vhost async dequeue API which can leverage DMA devices to
accelerate receiving pkts from guest.
+* **Added thread-safe version of in-flight packet clear API in vhost library.**
+
+ Added an API which can clear, in a thread-safe manner, the in-flight
+ packets submitted to the async channel in the vhost async data path.
+
Removed Items
-------------
@@ -183,6 +183,31 @@ uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id);
+/**
+ * This function checks async completion status and clears in-flight
+ * packets for a specific vhost device queue. Completed packets are
+ * returned to the caller in an array.
+ *
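+ * @note This function is the thread-safe counterpart of
+ * rte_vhost_clear_queue_thread_unsafe(): it takes the virtqueue access
+ * lock internally and returns 0 if the lock is already held.
+ *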
+ * @param vid
+ * ID of vhost device to clear data
+ * @param queue_id
+ * Queue id to clear data
+ * @param pkts
+ * Blank array to get return packet pointer
+ * @param count
+ * Size of the packet array
+ * @param dma_id
+ * The identifier of the DMA device
+ * @param vchan_id
+ * The identifier of virtual DMA channel
+ * @return
+ * Number of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_clear_queue(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+ uint16_t vchan_id);
+
/**
* The DMA vChannels used in asynchronous data path must be configured
* first. So this function needs to be called before enabling DMA
@@ -94,6 +94,7 @@ EXPERIMENTAL {
rte_vhost_vring_stats_get;
rte_vhost_vring_stats_reset;
rte_vhost_async_try_dequeue_burst;
+ rte_vhost_clear_queue;
};
INTERNAL {
@@ -26,6 +26,11 @@
#define MAX_BATCH_LEN 256
+static __rte_always_inline uint16_t
+async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+ uint16_t vchan_id, bool legacy_ol_flags);
+
/* DMA device copy operation tracking array. */
struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
@@ -2155,7 +2160,7 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
return 0;
VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ if (unlikely(queue_id >= dev->nr_vring)) {
VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
dev->ifname, __func__, queue_id);
return 0;
@@ -2182,7 +2187,18 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
return 0;
}
- n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
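+ /* Even virtqueue indexes belong to the enqueue (guest Rx) data path,
+  * odd indexes to the dequeue (guest Tx) data path; poll the completions
+  * of the matching path.
+  */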
+ if (queue_id % 2 == 0) {
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
+ pkts, count, dma_id, vchan_id);
+ } else {
+ if (unlikely(vq_is_packed(dev)))
+ VHOST_LOG_DATA(ERR,
+ "(%s) %s: async dequeue does not support packed ring.\n",
+ dev->ifname, __func__);
+ else
+ n_pkts_cpl = async_poll_dequeue_completed_split(dev, vq, pkts, count,
+ dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
+ }
vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
vq->stats.inflight_completed += n_pkts_cpl;
@@ -2190,6 +2206,68 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
return n_pkts_cpl;
}
+uint16_t
+rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
+ uint16_t count, int16_t dma_id, uint16_t vchan_id)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+ if (unlikely(queue_id >= dev->nr_vring)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
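+ /* Unlike rte_vhost_clear_queue_thread_unsafe(), take the virtqueue
+  * access lock; give up and report 0 completed packets if another
+  * thread currently holds it.
+  */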
+ if (!rte_spinlock_trylock(&vq->access_lock)) {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to clear async queue id %d, virtqueue busy.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ if (unlikely(!vq->async)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
+ goto out_access_unlock;
+ }
+
+ if (unlikely(!dma_copy_track[dma_id].vchans ||
+ !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
+ dma_id, vchan_id);
+ goto out_access_unlock;
+ }
+
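+ /* Same dispatch as the thread-unsafe variant: even indexes poll the
+  * enqueue path, odd indexes the dequeue path.
+  */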
+ if (queue_id % 2 == 0) {
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
+ pkts, count, dma_id, vchan_id);
+ } else {
+ if (unlikely(vq_is_packed(dev)))
+ VHOST_LOG_DATA(ERR,
+ "(%s) %s: async dequeue does not support packed ring.\n",
+ dev->ifname, __func__);
+ else
+ n_pkts_cpl = async_poll_dequeue_completed_split(dev, vq, pkts, count,
+ dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
+ }
+
+ vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
+ vq->stats.inflight_completed += n_pkts_cpl;
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return n_pkts_cpl;
+}
+
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)