[RFC,09/14] vhost: remove notion of async descriptor

Message ID 20211007220013.355530-10-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost: clean-up and simplify async implementation

Commit Message

Maxime Coquelin Oct. 7, 2021, 10 p.m. UTC
Now that the IO vector iterators have been simplified, the
rte_vhost_async_desc struct only contains a pointer to the
iterator array stored in the async metadata.

This patch removes it and passes the iterator array pointer
directly to the transfer_data callback. Doing that, we avoid
declaring the descriptor array on the stack, and also avoid
the cost of filling it.
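
In practice this removes one level of indirection on the
submission path; a condensed before/after view of the call
site, taken from the virtio_net.c hunks below:

    /* Before: a stack array of descriptors is built only to
     * wrap the iterators already stored in the async metadata */
    struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];

    async_fill_descs(async, async_descs);
    n_xfer = async->ops.transfer_data(dev->vid, queue_id,
            async_descs, 0, pkt_idx);

    /* After: the iterator array is passed directly */
    n_xfer = async->ops.transfer_data(dev->vid, queue_id,
            async->iov_iter, 0, pkt_idx);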

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 examples/vhost/ioat.c       | 10 +++++-----
 examples/vhost/ioat.h       |  2 +-
 lib/vhost/rte_vhost_async.h | 18 +++++-------------
 lib/vhost/virtio_net.c      | 19 ++-----------------
 4 files changed, 13 insertions(+), 36 deletions(-)
  

Patch

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index a8c588deff..9aeeb12fd9 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -124,10 +124,10 @@  open_ioat(const char *value)
 
 int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_desc *descs,
+		struct rte_vhost_iov_iter *iov_iter,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
 {
-	uint32_t i_desc;
+	uint32_t i_iter;
 	uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
 	struct rte_vhost_iov_iter *iter = NULL;
 	unsigned long i_seg;
@@ -135,8 +135,8 @@  ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	unsigned short write = cb_tracker[dev_id].next_write;
 
 	if (!opaque_data) {
-		for (i_desc = 0; i_desc < count; i_desc++) {
-			iter = descs[i_desc].iter;
+		for (i_iter = 0; i_iter < count; i_iter++) {
+			iter = iov_iter + i_iter;
 			i_seg = 0;
 			if (cb_tracker[dev_id].ioat_space < iter->nr_segs)
 				break;
@@ -161,7 +161,7 @@  ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	/* ring the doorbell */
 	rte_ioat_perform_ops(dev_id);
 	cb_tracker[dev_id].next_write = write;
-	return i_desc;
+	return i_iter;
 }
 
 int32_t
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 62e163c585..a4f09ee39b 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -29,7 +29,7 @@  int open_ioat(const char *value);
 
 int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_desc *descs,
+		struct rte_vhost_iov_iter *iov_iter,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
 int32_t
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 4ea5cfab10..a87ea6ba37 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -26,14 +26,6 @@  struct rte_vhost_iov_iter {
 	unsigned long nr_segs;
 };
 
-/**
- * dma transfer descriptor
- */
-struct rte_vhost_async_desc {
-	/* memory iov_iter */
-	struct rte_vhost_iov_iter *iter;
-};
-
 /**
  * dma transfer status
  */
@@ -55,17 +47,17 @@  struct rte_vhost_async_channel_ops {
 	 *  id of vhost device to perform data copies
 	 * @param queue_id
 	 *  queue id to perform data copies
-	 * @param descs
-	 *  an array of DMA transfer memory descriptors
+	 * @param iov_iter
+	 *  an array of IOV iterators
 	 * @param opaque_data
 	 *  opaque data pair sending to DMA engine
 	 * @param count
-	 *  number of elements in the "descs" array
+	 *  number of elements in the "iov_iter" array
 	 * @return
-	 *  number of descs processed, negative value means error
+	 *  number of IOV iterators processed, negative value means error
 	 */
 	int32_t (*transfer_data)(int vid, uint16_t queue_id,
-		struct rte_vhost_async_desc *descs,
+		struct rte_vhost_iov_iter *iov_iter,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
 	/**
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 5ce4c14a73..b295dc1d39 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -994,15 +994,6 @@  async_iter_reset(struct vhost_async *async)
 	async->iovec_idx = 0;
 }
 
-static __rte_always_inline void
-async_fill_descs(struct vhost_async *async, struct rte_vhost_async_desc *descs)
-{
-	int i;
-
-	for (i = 0; i < async->iter_idx; i++)
-		descs[i].iter = async->iov_iter + i;
-}
-
 static __rte_always_inline int
 async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			struct rte_mbuf *m, struct buf_vector *buf_vec,
@@ -1549,7 +1540,6 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	uint16_t avail_head;
 
 	struct vhost_async *async = vq->async;
-	struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
 	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint32_t pkt_err = 0;
 	int32_t n_xfer;
@@ -1594,9 +1584,7 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	if (unlikely(pkt_idx == 0))
 		return 0;
 
-	async_fill_descs(async, async_descs);
-
-	n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
+	n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
 	if (unlikely(n_xfer < 0)) {
 		VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
 				dev->vid, __func__, queue_id);
@@ -1811,7 +1799,6 @@  virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t num_descs;
 
 	struct vhost_async *async = vq->async;
-	struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
 	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint32_t pkt_err = 0;
 	uint16_t slot_idx = 0;
@@ -1839,9 +1826,7 @@  virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	if (unlikely(pkt_idx == 0))
 		return 0;
 
-	async_fill_descs(async, async_descs);
-
-	n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
+	n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
 	if (unlikely(n_xfer < 0)) {
 		VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
 				dev->vid, __func__, queue_id);
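
For reference, a backend implementing the updated callback now walks
the iterator array directly. A minimal sketch, assuming struct
rte_vhost_iov_iter carries the iov segment array and nr_segs count
used by ioat.c above; my_dma_copy() and my_transfer_data_cb() are
hypothetical placeholders, with error handling and DMA batching
elided:

    #include <string.h>

    #include <rte_vhost_async.h>

    /* Hypothetical synchronous stand-in for a driver DMA enqueue
     * call such as rte_ioat_enqueue_copy() in examples/vhost/ioat.c */
    static inline void
    my_dma_copy(void *src, void *dst, size_t len)
    {
            memcpy(dst, src, len);
    }

    static int32_t
    my_transfer_data_cb(int vid, uint16_t queue_id,
                    struct rte_vhost_iov_iter *iov_iter,
                    struct rte_vhost_async_status *opaque_data,
                    uint16_t count)
    {
            uint16_t i_iter;
            unsigned long i_seg;

            (void)vid;
            (void)queue_id;

            /* opaque data path not handled in this sketch */
            if (opaque_data != NULL)
                    return -1;

            for (i_iter = 0; i_iter < count; i_iter++) {
                    struct rte_vhost_iov_iter *iter = &iov_iter[i_iter];

                    for (i_seg = 0; i_seg < iter->nr_segs; i_seg++)
                            my_dma_copy(iter->iov[i_seg].src_addr,
                                            iter->iov[i_seg].dst_addr,
                                            iter->iov[i_seg].len);
            }

            /* number of IOV iterators processed */
            return i_iter;
    }

A real backend would register such a function through the
transfer_data member of struct rte_vhost_async_channel_ops.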