[v4,2/5] vhost: prepare async for descriptor to mbuf refactoring

Message ID 20220505062335.25445-3-xuan.ding@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost: support async dequeue data path

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Ding, Xuan May 5, 2022, 6:23 a.m. UTC
  From: Xuan Ding <xuan.ding@intel.com>

This patch refactors the vhost async enqueue and dequeue paths to use
the same function, async_fill_seg(), for preparing batch elements,
which simplifies the code without performance degradation.

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/virtio_net.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
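
For illustration, the following is a minimal standalone sketch of the
direction-flag pattern this patch introduces: one helper fills a copy
job for either direction by swapping source and destination based on a
boolean. The names fill_seg() and struct copy_seg are hypothetical
simplifications for this note, not the DPDK API; the real helper also
resolves the guest buffer address and appends the segment to the async
iovec via async_iter_add_iovec(), as the diff below shows.

/*
 * Editor's sketch, not DPDK code: the direction-flag pattern
 * behind async_fill_seg().  fill_seg() and struct copy_seg are
 * hypothetical names.
 */
#include <stdbool.h>
#include <stddef.h>

struct copy_seg {
	void *src;
	void *dst;
	size_t len;
};

static void
fill_seg(struct copy_seg *seg, void *mbuf_data, void *desc_data,
		size_t len, bool to_desc)
{
	if (to_desc) {
		/* enqueue: copy from the mbuf into the descriptor buffer */
		seg->src = mbuf_data;
		seg->dst = desc_data;
	} else {
		/* dequeue: copy from the descriptor buffer into the mbuf */
		seg->src = desc_data;
		seg->dst = mbuf_data;
	}
	seg->len = len;
}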
  

Comments

Yang, YvonneX May 5, 2022, 7:38 a.m. UTC | #1
> -----Original Message-----
> From: xuan.ding@intel.com <xuan.ding@intel.com>
> Sent: May 5, 2022 14:24
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Jiang, Cheng1
> <cheng1.jiang@intel.com>; Pai G, Sunil <sunil.pai.g@intel.com>;
> liangma@liangbit.com; Ding, Xuan <xuan.ding@intel.com>
> Subject: [PATCH v4 2/5] vhost: prepare async for descriptor to mbuf refactoring
> 
> From: Xuan Ding <xuan.ding@intel.com>
> 
> This patch refactors vhost async enqueue path and dequeue path to use the
> same function async_fill_seg() for preparing batch elements, which simplifies the
> code without performance degradation.
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/vhost/virtio_net.c | 22 ++++++++++++++--------
>  1 file changed, 14 insertions(+), 8 deletions(-)
> 


Tested-by: Yvonne Yang <yvonnex.yang@intel.com>
  

Patch

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index d4c94d2a9b..a9e2dcd9ce 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -997,13 +997,14 @@  async_iter_reset(struct vhost_async *async)
 }
 
 static __rte_always_inline int
-async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, uint32_t mbuf_offset,
-		uint64_t buf_iova, uint32_t cpy_len)
+		uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
 {
 	struct vhost_async *async = vq->async;
 	uint64_t mapped_len;
 	uint32_t buf_offset = 0;
+	void *src, *dst;
 	void *host_iova;
 
 	while (cpy_len) {
@@ -1015,10 +1016,15 @@  async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			return -1;
 		}
 
-		if (unlikely(async_iter_add_iovec(dev, async,
-						(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
-							mbuf_offset),
-						host_iova, (size_t)mapped_len)))
+		if (to_desc) {
+			src = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
+			dst = host_iova;
+		} else {
+			src = host_iova;
+			dst = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
+		}
+
+		if (unlikely(async_iter_add_iovec(dev, async, src, dst, (size_t)mapped_len)))
 			return -1;
 
 		cpy_len -= (uint32_t)mapped_len;
@@ -1167,8 +1173,8 @@  mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
 
 		if (is_async) {
-			if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
-						buf_iova + buf_offset, cpy_len) < 0)
+			if (async_fill_seg(dev, vq, m, mbuf_offset,
+					   buf_iova + buf_offset, cpy_len, true) < 0)
 				goto error;
 		} else {
 			sync_fill_seg(dev, vq, m, mbuf_offset,
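
With the to_desc flag in place, the dequeue refactoring later in this
series can reuse the same helper by passing false. A sketch of such a
call site, mirroring the enqueue hunk above with the flag flipped (an
assumed shape for illustration, not code from this patch):

	if (is_async) {
		/* dequeue: descriptor buffer -> mbuf, so to_desc is false */
		if (async_fill_seg(dev, vq, m, mbuf_offset,
				   buf_iova + buf_offset, cpy_len, false) < 0)
			goto error;
	}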