[v3,2/5] vhost: prepare async for descriptor to mbuf refactoring

Message ID 20220419034323.92820-3-xuan.ding@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: vhost: support async dequeue data path

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Ding, Xuan April 19, 2022, 3:43 a.m. UTC
  From: Xuan Ding <xuan.ding@intel.com>

This patch refactors the vhost async enqueue and dequeue paths to use
the same function, async_fill_seg(), to prepare batch elements,
which simplifies the code without degrading performance.

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
 lib/vhost/virtio_net.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
  
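The core of this refactor is a single direction flag that selects the copy
source and destination, so one helper can serve both the enqueue and dequeue
paths. Below is a minimal, self-contained sketch of that pattern;
fill_seg() and add_iovec() here are simplified stand-ins for illustration,
not the actual DPDK helpers:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for async_iter_add_iovec(): just perform the copy directly. */
static int add_iovec(void *src, void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/*
 * One helper serves both directions: to_desc selects whether the
 * mbuf data is the copy source (enqueue) or destination (dequeue).
 */
static int fill_seg(void *mbuf_data, void *desc_data, size_t len, bool to_desc)
{
	void *src = to_desc ? mbuf_data : desc_data;
	void *dst = to_desc ? desc_data : mbuf_data;

	return add_iovec(src, dst, len);
}

int main(void)
{
	char mbuf[8] = "packet";
	char desc[8] = {0};

	fill_seg(mbuf, desc, sizeof(mbuf), true);	/* enqueue: mbuf -> desc */
	printf("desc now holds: %s\n", desc);

	memset(mbuf, 0, sizeof(mbuf));
	fill_seg(mbuf, desc, sizeof(desc), false);	/* dequeue: desc -> mbuf */
	printf("mbuf now holds: %s\n", mbuf);
	return 0;
}

Compiled as a plain C program, this shows how a single flagged helper can
replace two near-identical copy routines.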

Comments

Maxime Coquelin April 22, 2022, 3:32 p.m. UTC | #1
On 4/19/22 05:43, xuan.ding@intel.com wrote:
> From: Xuan Ding <xuan.ding@intel.com>
> 
> This patch refactors the vhost async enqueue and dequeue paths to use
> the same function, async_fill_seg(), to prepare batch elements,
> which simplifies the code without degrading performance.
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
>   lib/vhost/virtio_net.c | 23 +++++++++++++++--------
>   1 file changed, 15 insertions(+), 8 deletions(-)
> 
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 6d53016c75..391fb82f0e 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -997,13 +997,14 @@ async_iter_reset(struct vhost_async *async)
>   }
>   
>   static __rte_always_inline int
> -async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   		struct rte_mbuf *m, uint32_t mbuf_offset,
> -		uint64_t buf_iova, uint32_t cpy_len)
> +		uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
>   {
>   	struct vhost_async *async = vq->async;
>   	uint64_t mapped_len;
>   	uint32_t buf_offset = 0;
> +	void *src, *dst;
>   	void *host_iova;
>   
>   	while (cpy_len) {
> @@ -1015,10 +1016,16 @@ async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   			return -1;
>   		}
>   
> -		if (unlikely(async_iter_add_iovec(dev, async,
> -						(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
> -							mbuf_offset),
> -						host_iova, (size_t)mapped_len)))
> +		if (to_desc) {
> +			src = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
> +			dst = host_iova;
> +		} else {
> +			src = host_iova;
> +			dst = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
> +		}
> +
> +		if (unlikely(async_iter_add_iovec(dev, async, src, dst,
> +						 (size_t)mapped_len)))

Minor, but it may fit on a single line.
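
For example, the combined check could read:

		if (unlikely(async_iter_add_iovec(dev, async, src, dst, (size_t)mapped_len)))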

>   			return -1;
>   
>   		cpy_len -= (uint32_t)mapped_len;
> @@ -1167,8 +1174,8 @@ mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
>   
>   		if (is_async) {
> -			if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
> -						buf_iova + buf_offset, cpy_len) < 0)
> +			if (async_fill_seg(dev, vq, m, mbuf_offset,
> +						buf_iova + buf_offset, cpy_len, true) < 0)
>   				goto error;
>   		} else {
>   			sync_fill_seg(dev, vq, m, mbuf_offset,

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
  

Patch

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 6d53016c75..391fb82f0e 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -997,13 +997,14 @@ async_iter_reset(struct vhost_async *async)
 }
 
 static __rte_always_inline int
-async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf *m, uint32_t mbuf_offset,
-		uint64_t buf_iova, uint32_t cpy_len)
+		uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
 {
 	struct vhost_async *async = vq->async;
 	uint64_t mapped_len;
 	uint32_t buf_offset = 0;
+	void *src, *dst;
 	void *host_iova;
 
 	while (cpy_len) {
@@ -1015,10 +1016,16 @@ async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			return -1;
 		}
 
-		if (unlikely(async_iter_add_iovec(dev, async,
-						(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
-							mbuf_offset),
-						host_iova, (size_t)mapped_len)))
+		if (to_desc) {
+			src = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
+			dst = host_iova;
+		} else {
+			src = host_iova;
+			dst = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
+		}
+
+		if (unlikely(async_iter_add_iovec(dev, async, src, dst,
+						 (size_t)mapped_len)))
 			return -1;
 
 		cpy_len -= (uint32_t)mapped_len;
@@ -1167,8 +1174,8 @@ mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
 
 		if (is_async) {
-			if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
-						buf_iova + buf_offset, cpy_len) < 0)
+			if (async_fill_seg(dev, vq, m, mbuf_offset,
+						buf_iova + buf_offset, cpy_len, true) < 0)
 				goto error;
 		} else {
 			sync_fill_seg(dev, vq, m, mbuf_offset,
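
For context, the dequeue side added later in this series would presumably
call the same helper with the flag flipped — a hypothetical sketch mirroring
the hunk above, not code taken from this patch:

		if (async_fill_seg(dev, vq, m, mbuf_offset,
					buf_iova + buf_offset, cpy_len, false) < 0)
			goto error;

With to_desc = false, the src/dst selection in async_fill_seg() makes
host_iova the copy source and the mbuf data the destination.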