[v3,3/5] vhost: merge sync and async descriptor to mbuf filling

Message ID 20220419034323.92820-4-xuan.ding@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost: support async dequeue data path

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Ding, Xuan April 19, 2022, 3:43 a.m. UTC
  From: Xuan Ding <xuan.ding@intel.com>

This patch refactors copy_desc_to_mbuf(), used by the sync path, to
support both sync and async descriptor-to-mbuf filling. A simplified
sketch of the merged dispatch follows the diffstat below.

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
 lib/vhost/vhost.h      |  1 +
 lib/vhost/virtio_net.c | 48 ++++++++++++++++++++++++++++++++----------
 2 files changed, 38 insertions(+), 11 deletions(-)
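
Conceptually, the refactor folds the async copy into the existing sync
copy loop behind an is_async flag, so both paths share one descriptor
walk. Below is a minimal compilable sketch of that dispatch shape; all
names and types in it (struct seg, fill_seg_sync(), fill_seg_async(),
desc_to_mbuf_model()) are hypothetical stand-ins, not DPDK APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for one mbuf data segment. */
struct seg { uint8_t data[64]; };

/* Stand-in for sync_fill_seg(): copy immediately on the CPU. */
static void fill_seg_sync(struct seg *dst, const uint8_t *src, uint32_t len)
{
	memcpy(dst->data, src, len);
}

/* Stand-in for async_fill_seg(): a real one would append the copy to a
 * DMA iterator and may fail; here it just copies and reports success. */
static int fill_seg_async(struct seg *dst, const uint8_t *src, uint32_t len)
{
	memcpy(dst->data, src, len);
	return 0;
}

/* Merged helper: one copy path, branching on is_async per segment,
 * mirroring the shape of the desc_to_mbuf() refactor in this patch. */
static int desc_to_mbuf_model(struct seg *dst, const uint8_t *src,
		uint32_t len, bool is_async)
{
	if (is_async) {
		if (fill_seg_async(dst, src, len) < 0)
			return -1; /* caller would cancel the async iterator */
	} else {
		fill_seg_sync(dst, src, len);
	}
	return 0;
}

int main(void)
{
	struct seg s;
	const uint8_t payload[] = "hello";

	if (desc_to_mbuf_model(&s, payload, sizeof(payload), false) == 0)
		printf("sync:  %s\n", (const char *)s.data);
	if (desc_to_mbuf_model(&s, payload, sizeof(payload), true) == 0)
		printf("async: %s\n", (const char *)s.data);
	return 0;
}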
  

Comments

David Marchand April 22, 2022, 11:06 a.m. UTC | #1
We (at RH) have some issues with our email infrastructure, so I can't
reply inline to the patch.

Copy/pasting the code:

+static __rte_always_inline uint16_t
+async_poll_dequeue_completed_split(struct virtio_net *dev, uint16_t queue_id,
+		struct rte_mbuf **pkts, uint16_t count, uint16_t dma_id,
+		uint16_t vchan_id, bool legacy_ol_flags)
+{
+	uint16_t start_idx, from, i;
+	uint16_t nr_cpl_pkts = 0;
+	struct async_inflight_info *pkts_info;
+	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+

Please, don't pass queue_id as an input parameter for
async_poll_dequeue_completed_split().
The caller of this helper already dereferenced the vq.
You can pass vq.
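
For illustration, a minimal compilable sketch of the suggested change;
vq_model and dev_model are hypothetical stand-ins for DPDK's
struct vhost_virtqueue and struct virtio_net:

#include <stdint.h>
#include <stdio.h>

struct vq_model { uint16_t last_avail_idx; };
struct dev_model { struct vq_model *virtqueue[2]; };

/* Before: the helper takes queue_id and repeats a lookup the caller
 * has already done. */
static uint16_t poll_completed_by_id(struct dev_model *dev, uint16_t queue_id)
{
	struct vq_model *vq = dev->virtqueue[queue_id];
	return vq->last_avail_idx;
}

/* After: the caller passes the vq it already holds. */
static uint16_t poll_completed_by_vq(struct vq_model *vq)
{
	return vq->last_avail_idx;
}

int main(void)
{
	struct vq_model q = { .last_avail_idx = 42 };
	struct dev_model d = { .virtqueue = { &q, NULL } };

	printf("%u %u\n", (unsigned)poll_completed_by_id(&d, 0),
			(unsigned)poll_completed_by_vq(&q));
	return 0;
}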
  
Maxime Coquelin April 22, 2022, 3:43 p.m. UTC | #2
On 4/19/22 05:43, xuan.ding@intel.com wrote:
> From: Xuan Ding <xuan.ding@intel.com>
> 
> This patch refactors copy_desc_to_mbuf(), used by the sync path, to
> support both sync and async descriptor-to-mbuf filling.
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
>   lib/vhost/vhost.h      |  1 +
>   lib/vhost/virtio_net.c | 48 ++++++++++++++++++++++++++++++++----------
>   2 files changed, 38 insertions(+), 11 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
  
Maxime Coquelin April 22, 2022, 3:46 p.m. UTC | #3
On 4/22/22 13:06, David Marchand wrote:
> We (at RH) have some issues with our email infrastructure, so I can't
> reply inline to the patch.
> 
> Copy/pasting the code:
> 
> +static __rte_always_inline uint16_t
> +async_poll_dequeue_completed_split(struct virtio_net *dev, uint16_t queue_id,
> +		struct rte_mbuf **pkts, uint16_t count, uint16_t dma_id,
> +		uint16_t vchan_id, bool legacy_ol_flags)
> +{
> +	uint16_t start_idx, from, i;
> +	uint16_t nr_cpl_pkts = 0;
> +	struct async_inflight_info *pkts_info;
> +	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
> +
> 
> Please, don't pass queue_id as an input parameter for
> async_poll_dequeue_completed_split().
> The caller of this helper already dereferenced the vq.
> You can pass vq.
> 
> 


I think David's comment was intended to be a reply to patch 4, but I 
agree with him.

Could you please fix this and also fix the build issues reported by the
CI? I'll continue the review on V4.

Thanks,
Maxime
  
Ding, Xuan April 24, 2022, 2:02 a.m. UTC | #4
Hi Maxime, David,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, April 22, 2022 11:46 PM
> To: David Marchand <david.marchand@redhat.com>; Ding, Xuan
> <xuan.ding@intel.com>
> Cc: Xia, Chenbo <chenbo.xia@intel.com>; dev <dev@dpdk.org>; Hu, Jiayu
> <jiayu.hu@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>; Pai G, Sunil
> <sunil.pai.g@intel.com>; liangma@liangbit.com
> Subject: Re: [PATCH v3 3/5] vhost: merge sync and async descriptor to mbuf
> filling
> 
> 
> 
> On 4/22/22 13:06, David Marchand wrote:
> > We (at RH) have some issues with our email infrastructure, so I can't
> > reply inline to the patch.
> >
> > Copy/pasting the code:
> >
> > +static __rte_always_inline uint16_t
> > +async_poll_dequeue_completed_split(struct virtio_net *dev, uint16_t queue_id,
> > +		struct rte_mbuf **pkts, uint16_t count, uint16_t dma_id,
> > +		uint16_t vchan_id, bool legacy_ol_flags)
> > +{
> > +	uint16_t start_idx, from, i;
> > +	uint16_t nr_cpl_pkts = 0;
> > +	struct async_inflight_info *pkts_info;
> > +	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
> > +
> >
> > Please, don't pass queue_id as an input parameter for
> > async_poll_dequeue_completed_split().
> > The caller of this helper already dereferenced the vq.
> > You can pass vq.
> >
> >
> 
> 
> I think David's comment was intended to be a reply to patch 4, but I agree
> with him.
> 
> Could you please fix this and also fix the build issues reported by the CI? I'll
> continue the review on V4.

Thanks for your suggestion; please see v4.

Regards,
Xuan

> 
> Thanks,
> Maxime
  

Patch

diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index a9edc271aa..9209558465 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -177,6 +177,7 @@  extern struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
  * inflight async packet information
  */
 struct async_inflight_info {
+	struct virtio_net_hdr nethdr;
 	struct rte_mbuf *mbuf;
 	uint16_t descs; /* num of descs inflight */
 	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 391fb82f0e..6f5bd21946 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2488,10 +2488,10 @@  copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
 }
 
 static __rte_always_inline int
-copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		  struct buf_vector *buf_vec, uint16_t nr_vec,
 		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
-		  bool legacy_ol_flags)
+		  bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
 {
 	uint32_t buf_avail, buf_offset, buf_len;
 	uint64_t buf_addr, buf_iova;
@@ -2502,6 +2502,8 @@  copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct virtio_net_hdr *hdr = NULL;
 	/* A counter to avoid desc dead loop chain */
 	uint16_t vec_idx = 0;
+	struct vhost_async *async = vq->async;
+	struct async_inflight_info *pkts_info;
 
 	buf_addr = buf_vec[vec_idx].buf_addr;
 	buf_iova = buf_vec[vec_idx].buf_iova;
@@ -2539,6 +2541,7 @@  copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(++vec_idx >= nr_vec))
 			goto error;
 		buf_addr = buf_vec[vec_idx].buf_addr;
+		buf_iova = buf_vec[vec_idx].buf_iova;
 		buf_len = buf_vec[vec_idx].buf_len;
 
 		buf_offset = 0;
@@ -2554,12 +2557,25 @@  copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	mbuf_offset = 0;
 	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
+
+	if (is_async) {
+		pkts_info = async->pkts_info;
+		if (async_iter_initialize(dev, async))
+			return -1;
+	}
+
 	while (1) {
 		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
 
-		sync_fill_seg(dev, vq, cur, mbuf_offset,
-			      buf_addr + buf_offset,
-			      buf_iova + buf_offset, cpy_len, false);
+		if (is_async) {
+			if (async_fill_seg(dev, vq, cur, mbuf_offset,
+					   buf_iova + buf_offset, cpy_len, false) < 0)
+				goto error;
+		} else {
+			sync_fill_seg(dev, vq, cur, mbuf_offset,
+				buf_addr + buf_offset,
+				buf_iova + buf_offset, cpy_len, false);
+		}
 
 		mbuf_avail  -= cpy_len;
 		mbuf_offset += cpy_len;
@@ -2608,11 +2624,20 @@  copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	prev->data_len = mbuf_offset;
 	m->pkt_len    += mbuf_offset;
 
-	if (hdr)
-		vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
+	if (is_async) {
+		async_iter_finalize(async);
+		if (hdr)
+			pkts_info[slot_idx].nethdr = *hdr;
+	} else {
+		if (hdr)
+			vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
+	}
 
 	return 0;
 error:
+	if (is_async)
+		async_iter_cancel(async);
+
 	return -1;
 }
 
@@ -2744,8 +2769,8 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			break;
 		}
 
-		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
-				mbuf_pool, legacy_ol_flags);
+		err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
+				mbuf_pool, legacy_ol_flags, 0, false);
 		if (unlikely(err)) {
 			if (!allocerr_warned) {
 				VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
@@ -2756,6 +2781,7 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			i++;
 			break;
 		}
+
 	}
 
 	if (dropped)
@@ -2937,8 +2963,8 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
-				mbuf_pool, legacy_ol_flags);
+	err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
+				mbuf_pool, legacy_ol_flags, 0, false);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",