[v2,1/2] vhost: fix descs count in async vhost packed ring

Message ID 20221011030803.16746-2-cheng1.jiang@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Maxime Coquelin
Series vhost: fix some async vhost index calculation issues

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Jiang, Cheng1 Oct. 11, 2022, 3:08 a.m. UTC
  When vhost receives packets from the front-end using a packed virtqueue, it
might use multiple descriptors for one packet, so we need to calculate and
record the descriptor number for each packet in order to update the available
and used descriptor counters, and to roll back when the DMA ring is full.

Fixes: fe8477ebbd94 ("vhost: support async packed ring dequeue")
Cc: stable@dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
 lib/vhost/virtio_net.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
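
To make the idea behind the fix easier to follow outside the diff context, below is a minimal standalone sketch in plain C (hypothetical shadow_entry structure and values, not the DPDK vhost_async types or API): each packet records its real descriptor count in its shadow entry and advances the available index by that count, instead of a hard-coded 1 that lets the counters drift.

/*
 * Minimal sketch: per-packet descriptor counts in a shadow ring.
 * The second packet spans 3 descriptors, so the available index
 * must advance by 3 for it, not by 1.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct shadow_entry {
	uint16_t id;     /* head buffer id of the packet             */
	uint16_t count;  /* number of descriptors used by the packet */
};

int main(void)
{
	struct shadow_entry shadow[RING_SIZE];
	uint16_t shadow_idx = 0;
	uint16_t last_avail_idx = 0;
	uint16_t desc_count[] = { 1, 3, 2 };   /* descriptors per packet */

	for (uint16_t i = 0; i < 3; i++) {
		shadow[shadow_idx].id = i;
		shadow[shadow_idx].count = desc_count[i]; /* was always 1 before the fix */
		shadow_idx = (shadow_idx + 1) % RING_SIZE;

		/* advance by the real descriptor count of the packet */
		last_avail_idx = (last_avail_idx + desc_count[i]) % RING_SIZE;
	}

	printf("packet 1 shadow count = %u\n", shadow[1].count);
	printf("last_avail_idx = %u (6 descriptors consumed)\n", last_avail_idx);
	return 0;
}

With a hard-coded count of 1, the same three packets would leave last_avail_idx at 3 instead of 6, which is exactly the drift the patch removes.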
  

Comments

Maxime Coquelin Oct. 21, 2022, 8:16 a.m. UTC | #1
On 10/11/22 05:08, Cheng Jiang wrote:
> When vhost receive packets from the front-end using packed virtqueue, it

receives

> might use multiple descriptors for one packet, so we need calculate and

to calculate

> record the descriptor number for each packet to update available
> descriptor counter and used descriptor counter, and rollback when DMA
> ring is full.
> 
> Fixes: fe8477ebbd94 ("vhost: support async packed ring dequeue")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
>   lib/vhost/virtio_net.c | 24 +++++++++++++++---------
>   1 file changed, 15 insertions(+), 9 deletions(-)
> 
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8f4d0f0502..457ac2e92a 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -3548,14 +3548,15 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
>   }
>   
>   static __rte_always_inline void
> -vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq, uint16_t buf_id)
> +vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
> +				uint16_t buf_id, uint16_t count)
>   {
>   	struct vhost_async *async = vq->async;
>   	uint16_t idx = async->buffer_idx_packed;
>   
>   	async->buffers_packed[idx].id = buf_id;
>   	async->buffers_packed[idx].len = 0;
> -	async->buffers_packed[idx].count = 1;
> +	async->buffers_packed[idx].count = count;
>   
>   	async->buffer_idx_packed++;
>   	if (async->buffer_idx_packed >= vq->size)
> @@ -3576,6 +3577,8 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
>   	uint16_t nr_vec = 0;
>   	uint32_t buf_len;
>   	struct buf_vector buf_vec[BUF_VECTOR_MAX];
> +	struct vhost_async *async = vq->async;
> +	struct async_inflight_info *pkts_info = async->pkts_info;
>   	static bool allocerr_warned;
>   
>   	if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, &desc_count,
> @@ -3604,8 +3607,12 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
>   		return -1;
>   	}
>   
> +	pkts_info[slot_idx].descs = desc_count;
> +
>   	/* update async shadow packed ring */
> -	vhost_async_shadow_dequeue_single_packed(vq, buf_id);
> +	vhost_async_shadow_dequeue_single_packed(vq, buf_id, desc_count);
> +
> +	vq_inc_last_avail_packed(vq, desc_count);
>   
>   	return err;
>   }
> @@ -3644,9 +3651,6 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   		}
>   
>   		pkts_info[slot_idx].mbuf = pkt;
> -
> -		vq_inc_last_avail_packed(vq, 1);
> -
>   	}
>   
>   	n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
> @@ -3657,6 +3661,8 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   	pkt_err = pkt_idx - n_xfer;
>   
>   	if (unlikely(pkt_err)) {
> +		uint16_t descs_err = 0;
> +
>   		pkt_idx -= pkt_err;
>   
>   		/**
> @@ -3673,10 +3679,10 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   		}
>   
>   		/* recover available ring */
> -		if (vq->last_avail_idx >= pkt_err) {
> -			vq->last_avail_idx -= pkt_err;
> +		if (vq->last_avail_idx >= descs_err) {
> +			vq->last_avail_idx -= descs_err;
>   		} else {
> -			vq->last_avail_idx += vq->size - pkt_err;
> +			vq->last_avail_idx += vq->size - descs_err;
>   			vq->avail_wrap_counter ^= 1;
>   		}
>   	}

If only the commit message typos need to be fixed, maybe no need to send
a new version.

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
  
Jiang, Cheng1 Oct. 24, 2022, 1:41 a.m. UTC | #2
Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, October 21, 2022 4:16 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Ding, Xuan
> <xuan.ding@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>; Wang,
> YuanX <yuanx.wang@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; He, Xingguang <xingguang.he@intel.com>;
> stable@dpdk.org
> Subject: Re: [PATCH v2 1/2] vhost: fix descs count in async vhost packed ring
> 
> 
> 
> On 10/11/22 05:08, Cheng Jiang wrote:
> > When vhost receive packets from the front-end using packed virtqueue,
> > it
> 
> receives
> 
> > might use multiple descriptors for one packet, so we need calculate
> > and
> 
> to calculate
> 
> > record the descriptor number for each packet to update available
> > descriptor counter and used descriptor counter, and rollback when DMA
> > ring is full.
> >
> > Fixes: fe8477ebbd94 ("vhost: support async packed ring dequeue")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> > ---
> >   lib/vhost/virtio_net.c | 24 +++++++++++++++---------
> >   1 file changed, 15 insertions(+), 9 deletions(-)
> >
> > diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index
> > 8f4d0f0502..457ac2e92a 100644
> > --- a/lib/vhost/virtio_net.c
> > +++ b/lib/vhost/virtio_net.c
> > @@ -3548,14 +3548,15 @@ virtio_dev_tx_async_split_compliant(struct
> virtio_net *dev,
> >   }
> >
> >   static __rte_always_inline void
> > -vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue
> *vq,
> > uint16_t buf_id)
> > +vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue
> *vq,
> > +				uint16_t buf_id, uint16_t count)
> >   {
> >   	struct vhost_async *async = vq->async;
> >   	uint16_t idx = async->buffer_idx_packed;
> >
> >   	async->buffers_packed[idx].id = buf_id;
> >   	async->buffers_packed[idx].len = 0;
> > -	async->buffers_packed[idx].count = 1;
> > +	async->buffers_packed[idx].count = count;
> >
> >   	async->buffer_idx_packed++;
> >   	if (async->buffer_idx_packed >= vq->size) @@ -3576,6 +3577,8 @@
> > virtio_dev_tx_async_single_packed(struct virtio_net *dev,
> >   	uint16_t nr_vec = 0;
> >   	uint32_t buf_len;
> >   	struct buf_vector buf_vec[BUF_VECTOR_MAX];
> > +	struct vhost_async *async = vq->async;
> > +	struct async_inflight_info *pkts_info = async->pkts_info;
> >   	static bool allocerr_warned;
> >
> >   	if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx,
> > &desc_count, @@ -3604,8 +3607,12 @@
> virtio_dev_tx_async_single_packed(struct virtio_net *dev,
> >   		return -1;
> >   	}
> >
> > +	pkts_info[slot_idx].descs = desc_count;
> > +
> >   	/* update async shadow packed ring */
> > -	vhost_async_shadow_dequeue_single_packed(vq, buf_id);
> > +	vhost_async_shadow_dequeue_single_packed(vq, buf_id,
> desc_count);
> > +
> > +	vq_inc_last_avail_packed(vq, desc_count);
> >
> >   	return err;
> >   }
> > @@ -3644,9 +3651,6 @@ virtio_dev_tx_async_packed(struct virtio_net
> *dev, struct vhost_virtqueue *vq,
> >   		}
> >
> >   		pkts_info[slot_idx].mbuf = pkt;
> > -
> > -		vq_inc_last_avail_packed(vq, 1);
> > -
> >   	}
> >
> >   	n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id,
> > async->pkts_idx, @@ -3657,6 +3661,8 @@
> virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue
> *vq,
> >   	pkt_err = pkt_idx - n_xfer;
> >
> >   	if (unlikely(pkt_err)) {
> > +		uint16_t descs_err = 0;
> > +
> >   		pkt_idx -= pkt_err;
> >
> >   		/**
> > @@ -3673,10 +3679,10 @@ virtio_dev_tx_async_packed(struct virtio_net
> *dev, struct vhost_virtqueue *vq,
> >   		}
> >
> >   		/* recover available ring */
> > -		if (vq->last_avail_idx >= pkt_err) {
> > -			vq->last_avail_idx -= pkt_err;
> > +		if (vq->last_avail_idx >= descs_err) {
> > +			vq->last_avail_idx -= descs_err;
> >   		} else {
> > -			vq->last_avail_idx += vq->size - pkt_err;
> > +			vq->last_avail_idx += vq->size - descs_err;
> >   			vq->avail_wrap_counter ^= 1;
> >   		}
> >   	}
> 
> If only the commit message typos need to be fixed, maybe no need to send
> a new version.

Sure, thanks a lot!
Cheng

> 
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> 
> Thanks,
> Maxime
  
Chenbo Xia Oct. 24, 2022, 8:42 a.m. UTC | #3
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Monday, October 24, 2022 9:42 AM
> To: Maxime Coquelin <maxime.coquelin@redhat.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Ding, Xuan
> <xuan.ding@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>; Wang, YuanX
> <yuanx.wang@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>; He,
> Xingguang <xingguang.he@intel.com>; stable@dpdk.org
> Subject: RE: [PATCH v2 1/2] vhost: fix descs count in async vhost packed
> ring
> 
> Hi Maxime,
> 
> > -----Original Message-----
> > From: Maxime Coquelin <maxime.coquelin@redhat.com>
> > Sent: Friday, October 21, 2022 4:16 PM
> > To: Jiang, Cheng1 <cheng1.jiang@intel.com>; Xia, Chenbo
> > <chenbo.xia@intel.com>
> > Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Ding, Xuan
> > <xuan.ding@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>; Wang,
> > YuanX <yuanx.wang@intel.com>; Yang, YvonneX
> > <yvonnex.yang@intel.com>; He, Xingguang <xingguang.he@intel.com>;
> > stable@dpdk.org
> > Subject: Re: [PATCH v2 1/2] vhost: fix descs count in async vhost packed
> ring
> >
> >
> >
> > On 10/11/22 05:08, Cheng Jiang wrote:
> > > When vhost receive packets from the front-end using packed virtqueue,
> > > it
> >
> > receives
> >
> > > might use multiple descriptors for one packet, so we need calculate
> > > and
> >
> > to calculate
> >
> > > record the descriptor number for each packet to update available
> > > descriptor counter and used descriptor counter, and rollback when DMA
> > > ring is full.
> > >
> > > Fixes: fe8477ebbd94 ("vhost: support async packed ring dequeue")
> > > Cc: stable@dpdk.org
> > >
> > > Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> > > ---
> > >   lib/vhost/virtio_net.c | 24 +++++++++++++++---------
> > >   1 file changed, 15 insertions(+), 9 deletions(-)
> > >
> > > diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index
> > > 8f4d0f0502..457ac2e92a 100644
> > > --- a/lib/vhost/virtio_net.c
> > > +++ b/lib/vhost/virtio_net.c
> > > @@ -3548,14 +3548,15 @@ virtio_dev_tx_async_split_compliant(struct
> > virtio_net *dev,
> > >   }
> > >
> > >   static __rte_always_inline void
> > > -vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue
> > *vq,
> > > uint16_t buf_id)
> > > +vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue
> > *vq,
> > > +				uint16_t buf_id, uint16_t count)
> > >   {
> > >   	struct vhost_async *async = vq->async;
> > >   	uint16_t idx = async->buffer_idx_packed;
> > >
> > >   	async->buffers_packed[idx].id = buf_id;
> > >   	async->buffers_packed[idx].len = 0;
> > > -	async->buffers_packed[idx].count = 1;
> > > +	async->buffers_packed[idx].count = count;
> > >
> > >   	async->buffer_idx_packed++;
> > >   	if (async->buffer_idx_packed >= vq->size) @@ -3576,6 +3577,8
> @@
> > > virtio_dev_tx_async_single_packed(struct virtio_net *dev,
> > >   	uint16_t nr_vec = 0;
> > >   	uint32_t buf_len;
> > >   	struct buf_vector buf_vec[BUF_VECTOR_MAX];
> > > +	struct vhost_async *async = vq->async;
> > > +	struct async_inflight_info *pkts_info = async->pkts_info;
> > >   	static bool allocerr_warned;
> > >
> > >   	if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx,
> > > &desc_count, @@ -3604,8 +3607,12 @@
> > virtio_dev_tx_async_single_packed(struct virtio_net *dev,
> > >   		return -1;
> > >   	}
> > >
> > > +	pkts_info[slot_idx].descs = desc_count;
> > > +
> > >   	/* update async shadow packed ring */
> > > -	vhost_async_shadow_dequeue_single_packed(vq, buf_id);
> > > +	vhost_async_shadow_dequeue_single_packed(vq, buf_id,
> > desc_count);
> > > +
> > > +	vq_inc_last_avail_packed(vq, desc_count);
> > >
> > >   	return err;
> > >   }
> > > @@ -3644,9 +3651,6 @@ virtio_dev_tx_async_packed(struct virtio_net
> > *dev, struct vhost_virtqueue *vq,
> > >   		}
> > >
> > >   		pkts_info[slot_idx].mbuf = pkt;
> > > -
> > > -		vq_inc_last_avail_packed(vq, 1);
> > > -
> > >   	}
> > >
> > >   	n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id,
> > > async->pkts_idx, @@ -3657,6 +3661,8 @@
> > virtio_dev_tx_async_packed(struct virtio_net *dev, struct
> vhost_virtqueue
> > *vq,
> > >   	pkt_err = pkt_idx - n_xfer;
> > >
> > >   	if (unlikely(pkt_err)) {
> > > +		uint16_t descs_err = 0;
> > > +
> > >   		pkt_idx -= pkt_err;
> > >
> > >   		/**
> > > @@ -3673,10 +3679,10 @@ virtio_dev_tx_async_packed(struct virtio_net
> > *dev, struct vhost_virtqueue *vq,
> > >   		}
> > >
> > >   		/* recover available ring */
> > > -		if (vq->last_avail_idx >= pkt_err) {
> > > -			vq->last_avail_idx -= pkt_err;
> > > +		if (vq->last_avail_idx >= descs_err) {
> > > +			vq->last_avail_idx -= descs_err;
> > >   		} else {
> > > -			vq->last_avail_idx += vq->size - pkt_err;
> > > +			vq->last_avail_idx += vq->size - descs_err;
> > >   			vq->avail_wrap_counter ^= 1;
> > >   		}
> > >   	}
> >
> > If only the commit message typos need to be fixed, maybe no need to send
> > a new version.

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>

Will fix above typos when applying

Thanks,
Chenbo

> 
> Sure, thanks a lot!
> Cheng
> 
> >
> > Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> >
> > Thanks,
> > Maxime
  

Patch

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8f4d0f0502..457ac2e92a 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3548,14 +3548,15 @@  virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
 }
 
 static __rte_always_inline void
-vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq, uint16_t buf_id)
+vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
+				uint16_t buf_id, uint16_t count)
 {
 	struct vhost_async *async = vq->async;
 	uint16_t idx = async->buffer_idx_packed;
 
 	async->buffers_packed[idx].id = buf_id;
 	async->buffers_packed[idx].len = 0;
-	async->buffers_packed[idx].count = 1;
+	async->buffers_packed[idx].count = count;
 
 	async->buffer_idx_packed++;
 	if (async->buffer_idx_packed >= vq->size)
@@ -3576,6 +3577,8 @@  virtio_dev_tx_async_single_packed(struct virtio_net *dev,
 	uint16_t nr_vec = 0;
 	uint32_t buf_len;
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	struct vhost_async *async = vq->async;
+	struct async_inflight_info *pkts_info = async->pkts_info;
 	static bool allocerr_warned;
 
 	if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, &desc_count,
@@ -3604,8 +3607,12 @@  virtio_dev_tx_async_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
+	pkts_info[slot_idx].descs = desc_count;
+
 	/* update async shadow packed ring */
-	vhost_async_shadow_dequeue_single_packed(vq, buf_id);
+	vhost_async_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+
+	vq_inc_last_avail_packed(vq, desc_count);
 
 	return err;
 }
@@ -3644,9 +3651,6 @@  virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		pkts_info[slot_idx].mbuf = pkt;
-
-		vq_inc_last_avail_packed(vq, 1);
-
 	}
 
 	n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
@@ -3657,6 +3661,8 @@  virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	pkt_err = pkt_idx - n_xfer;
 
 	if (unlikely(pkt_err)) {
+		uint16_t descs_err = 0;
+
 		pkt_idx -= pkt_err;
 
 		/**
@@ -3673,10 +3679,10 @@  virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		/* recover available ring */
-		if (vq->last_avail_idx >= pkt_err) {
-			vq->last_avail_idx -= pkt_err;
+		if (vq->last_avail_idx >= descs_err) {
+			vq->last_avail_idx -= descs_err;
 		} else {
-			vq->last_avail_idx += vq->size - pkt_err;
+			vq->last_avail_idx += vq->size - descs_err;
 			vq->avail_wrap_counter ^= 1;
 		}
 	}
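
For readers tracing the error-recovery hunk above, here is a standalone sketch in plain C (hypothetical values, not the DPDK code) of the rollback arithmetic the fix relies on: when some packets fail to be transferred, last_avail_idx must be rewound by the total number of descriptors those packets consumed (descs_err), not by the packet count (pkt_err), and a rewind across index 0 has to undo the wrap-counter flip.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t vq_size = 8;
	uint16_t last_avail_idx = 2;     /* index already wrapped past the ring end */
	bool avail_wrap_counter = true;
	uint16_t descs_err = 5;          /* descriptors held by the failed packets  */

	/* recover available ring, mirroring the patched branch */
	if (last_avail_idx >= descs_err) {
		last_avail_idx -= descs_err;
	} else {
		last_avail_idx += vq_size - descs_err;  /* rewind across index 0 */
		avail_wrap_counter ^= 1;                /* undo the wrap flip    */
	}

	printf("last_avail_idx = %u, wrap = %d\n", last_avail_idx, (int)avail_wrap_counter);
	return 0;
}

Had the rollback subtracted the packet count instead (say 2 failed packets spanning those 5 descriptors), the index would end up at 0 with the wrap counter unchanged, rather than at the correct position 5 with the wrap counter toggled back.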