[2/2] net/virtio: on demand cleanup when doing in order xmit

Message ID 20190827102407.65106-2-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series [1/2] net/virtio: update stats when in order xmit done

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Marvin Liu Aug. 27, 2019, 10:24 a.m. UTC
Check whether enough descriptors have been freed before each enqueue
operation. If more space is needed, try to clean up the used ring on
demand. This gives more chances to reclaim used descriptors and thus
helps RFC2544 performance.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
 drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
 1 file changed, 54 insertions(+), 19 deletions(-)
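
The core of the change is visible in the call sites below: before each
enqueue the transmit path computes its descriptor deficit and reclaims
used descriptors only when that deficit is positive. A condensed sketch
of the pattern, reduced from the diff (enqueue details omitted):

	need = slots - vq->vq_free_cnt;	/* descriptors still missing */
	if (unlikely(need > 0)) {
		/* Reclaim used descriptors on demand; a positive return
		 * value means the shortfall could not be covered. */
		nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
		if (unlikely(nb_left > 0))
			break;	/* ring is genuinely full */
	}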
  

Comments

Tiwei Bie Sept. 10, 2019, 6:16 a.m. UTC | #1
On Tue, Aug 27, 2019 at 06:24:07PM +0800, Marvin Liu wrote:
> Check whether freed descriptors are enough before enqueue operation.
> If more space is needed, will try to cleanup used ring on demand. It
> can give more chances to cleanup used ring, thus help RFC2544 perf.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
>  drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
>  1 file changed, 54 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 5d4ed524e..550b0aa62 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -317,7 +317,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
>  }
>  
>  /* Cleanup from completed inorder transmits. */
> -static void
> +static __rte_always_inline void
>  virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
>  {
>  	uint16_t i, idx = vq->vq_used_cons_idx;
> @@ -2152,6 +2152,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	return nb_tx;
>  }
>  
> +static __rte_always_inline int
> +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> +{
> +	uint16_t nb_used;
> +	struct virtio_hw *hw = vq->hw;
> +
> +	nb_used = VIRTQUEUE_NUSED(vq);
> +	virtio_rmb(hw->weak_barriers);
> +	need = RTE_MIN(need, (int)nb_used);
> +
> +	virtio_xmit_cleanup_inorder(vq, need);
> +
> +	return (need - vq->vq_free_cnt);

It's possible that `need` has been changed by

	need = RTE_MIN(need, (int)nb_used);

so it no longer reflects the actual need.

Besides, you are passing (nb_inorder_pkts - vq->vq_free_cnt)
as the `need`; in that case you can't subtract vq->vq_free_cnt
again to see whether the need has been met.
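
One way to make the return value meaningful for both call sites is to
record the total number of free descriptors the caller needs before
cleaning up, and compare against vq_free_cnt afterwards. A sketch only
(names and structure are an assumption, not necessarily what v2 will do):

	static __rte_always_inline int
	virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
	{
		uint16_t nb_used, nb_clean, nb_descs;
		struct virtio_hw *hw = vq->hw;

		/* Free descriptors the caller needs in total, not the deficit. */
		nb_descs = vq->vq_free_cnt + need;

		nb_used = VIRTQUEUE_NUSED(vq);
		virtio_rmb(hw->weak_barriers);
		nb_clean = RTE_MIN(need, (int)nb_used);

		virtio_xmit_cleanup_inorder(vq, nb_clean);

		/* > 0 means the request still cannot be satisfied. */
		return nb_descs - vq->vq_free_cnt;
	}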

> +}
> +
>  uint16_t
>  virtio_xmit_pkts_inorder(void *tx_queue,
>  			struct rte_mbuf **tx_pkts,
> @@ -2161,8 +2176,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	struct virtqueue *vq = txvq->vq;
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
> -	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> +	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
>  	struct rte_mbuf *inorder_pkts[nb_pkts];
> +	int need, nb_left;
>  
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -2175,17 +2191,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	nb_used = VIRTQUEUE_NUSED(vq);
>  
>  	virtio_rmb(hw->weak_barriers);
> -	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> -		virtio_xmit_cleanup_inorder(vq, nb_used);
> -
> -	if (unlikely(!vq->vq_free_cnt))
> +	if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
>  		virtio_xmit_cleanup_inorder(vq, nb_used);
>  
> -	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> -
> -	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> +	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
> -		int slots, need;
> +		int slots;
>  
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> @@ -2203,6 +2214,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  		}
>  
>  		if (nb_inorder_pkts) {
> +			need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> +

There is no need to add blank lines here.

> +			if (unlikely(need > 0)) {
> +				nb_left = virtio_xmit_try_cleanup_inorder(vq,
> +									need);
> +
> +				if (unlikely(nb_left > 0)) {
> +					PMD_TX_LOG(ERR,
> +						"No free tx descriptors to "
> +						"transmit");
> +					nb_inorder_pkts = vq->vq_free_cnt;

You need to handle nb_tx as well.
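
That is, packets already buffered into inorder_pkts have been counted in
nb_tx, so the ones that will not fit must be dropped from the count before
breaking out. Roughly (an illustration only, not the author's v2):

	if (unlikely(nb_left > 0)) {
		PMD_TX_LOG(ERR,
			"No free tx descriptors to transmit");
		/* Packets buffered beyond the free slots will not be
		 * enqueued; do not report them as transmitted. */
		nb_tx -= (nb_inorder_pkts - vq->vq_free_cnt);
		nb_inorder_pkts = vq->vq_free_cnt;
		break;
	}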

> +					break;
> +				}
> +			}
> +
>  			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
>  							nb_inorder_pkts);
>  			nb_inorder_pkts = 0;
> @@ -2211,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  		slots = txm->nb_segs + 1;
>  		need = slots - vq->vq_free_cnt;
>  		if (unlikely(need > 0)) {
> -			nb_used = VIRTQUEUE_NUSED(vq);
> -			virtio_rmb(hw->weak_barriers);
> -			need = RTE_MIN(need, (int)nb_used);
> +			nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
>  
> -			virtio_xmit_cleanup_inorder(vq, need);
> -
> -			need = slots - vq->vq_free_cnt;
> -
> -			if (unlikely(need > 0)) {
> +			if (unlikely(nb_left > 0)) {
>  				PMD_TX_LOG(ERR,
>  					"No free tx descriptors to transmit");
>  				break;
> @@ -2232,9 +2253,23 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	}
>  
>  	/* Transmit all inorder packets */
> -	if (nb_inorder_pkts)
> +	if (nb_inorder_pkts) {
> +		need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> +		if (unlikely(need > 0)) {
> +			nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
> +
> +			if (unlikely(nb_left > 0)) {
> +				PMD_TX_LOG(ERR,
> +					"No free tx descriptors to transmit");
> +				nb_inorder_pkts = vq->vq_free_cnt;
> +				nb_tx -= nb_left;
> +			}
> +		}
> +
>  		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
>  						nb_inorder_pkts);
> +	}
>  
>  	txvq->stats.packets += nb_tx;
>  
> -- 
> 2.17.1
>
  
Marvin Liu Sept. 10, 2019, 7:44 a.m. UTC | #2
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Tuesday, September 10, 2019 2:17 PM
> To: Liu, Yong <yong.liu@intel.com>
> Cc: dev@dpdk.org; maxime.coquelin@redhat.com; Wang, Zhihong
> <zhihong.wang@intel.com>
> Subject: Re: [PATCH 2/2] net/virtio: on demand cleanup when doing in order
> xmit
> 
> On Tue, Aug 27, 2019 at 06:24:07PM +0800, Marvin Liu wrote:
> > Check whether freed descriptors are enough before enqueue operation.
> > If more space is needed, will try to cleanup used ring on demand. It
> > can give more chances to cleanup used ring, thus help RFC2544 perf.
> >
> > Signed-off-by: Marvin Liu <yong.liu@intel.com>
> > ---
> >  drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
> >  1 file changed, 54 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> > index 5d4ed524e..550b0aa62 100644
> > --- a/drivers/net/virtio/virtio_rxtx.c
> > +++ b/drivers/net/virtio/virtio_rxtx.c
> > @@ -317,7 +317,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t
> num)
> >  }
> >
> >  /* Cleanup from completed inorder transmits. */
> > -static void
> > +static __rte_always_inline void
> >  virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> >  {
> >  	uint16_t i, idx = vq->vq_used_cons_idx;
> > @@ -2152,6 +2152,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
> >  	return nb_tx;
> >  }
> >
> > +static __rte_always_inline int
> > +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> > +{
> > +	uint16_t nb_used;
> > +	struct virtio_hw *hw = vq->hw;
> > +
> > +	nb_used = VIRTQUEUE_NUSED(vq);
> > +	virtio_rmb(hw->weak_barriers);
> > +	need = RTE_MIN(need, (int)nb_used);
> > +
> > +	virtio_xmit_cleanup_inorder(vq, need);
> > +
> > +	return (need - vq->vq_free_cnt);
> 
> It's possible that the `need` has been changed by
> 
> 	need = RTE_MIN(need, (int)nb_used);
> 
> So it can't reflect the actual needs.
> 
> Besides, you are passing (nb_inorder_pkts - vq->vq_free_cnt)
> as the `need`, here you can't subtract vq->vq_free_cnt to see
> whether the needs have been met.
> 

Tiwei,
Thanks for the reminder; this calculation can't reflect the number of packets left over. I will fix it in v2.

Regards,
Marvin

> > +}
> > +
> >  uint16_t
> >  virtio_xmit_pkts_inorder(void *tx_queue,
> >  			struct rte_mbuf **tx_pkts,
> > @@ -2161,8 +2176,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> >  	struct virtqueue *vq = txvq->vq;
> >  	struct virtio_hw *hw = vq->hw;
> >  	uint16_t hdr_size = hw->vtnet_hdr_size;
> > -	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> > +	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
> >  	struct rte_mbuf *inorder_pkts[nb_pkts];
> > +	int need, nb_left;
> >
> >  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
> >  		return nb_tx;
> > @@ -2175,17 +2191,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> >  	nb_used = VIRTQUEUE_NUSED(vq);
> >
> >  	virtio_rmb(hw->weak_barriers);
> > -	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> > -		virtio_xmit_cleanup_inorder(vq, nb_used);
> > -
> > -	if (unlikely(!vq->vq_free_cnt))
> > +	if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
> >  		virtio_xmit_cleanup_inorder(vq, nb_used);
> >
> > -	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> > -
> > -	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> > +	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> >  		struct rte_mbuf *txm = tx_pkts[nb_tx];
> > -		int slots, need;
> > +		int slots;
> >
> >  		/* optimize ring usage */
> >  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> > @@ -2203,6 +2214,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> >  		}
> >
> >  		if (nb_inorder_pkts) {
> > +			need = nb_inorder_pkts - vq->vq_free_cnt;
> > +
> > +
> 
> There is no need to add blank lines here.
> 
> > +			if (unlikely(need > 0)) {
> > +				nb_left = virtio_xmit_try_cleanup_inorder(vq,
> > +									need);
> > +
> > +				if (unlikely(nb_left > 0)) {
> > +					PMD_TX_LOG(ERR,
> > +						"No free tx descriptors to "
> > +						"transmit");
> > +					nb_inorder_pkts = vq->vq_free_cnt;
> 
> You need to handle nb_tx as well.
> 
> > +					break;
> > +				}
> > +			}
> > +
> >  			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> >  							nb_inorder_pkts);
> >  			nb_inorder_pkts = 0;
> > @@ -2211,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> >  		slots = txm->nb_segs + 1;
> >  		need = slots - vq->vq_free_cnt;
> >  		if (unlikely(need > 0)) {
> > -			nb_used = VIRTQUEUE_NUSED(vq);
> > -			virtio_rmb(hw->weak_barriers);
> > -			need = RTE_MIN(need, (int)nb_used);
> > +			nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
> >
> > -			virtio_xmit_cleanup_inorder(vq, need);
> > -
> > -			need = slots - vq->vq_free_cnt;
> > -
> > -			if (unlikely(need > 0)) {
> > +			if (unlikely(nb_left > 0)) {
> >  				PMD_TX_LOG(ERR,
> >  					"No free tx descriptors to transmit");
> >  				break;
> > @@ -2232,9 +2253,23 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> >  	}
> >
> >  	/* Transmit all inorder packets */
> > -	if (nb_inorder_pkts)
> > +	if (nb_inorder_pkts) {
> > +		need = nb_inorder_pkts - vq->vq_free_cnt;
> > +
> > +		if (unlikely(need > 0)) {
> > +			nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
> > +
> > +			if (unlikely(nb_left > 0)) {
> > +				PMD_TX_LOG(ERR,
> > +					"No free tx descriptors to transmit");
> > +				nb_inorder_pkts = vq->vq_free_cnt;
> > +				nb_tx -= nb_left;
> > +			}
> > +		}
> > +
> >  		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> >  						nb_inorder_pkts);
> > +	}
> >
> >  	txvq->stats.packets += nb_tx;
> >
> > --
> > 2.17.1
> >
  

Patch

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 5d4ed524e..550b0aa62 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -317,7 +317,7 @@  virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
 }
 
 /* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
 {
 	uint16_t i, idx = vq->vq_used_cons_idx;
@@ -2152,6 +2152,21 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_tx;
 }
 
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+	uint16_t nb_used;
+	struct virtio_hw *hw = vq->hw;
+
+	nb_used = VIRTQUEUE_NUSED(vq);
+	virtio_rmb(hw->weak_barriers);
+	need = RTE_MIN(need, (int)nb_used);
+
+	virtio_xmit_cleanup_inorder(vq, need);
+
+	return (need - vq->vq_free_cnt);
+}
+
 uint16_t
 virtio_xmit_pkts_inorder(void *tx_queue,
 			struct rte_mbuf **tx_pkts,
@@ -2161,8 +2176,9 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
 	struct rte_mbuf *inorder_pkts[nb_pkts];
+	int need, nb_left;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2175,17 +2191,12 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb(hw->weak_barriers);
-	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
-		virtio_xmit_cleanup_inorder(vq, nb_used);
-
-	if (unlikely(!vq->vq_free_cnt))
+	if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
 		virtio_xmit_cleanup_inorder(vq, nb_used);
 
-	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
-	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int slots, need;
+		int slots;
 
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -2203,6 +2214,22 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 		}
 
 		if (nb_inorder_pkts) {
+			need = nb_inorder_pkts - vq->vq_free_cnt;
+
+
+			if (unlikely(need > 0)) {
+				nb_left = virtio_xmit_try_cleanup_inorder(vq,
+									need);
+
+				if (unlikely(nb_left > 0)) {
+					PMD_TX_LOG(ERR,
+						"No free tx descriptors to "
+						"transmit");
+					nb_inorder_pkts = vq->vq_free_cnt;
+					break;
+				}
+			}
+
 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 							nb_inorder_pkts);
 			nb_inorder_pkts = 0;
@@ -2211,15 +2238,9 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 		slots = txm->nb_segs + 1;
 		need = slots - vq->vq_free_cnt;
 		if (unlikely(need > 0)) {
-			nb_used = VIRTQUEUE_NUSED(vq);
-			virtio_rmb(hw->weak_barriers);
-			need = RTE_MIN(need, (int)nb_used);
+			nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
 
-			virtio_xmit_cleanup_inorder(vq, need);
-
-			need = slots - vq->vq_free_cnt;
-
-			if (unlikely(need > 0)) {
+			if (unlikely(nb_left > 0)) {
 				PMD_TX_LOG(ERR,
 					"No free tx descriptors to transmit");
 				break;
@@ -2232,9 +2253,23 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 	}
 
 	/* Transmit all inorder packets */
-	if (nb_inorder_pkts)
+	if (nb_inorder_pkts) {
+		need = nb_inorder_pkts - vq->vq_free_cnt;
+
+		if (unlikely(need > 0)) {
+			nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
+
+			if (unlikely(nb_left > 0)) {
+				PMD_TX_LOG(ERR,
+					"No free tx descriptors to transmit");
+				nb_inorder_pkts = vq->vq_free_cnt;
+				nb_tx -= nb_left;
+			}
+		}
+
 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 						nb_inorder_pkts);
+	}
 
 	txvq->stats.packets += nb_tx;