
[v2,1/4] vhost: move allocation of mbuf outside of packet enqueue

Message ID f7b08832f86d2fb9a8834b2a19b13f26d3c6857b.1617790501.git.bnemeth@redhat.com (mailing list archive)
State Superseded
Delegated to: Maxime Coquelin
Series [v2,1/4] vhost: move allocation of mbuf outside of packet enqueue

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Balazs Nemeth April 7, 2021, 10:17 a.m. UTC
In preparation for subsequent patches, move mbuf allocation out and
rename virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
function now receives an already allocated mbuf pointer.
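
The dequeue paths are expected to allocate the mbuf first and then
size it for the descriptor, along the lines of (illustrative only,
not the exact hunks below):

	pkt = rte_pktmbuf_alloc(mbuf_pool);
	if (pkt != NULL && virtio_dev_pktmbuf_prep(dev, pkt, buf_len) == 0) {
		/* pkt now has room for buf_len bytes of packet data */
	}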

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 54 ++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 14 deletions(-)

Comments

Maxime Coquelin April 15, 2021, 12:37 p.m. UTC | #1
Hi Balazs,

Hint for future revisions: please add a cover letter when a series
contains multiple patches; it makes handling the series easier for
the maintainer. Also, please use the MAINTAINERS file to Cc the other
maintainers.

On 4/7/21 12:17 PM, Balazs Nemeth wrote:
> In preparation for subsequent patches, move mbuf allocation out and
> rename virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
> function now receives an already allocated mbuf pointer.
> 
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
>  lib/librte_vhost/virtio_net.c | 54 ++++++++++++++++++++++++++---------
>  1 file changed, 40 insertions(+), 14 deletions(-)
> 
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 7f621fb6d..666e7fdb8 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -2166,6 +2166,23 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
>  	return NULL;
>  }
>  
> +static __rte_always_inline int
> +virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
> +			 uint32_t data_len)
> +{
> +	if (rte_pktmbuf_tailroom(pkt) >= data_len)
> +		return 0;
> +
> +	/* attach an external buffer if supported */
> +	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
> +		return 0;
> +
> +	/* check if chained buffers are allowed */
> +	if (!dev->linearbuf)
> +		return 0;

Add new line here.

> +	return 1;

Maybe return a negative value for consistency.
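Something like this, with both the blank line and the negative return
value (untested sketch):

	static __rte_always_inline int
	virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
				 uint32_t data_len)
	{
		if (rte_pktmbuf_tailroom(pkt) >= data_len)
			return 0;

		/* attach an external buffer if supported */
		if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
			return 0;

		/* check if chained buffers are allowed */
		if (!dev->linearbuf)
			return 0;

		/* no way to provide data_len bytes in this mbuf */
		return -1;
	}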

> +}
> +
>  static __rte_noinline uint16_t
>  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
> @@ -2259,7 +2276,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  static __rte_always_inline int
>  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
>  				 struct vhost_virtqueue *vq,
> -				 struct rte_mempool *mbuf_pool,
>  				 struct rte_mbuf **pkts,
>  				 uint16_t avail_idx,
>  				 uintptr_t *desc_addrs,
> @@ -2304,8 +2320,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
>  	}
>  
>  	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> -		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
> -		if (!pkts[i])
> +		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
>  			goto free_buf;
>  	}
>  
> @@ -2326,16 +2341,12 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
>  	return 0;
>  
>  free_buf:

Nothing is freed here anymore, so better to rename the label.
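For example (the name is just a suggestion):

	err:
		return -1;

with the matching "goto free_buf;" above updated to "goto err;".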

> -	for (i = 0; i < PACKED_BATCH_SIZE; i++)
> -		rte_pktmbuf_free(pkts[i]);
> -
>  	return -1;
>  }
>  
>  static __rte_always_inline int
>  virtio_dev_tx_batch_packed(struct virtio_net *dev,
>  			   struct vhost_virtqueue *vq,
> -			   struct rte_mempool *mbuf_pool,
>  			   struct rte_mbuf **pkts)
>  {
>  	uint16_t avail_idx = vq->last_avail_idx;
> @@ -2345,8 +2356,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
>  	uint16_t ids[PACKED_BATCH_SIZE];
>  	uint16_t i;
>  
> -	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
> -					     avail_idx, desc_addrs, ids))
> +	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
> +					     desc_addrs, ids))
>  		return -1;
>  
>  	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
> @@ -2396,8 +2407,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
>  					 VHOST_ACCESS_RO) < 0))
>  		return -1;
>  
> -	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
> -	if (unlikely(*pkts == NULL)) {
> +
> +	if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
>  		if (!allocerr_warned) {
>  			VHOST_LOG_DATA(ERR,
>  				"Failed mbuf alloc of size %d from %s on %s.\n",
> @@ -2416,7 +2427,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
>  				dev->ifname);
>  			allocerr_warned = true;
>  		}
> -		rte_pktmbuf_free(*pkts);
>  		return -1;
>  	}
>  
> @@ -2459,22 +2469,38 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>  {
>  	uint32_t pkt_idx = 0;
>  	uint32_t remained = count;
> +	uint16_t i;
>  
>  	do {
>  		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
>  
>  		if (remained >= PACKED_BATCH_SIZE) {
> -			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
> +			vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> +				pkts[pkt_idx + i] =
> +					rte_pktmbuf_alloc(mbuf_pool);

No check on whether the alloc succeeded?

Also, we recently moved to lines of up to 100 characters, so maybe it
can fit on a single line here now.
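One way to address both points in one statement (untested;
rte_pktmbuf_alloc_bulk() returns 0 only if all requested mbufs were
allocated):

	if (unlikely(rte_pktmbuf_alloc_bulk(mbuf_pool, &pkts[pkt_idx], PACKED_BATCH_SIZE)))
		break;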

> +			}
> +
> +			if (!virtio_dev_tx_batch_packed(dev, vq,
>  							&pkts[pkt_idx])) {

Ditto

>  				pkt_idx += PACKED_BATCH_SIZE;
>  				remained -= PACKED_BATCH_SIZE;
> +
>  				continue;
> +			} else {
> +				vhost_for_each_try_unroll(i, 0,
> +					PACKED_BATCH_SIZE) {

Same here

> +					rte_pktmbuf_free(pkts[pkt_idx + i]);
> +				}
>  			}
>  		}
>  
> +		pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);

Here also you may want to ensure the allocation succeeded.
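e.g. (untested):

	pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
	if (unlikely(pkts[pkt_idx] == NULL))
		break;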

> +
>  		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
> -						&pkts[pkt_idx]))
> +						&pkts[pkt_idx])) {
> +			rte_pktmbuf_free(pkts[pkt_idx]);
>  			break;
> +		}
>  		pkt_idx++;
>  		remained--;
>  
> 

Maxime

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 7f621fb6d..666e7fdb8 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2166,6 +2166,23 @@  virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 	return NULL;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+	return 1;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -2259,7 +2276,6 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2304,8 +2320,7 @@  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
 			goto free_buf;
 	}
 
@@ -2326,16 +2341,12 @@  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	return 0;
 
 free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2345,8 +2356,8 @@  virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2396,8 +2407,8 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2416,7 +2427,6 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2459,22 +2469,38 @@  virtio_dev_tx_packed(struct virtio_net *dev,
 {
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
+	uint16_t i;
 
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+				pkts[pkt_idx + i] =
+					rte_pktmbuf_alloc(mbuf_pool);
+			}
+
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
+
 				continue;
+			} else {
+				vhost_for_each_try_unroll(i, 0,
+					PACKED_BATCH_SIZE) {
+					rte_pktmbuf_free(pkts[pkt_idx + i]);
+				}
 			}
 		}
 
+		pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
+
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						&pkts[pkt_idx])) {
+			rte_pktmbuf_free(pkts[pkt_idx]);
 			break;
+		}
 		pkt_idx++;
 		remained--;