[v4] vhost: allocate and free packets in bulk

Message ID: f2eccbfa8a1f7aaa00f2da69ea9cb9a959f28e4f.1618566506.git.bnemeth@redhat.com (mailing list archive)
State: Superseded
Delegated to: Maxime Coquelin
Series: [v4] vhost: allocate and free packets in bulk

Checks

Context Check Description
ci/Intel-compilation fail Compilation issues
ci/checkpatch success coding style OK

Commit Message

Balazs Nemeth April 16, 2021, 9:48 a.m. UTC
Move mbuf allocation out of the per-packet dequeue paths and perform all
allocations in bulk. The same goes for freeing packets. In the process,
also introduce virtio_dev_pktmbuf_prep and make virtio_dev_pktmbuf_alloc
use it.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 80 +++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 36 deletions(-)
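
For reference, a minimal sketch of the bulk allocate/free pattern this patch
adopts, built on the existing rte_pktmbuf_alloc_bulk()/rte_pktmbuf_free_bulk()
APIs. The demo_dequeue_burst() and demo_fill_pkt() names below are illustrative
stand-ins, not code from the patch:

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical stand-in for the per-packet prep/copy step. */
static int
demo_fill_pkt(struct rte_mbuf *pkt)
{
	return rte_pktmbuf_tailroom(pkt) > 0 ? 0 : -1;
}

/*
 * Allocate the whole burst up front, let the per-packet step fail without
 * freeing anything, and release only the unused tail in a single call.
 */
static uint16_t
demo_dequeue_burst(struct rte_mempool *mp, struct rte_mbuf **pkts,
		   uint16_t count)
{
	uint16_t done = 0;

	/* One bulk allocation instead of count individual allocations. */
	if (rte_pktmbuf_alloc_bulk(mp, pkts, count))
		return 0;

	while (done < count) {
		if (demo_fill_pkt(pkts[done]) < 0)
			break;
		done++;
	}

	/* Free only the mbufs that were never handed to the caller. */
	if (done != count)
		rte_pktmbuf_free_bulk(&pkts[done], count - done);

	return done;
}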

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ff39878609..95d2454de0 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2134,6 +2134,24 @@  virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
 	return 0;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+
+	return -1;
+}
+
 /*
  * Allocate a host supported pktmbuf.
  */
@@ -2149,23 +2167,15 @@  virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 		return NULL;
 	}
 
-	if (rte_pktmbuf_tailroom(pkt) >= data_len)
-		return pkt;
+	if (virtio_dev_pktmbuf_prep(dev, pkt, data_len)) {
+		/* Data doesn't fit into the buffer and the host supports
+		 * only linear buffers
+		 */
+		rte_pktmbuf_free(pkt);
+		return NULL;
+	}
 
-	/* attach an external buffer if supported */
-	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
-		return pkt;
-
-	/* check if chained buffers are allowed */
-	if (!dev->linearbuf)
-		return pkt;
-
-	/* Data doesn't fit into the buffer and the host supports
-	 * only linear buffers
-	 */
-	rte_pktmbuf_free(pkt);
-
-	return NULL;
+	return pkt;
 }
 
 static __rte_noinline uint16_t
@@ -2261,7 +2271,6 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2306,9 +2315,8 @@  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
-			goto free_buf;
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2316,7 +2324,7 @@  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
-			goto free_buf;
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
@@ -2327,17 +2335,13 @@  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	return 0;
 
-free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
+err:
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2347,8 +2351,8 @@  virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2381,7 +2385,7 @@  static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2398,8 +2402,7 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2409,7 +2412,7 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2418,7 +2421,6 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2429,7 +2431,7 @@  static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2462,11 +2464,14 @@  virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
 
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
+		return 0;
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
@@ -2475,13 +2480,16 @@  virtio_dev_tx_packed(struct virtio_net *dev,
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						pkts[pkt_idx]))
 			break;
 		pkt_idx++;
 		remained--;
 
 	} while (remained);
 
+	if (pkt_idx != count)
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);