[v3] vhost: fix mbuf allocation failures

Message ID: 20200508111751.82341-1-Sivaprasad.Tummala@intel.com
State: Accepted, archived
Delegated to: Maxime Coquelin

Checks

Context Check Description
ci/iol-testing fail Testing issues
ci/iol-mellanox-Performance success Performance Testing PASS
ci/Intel-compilation fail Compilation issues
ci/travis-robot warning Travis build: failed
ci/iol-nxp-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/checkpatch success coding style OK

Commit Message

Sivaprasad Tummala May 8, 2020, 11:17 a.m. UTC
The vhost buffer allocation succeeds only for packets that fit into a
linear buffer. If it fails, the vhost library is expected to drop the
current packet and skip to the next one.

The patch fixes the error scenario by skipping to the next packet.
Note: Drop counters are not currently supported.

Fixes: c3ff0ac70acb ("vhost: improve performance by supporting large buffer")
Cc: fbl@sysclose.org
Cc: stable@dpdk.org

v3:
 * fixed review comments - Flavio Leitner

v2:
 * fixed review comments - Maxime Coquelin
 * fixed mbuf alloc errors for packed virtqueues - Maxime Coquelin
 * fixed mbuf copy errors - Flavio Leitner

Signed-off-by: Sivaprasad Tummala <Sivaprasad.Tummala@intel.com>
---
 lib/librte_vhost/virtio_net.c | 70 +++++++++++++++++++++++++++--------
 1 file changed, 55 insertions(+), 15 deletions(-)
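
The caller-visible effect of the fix is that a dequeue burst may now come
back short: packets whose linear mbuf could not be allocated (or whose
descriptors could not be copied) are dropped inside the library and excluded
from the returned count. A minimal caller-side sketch, assuming a simple
polling loop; the function name, burst size and processing placeholder are
illustrative and not part of this patch:

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_vhost.h>

#define MAX_PKT_BURST 32

/* Drain one vhost virtqueue. Packets the library had to drop are simply
 * not returned, so only the count actually handed back is processed. */
static void
drain_vhost_queue(int vid, uint16_t queue_id, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t nb_rx, i;

	nb_rx = rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool,
			pkts, MAX_PKT_BURST);
	for (i = 0; i < nb_rx; i++) {
		/* forward or otherwise process pkts[i] here */
		rte_pktmbuf_free(pkts[i]);
	}
}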

Comments

Maxime Coquelin May 15, 2020, 7:29 a.m. UTC | #1
On 5/8/20 1:17 PM, Sivaprasad Tummala wrote:
> The vhost buffer allocation succeeds only for packets that fit into a
> linear buffer. If it fails, the vhost library is expected to drop the
> current packet and skip to the next one.
> 
> The patch fixes the error scenario by skipping to the next packet.
> Note: Drop counters are not currently supported.
> 
> Fixes: c3ff0ac70acb ("vhost: improve performance by supporting large buffer")
> Cc: fbl@sysclose.org
> Cc: stable@dpdk.org
> 
> v3:
>  * fixed review comments - Flavio Leitner
> 
> v2:
>  * fixed review comments - Maxime Coquelin
>  * fixed mbuf alloc errors for packed virtqueues - Maxime Coquelin
>  * fixed mbuf copy errors - Flavio Leitner
> 
> Signed-off-by: Sivaprasad Tummala <Sivaprasad.Tummala@intel.com>
> ---
>  lib/librte_vhost/virtio_net.c | 70 +++++++++++++++++++++++++++--------
>  1 file changed, 55 insertions(+), 15 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
Maxime Coquelin May 15, 2020, 8:36 a.m. UTC | #2
On 5/8/20 1:17 PM, Sivaprasad Tummala wrote:
> The vhost buffer allocation succeeds only for packets that fit into a
> linear buffer. If it fails, the vhost library is expected to drop the
> current packet and skip to the next one.
> 
> The patch fixes the error scenario by skipping to the next packet.
> Note: Drop counters are not currently supported.
> 
> Fixes: c3ff0ac70acb ("vhost: improve performance by supporting large buffer")
> Cc: fbl@sysclose.org
> Cc: stable@dpdk.org
> 
> v3:
>  * fixed review comments - Flavio Leitner
> 
> v2:
>  * fixed review comments - Maxime Coquelin
>  * fixed mbuf alloc errors for packed virtqueues - Maxime Coquelin
>  * fixed mbuf copy errors - Flavio Leitner
> 
> Signed-off-by: Sivaprasad Tummala <Sivaprasad.Tummala@intel.com>
> ---
>  lib/librte_vhost/virtio_net.c | 70 +++++++++++++++++++++++++++--------
>  1 file changed, 55 insertions(+), 15 deletions(-)

Applied to dpdk-next-virtio/master.

Thanks,
Maxime
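
The hunks below log allocation and copy failures only once, via a static
allocerr_warned flag, so a depleted mempool cannot flood the data-path log,
and in the split-ring loop they count the failed slot in a local dropped
variable that is subtracted from the returned packet count. A stripped-down
sketch of that drop-counting pattern, with hypothetical names and none of
the vhost descriptor bookkeeping:

#include <stdbool.h>
#include <stdint.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Warn only on the first failure; later failures stay silent. */
static bool allocerr_warned;

static uint16_t
dequeue_loop(struct rte_mempool *pool, struct rte_mbuf **pkts, uint16_t count)
{
	uint16_t i, dropped = 0;

	for (i = 0; i < count; i++) {
		pkts[i] = rte_pktmbuf_alloc(pool);
		if (unlikely(pkts[i] == NULL)) {
			if (!allocerr_warned) {
				RTE_LOG(ERR, USER1,
					"mbuf alloc failed from %s\n",
					pool->name);
				allocerr_warned = true;
			}
			/* the slot is consumed, but no mbuf goes to the caller */
			dropped += 1;
			i++;
			break;
		}
		/* ... copy the descriptor chain into pkts[i] ... */
	}

	return i - dropped;
}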

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1fc30c681..a85d77897 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1674,6 +1674,8 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
 	uint16_t i;
 	uint16_t free_entries;
+	uint16_t dropped = 0;
+	static bool allocerr_warned;
 
 	if (unlikely(dev->dequeue_zero_copy)) {
 		struct zcopy_mbuf *zmbuf, *next;
@@ -1737,13 +1739,35 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			update_shadow_used_ring_split(vq, head_idx, 0);
 
 		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-		if (unlikely(pkts[i] == NULL))
+		if (unlikely(pkts[i] == NULL)) {
+			/*
+			 * mbuf allocation fails for jumbo packets when external
+			 * buffer allocation is not allowed and linear buffer
+			 * is required. Drop this packet.
+			 */
+			if (!allocerr_warned) {
+				VHOST_LOG_DATA(ERR,
+					"Failed mbuf alloc of size %d from %s on %s.\n",
+					buf_len, mbuf_pool->name, dev->ifname);
+				allocerr_warned = true;
+			}
+			dropped += 1;
+			i++;
 			break;
+		}
 
 		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
 				mbuf_pool);
 		if (unlikely(err)) {
 			rte_pktmbuf_free(pkts[i]);
+			if (!allocerr_warned) {
+				VHOST_LOG_DATA(ERR,
+					"Failed to copy desc to mbuf on %s.\n",
+					dev->ifname);
+				allocerr_warned = true;
+			}
+			dropped += 1;
+			i++;
 			break;
 		}
 
@@ -1753,6 +1777,8 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			zmbuf = get_zmbuf(vq);
 			if (!zmbuf) {
 				rte_pktmbuf_free(pkts[i]);
+				dropped += 1;
+				i++;
 				break;
 			}
 			zmbuf->mbuf = pkts[i];
@@ -1782,7 +1808,7 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 	}
 
-	return i;
+	return (i - dropped);
 }
 
 static __rte_always_inline int
@@ -1914,6 +1940,7 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 	uint32_t buf_len;
 	uint16_t nr_vec = 0;
 	int err;
+	static bool allocerr_warned;
 
 	if (unlikely(fill_vec_buf_packed(dev, vq,
 					 vq->last_avail_idx, desc_count,
@@ -1924,14 +1951,24 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 
 	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
 	if (unlikely(*pkts == NULL)) {
-		VHOST_LOG_DATA(ERR,
-			"Failed to allocate memory for mbuf.\n");
+		if (!allocerr_warned) {
+			VHOST_LOG_DATA(ERR,
+				"Failed mbuf alloc of size %d from %s on %s.\n",
+				buf_len, mbuf_pool->name, dev->ifname);
+			allocerr_warned = true;
+		}
 		return -1;
 	}
 
 	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
+		if (!allocerr_warned) {
+			VHOST_LOG_DATA(ERR,
+				"Failed to copy desc to mbuf on %s.\n",
+				dev->ifname);
+			allocerr_warned = true;
+		}
 		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
@@ -1946,21 +1983,24 @@  virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct rte_mbuf **pkts)
 {
 
-	uint16_t buf_id, desc_count;
+	uint16_t buf_id, desc_count = 0;
+	int ret;
 
-	if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
-					&desc_count))
-		return -1;
+	ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+					&desc_count);
 
-	if (virtio_net_is_inorder(dev))
-		vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
-							   desc_count);
-	else
-		vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+	if (likely(desc_count > 0)) {
+		if (virtio_net_is_inorder(dev))
+			vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+								   desc_count);
+		else
+			vhost_shadow_dequeue_single_packed(vq, buf_id,
+					desc_count);
 
-	vq_inc_last_avail_packed(vq, desc_count);
+		vq_inc_last_avail_packed(vq, desc_count);
+	}
 
-	return 0;
+	return ret;
 }
 
 static __rte_always_inline int