net/virtio: fix refill order in packed ring datapath

Message ID 20210708095801.23973-1-cheng1.jiang@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Maxime Coquelin
Series net/virtio: fix refill order in packed ring datapath

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/github-robot success github build: passed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-testing fail Testing issues
ci/intel-Testing success Testing PASS
ci/iol-abi-testing success Testing PASS

Commit Message

Jiang, Cheng1 July 8, 2021, 9:58 a.m. UTC
The front-end should refill the descriptor with the mbuf indicated by
the buff_id rather than the index of the used descriptor. The back-end
may return buffers out of order if async copy mode is enabled.

When initializing the rxq, refill the descriptors in order, since the
buff_id is not available at that time.

Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
Cc: stable@dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Signed-off-by: Marvin Liu <yong.liu@intel.com>

--
2.17.1
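
To see the bug in miniature, here is a standalone sketch (plain C, not
the driver code; the descx array below stands in for vq->vq_descx[] and
used_id for the id field the device writes into each used slot) of why
the refill must follow the buffer id echoed back by the device rather
than the ring slot index:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 4

int main(void)
{
	/* Per-buffer-id bookkeeping, standing in for vq->vq_descx[]. */
	const char *descx[RING_SIZE] = { "mbuf0", "mbuf1", "mbuf2", "mbuf3" };
	/* Ids the device wrote into the used slots; with async copy
	 * the back-end may complete buffers out of order. */
	uint16_t used_id[RING_SIZE] = { 2, 0, 3, 1 };
	uint16_t slot;

	for (slot = 0; slot < RING_SIZE; slot++) {
		/* Buggy: indexing the bookkeeping by slot position. */
		const char *wrong = descx[slot];
		/* Fixed: follow the id returned in the used slot. */
		const char *right = descx[used_id[slot]];

		printf("slot %u: by-index=%s by-id=%s\n", slot, wrong, right);
	}
	return 0;
}

With in-order completion the two lookups agree, which is why the bug
only surfaces once the back-end completes copies asynchronously.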
  

Comments

Maxime Coquelin July 19, 2021, 8:30 a.m. UTC | #1
On 7/8/21 11:58 AM, Cheng Jiang wrote:
> The front-end should refill the descriptor with the mbuf indicated by
> the buff_id rather than the index of the used descriptor. The back-end
> may return buffers out of order if async copy mode is enabled.
> 
> When initializing the rxq, refill the descriptors in order, since the
> buff_id is not available at that time.
> 
> Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
  
Chenbo Xia July 20, 2021, 2:48 a.m. UTC | #2
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Thursday, July 8, 2021 5:58 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Liu, Yong
> <yong.liu@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>; Jiang, Cheng1
> <cheng1.jiang@intel.com>; stable@dpdk.org
> Subject: [PATCH] net/virtio: fix refill order in packed ring datapath
> 
> The front-end should refill the descriptor with the mbuf indicated by
> the buff_id rather than the index of the used descriptor. The back-end
> may return buffers out of order if async copy mode is enabled.
> 
> When initializing the rxq, refill the descriptors in order, since the
> buff_id is not available at that time.
> 
> Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> --
> 2.17.1

Applied to next-virtio/main, thanks.
  

Patch

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 3ac847317f..d35875d9ce 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -328,13 +328,35 @@  virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
 	return 0;
 }

+static inline void
+virtqueue_refill_single_packed(struct virtqueue *vq,
+			       struct vring_packed_desc *dp,
+			       struct rte_mbuf *cookie)
+{
+	uint16_t flags = vq->vq_packed.cached_flags;
+	struct virtio_hw *hw = vq->hw;
+
+	dp->addr = cookie->buf_iova +
+			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+	dp->len = cookie->buf_len -
+		RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+
+	virtqueue_store_flags_packed(dp, flags,
+				     hw->weak_barriers);
+
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^=
+			VRING_PACKED_DESC_F_AVAIL_USED;
+		flags = vq->vq_packed.cached_flags;
+	}
+}
+
 static inline int
-virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
 				     struct rte_mbuf **cookie, uint16_t num)
 {
 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
-	uint16_t flags = vq->vq_packed.cached_flags;
-	struct virtio_hw *hw = vq->hw;
 	struct vq_desc_extra *dxp;
 	uint16_t idx;
 	int i;
@@ -350,24 +372,34 @@  virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		dxp->cookie = (void *)cookie[i];
 		dxp->ndescs = 1;

-		start_dp[idx].addr = cookie[i]->buf_iova +
-			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-		start_dp[idx].len = cookie[i]->buf_len -
-			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
+	}
+	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+	return 0;
+}

-		vq->vq_desc_head_idx = dxp->next;
-		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+				     struct rte_mbuf **cookie, uint16_t num)
+{
+	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+	struct vq_desc_extra *dxp;
+	uint16_t idx, did;
+	int i;

-		virtqueue_store_flags_packed(&start_dp[idx], flags,
-					     hw->weak_barriers);
+	if (unlikely(vq->vq_free_cnt == 0))
+		return -ENOSPC;
+	if (unlikely(vq->vq_free_cnt < num))
+		return -EMSGSIZE;

-		if (++vq->vq_avail_idx >= vq->vq_nentries) {
-			vq->vq_avail_idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-			flags = vq->vq_packed.cached_flags;
-		}
+	for (i = 0; i < num; i++) {
+		idx = vq->vq_avail_idx;
+		did = start_dp[idx].id;
+		dxp = &vq->vq_descx[did];
+		dxp->cookie = (void *)cookie[i];
+		dxp->ndescs = 1;
+
+		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
 	}
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
 	return 0;
@@ -742,7 +774,7 @@  virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)

 			/* Enqueue allocated buffers */
 			if (virtio_with_packed_queue(vq->hw))
-				error = virtqueue_enqueue_recv_refill_packed(vq,
+				error = virtqueue_enqueue_recv_refill_packed_init(vq,
 						&m, 1);
 			else
 				error = virtqueue_enqueue_recv_refill(vq,
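
The wrap handling hoisted into virtqueue_refill_single_packed() is the
subtle part of the refactor: each time vq_avail_idx wraps past the ring
size, the AVAIL and USED flag bits are toggled together, so the device
can tell descriptors written on the new lap from stale ones left over
from the previous lap. A self-contained sketch (bit positions as in the
virtio 1.1 packed-ring layout; the 4-entry ring is assumed purely for
illustration):

#include <stdio.h>
#include <stdint.h>

#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
#define VRING_PACKED_DESC_F_USED	(1 << 15)
#define VRING_PACKED_DESC_F_AVAIL_USED \
	(VRING_PACKED_DESC_F_AVAIL | VRING_PACKED_DESC_F_USED)

int main(void)
{
	uint16_t nentries = 4;	/* tiny ring, for illustration only */
	uint16_t avail_idx = 0;
	uint16_t cached_flags = VRING_PACKED_DESC_F_AVAIL;
	int i;

	for (i = 0; i < 10; i++) {
		printf("refill %d -> slot %u, flags 0x%04x\n",
		       i, avail_idx, (unsigned int)cached_flags);
		if (++avail_idx >= nentries) {
			avail_idx -= nentries;
			/* Both bits flip together on each wrap, so
			 * descriptors from this lap are distinguishable
			 * from those of the previous lap. */
			cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}
	return 0;
}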