[2/2] net/virtio: use indirect ring in packed datapath

Message ID 20200928082052.61872-2-yong.liu@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Maxime Coquelin
Series [1/2] net/virtio: setup Tx region for packed ring

Checks

Context                      Check    Description
ci/iol-mellanox-Performance  success  Performance Testing PASS
ci/iol-intel-Performance     success  Performance Testing PASS
ci/iol-testing               success  Testing PASS
ci/iol-intel-Functional      success  Functional Testing PASS
ci/travis-robot              success  Travis build: passed
ci/iol-broadcom-Functional   success  Functional Testing PASS
ci/iol-broadcom-Performance  success  Performance Testing PASS
ci/Intel-compilation         success  Compilation OK
ci/checkpatch                success  coding style OK

Commit Message

Liu, Yong Sept. 28, 2020, 8:20 a.m. UTC
Like the split ring, the packed ring will utilize indirect ring
elements when a queued mbuf needs multiple descriptors. Thus each
packet will take only one slot in the main ring even when it has
multiple segments.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
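
The ring-usage win is in the slot accounting this patch introduces; a
minimal standalone sketch of that logic (illustrative helper name and
simplified types, not the driver code itself):

	/* Main packed-ring slots consumed per transmitted packet
	 * (illustrative; mirrors the slots computation in the patch).
	 */
	static inline int
	tx_slots_needed(int nb_segs, int can_push, int use_indirect)
	{
		if (use_indirect)
			return 1; /* one descriptor points at the indirect list */
		/* otherwise one descriptor per segment, plus one for the
		 * virtio-net header unless it can be pushed into the mbuf
		 * headroom
		 */
		return nb_segs + !can_push;
	}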

Comments

Maxime Coquelin Sept. 30, 2020, 3:24 p.m. UTC | #1
On 9/28/20 10:20 AM, Marvin Liu wrote:
> Like the split ring, the packed ring will utilize indirect ring
> elements when a queued mbuf needs multiple descriptors. Thus each
> packet will take only one slot in the main ring even when it has
> multiple segments.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 


In this patch too, I would add a Fixes tag and Cc stable:

Fixes: 892dc798fa9c ("net/virtio: implement Tx path for packed queues")
Cc: stable@dpdk.org

And I would change the title to:

net/virtio: fix indirect descriptor handling in packed datapaths

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
Maxime Coquelin Sept. 30, 2020, 4:19 p.m. UTC | #2
On 9/28/20 10:20 AM, Marvin Liu wrote:
> Like the split ring, the packed ring will utilize indirect ring
> elements when a queued mbuf needs multiple descriptors. Thus each
> packet will take only one slot in the main ring even when it has
> multiple segments.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>


Applied to dpdk-next-virtio/main with the suggested commit message changes.

Thanks,
Maxime

Patch

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index f915b8a2c..b3b1586a7 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1756,7 +1756,7 @@  virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int can_push = 0, slots, need;
+		int can_push = 0, use_indirect = 0, slots, need;
 
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -1768,12 +1768,15 @@  virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
 			can_push = 1;
-
+		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+			use_indirect = 1;
 		/* How many main ring entries are needed to this Tx?
+		 * indirect   => 1
 		 * any_layout => number of segments
 		 * default    => number of segments + 1
 		 */
-		slots = txm->nb_segs + !can_push;
+		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
 		need = slots - vq->vq_free_cnt;
 
 		/* Positive value indicates it need free vring descriptors */
@@ -1791,7 +1794,8 @@  virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (can_push)
 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
 		else
-			virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
+			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
+						      use_indirect, 0,
 						      in_order);
 
 		virtio_update_packet_stats(&txvq->stats, txm);
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c
index 6a8214725..ce035b574 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
@@ -207,19 +207,26 @@  virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t slots, can_push;
+	uint16_t slots, can_push = 0, use_indirect = 0;
 	int16_t need;
 
+	/* optimize ring usage */
+	if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+	      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+	    rte_mbuf_refcnt_read(txm) == 1 &&
+	    RTE_MBUF_DIRECT(txm) &&
+	    txm->nb_segs == 1 &&
+	    rte_pktmbuf_headroom(txm) >= hdr_size)
+		can_push = 1;
+	else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+		 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+		use_indirect = 1;
 	/* How many main ring entries are needed to this Tx?
+	 * indirect   => 1
 	 * any_layout => number of segments
 	 * default    => number of segments + 1
 	 */
-	can_push = rte_mbuf_refcnt_read(txm) == 1 &&
-		   RTE_MBUF_DIRECT(txm) &&
-		   txm->nb_segs == 1 &&
-		   rte_pktmbuf_headroom(txm) >= hdr_size;
-
-	slots = txm->nb_segs + !can_push;
+	slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
 	need = slots - vq->vq_free_cnt;
 
 	/* Positive value indicates it need free vring descriptors */
@@ -234,7 +241,8 @@  virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 	}
 
 	/* Enqueue Packet buffers */
-	virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push, 1);
+	virtqueue_enqueue_xmit_packed(txvq, txm, slots, use_indirect,
+				can_push, 1);
 
 	txvq->stats.bytes += txm->pkt_len;
 	return 0;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 7d910a0a1..753dfb85c 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -686,7 +686,8 @@  virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
 static inline void
 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
-			      uint16_t needed, int can_push, int in_order)
+			      uint16_t needed, int use_indirect, int can_push,
+			      int in_order)
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
@@ -722,6 +723,25 @@  virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
 			virtqueue_clear_net_hdr(hdr);
+	} else if (use_indirect) {
+		/* setup tx ring slot to point to indirect
+		 * descriptor list stored in reserved region.
+		 *
+		 * the first slot in indirect ring is already preset
+		 * to point to the header in reserved region
+		 */
+		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+		start_dp[idx].len   = (needed + 1) *
+			sizeof(struct vring_packed_desc);
+		/* reset flags for indirect desc */
+		head_flags = VRING_DESC_F_INDIRECT;
+		head_flags |= vq->vq_packed.cached_flags;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+		/* loop below will fill in rest of the indirect elements */
+		start_dp = txr[idx].tx_packed_indir;
+		idx = 1;
 	} else {
 		/* setup first tx ring slot to point to header
 		 * stored in reserved region.
@@ -767,6 +787,15 @@  virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	start_dp[prev].id = id;
 
+	if (use_indirect) {
+		idx = head_idx;
+		if (++idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	}
+
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
 	vq->vq_avail_idx = idx;
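
One subtlety in the use_indirect tail above: an indirect transmit
consumes exactly one main-ring slot, so the next avail index is always
head_idx + 1, with the driver's cached avail/used flags flipped when
the index wraps past the ring size. A minimal sketch of that wrap
handling (standalone, illustrative names, not the driver code):

	#include <stdint.h>

	/* Packed-ring descriptor flag bits per the virtio 1.1 spec. */
	#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
	#define VRING_PACKED_DESC_F_USED	(1 << 15)
	#define VRING_PACKED_DESC_F_AVAIL_USED \
		(VRING_PACKED_DESC_F_AVAIL | VRING_PACKED_DESC_F_USED)

	static inline uint16_t
	next_avail_after_indirect(uint16_t head_idx, uint16_t nentries,
				  uint16_t *cached_flags)
	{
		uint16_t idx = head_idx + 1; /* single main-ring slot used */

		if (idx >= nentries) {
			idx -= nentries;
			/* driver-side wrap counter: flip the cached
			 * avail/used bits so descriptors written after the
			 * wrap carry the new wrap state
			 */
			*cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
		}
		return idx;
	}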