[v2,10/16] vhost: split enqueue and dequeue flush functions

Message ID: 20190919163643.24130-11-yong.liu@intel.com (mailing list archive)
State: Superseded, archived
Series: vhost packed ring performance optimization

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     Compilation issues

Commit Message

Marvin Liu Sept. 19, 2019, 4:36 p.m. UTC
Vhost enqueue descriptors are updated burst by burst, while vhost
dequeue descriptors are buffered in the shadow used ring. Moreover, on
the dequeue side only the first descriptor of a burst is buffered. Due
to these differences, split the vhost enqueue and dequeue flush
functions.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
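
For illustration, a minimal standalone sketch of the ordering the new dequeue
flush relies on when handing a used descriptor back to the driver (the struct
and barrier below are simplified stand-ins, not the actual DPDK definitions):
the descriptor id is stored first, a write barrier follows, and only then are
the flags written to transfer ownership, mirroring what
flush_dequeue_shadow_packed() in the patch below does for the shadow head.

#include <stdint.h>

/* Simplified stand-in for struct vring_packed_desc. */
struct pkd_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Stand-in for rte_smp_wmb(). */
static inline void wmb(void)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
}

/*
 * Hand one used descriptor back to the driver: the id must be visible
 * before the flags flip ownership, hence the barrier between the stores.
 */
static void mark_desc_used(struct pkd_desc *desc, uint16_t id,
			   uint16_t used_flags)
{
	desc->id = id;
	wmb();
	desc->flags = used_flags;
}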
  

Comments

Gavin Hu Sept. 25, 2019, 4:11 a.m. UTC | #1
Similarly, the rte_smp_wmb() can be moved out of the loop and placed right above it.
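
For reference, a rough sketch of what that would look like in
update_dequeue_burst_packed() (just the shape of the suggestion, not a
tested change): all descriptor ids are stored first, a single
rte_smp_wmb() then orders those stores against the flag stores, and the
flags loop runs without a per-iteration barrier.

	UNROLL_PRAGMA(PRAGMA_PARAM)
	for (i = 1; i < PACKED_DESCS_BURST; i++)
		vq->desc_packed[vq->last_used_idx + i].id = ids[i];

	/* one barrier orders all id stores before all flag stores */
	rte_smp_wmb();

	UNROLL_PRAGMA(PRAGMA_PARAM)
	for (i = 1; i < PACKED_DESCS_BURST; i++)
		vq->desc_packed[vq->last_used_idx + i].flags = flags;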

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Marvin Liu
> Sent: Friday, September 20, 2019 12:37 AM
> To: maxime.coquelin@redhat.com; tiwei.bie@intel.com;
> zhihong.wang@intel.com
> Cc: dev@dpdk.org; Marvin Liu <yong.liu@intel.com>
> Subject: [dpdk-dev] [PATCH v2 10/16] vhost: split enqueue and dequeue
> flush functions
> 
> Vhost enqueue descriptors are updated by burst number, while vhost
> dequeue descriptors are buffered. Meanwhile in dequeue function only
> first descriptor is buffered. Due to these differences, split vhost
> enqueue and dequeue flush functions.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
  

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 94c1b8dc7..c5c86c219 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -92,8 +92,7 @@  update_shadow_split(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-flush_shadow_packed(struct virtio_net *dev,
-			struct vhost_virtqueue *vq)
+flush_enqueue_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	int i;
 	uint16_t used_idx = vq->last_used_idx;
@@ -158,6 +157,31 @@  flush_shadow_packed(struct virtio_net *dev,
 	vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+flush_dequeue_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	uint16_t head_idx = vq->dequeue_shadow_head;
+	uint16_t head_flags;
+	struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
+
+	if (used_elem->used_wrap_counter)
+		head_flags = VIRTIO_TX_USED_FLAG;
+	else
+		head_flags = VIRTIO_TX_USED_WRAP_FLAG;
+
+	vq->desc_packed[head_idx].id = used_elem->id;
+
+	rte_smp_wmb();
+	vq->desc_packed[head_idx].flags = head_flags;
+
+	vhost_log_cache_used_vring(dev, vq, head_idx *
+				   sizeof(struct vring_packed_desc),
+				   sizeof(struct vring_packed_desc));
+
+	vq->shadow_used_idx = 0;
+	vhost_log_cache_sync(dev, vq);
+}
+
 static __rte_always_inline void
 update_shadow_packed(struct vhost_virtqueue *vq,
 			 uint16_t desc_idx, uint32_t len, uint16_t count)
@@ -200,6 +224,51 @@  flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 }
 
+static __rte_always_inline void
+update_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	uint16_t *ids)
+{
+	uint16_t flags = 0;
+	uint16_t i;
+
+	if (vq->used_wrap_counter)
+		flags = VIRTIO_TX_USED_FLAG;
+	else
+		flags = VIRTIO_TX_USED_WRAP_FLAG;
+
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+
+		vq->shadow_used_packed[0].id  = ids[0];
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = 1;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		UNROLL_PRAGMA(PRAGMA_PARAM)
+		for (i = 1; i < PACKED_DESCS_BURST; i++)
+			vq->desc_packed[vq->last_used_idx + i].id = ids[i];
+
+		UNROLL_PRAGMA(PRAGMA_PARAM)
+		for (i = 1; i < PACKED_DESCS_BURST; i++) {
+			rte_smp_wmb();
+			vq->desc_packed[vq->last_used_idx + i].flags = flags;
+		}
+
+		vq->shadow_used_idx = 1;
+
+		vq->last_used_idx += PACKED_DESCS_BURST;
+		if (vq->last_used_idx >= vq->size) {
+			vq->used_wrap_counter ^= 1;
+			vq->last_used_idx -= vq->size;
+		}
+	} else {
+		uint64_t lens[PACKED_DESCS_BURST] = {0};
+		flush_burst_packed(dev, vq, lens, ids, flags);
+	}
+}
+
 static __rte_always_inline void
 flush_enqueue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t *lens, uint16_t *ids)
@@ -309,10 +378,29 @@  flush_enqueue_packed(struct virtio_net *dev,
 
 		if (vq->enqueue_shadow_count >= PACKED_DESCS_BURST) {
 			do_data_copy_enqueue(dev, vq);
-			flush_shadow_packed(dev, vq);
+			flush_enqueue_shadow_packed(dev, vq);
 		}
 	}
 }
+
+static __rte_unused __rte_always_inline void
+flush_dequeue_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	if (!vq->shadow_used_idx)
+		return;
+
+	int16_t shadow_count = vq->last_used_idx - vq->dequeue_shadow_head;
+	if (shadow_count <= 0)
+		shadow_count += vq->size;
+
+	/* buffer used descs as many as possible when doing dequeue */
+	if ((uint16_t)shadow_count >= (vq->size - MAX_PKT_BURST)) {
+		do_data_copy_dequeue(vq);
+		flush_dequeue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+}
+
 /* avoid write operation when necessary, to lessen cache issues */
 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
 	if ((var) != (val))			\
@@ -1178,7 +1266,7 @@  virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	do_data_copy_enqueue(dev, vq);
 
 	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_packed(dev, vq);
+		flush_enqueue_shadow_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 
@@ -1810,6 +1898,7 @@  virtio_dev_tx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   pkts[i]->pkt_len);
 	}
 
+	update_dequeue_burst_packed(dev, vq, ids);
 	if (virtio_net_with_host_offload(dev)) {
 		UNROLL_PRAGMA(PRAGMA_PARAM)
 		for (i = 0; i < PACKED_DESCS_BURST; i++) {
@@ -1911,7 +2000,7 @@  virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		if (likely(vq->shadow_used_idx)) {
-			flush_shadow_packed(dev, vq);
+			flush_dequeue_shadow_packed(dev, vq);
 			vhost_vring_call_packed(dev, vq);
 		}
 	}
@@ -1990,7 +2079,7 @@  virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(i < count))
 			vq->shadow_used_idx = i;
 		if (likely(vq->shadow_used_idx)) {
-			flush_shadow_packed(dev, vq);
+			flush_dequeue_shadow_packed(dev, vq);
 			vhost_vring_call_packed(dev, vq);
 		}
 	}