[v1,09/14] vhost: split enqueue and dequeue flush functions

Message ID 20190905161421.55981-10-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost packed ring performance optimization

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Marvin Liu Sept. 5, 2019, 4:14 p.m. UTC
Vhost enqueue descriptors are updated in bursts, while vhost
dequeue descriptors are buffered. Moreover, on the dequeue side only
the first descriptor of a burst is buffered; the remaining ones are
written back immediately. Due to these differences, split the vhost
enqueue and dequeue flush functions.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
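The dequeue-side split is easier to follow with a toy model. The sketch
below is not the DPDK code: the struct layout, the names toy_vq,
update_dequeue_burst and flush_dequeue_shadow, and the single F_USED
flag are simplifying assumptions. It only illustrates the idea the
patch implements: write back every used descriptor of a burst except
the head immediately, and publish the burst later by storing the head's
flags last, since the guest checks the head descriptor first.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8
#define BURST 4
#define F_USED (1u << 0)	/* stand-in for the real used/avail flag bits */

struct toy_desc { uint16_t id; uint16_t flags; };

struct toy_vq {
	struct toy_desc desc[RING_SIZE];
	uint16_t last_used_idx;
	uint16_t shadow_head;	/* ring slot whose flags store is deferred */
	uint16_t shadow_head_id;
	int have_shadow;
};

static void update_dequeue_burst(struct toy_vq *vq, const uint16_t ids[BURST])
{
	uint16_t head = vq->last_used_idx;

	/* Defer the head: only remember it, do not store its flags yet. */
	vq->shadow_head = head;
	vq->shadow_head_id = ids[0];
	vq->have_shadow = 1;

	/* Write back the rest of the burst immediately. */
	for (int i = 1; i < BURST; i++) {
		uint16_t slot = (head + i) % RING_SIZE;
		vq->desc[slot].id = ids[i];
		/* The patch issues rte_smp_wmb() before each flags store. */
		vq->desc[slot].flags = F_USED;
	}
	vq->last_used_idx = (head + BURST) % RING_SIZE;
}

static void flush_dequeue_shadow(struct toy_vq *vq)
{
	if (!vq->have_shadow)
		return;
	vq->desc[vq->shadow_head].id = vq->shadow_head_id;
	/* rte_smp_wmb() here in the real code: the flags store goes last,
	 * making the whole burst visible to the guest at once. */
	vq->desc[vq->shadow_head].flags = F_USED;
	vq->have_shadow = 0;
}

int main(void)
{
	struct toy_vq vq = { .last_used_idx = 0 };
	const uint16_t ids[BURST] = { 10, 11, 12, 13 };

	update_dequeue_burst(&vq, ids);
	flush_dequeue_shadow(&vq);

	for (int i = 0; i < BURST; i++)
		printf("slot %d: id=%u flags=%u\n", i,
		       (unsigned)vq.desc[i].id, (unsigned)vq.desc[i].flags);
	return 0;
}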

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index f8ad54e18..8d09e1611 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -92,7 +92,7 @@  update_shadow_split(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+flush_enqueue_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	int i;
 	uint16_t used_idx = vq->last_used_idx;
@@ -157,6 +157,33 @@  flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+flush_dequeue_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	uint16_t head_idx = vq->dequeue_shadow_head;
+	uint16_t head_flags;
+
+	if (vq->shadow_used_packed[0].used_wrap_counter)
+		head_flags = VIRTIO_TX_USED_FLAG;
+	else
+		head_flags = VIRTIO_TX_USED_WRAP_FLAG;
+
+	if (vq->shadow_used_packed[0].len)
+		head_flags |= VRING_DESC_F_WRITE;
+
+	vq->desc_packed[head_idx].id = vq->shadow_used_packed[0].id;
+
+	rte_smp_wmb();
+	vq->desc_packed[head_idx].flags = head_flags;
+
+	vhost_log_cache_used_vring(dev, vq, head_idx *
+				   sizeof(struct vring_packed_desc),
+				   sizeof(struct vring_packed_desc));
+
+	vq->shadow_used_idx = 0;
+	vhost_log_cache_sync(dev, vq);
+}
+
 static __rte_always_inline void
 flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id,
@@ -195,6 +222,52 @@  flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 }
 
+static __rte_always_inline void
+update_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	uint16_t id, uint16_t id1, uint16_t id2, uint16_t id3)
+{
+	uint16_t flags = 0;
+
+	if (vq->used_wrap_counter)
+		flags = VIRTIO_TX_USED_FLAG;
+	else
+		flags = VIRTIO_TX_USED_WRAP_FLAG;
+
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+
+		vq->shadow_used_packed[0].id  = id;
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = 1;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		vq->desc_packed[vq->last_used_idx + 1].id = id1;
+		vq->desc_packed[vq->last_used_idx + 2].id = id2;
+		vq->desc_packed[vq->last_used_idx + 3].id = id3;
+
+		rte_smp_wmb();
+		vq->desc_packed[vq->last_used_idx + 1].flags = flags;
+		rte_smp_wmb();
+		vq->desc_packed[vq->last_used_idx + 2].flags = flags;
+		rte_smp_wmb();
+		vq->desc_packed[vq->last_used_idx + 3].flags = flags;
+
+		vq->shadow_used_idx = 1;
+
+		vq->last_used_idx += PACKED_DESCS_BURST;
+		if (vq->last_used_idx >= vq->size) {
+			vq->used_wrap_counter ^= 1;
+			vq->last_used_idx -= vq->size;
+		}
+	} else {
+
+		flush_burst_packed(dev, vq, 0, 0, 0, 0, id, id1, id2, id3,
+				   flags);
+	}
+}
+
 static __rte_always_inline void
 flush_enqueue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id,
@@ -316,11 +389,29 @@  flush_enqueue_packed(struct virtio_net *dev,
 
 		if (vq->enqueue_shadow_count >= PACKED_DESCS_BURST) {
 			do_data_copy_enqueue(dev, vq);
-			flush_shadow_packed(dev, vq);
+			flush_enqueue_shadow_packed(dev, vq);
 		}
 	}
 }
 
+static __rte_unused void
+flush_dequeue_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	if (!vq->shadow_used_idx)
+		return;
+
+	int16_t shadow_count = vq->last_used_idx - vq->dequeue_shadow_head;
+	if (shadow_count <= 0)
+		shadow_count += vq->size;
+
+	/* buffer used descs as many as possible when doing dequeue */
+	if ((uint16_t)shadow_count >= (vq->size >> 1)) {
+		do_data_copy_dequeue(vq);
+		flush_dequeue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+}
+
 /* avoid write operation when necessary, to lessen cache issues */
 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
 	if ((var) != (val))			\
@@ -1211,7 +1302,7 @@  virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	do_data_copy_enqueue(dev, vq);
 
 	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_packed(dev, vq);
+		flush_enqueue_shadow_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 
@@ -1869,6 +1960,8 @@  virtio_dev_tx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		(void *)(uintptr_t)(desc_addr[3] + buf_offset),
 		pkts[3]->pkt_len);
 
+	update_dequeue_burst_packed(dev, vq, ids[0], ids[1], ids[2], ids[3]);
+
 	if (virtio_net_with_host_offload(dev)) {
 		hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr[0]);
 		hdr1 = (struct virtio_net_hdr *)((uintptr_t)desc_addr[1]);
@@ -1972,7 +2065,7 @@  virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		if (likely(vq->shadow_used_idx)) {
-			flush_shadow_packed(dev, vq);
+			flush_dequeue_shadow_packed(dev, vq);
 			vhost_vring_call_packed(dev, vq);
 		}
 	}
@@ -2050,7 +2143,7 @@  virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(i < count))
 			vq->shadow_used_idx = i;
 		if (likely(vq->shadow_used_idx)) {
-			flush_shadow_packed(dev, vq);
+			flush_dequeue_shadow_packed(dev, vq);
 			vhost_vring_call_packed(dev, vq);
 		}
 	}
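One subtle point in flush_dequeue_packed() above is the signed 16-bit
distance between last_used_idx and dequeue_shadow_head: the int16_t
difference goes negative once the ring wraps, and adding the ring size
back recovers the number of buffered used descriptors. A minimal
standalone check of that arithmetic (the ring_distance helper is
illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint16_t ring_distance(uint16_t last_used, uint16_t shadow_head,
			      uint16_t size)
{
	int16_t d = (int16_t)(last_used - shadow_head);

	if (d <= 0)
		d += size;
	return (uint16_t)d;
}

int main(void)
{
	/* No wrap: head 10, tail 14 -> 4 descriptors buffered. */
	assert(ring_distance(14, 10, 256) == 4);
	/* Wrapped: head 254, tail 2 on a 256-entry ring -> 4 buffered. */
	assert(ring_distance(2, 254, 256) == 4);
	return 0;
}

Once this distance reaches half the ring size, the patch flushes the
shadow and kicks the guest, which bounds how long used descriptors sit
buffered on the dequeue path.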