[RFC,06/13] add vhost fast enqueue flush function

Message ID 20190708171320.38802-7-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: [RFC,01/13] add vhost normal enqueue function

Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  fail     Compilation issues

Commit Message

Marvin Liu July 8, 2019, 5:13 p.m. UTC
The vhost fast enqueue function flushes the used ring immediately.
The descriptors' flags are pre-calculated as macros.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
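
A minimal illustration (not part of the patch) of what the
pre-calculated flag macros encode. The bit values are those defined by
the virtio 1.1 spec; the helper name rx_used_flags is hypothetical:

#include <stdint.h>

#define VRING_DESC_F_WRITE (1 << 1)   /* device write-only buffer */
#define VRING_DESC_F_AVAIL (1 << 7)   /* "available" wrap bit */
#define VRING_DESC_F_USED  (1 << 15)  /* "used" wrap bit */

/* A used packed descriptor has AVAIL == USED == used wrap counter. */
static inline uint16_t
rx_used_flags(int used_wrap_counter)
{
	if (used_wrap_counter)
		return VRING_DESC_F_AVAIL | VRING_DESC_F_USED |
			VRING_DESC_F_WRITE;
	return VRING_DESC_F_WRITE;
}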
  

Patch

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b8198747e..d084fe364 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -39,6 +39,10 @@ 
 
 #define VHOST_LOG_CACHE_NR 32
 
+/* Pre-calculated packed ring used descriptor flags, one per value of
+ * the used wrap counter.
+ */
+#define VIRTIO_RX_FLAG_PACKED \
+	(VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE)
+#define VIRTIO_RX_WRAP_FLAG_PACKED (VRING_DESC_F_WRITE)
+
 /* Used in fast packed ring functions */
 #define PACKED_DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_packed_desc))
 
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 96f7a8bec..9eeebe642 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -225,6 +225,63 @@  flush_enqueue_used_packed(struct virtio_net *dev,
 	vhost_log_cache_sync(dev, vq);
 }
 
+/* The flags are identical for all descriptors when flushing the used
+ * ring in the fast path.
+ */
+static __rte_always_inline void
+flush_used_fast_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3,
+		uint16_t id, uint16_t id1, uint16_t id2, uint16_t id3,
+		uint16_t flags)
+{
+	vq->desc_packed[vq->last_used_idx].id = id;
+	vq->desc_packed[vq->last_used_idx].len = len;
+
+	vq->desc_packed[vq->last_used_idx + 1].id = id1;
+	vq->desc_packed[vq->last_used_idx + 1].len = len1;
+
+	vq->desc_packed[vq->last_used_idx + 2].id = id2;
+	vq->desc_packed[vq->last_used_idx + 2].len = len2;
+
+	vq->desc_packed[vq->last_used_idx + 3].id = id3;
+	vq->desc_packed[vq->last_used_idx + 3].len = len3;
+
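+	/* The barrier before each flags store orders the id/len writes
+	 * above it; a flags write is what exposes a descriptor to the
+	 * guest.
+	 */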
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx].flags = flags;
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx + 1].flags = flags;
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx + 2].flags = flags;
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx + 3].flags = flags;
+
+	vhost_log_cache_used_vring(dev, vq,
+				vq->last_used_idx *
+				sizeof(struct vring_packed_desc),
+				RTE_CACHE_LINE_SIZE);
+	vhost_log_cache_sync(dev, vq);
+
+	vq->last_used_idx += PACKED_DESC_PER_CACHELINE;
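+	/* Toggle the used wrap counter when the used index wraps. */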
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
+
+static __rte_always_inline void
+flush_enqueue_fast_used_packed(struct virtio_net *dev,
+			struct vhost_virtqueue *vq, uint64_t len,
+			uint64_t len1, uint64_t len2, uint64_t len3,
+			uint16_t id, uint16_t id1, uint16_t id2, uint16_t id3)
+{
+	uint16_t flags;
+
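+	/* In a used packed descriptor the AVAIL and USED bits both
+	 * equal the used wrap counter.
+	 */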
+	if (vq->used_wrap_counter)
+		flags = VIRTIO_RX_FLAG_PACKED;
+	else
+		flags = VIRTIO_RX_WRAP_FLAG_PACKED;
+
+	flush_used_fast_packed(dev, vq, len, len1, len2, len3, id, id1, id2,
+			id3, flags);
+}
+
 static __rte_always_inline void
 update_enqueue_shadow_used_ring_packed(struct vhost_virtqueue *vq,
 				uint16_t desc_idx, uint32_t len,
@@ -1020,6 +1077,7 @@  virtio_dev_rx_fast_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		len2, len3;
 	struct virtio_net_hdr_mrg_rxbuf *hdr, *hdr1, *hdr2, *hdr3;
 	uint32_t buf_offset = dev->vhost_hlen;
+	uint16_t id, id1, id2, id3;
 
 	if (unlikely(avail_idx & 0x3))
 		return -1;
@@ -1055,6 +1113,11 @@  virtio_dev_rx_fast_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		(pkts[3]->pkt_len > (len3 - buf_offset))))
 		return -1;
 
+	/* Save the buffer ids now; the same descriptor slots are
+	 * overwritten when flushed as used.
+	 */
+	id = descs[avail_idx].id;
+	id1 = descs[avail_idx + 1].id;
+	id2 = descs[avail_idx + 2].id;
+	id3 = descs[avail_idx + 3].id;
+
 	desc_addr = vhost_iova_to_vva(dev, vq,
 			descs[avail_idx].addr,
 			&len,
@@ -1115,6 +1178,9 @@  virtio_dev_rx_fast_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		rte_pktmbuf_mtod_offset(pkts[3], void *, 0),
 		pkts[3]->pkt_len);
 
+	flush_enqueue_fast_used_packed(dev, vq, len, len1, len2, len3, id,
+		id1, id2, id3);
+
 	return 0;
 }
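
Side note on why the fast path is exactly four descriptors wide: a
packed descriptor is 16 bytes, so PACKED_DESC_PER_CACHELINE evaluates
to 4 on a 64-byte cache line and one flush covers one full cache line
of descriptors. A standalone sketch, assuming the struct layout defined
by the virtio 1.1 spec:

#include <assert.h>
#include <stdint.h>

struct vring_packed_desc {
	uint64_t addr;  /* buffer guest-physical address */
	uint32_t len;   /* buffer length */
	uint16_t id;    /* buffer id */
	uint16_t flags; /* AVAIL/USED/WRITE, etc. */
};

#define RTE_CACHE_LINE_SIZE 64
#define PACKED_DESC_PER_CACHELINE \
	(RTE_CACHE_LINE_SIZE / sizeof(struct vring_packed_desc))

int main(void)
{
	/* 64 / 16 == 4 descriptors per cache line. */
	assert(PACKED_DESC_PER_CACHELINE == 4);
	return 0;
}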