[v7,08/13] vhost: flush batched enqueue descs directly

Message ID 20191021154016.16274-9-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost packed ring performance optimization

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  success  Compilation OK

Commit Message

Marvin Liu Oct. 21, 2019, 3:40 p.m. UTC
Flush used elements when the batched enqueue function is finished.
Descriptor flags are pre-calculated, as they will be reset by vhost.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
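
For context, a standalone sketch (not part of the patch) of how the pre-calculated used flags depend on the used wrap counter. The numeric flag values below are the standard virtio 1.1 packed-ring bits and are an assumption here, not taken from this diff:

	#include <stdint.h>
	#include <stdio.h>

	/* Standard virtio 1.1 packed-ring flag bits (assumed values). */
	#define VRING_DESC_F_WRITE  (1 << 1)
	#define VRING_DESC_F_AVAIL  (1 << 7)
	#define VRING_DESC_F_USED   (1 << 15)

	/* Same idea as PACKED_DESC_ENQUEUE_USED_FLAG(): with the used wrap
	 * counter set, AVAIL and USED are both set so the descriptor reads
	 * as used in the current wrap; otherwise only WRITE remains. */
	static uint16_t used_flags(int wrap_counter)
	{
		return wrap_counter ?
			(VRING_DESC_F_AVAIL | VRING_DESC_F_USED |
			 VRING_DESC_F_WRITE) :
			VRING_DESC_F_WRITE;
	}

	int main(void)
	{
		printf("wrap=1 -> 0x%04x\n", used_flags(1)); /* 0x8082 */
		printf("wrap=0 -> 0x%04x\n", used_flags(0)); /* 0x0002 */
		return 0;
	}

Computing this value once up front means the flush loop in the patch only has to store flags after the id/len writes have been ordered by rte_smp_wmb().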
  

Patch

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index d59446442..f8dbe841c 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -39,6 +39,9 @@ 
 
 #define VHOST_LOG_CACHE_NR 32
 
+#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
+	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
+		VRING_DESC_F_WRITE)
 #define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
 					 VRING_DESC_F_INDIRECT)
 
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1cff9b86f..eefa1efa3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -154,6 +154,36 @@  vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
 	vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
+				 struct vhost_virtqueue *vq,
+				 uint64_t *lens,
+				 uint16_t *ids)
+{
+	uint16_t i;
+	uint16_t flags;
+
+	flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		vq->desc_packed[vq->last_used_idx + i].id = ids[i];
+		vq->desc_packed[vq->last_used_idx + i].len = lens[i];
+	}
+
+	rte_smp_wmb();
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		vq->desc_packed[vq->last_used_idx + i].flags = flags;
+
+	vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
+				   sizeof(struct vring_packed_desc),
+				   sizeof(struct vring_packed_desc) *
+				   PACKED_BATCH_SIZE);
+	vhost_log_cache_sync(dev, vq);
+
+	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
 static __rte_always_inline void
 flush_shadow_used_ring_packed(struct virtio_net *dev,
 			struct vhost_virtqueue *vq)
@@ -992,6 +1022,7 @@  virtio_dev_rx_batch_packed(struct virtio_net *dev,
 	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
 	uint32_t buf_offset = dev->vhost_hlen;
 	uint64_t lens[PACKED_BATCH_SIZE];
+	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
 	if (unlikely(avail_idx & PACKED_BATCH_MASK))
@@ -1047,6 +1078,11 @@  virtio_dev_rx_batch_packed(struct virtio_net *dev,
 			   pkts[i]->pkt_len);
 	}
 
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		ids[i] = descs[avail_idx + i].id;
+
+	vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
+
 	return 0;
 }
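
As a reading aid, here is a standalone sketch (hypothetical names and types, not the DPDK API) of the store ordering vhost_flush_enqueue_batch_packed() relies on: id and len are filled for the whole batch first, and only then are the flags written, since the flags store is what makes a descriptor visible to the guest as used. C11 release stores stand in for rte_smp_wmb():

	#include <stdatomic.h>
	#include <stdint.h>

	#define BATCH_SIZE 4		/* stands in for PACKED_BATCH_SIZE */

	struct desc {			/* stands in for struct vring_packed_desc */
		uint64_t addr;
		uint32_t len;
		uint16_t id;
		_Atomic uint16_t flags;
	};

	static void
	flush_batch(struct desc *ring, uint16_t last_used,
		    const uint16_t *ids, const uint32_t *lens, uint16_t flags)
	{
		uint16_t i;

		/* Fill id/len for the whole batch first; the guest must
		 * never see a descriptor marked used while these fields
		 * are still stale. */
		for (i = 0; i < BATCH_SIZE; i++) {
			ring[last_used + i].id = ids[i];
			ring[last_used + i].len = lens[i];
		}

		/* Release stores play the role of rte_smp_wmb() followed by
		 * plain stores in the patch: every id/len store above is
		 * visible before the flags store that hands the descriptor
		 * back to the guest. */
		for (i = 0; i < BATCH_SIZE; i++)
			atomic_store_explicit(&ring[last_used + i].flags,
					      flags, memory_order_release);
	}

	int main(void)
	{
		struct desc ring[BATCH_SIZE] = {0};
		uint16_t ids[BATCH_SIZE] = {0, 1, 2, 3};
		uint32_t lens[BATCH_SIZE] = {64, 64, 64, 64};

		/* 0x8082 = AVAIL | USED | WRITE, i.e. wrap counter set. */
		flush_batch(ring, 0, ids, lens, 0x8082);
		return 0;
	}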