[v2,16/16] vhost: optimize packed ring dequeue when in-order

Message ID 20190919163643.24130-17-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Series vhost packed ring performance optimization

Checks

Context              Check    Description
ci/checkpatch        success  coding style OK
ci/Intel-compilation fail     Compilation issues

Commit Message

Marvin Liu Sept. 19, 2019, 4:36 p.m. UTC
When the VIRTIO_F_IN_ORDER feature is negotiated, vhost can optimize the
dequeue function by updating only the first used descriptor.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
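
Illustration (not part of the patch): the sketch below models the in-order
shadow-update idea with simplified stand-in structures. With
VIRTIO_F_IN_ORDER the guest uses buffers in ring order, so the backend can
keep a single shadow used element whose id is refreshed to the latest buffer
while last_used_idx keeps advancing; one write-back then covers the whole
span. Names such as vq_model and update_dequeue_inorder are hypothetical and
are not vhost APIs.

/* Standalone sketch of the in-order shadow update; simplified types only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct shadow_used_elem {
	uint16_t id;        /* latest buffer id covered by this element */
	uint16_t count;     /* descriptor count recorded at the first batch */
	uint16_t used_idx;  /* ring slot where the element will be flushed */
	bool     wrap;      /* wrap counter captured at that slot */
};

struct vq_model {
	uint16_t size;           /* ring size */
	uint16_t last_used_idx;  /* next used slot */
	bool     wrap;           /* current used wrap counter */
	uint16_t shadow_cnt;     /* 0 or 1 in in-order mode */
	struct shadow_used_elem shadow[1];
};

/* In-order variant: only the first shadow element is ever populated;
 * later batches just refresh its id and advance the used index. */
static void
update_dequeue_inorder(struct vq_model *vq, uint16_t last_id, uint16_t count)
{
	vq->shadow[0].id = last_id;

	if (vq->shadow_cnt == 0) {
		vq->shadow[0].count = count;
		vq->shadow[0].used_idx = vq->last_used_idx;
		vq->shadow[0].wrap = vq->wrap;
		vq->shadow_cnt = 1;
	}

	vq->last_used_idx += count;
	if (vq->last_used_idx >= vq->size) {
		vq->last_used_idx -= vq->size;
		vq->wrap = !vq->wrap;
	}
}

int
main(void)
{
	struct vq_model vq = { .size = 256, .wrap = true };

	/* Two batches of 4 buffers; ids 3 and 7 are the last of each batch. */
	update_dequeue_inorder(&vq, 3, 4);
	update_dequeue_inorder(&vq, 7, 4);

	/* A single shadow element remains; its id tracks the newest buffer
	 * while last_used_idx has advanced past both batches. */
	printf("shadow id=%u used_idx=%u last_used_idx=%u\n",
	       (unsigned)vq.shadow[0].id, (unsigned)vq.shadow[0].used_idx,
	       (unsigned)vq.last_used_idx);
	return 0;
}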
  

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 357517cdd..a7bb4ec79 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -31,6 +31,12 @@  rxvq_is_mergeable(struct virtio_net *dev)
 	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
 }
 
+static __rte_always_inline bool
+virtio_net_is_inorder(struct virtio_net *dev)
+{
+	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
+}
+
 static bool
 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
@@ -213,6 +219,30 @@  flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 }
 
+static __rte_always_inline void
+update_dequeue_burst_packed_inorder(struct vhost_virtqueue *vq, uint16_t id)
+{
+	vq->shadow_used_packed[0].id = id;
+
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = 1;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		vq->shadow_used_idx = 1;
+
+	}
+
+	vq->last_used_idx += PACKED_DESCS_BURST;
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
+
 static __rte_always_inline void
 update_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint16_t *ids)
@@ -315,7 +345,6 @@  update_dequeue_shadow_packed(struct vhost_virtqueue *vq, uint16_t buf_id,
 		else
 			vq->desc_packed[vq->last_used_idx].flags =
 				VIRTIO_TX_USED_WRAP_FLAG;
-
 	}
 
 	vq->last_used_idx += count;
@@ -326,6 +355,31 @@  update_dequeue_shadow_packed(struct vhost_virtqueue *vq, uint16_t buf_id,
 	}
 }
 
+static __rte_always_inline void
+update_dequeue_shadow_packed_inorder(struct vhost_virtqueue *vq,
+	uint16_t buf_id, uint16_t count)
+{
+	vq->shadow_used_packed[0].id = buf_id;
+
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = count;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		vq->shadow_used_idx = 1;
+	}
+
+	vq->last_used_idx += count;
+
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
 
 static inline void
 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
@@ -1834,7 +1888,12 @@  virtio_dev_tx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   pkts[i]->pkt_len);
 	}
 
-	update_dequeue_burst_packed(dev, vq, ids);
+	if (virtio_net_is_inorder(dev))
+		update_dequeue_burst_packed_inorder(vq,
+						    ids[PACKED_BURST_MASK]);
+	else
+		update_dequeue_burst_packed(dev, vq, ids);
+
 	if (virtio_net_with_host_offload(dev)) {
 		UNROLL_PRAGMA(PRAGMA_PARAM)
 		for (i = 0; i < PACKED_DESCS_BURST; i++) {
@@ -1897,7 +1956,10 @@  virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 					&desc_count))
 		return -1;
 
-	update_dequeue_shadow_packed(vq, buf_id, desc_count);
+	if (virtio_net_is_inorder(dev))
+		update_dequeue_shadow_packed_inorder(vq, buf_id, desc_count);
+	else
+		update_dequeue_shadow_packed(vq, buf_id, desc_count);
 
 	vq->last_avail_idx += desc_count;
 	if (vq->last_avail_idx >= vq->size) {