[v4,01/14] vhost: add single packet enqueue function

Message ID 20191009133849.69002-2-yong.liu@intel.com
State Superseded
Delegated to: Maxime Coquelin
Headers show
Series
  • vhost packed ring performance optimization
Related show

Checks

Context Check Description
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-compilation success Compile Testing PASS
ci/checkpatch success coding style OK

Commit Message

Liu, Yong Oct. 9, 2019, 1:38 p.m.
Add a vhost enqueue function for a single packet and meanwhile leave
space for the flush-used-ring function.

Signed-off-by: Marvin Liu <yong.liu@intel.com>

Comments

Maxime Coquelin Oct. 11, 2019, 12:34 p.m. | #1
On 10/9/19 3:38 PM, Marvin Liu wrote:
> Add a vhost enqueue function for a single packet and meanwhile leave
> space for the flush-used-ring function.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5b85b832d..520c4c6a8 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -774,6 +774,58 @@  copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return error;
 }
 
+static __rte_always_inline int
+vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	struct rte_mbuf *pkt, struct buf_vector *buf_vec, uint16_t *nr_descs)
+{
+	uint16_t nr_vec = 0;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t max_tries, tries = 0;
+	uint16_t buf_id = 0;
+	uint32_t len = 0;
+	uint16_t desc_count;
+	uint32_t size = pkt->pkt_len + dev->vhost_hlen;
+	uint16_t num_buffers = 0;
+
+	if (rxvq_is_mergeable(dev))
+		max_tries = vq->size - 1;
+	else
+		max_tries = 1;
+
+	while (size > 0) {
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &len,
+						VHOST_ACCESS_RW) < 0))
+			return -1;
+
+		len = RTE_MIN(len, size);
+		size -= len;
+
+		num_buffers += 1;
+
+		*nr_descs += desc_count;
+		avail_idx += desc_count;
+		if (avail_idx >= vq->size)
+			avail_idx -= vq->size;
+	}
+
+	if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
+		return -1;
+
+	return 0;
+}
+
+
 static __rte_noinline uint32_t
 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
@@ -831,6 +883,36 @@  virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return pkt_idx;
 }
 
+static __rte_unused int16_t
+virtio_dev_rx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	struct rte_mbuf *pkt)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	uint16_t nr_descs = 0;
+
+	rte_smp_rmb();
+	if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
+						 &nr_descs) < 0)) {
+		VHOST_LOG_DEBUG(VHOST_DATA,
+				"(%d) failed to get enough desc from vring\n",
+				dev->vid);
+		return -1;
+	}
+
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + nr_descs);
+
+	vq->last_avail_idx += nr_descs;
+	if (vq->last_avail_idx >= vq->size) {
+		vq->last_avail_idx -= vq->size;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	return 0;
+}
+
+
 static __rte_noinline uint32_t
 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)