[RFC,01/13] add vhost normal enqueue function

Message ID 20190708171320.38802-2-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: [RFC,01/13] add vhost normal enqueue function

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues

Commit Message

Marvin Liu July 8, 2019, 5:13 p.m. UTC
Rewrite the vhost enqueue function and leave space for the later
changes that flush the enqueue shadow used descriptors.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
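
To illustrate the reservation loop in vhost_enqueue_normal_packed() below
(buffer sizes here are made-up; the arithmetic follows the patch, taking
dev->vhost_hlen as the 12-byte mergeable header): with mergeable Rx,
2048-byte guest buffers and a 3000-byte packet, size starts at
3000 + 12 = 3012. The first pass reserves len = RTE_MIN(2048, 3012) = 2048,
leaving 964; the second pass reserves 964, leaving 0, so the loop exits with
num_buffers = 2. With mergeable Rx disabled, max_tries is 1, so any packet
that does not fit in a single buffer fails with -1.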
  

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5b85b832d..003aec1d4 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -774,6 +774,66 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return error;
 }
 
+/*
+ * Returns -1 on failure, 0 on success
+ */
+static inline int
+vhost_enqueue_normal_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	struct rte_mbuf *pkt, struct buf_vector *buf_vec,
+	uint16_t *nr_descs)
+{
+	uint16_t nr_vec = 0;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t max_tries, tries = 0;
+	uint16_t buf_id = 0;
+	uint32_t len = 0;
+	uint16_t desc_count;
+	uint32_t size = pkt->pkt_len + dev->vhost_hlen;
+	uint16_t num_buffers = 0;
+
+	if (rxvq_is_mergeable(dev))
+		max_tries = vq->size - 1;
+	else
+		max_tries = 1;
+
+	while (size > 0) {
+		/*
+		 * If we tried all available ring items and still
+		 * can't get enough buffers, something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &len,
+						VHOST_ACCESS_RW) < 0)) {
+			return -1;
+		}
+
+		len = RTE_MIN(len, size);
+
+		size -= len;
+
+		avail_idx += desc_count;
+		if (avail_idx >= vq->size)
+			avail_idx -= vq->size;
+
+		*nr_descs += desc_count;
+		num_buffers += 1;
+	}
+
+	if (copy_mbuf_to_desc(dev, vq, pkt,
+					buf_vec, nr_vec,
+					num_buffers) < 0) {
+		return -1;
+	}
+
+	return 0;
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
@@ -831,6 +891,35 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return pkt_idx;
 }
 
+static __rte_always_inline uint16_t
+virtio_dev_rx_normal_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			struct rte_mbuf *pkt)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	uint16_t nr_descs = 0;
+
+	rte_smp_rmb();
+	if (unlikely(vhost_enqueue_normal_packed(dev, vq,
+					pkt, buf_vec, &nr_descs) < 0)) {
+		VHOST_LOG_DEBUG(VHOST_DATA,
+			"(%d) failed to get enough desc from vring\n",
+			dev->vid);
+		return 0;
+	}
+
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+		dev->vid, vq->last_avail_idx,
+		vq->last_avail_idx + nr_descs);
+
+	vq->last_avail_idx += nr_descs;
+	if (vq->last_avail_idx >= vq->size) {
+		vq->last_avail_idx -= vq->size;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	return 1;
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
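
For context, a minimal sketch of how the burst function could drive the new
per-packet helper; the body of virtio_dev_rx_packed sits outside this hunk, so
this loop is illustrative rather than the patch's actual code. It relies only
on the helper's return values shown above (1 on success, 0 when no descriptors
are left), and on the dev, vq, pkts and count parameters from the signature:

	uint32_t pkt_idx;

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		/* stop at the first packet that cannot be enqueued */
		if (virtio_dev_rx_normal_packed(dev, vq, pkts[pkt_idx]) == 0)
			break;
	}

	return pkt_idx;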