[v7,11/15] vhost: add vector filling support for packed ring

Message ID 20180704215438.5579-12-maxime.coquelin@redhat.com
State Superseded, archived
Delegated to: Maxime Coquelin
Series
  • Vhost: add support to packed ring layout

Checks

Context               Check  Description
ci/Intel-compilation  fail   Compilation issues

Commit Message

Maxime Coquelin July 4, 2018, 9:54 p.m. UTC
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 124 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 124 insertions(+)

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index e3b73c951..1a04df807 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -450,6 +450,130 @@  reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
+static __rte_always_inline int
+fill_vec_buf_packed_indirect(struct virtio_net *dev,
+			struct vhost_virtqueue *vq,
+			struct vring_desc_packed *desc, uint16_t *vec_idx,
+			struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
+{
+	uint16_t i;
+	uint32_t nr_descs;
+	uint16_t vec_id = *vec_idx;
+	uint64_t dlen;
+	struct vring_desc_packed *descs, *idescs = NULL;
+
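+	/*
+	 * Map the guest IOVA of the indirect descriptor table; dlen
+	 * returns how much of it is contiguous in host VA space.
+	 */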
+	dlen = desc->len;
+	descs = (struct vring_desc_packed *)(uintptr_t)
+		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
+	if (unlikely(!descs))
+		return -1;
+
+	if (unlikely(dlen < desc->len)) {
+		/*
+		 * The indirect desc table is not contiguous
+		 * in process VA space, so we have to copy it.
+		 */
+		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
+		if (unlikely(!idescs))
+			return -1;
+
+		descs = idescs;
+	}
+
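+	/* An indirect table larger than the ring size cannot be valid. */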
+	nr_descs = desc->len / sizeof(struct vring_desc_packed);
+	if (unlikely(nr_descs >= vq->size)) {
+		free_ind_table(idescs);
+		return -1;
+	}
+
+	for (i = 0; i < nr_descs; i++) {
+		if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
+			free_ind_table(idescs);
+			return -1;
+		}
+
+		*len += descs[i].len;
+		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
+						descs[i].addr, descs[i].len,
+						perm))) {
+			free_ind_table(idescs);
+			return -1;
+		}
+	}
+	*vec_idx = vec_id;
+
+	if (unlikely(!!idescs))
+		free_ind_table(idescs);
+
+	return 0;
+}
+
+static inline int
+fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+				uint16_t avail_idx, uint16_t *desc_count,
+				struct buf_vector *buf_vec, uint16_t *vec_idx,
+				uint16_t *buf_id, uint16_t *len, uint8_t perm)
+{
+	bool wrap_counter = vq->avail_wrap_counter;
+	struct vring_desc_packed *descs = vq->desc_packed;
+	uint16_t vec_id = *vec_idx;
+
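+	/*
+	 * The ring wrapped between last_avail_idx and avail_idx,
+	 * so the wrap counter for avail_idx is the opposite one.
+	 */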
+	if (avail_idx < vq->last_avail_idx)
+		wrap_counter ^= 1;
+
+	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
+		return -1;
+
+	*desc_count = 0;
+
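+	/* Walk the chain until a descriptor without the NEXT flag is found. */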
+	while (1) {
+		if (unlikely(vec_id >= BUF_VECTOR_MAX))
+			return -1;
+
+		*desc_count += 1;
+		*buf_id = descs[avail_idx].index;
+
+		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
+			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
+							&descs[avail_idx],
+							&vec_id, buf_vec,
+							len, perm) < 0))
+				return -1;
+		} else {
+			*len += descs[avail_idx].len;
+
+			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
+							descs[avail_idx].addr,
+							descs[avail_idx].len,
+							perm)))
+				return -1;
+		}
+
+		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
+			break;
+
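+		/* Wrap around the end of the ring and flip the wrap counter. */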
+		if (++avail_idx >= vq->size) {
+			avail_idx -= vq->size;
+			wrap_counter ^= 1;
+		}
+	}
+
+	*vec_idx = vec_id;
+
+	return 0;
+}
+
 static __rte_always_inline int
 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			    struct rte_mbuf *m, struct buf_vector *buf_vec,