
[v6,11/15] vhost: add vector filling support for packed ring

Message ID 20180702081629.29258-12-maxime.coquelin@redhat.com
State Superseded, archived
Delegated to: Maxime Coquelin
Series Vhost: add support to packed ring layout

Checks

Context               Check  Description
ci/Intel-compilation  fail   Compilation issues

Commit Message

Maxime Coquelin July 2, 2018, 8:16 a.m. UTC
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 111 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)

Comments

Tiwei Bie July 4, 2018, 5:53 a.m. UTC | #1
On Mon, Jul 02, 2018 at 10:16:25AM +0200, Maxime Coquelin wrote:
[...]
> +static __rte_always_inline int
> +fill_vec_buf_packed_indirect(struct virtio_net *dev,
> +			struct vhost_virtqueue *vq,
> +			struct vring_desc_packed *desc, uint16_t *vec_idx,
> +			struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
> +{
> +	uint16_t i;
> +	uint32_t nr_decs;
> +	uint16_t vec_id = *vec_idx;
> +	uint64_t dlen;
> +	struct vring_desc_packed *descs, *idescs = NULL;
> +
> +	dlen = desc->len;
> +	descs = (struct vring_desc_packed *)(uintptr_t)
> +		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
> +	if (unlikely(!descs))
> +		return -1;
> +
> +	if (unlikely(dlen < desc->len)) {
> +		/*
> +		 * The indirect desc table is not contiguous
> +		 * in process VA space, we have to copy it.
> +		 */
> +		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
> +		if (unlikely(!idescs))
> +			return -1;
> +
> +		descs = idescs;
> +	}
> +
> +	nr_decs =  desc->len / sizeof(struct vring_desc_packed);

s/nr_decs =  /nr_desc = /


> +	if (unlikely(nr_decs >= vq->size)) {
> +		free_ind_table(idescs);
> +		return -1;
> +	}
[...]
> +
> +static inline int
> +fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +				uint16_t avail_idx, uint16_t *desc_count,
> +				struct buf_vector *buf_vec, uint16_t *vec_idx,
> +				uint16_t *buf_id, uint16_t *len, uint8_t perm)
> +{
> +	bool wrap_counter = vq->avail_wrap_counter;
> +	struct vring_desc_packed *descs = vq->desc_packed;
> +	uint16_t vec_id = *vec_idx;
> +
> +	if (avail_idx < vq->last_avail_idx)
> +		wrap_counter ^= 1;

In which case will avail_idx be less than vq->last_avail_idx,
so that we need to flip the wrap_counter?

> +
> +	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
> +		return -1;
> +
> +	*desc_count = 0;
> +
> +	while (1) {
> +		if (unlikely(vec_id >= BUF_VECTOR_MAX))
> +			return -1;
> +
> +		*desc_count += 1;
> +		*buf_id = descs[avail_idx].index;
> +
> +		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
> +			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
> +							&descs[avail_idx],
> +							&vec_id, buf_vec,
> +							len, perm) < 0))
> +				return -1;
> +		} else {
> +			*len += descs[avail_idx].len;
> +
> +			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
> +							descs[avail_idx].addr,
> +							descs[avail_idx].len,
> +							perm)))
> +				return -1;
> +		}
> +
> +		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
> +			break;
> +
> +		if (++avail_idx >= vq->size) {
> +			avail_idx -= vq->size;
> +			wrap_counter ^= 1;
> +		}
> +	}
> +
> +	*vec_idx = vec_id;
> +
> +	return 0;
> +}
[...]
Maxime Coquelin July 4, 2018, 4:18 p.m. UTC | #2
On 07/04/2018 07:53 AM, Tiwei Bie wrote:
> On Mon, Jul 02, 2018 at 10:16:25AM +0200, Maxime Coquelin wrote:
> [...]
>> +static __rte_always_inline int
>> +fill_vec_buf_packed_indirect(struct virtio_net *dev,
>> +			struct vhost_virtqueue *vq,
>> +			struct vring_desc_packed *desc, uint16_t *vec_idx,
>> +			struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
>> +{
>> +	uint16_t i;
>> +	uint32_t nr_decs;
>> +	uint16_t vec_id = *vec_idx;
>> +	uint64_t dlen;
>> +	struct vring_desc_packed *descs, *idescs = NULL;
>> +
>> +	dlen = desc->len;
>> +	descs = (struct vring_desc_packed *)(uintptr_t)
>> +		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
>> +	if (unlikely(!descs))
>> +		return -1;
>> +
>> +	if (unlikely(dlen < desc->len)) {
>> +		/*
>> +		 * The indirect desc table is not contiguous
>> +		 * in process VA space, we have to copy it.
>> +		 */
>> +		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
>> +		if (unlikely(!idescs))
>> +			return -1;
>> +
>> +		descs = idescs;
>> +	}
>> +
>> +	nr_decs =  desc->len / sizeof(struct vring_desc_packed);
> 
> s/nr_decs =  /nr_desc = /

Fixed.

> 
> 
>> +	if (unlikely(nr_decs >= vq->size)) {
>> +		free_ind_table(idescs);
>> +		return -1;
>> +	}
> [...]
>> +
>> +static inline int
>> +fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>> +				uint16_t avail_idx, uint16_t *desc_count,
>> +				struct buf_vector *buf_vec, uint16_t *vec_idx,
>> +				uint16_t *buf_id, uint16_t *len, uint8_t perm)
>> +{
>> +	bool wrap_counter = vq->avail_wrap_counter;
>> +	struct vring_desc_packed *descs = vq->desc_packed;
>> +	uint16_t vec_id = *vec_idx;
>> +
>> +	if (avail_idx < vq->last_avail_idx)
>> +		wrap_counter ^= 1;
> 
> In which case will avail_idx be less than vq->last_avail_idx,
> so that we need to flip the wrap_counter?

It can happen in the mergeable receive case (see patch 12):

static inline int
reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *nr_vec, uint16_t *num_buffers,
				uint16_t *nr_descs)
{
	uint16_t avail_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t buf_id = 0;
	uint16_t len = 0;
	uint16_t desc_count;

	*num_buffers = 0;
	avail_idx = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size;
	else
		max_tries = 1;

	while (size > 0) {
		if (unlikely(fill_vec_buf_packed(dev, vq,
						avail_idx, &desc_count,
						buf_vec, &vec_idx,
						&buf_id, &len,
						VHOST_ACCESS_RO) < 0))
			return -1;

		len = RTE_MIN(len, size);
		update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
		size -= len;

		avail_idx += desc_count;
		if (avail_idx >= vq->size)
			avail_idx -= vq->size;

		*nr_descs += desc_count;
		tries++;
		*num_buffers += 1;

		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(tries > max_tries))
			return -1;
	}

	*nr_vec = vec_idx;

	return 0;
}
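
For context, whether a packed-ring slot is available is derived from its
AVAIL/USED flag bits relative to the expected wrap counter. Below is a
rough sketch of such a check, in the spirit of the desc_is_avail helper
introduced earlier in this series (the exact body in the series may
differ):

static inline bool
desc_is_avail(struct vring_desc_packed *desc, bool wrap_counter)
{
	/*
	 * The driver toggles VRING_DESC_F_AVAIL each time it wraps the
	 * ring; the device toggles VRING_DESC_F_USED when it consumes a
	 * descriptor. A slot is available when AVAIL matches the
	 * expected wrap counter and USED does not.
	 */
	return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
}

This is why fill_vec_buf_packed flips its local wrap_counter when the
caller's avail_idx has wrapped past the end of the ring while
vq->last_avail_idx has not.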

>> +
>> +	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
>> +		return -1;
>> +
>> +	*desc_count = 0;
>> +
>> +	while (1) {
>> +		if (unlikely(vec_id >= BUF_VECTOR_MAX))
>> +			return -1;
>> +
>> +		*desc_count += 1;
>> +		*buf_id = descs[avail_idx].index;
>> +
>> +		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
>> +			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
>> +							&descs[avail_idx],
>> +							&vec_id, buf_vec,
>> +							len, perm) < 0))
>> +				return -1;
>> +		} else {
>> +			*len += descs[avail_idx].len;
>> +
>> +			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
>> +							descs[avail_idx].addr,
>> +							descs[avail_idx].len,
>> +							perm)))
>> +				return -1;
>> +		}
>> +
>> +		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
>> +			break;
>> +
>> +		if (++avail_idx >= vq->size) {
>> +			avail_idx -= vq->size;
>> +			wrap_counter ^= 1;
>> +		}
>> +	}
>> +
>> +	*vec_idx = vec_id;
>> +
>> +	return 0;
>> +}
> [...]
>
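For reference, below is a sketch of the packed descriptor layout these
helpers iterate over, with field names as used at this point in the
series (later revisions renamed index to id to match the virtio 1.1
specification):

struct vring_desc_packed {
	uint64_t addr;   /* guest-physical address of the buffer */
	uint32_t len;    /* length of the buffer */
	uint16_t index;  /* buffer ID, echoed back by the device */
	uint16_t flags;  /* AVAIL/USED/NEXT/WRITE/INDIRECT bits */
};

Since each descriptor is 16 bytes, an indirect table holds
desc->len / sizeof(struct vring_desc_packed) descriptors, which
fill_vec_buf_packed_indirect bounds-checks against vq->size.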

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 64664b7de..2d867e88e 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -448,6 +448,117 @@  reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
+static __rte_always_inline int
+fill_vec_buf_packed_indirect(struct virtio_net *dev,
+			struct vhost_virtqueue *vq,
+			struct vring_desc_packed *desc, uint16_t *vec_idx,
+			struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
+{
+	uint16_t i;
+	uint32_t nr_decs;
+	uint16_t vec_id = *vec_idx;
+	uint64_t dlen;
+	struct vring_desc_packed *descs, *idescs = NULL;
+
+	dlen = desc->len;
+	descs = (struct vring_desc_packed *)(uintptr_t)
+		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
+	if (unlikely(!descs))
+		return -1;
+
+	if (unlikely(dlen < desc->len)) {
+		/*
+		 * The indirect desc table is not contiguous
+		 * in process VA space, we have to copy it.
+		 */
+		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
+		if (unlikely(!idescs))
+			return -1;
+
+		descs = idescs;
+	}
+
+	nr_decs =  desc->len / sizeof(struct vring_desc_packed);
+	if (unlikely(nr_decs >= vq->size)) {
+		free_ind_table(idescs);
+		return -1;
+	}
+
+	for (i = 0; i < nr_decs; i++) {
+		if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
+			free_ind_table(idescs);
+			return -1;
+		}
+
+		*len += descs[i].len;
+		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
+						descs[i].addr, descs[i].len,
+						perm)))
+			return -1;
+	}
+	*vec_idx = vec_id;
+
+	if (unlikely(!!idescs))
+		free_ind_table(idescs);
+
+	return 0;
+}
+
+static inline int
+fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+				uint16_t avail_idx, uint16_t *desc_count,
+				struct buf_vector *buf_vec, uint16_t *vec_idx,
+				uint16_t *buf_id, uint16_t *len, uint8_t perm)
+{
+	bool wrap_counter = vq->avail_wrap_counter;
+	struct vring_desc_packed *descs = vq->desc_packed;
+	uint16_t vec_id = *vec_idx;
+
+	if (avail_idx < vq->last_avail_idx)
+		wrap_counter ^= 1;
+
+	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
+		return -1;
+
+	*desc_count = 0;
+
+	while (1) {
+		if (unlikely(vec_id >= BUF_VECTOR_MAX))
+			return -1;
+
+		*desc_count += 1;
+		*buf_id = descs[avail_idx].index;
+
+		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
+			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
+							&descs[avail_idx],
+							&vec_id, buf_vec,
+							len, perm) < 0))
+				return -1;
+		} else {
+			*len += descs[avail_idx].len;
+
+			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
+							descs[avail_idx].addr,
+							descs[avail_idx].len,
+							perm)))
+				return -1;
+		}
+
+		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
+			break;
+
+		if (++avail_idx >= vq->size) {
+			avail_idx -= vq->size;
+			wrap_counter ^= 1;
+		}
+	}
+
+	*vec_idx = vec_id;
+
+	return 0;
+}
+
 static __rte_always_inline int
 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			    struct rte_mbuf *m, struct buf_vector *buf_vec,