From patchwork Thu Apr 19 07:07:48 2018
X-Patchwork-Submitter: Jens Freimann
X-Patchwork-Id: 38481
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org, maxime.coquelin@redhat.com, mst@redhat.com, jens@freimann.org
Date: Thu, 19 Apr 2018 09:07:48 +0200
Message-Id: <20180419070751.8933-18-jfreimann@redhat.com>
In-Reply-To: <20180419070751.8933-1-jfreimann@redhat.com>
References: <20180419070751.8933-1-jfreimann@redhat.com>
Subject: [dpdk-dev] [PATCH v4 17/20] vhost: add support for mergeable buffers with packed virtqueues

Add support for mergeable buffers when the packed virtqueue layout
(VIRTIO_F_RING_PACKED) has been negotiated. fill_vec_buf() is split
into __fill_vec_buf_split() and __fill_vec_buf_packed() helpers,
reserve_avail_buf_mergeable() and virtio_dev_merge_rx() gain a packed
code path, and used descriptors are written back directly, toggling
the ring wrap counter on every wrap-around, instead of going through
the shadow used ring that the split layout uses.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
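
Note for reviewers: the flush path in virtio_dev_merge_rx() relies on
the packed-ring helpers (toggle_wrap_counter(), set_desc_used(),
desc_is_avail()) introduced earlier in this series. The stand-alone
sketch below only illustrates the intended semantics; the struct
layout, flag values and helper bodies are assumptions for the purpose
of the example, not copies of the tree's code. The idea: a descriptor
whose AVAIL and USED flag bits differ is available to the device;
marking it used sets both bits to the current used wrap counter, which
toggles each time the used index wraps past the end of the ring.

#include <stdint.h>
#include <stdio.h>

/* Packed-ring flag bits (virtio 1.1 draft); assumed values. */
#define VRING_DESC_F_AVAIL	(1 << 7)
#define VRING_DESC_F_USED	(1 << 15)

/* Assumed descriptor layout, following earlier patches in the series. */
struct vring_desc_packed {
	uint64_t addr;
	uint32_t len;
	uint16_t index;
	uint16_t flags;
};

struct ring_sketch {
	struct vring_desc_packed *desc_packed;
	uint16_t size;			/* ring size, a power of two */
	uint16_t used_wrap_counter;	/* toggled on every ring wrap */
};

static void toggle_wrap_counter(struct ring_sketch *vq)
{
	vq->used_wrap_counter ^= 1;
}

/* Available to the device: AVAIL != USED, relative to the wrap counter. */
static int desc_is_avail(struct ring_sketch *vq, struct vring_desc_packed *desc)
{
	int avail = !!(desc->flags & VRING_DESC_F_AVAIL);
	int used = !!(desc->flags & VRING_DESC_F_USED);

	return avail != used && avail == vq->used_wrap_counter;
}

/* Used: AVAIL and USED both equal to the current wrap counter value. */
static void set_desc_used(struct ring_sketch *vq, struct vring_desc_packed *desc)
{
	if (vq->used_wrap_counter)
		desc->flags |= (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
	else
		desc->flags &= ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
}

/* The flush loop from the patch, reduced to its essentials; in the
 * real code a write barrier (rte_smp_wmb()) precedes it. */
static void flush_used(struct ring_sketch *vq, uint16_t from, uint16_t to)
{
	uint16_t i;

	for (i = from; i < to; i++) {
		if ((i & (vq->size - 1)) == 0)
			toggle_wrap_counter(vq);
		set_desc_used(vq, &vq->desc_packed[i & (vq->size - 1)]);
	}
}

int main(void)
{
	struct vring_desc_packed ring[4] = { { 0 } };
	struct ring_sketch vq = {
		.desc_packed = ring, .size = 4, .used_wrap_counter = 0,
	};

	flush_used(&vq, 0, 4);	/* wrap counter toggles to 1 at slot 0 */
	/* prints flags=0x8080 avail=0: the descriptor is used now */
	printf("flags=0x%x avail=%d\n",
		(unsigned int)ring[0].flags, desc_is_avail(&vq, &ring[0]));
	return 0;
}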
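
In the same spirit, __fill_vec_buf_packed() walks a descriptor chain by
stepping to the next ring slot instead of following the split ring's
next field, since packed-ring chains occupy consecutive slots.
increase_index() also comes from an earlier patch; a plausible stand-in
definition (an assumption, shown only for the wrap-around behaviour):

#include <stdint.h>
#include <stdio.h>

/* Step to the next slot, wrapping at the ring size. With a
 * power-of-two size this is equivalent to (idx + 1) & (size - 1). */
static inline uint16_t increase_index(uint16_t idx, uint16_t size)
{
	return (idx + 1) % size;
}

int main(void)
{
	uint16_t idx = 0;
	int i;

	for (i = 0; i < 5; i++) {
		printf("%u ", idx);	/* prints: 0 1 2 3 0 */
		idx = increase_index(idx, 4);
	}
	printf("\n");
	return 0;
}
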
 lib/librte_vhost/virtio_net.c | 143 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 116 insertions(+), 27 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1c7664a7d..e6e75f9a3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -401,17 +401,53 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 }
 
 static __rte_always_inline int
-fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			 uint32_t avail_idx, uint32_t *vec_idx,
-			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
-			 uint16_t *desc_chain_len)
+__fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			struct buf_vector *buf_vec,
+			uint32_t *len, uint32_t *vec_id)
+{
+	uint16_t idx = vq->last_avail_idx;
+	struct vring_desc_packed *descs = vq->desc_packed;
+	uint32_t _vec_id = *vec_id;
+
+	if (vq->desc_packed[idx].flags & VRING_DESC_F_INDIRECT) {
+		descs = (struct vring_desc_packed *)(uintptr_t)
+			vhost_iova_to_vva(dev, vq, vq->desc_packed[idx].addr,
+					vq->desc_packed[idx].len,
+					VHOST_ACCESS_RO);
+		if (unlikely(!descs))
+			return -1;
+
+		idx = 0;
+	}
+
+	while (1) {
+		if (unlikely(_vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+			return -1;
+
+		*len += descs[idx].len;
+		buf_vec[_vec_id].buf_addr = descs[idx].addr;
+		buf_vec[_vec_id].buf_len = descs[idx].len;
+		buf_vec[_vec_id].desc_idx = idx;
+		_vec_id++;
+
+		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
+			break;
+
+		idx = increase_index(idx, vq->size);
+	}
+	*vec_id = _vec_id;
+
+	return 0;
+}
+
+static __rte_always_inline int
+__fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			struct buf_vector *buf_vec,
+			uint32_t *len, uint32_t *vec_id, uint32_t avail_idx)
 {
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
-	uint32_t vec_id = *vec_idx;
-	uint32_t len = 0;
 	struct vring_desc *descs = vq->desc;
-
-	*desc_chain_head = idx;
+	uint32_t _vec_id = *vec_id;
 
 	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
 		descs = (struct vring_desc *)(uintptr_t)
@@ -425,20 +461,51 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 
 	while (1) {
-		if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+		if (unlikely(_vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
 			return -1;
 
-		len += descs[idx].len;
-		buf_vec[vec_id].buf_addr = descs[idx].addr;
-		buf_vec[vec_id].buf_len = descs[idx].len;
-		buf_vec[vec_id].desc_idx = idx;
-		vec_id++;
+		*len += descs[idx].len;
+		buf_vec[_vec_id].buf_addr = descs[idx].addr;
+		buf_vec[_vec_id].buf_len = descs[idx].len;
+		buf_vec[_vec_id].desc_idx = idx;
+		_vec_id++;
 
 		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
 			break;
 
 		idx = descs[idx].next;
 	}
+	*vec_id = _vec_id;
+
+	return 0;
+}
+
+static __rte_always_inline int
+fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			 uint32_t avail_idx, uint32_t *vec_idx,
+			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
+			 uint16_t *desc_chain_len)
+{
+	uint16_t idx;
+	uint32_t vec_id = *vec_idx;
+	uint32_t len = 0;
+
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
+		idx = vq->last_avail_idx;
+	else
+		idx = vq->avail->ring[avail_idx & (vq->size - 1)];
+
+	*desc_chain_head = idx;
+
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+		if (__fill_vec_buf_packed(dev, vq,
+				buf_vec, &len, &vec_id))
+			return -1;
+	} else {
+		if (__fill_vec_buf_split(dev, vq,
+				buf_vec, &len, &vec_id, avail_idx))
+			return -1;
+	}
 
 	*desc_chain_len = len;
 	*vec_idx = vec_id;
@@ -465,14 +532,16 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	cur_idx = vq->last_avail_idx;
 
 	while (size > 0) {
-		if (unlikely(cur_idx == avail_head))
+		if (unlikely(cur_idx == avail_head) &&
+			!(dev->features & (1ULL << VIRTIO_F_RING_PACKED)))
 			return -1;
 
 		if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
 						&head_idx, &len) < 0))
 			return -1;
 		len = RTE_MIN(len, size);
-		update_shadow_used_ring(vq, head_idx, len);
+		if (!vq_is_packed(dev))
+			update_shadow_used_ring(vq, head_idx, len);
 		size -= len;
 
 		cur_idx++;
@@ -620,6 +689,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	uint16_t num_buffers;
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t avail_head;
+	uint16_t i;
+	struct vring_desc_packed *descs = NULL;
 
 	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
@@ -634,7 +705,6 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	if (unlikely(vq->enabled == 0))
 		goto out_access_unlock;
 
-
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_lock(vq);
 
@@ -648,10 +718,15 @@
 
 	vq->batch_copy_nb_elems = 0;
 
-	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
-
-	vq->shadow_used_idx = 0;
-	avail_head = *((volatile uint16_t *)&vq->avail->idx);
+	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+		avail_head = vq->last_avail_idx;
+		descs = vq->desc_packed;
+	} else {
+		rte_prefetch0(&vq->avail->ring[vq->last_avail_idx &
+				(vq->size - 1)]);
+		avail_head = *((volatile uint16_t *)&vq->avail->idx);
+		vq->shadow_used_idx = 0;
+	}
 
 	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
 		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
@@ -661,7 +736,9 @@
 			VHOST_LOG_DEBUG(VHOST_DATA,
 				"(%d) failed to get enough desc from vring\n",
 				dev->vid);
-			vq->shadow_used_idx -= num_buffers;
+
+			if (!(dev->features & (1ULL << VIRTIO_F_RING_PACKED)))
+				vq->shadow_used_idx -= num_buffers;
 			break;
 		}
 
@@ -671,7 +748,8 @@
 
 		if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx],
 						buf_vec, num_buffers) < 0) {
-			vq->shadow_used_idx -= num_buffers;
+			if (!(dev->features & (1ULL << VIRTIO_F_RING_PACKED)))
+				vq->shadow_used_idx -= num_buffers;
 			break;
 		}
 
@@ -680,9 +758,18 @@
 
 	do_data_copy_enqueue(dev, vq);
 
-	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_used_ring(dev, vq);
-		vhost_vring_call(dev, vq);
+	if (!(dev->features & (1ULL << VIRTIO_F_RING_PACKED))) {
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring(dev, vq);
+			vhost_vring_call(dev, vq);
+		}
+	} else {
+		rte_smp_wmb();
+		for (i = avail_head; i < vq->last_avail_idx; i++) {
+			if ((i & (vq->size - 1)) == 0)
+				toggle_wrap_counter(vq);
+			set_desc_used(vq, &descs[i & (vq->size - 1)]);
+		}
 	}
 
 out:
@@ -773,7 +860,7 @@ vhost_enqueue_burst_packed(struct virtio_net *dev, uint16_t queue_id,
 			goto out;
 		}
 
-		idx = (idx+1) & (vq->size - 1);
+		idx = (idx + 1) & mask;
 		desc = &descs[idx];
 		if (unlikely(!desc_is_avail(vq, desc)))
 			goto out;
@@ -842,6 +929,8 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 
 	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
 		return virtio_dev_merge_rx(dev, queue_id, pkts, count);
+	else if (vq_is_packed(dev))
+		return vhost_enqueue_burst_packed(dev, queue_id, pkts, count);
 	else
 		return virtio_dev_rx(dev, queue_id, pkts, count);
 }
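
As a closing usage note, none of the above changes the public API: an
application still hands mbufs to the device with
rte_vhost_enqueue_burst(), and the mergeable/packed/split dispatch in
the last hunk happens internally. A minimal caller sketch (device
setup, mempool creation and error handling elided; vid is assumed to
be a valid id delivered through the new_device() callback):

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_vhost.h>

/* Hand a burst of mbufs to virtqueue 0 (the guest's first RX queue)
 * and free whatever the guest could not take. */
static void
forward_to_guest(int vid, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t enqueued, i;

	enqueued = rte_vhost_enqueue_burst(vid, 0, pkts, nb_pkts);

	for (i = enqueued; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);
}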