From patchwork Thu Apr 19 07:07:46 2018
X-Patchwork-Submitter: Jens Freimann <jfreimann@redhat.com>
X-Patchwork-Id: 38479
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org, maxime.coquelin@redhat.com,
	mst@redhat.com, jens@freimann.org
Date: Thu, 19 Apr 2018 09:07:46 +0200
Message-Id: <20180419070751.8933-16-jfreimann@redhat.com>
In-Reply-To: <20180419070751.8933-1-jfreimann@redhat.com>
References: <20180419070751.8933-1-jfreimann@redhat.com>
Subject: [dpdk-dev] [PATCH v4 15/20] vhost: dequeue for packed queues

Implement code to dequeue and process descriptors from the vring if
VIRTIO_F_RING_PACKED is enabled.

Check whether a descriptor was made available by the driver by looking
at the VRING_DESC_F_AVAIL flag in the descriptor. If so, dequeue it and
mark it used by setting the VRING_DESC_F_USED flag according to the
current value of the used wrap counter.

The used wrap counter needs to be toggled whenever the last descriptor
in the ring is written out. This allows host and guest to detect new
descriptors even after the ring has wrapped.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 lib/librte_vhost/vhost.c      |   1 +
 lib/librte_vhost/virtio_net.c | 220 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 221 insertions(+)
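The descriptor-flag helpers used below, toggle_wrap_counter(),
desc_is_avail() and _set_desc_used(), come from virtio-packed.h, which
this patch includes but which is introduced elsewhere in the series. As
a reference for review, here is a minimal sketch of the semantics this
patch assumes; the flag bit positions follow the packed-ring proposal
(AVAIL is bit 7 of the flags word, USED is bit 15), and the field names
are inferred from how the helpers are called here:

/* Sketch only, for review; the real definitions are in virtio-packed.h. */
#define VRING_DESC_F_AVAIL	(1 << 7)
#define VRING_DESC_F_USED	(1 << 15)

static inline void
toggle_wrap_counter(struct vhost_virtqueue *vq)
{
	vq->used_wrap_counter ^= 1;
}

/*
 * A descriptor is available to the device when its AVAIL flag matches
 * the wrap counter and its USED flag does not.
 */
static inline int
desc_is_avail(struct vhost_virtqueue *vq, struct vring_desc_packed *desc)
{
	if (vq->used_wrap_counter)
		return (desc->flags & VRING_DESC_F_AVAIL) &&
		       !(desc->flags & VRING_DESC_F_USED);
	return !(desc->flags & VRING_DESC_F_AVAIL) &&
	       (desc->flags & VRING_DESC_F_USED);
}

/*
 * Mark a descriptor used: both flags are set to the wrap counter value
 * that was current when the descriptor was dequeued.
 */
static inline void
_set_desc_used(struct vring_desc_packed *desc, int wrap_counter)
{
	uint16_t flags = desc->flags;

	if (wrap_counter)
		flags |= VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
	else
		flags &= ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);

	desc->flags = flags;
}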
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 5ddf55ed9..f7989cfbd 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -185,6 +185,7 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+	vq->used_wrap_counter = 1;
 
 	vhost_user_iotlb_init(dev, vring_idx);
 	/* Backends are set to -1 indicating an inactive device. */
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ed7198dbb..9a9ff92f9 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -19,6 +19,7 @@
 
 #include "iotlb.h"
 #include "vhost.h"
+#include "virtio-packed.h"
 
 #define MAX_PKT_BURST 32
 
@@ -1118,6 +1119,221 @@ restore_mbuf(struct rte_mbuf *m)
 	}
 }
 
+static inline int
+dequeue_desc_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		    struct rte_mempool *mbuf_pool, struct rte_mbuf *m,
+		    struct vring_desc_packed *descs)
+{
+	struct vring_desc_packed *desc;
+	uint64_t desc_addr;
+	uint32_t desc_avail, desc_offset;
+	uint32_t mbuf_avail, mbuf_offset;
+	uint32_t cpy_len;
+	struct rte_mbuf *cur = m, *prev = m;
+	struct virtio_net_hdr *hdr = NULL;
+	uint16_t head_idx = vq->last_used_idx & (vq->size - 1);
+	int wrap_counter = vq->used_wrap_counter;
+	int rc = 0;
+
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_lock(vq);
+
+	desc = &descs[vq->last_used_idx & (vq->size - 1)];
+	if (unlikely(desc->len < dev->vhost_hlen) ||
+	    (desc->flags & VRING_DESC_F_INDIRECT)) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"INDIRECT not supported yet\n");
+		rc = -1;
+		goto out;
+	}
+
+	desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+				      desc->len, VHOST_ACCESS_RO);
+	if (unlikely(!desc_addr)) {
+		rc = -1;
+		goto out;
+	}
+
+	if (virtio_net_with_host_offload(dev)) {
+		hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
+		rte_prefetch0(hdr);
+	}
+
+	/*
+	 * A virtio driver normally uses at least 2 desc buffers
+	 * for Tx: the first for storing the header, and others
+	 * for storing the data.
+	 */
+	if (likely((desc->len == dev->vhost_hlen) &&
+		   (desc->flags & VRING_DESC_F_NEXT) != 0)) {
+		if ((++vq->last_used_idx & (vq->size - 1)) == 0)
+			toggle_wrap_counter(vq);
+
+		desc = &descs[vq->last_used_idx & (vq->size - 1)];
+
+		desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+					      desc->len, VHOST_ACCESS_RO);
+		if (unlikely(!desc_addr)) {
+			rc = -1;
+			goto out;
+		}
+
+		desc_offset = 0;
+		desc_avail = desc->len;
+	} else {
+		desc_avail = desc->len - dev->vhost_hlen;
+		desc_offset = dev->vhost_hlen;
+	}
+
+	rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
+
+	PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+
+	mbuf_offset = 0;
+	mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+	while (1) {
+		uint64_t hpa;
+
+		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+
+		/*
+		 * A desc buf might span two host physical pages that are
+		 * not contiguous. In such case (gpa_to_hpa returns 0), data
+		 * will be copied even though zero copy is enabled.
+		 */
+		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
+			     desc->addr + desc_offset, cpy_len)))) {
+			cur->data_len = cpy_len;
+			cur->data_off = 0;
+			cur->buf_addr = (void *)(uintptr_t)desc_addr;
+			cur->buf_physaddr = hpa;
+
+			/*
+			 * In zero copy mode, one mbuf can only reference data
+			 * for all or part of one desc buf.
+			 */
+			mbuf_avail = cpy_len;
+		} else {
+			rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+					mbuf_offset),
+				(void *)((uintptr_t)(desc_addr + desc_offset)),
+				cpy_len);
+		}
+
+		mbuf_avail -= cpy_len;
+		mbuf_offset += cpy_len;
+		desc_avail -= cpy_len;
+		desc_offset += cpy_len;
+
+		/* This desc buf reaches its end, get the next one */
+		if (desc_avail == 0) {
+			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
+				break;
+
+			if ((++vq->last_used_idx & (vq->size - 1)) == 0)
+				toggle_wrap_counter(vq);
+
+			desc = &descs[vq->last_used_idx & (vq->size - 1)];
+
+			desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+						      desc->len,
+						      VHOST_ACCESS_RO);
+			if (unlikely(!desc_addr))
+				break;
+
+			rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+			desc_offset = 0;
+			desc_avail = desc->len;
+
+			PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
+		}
+
+		/*
+		 * This mbuf reaches its end, get a new one
+		 * to hold more data.
+		 */
+		if (mbuf_avail == 0) {
+			cur = rte_pktmbuf_alloc(mbuf_pool);
+			if (unlikely(cur == NULL)) {
+				RTE_LOG(ERR, VHOST_DATA,
+					"Failed to allocate memory for mbuf.\n");
+				break;
+			}
+
+			prev->next = cur;
+			prev->data_len = mbuf_offset;
+			m->nb_segs += 1;
+			m->pkt_len += mbuf_offset;
+			prev = cur;
+
+			mbuf_offset = 0;
+			mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+		}
+	}
+
+	if (hdr)
+		vhost_dequeue_offload(hdr, m);
+
+	if ((++vq->last_used_idx & (vq->size - 1)) == 0)
+		toggle_wrap_counter(vq);
+
+	rte_smp_wmb();
+	_set_desc_used(&descs[head_idx], wrap_counter);
+
+	prev->data_len = mbuf_offset;
+	m->pkt_len += mbuf_offset;
+
+out:
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_unlock(vq);
+
+	return rc;
+}
+
+static inline uint16_t
+vhost_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			   struct rte_mempool *mbuf_pool,
+			   struct rte_mbuf **pkts, uint16_t count)
+{
+	uint16_t i = 0;
+	uint16_t idx;
+	struct vring_desc_packed *desc = vq->desc_packed;
+	int err;
+
+	rte_spinlock_lock(&vq->access_lock);
+	if (unlikely(vq->enabled == 0))
+		goto out;
+
+	count = RTE_MIN(MAX_PKT_BURST, count);
+
+	for (i = 0; i < count; i++) {
+		idx = vq->last_used_idx & (vq->size - 1);
+		if (!desc_is_avail(vq, &desc[idx]))
+			break;
+		rte_smp_rmb();
+
+		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+		if (unlikely(pkts[i] == NULL)) {
+			RTE_LOG(ERR, VHOST_DATA,
+				"Failed to allocate memory for mbuf.\n");
+			break;
+		}
+
+		err = dequeue_desc_packed(dev, vq, mbuf_pool, pkts[i], desc);
+		if (unlikely(err)) {
+			rte_pktmbuf_free(pkts[i]);
+			break;
+		}
+	}
+
+out:
+	rte_spinlock_unlock(&vq->access_lock);
+
+	return i;
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -1150,6 +1366,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 	vq = dev->virtqueue[queue_id];
 
+	if (vq_is_packed(dev))
+		return vhost_dequeue_burst_packed(dev, vq, mbuf_pool,
+						  pkts, count);
+
 	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
 		return 0;
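
To make the wrap-counter handling concrete, here is a hypothetical
trace for a ring of size 4; note the "& (vq->size - 1)" masking used
throughout assumes a power-of-two ring size:

/*
 * Hypothetical trace, ring size 4, used_wrap_counter starts at 1:
 *
 *   consume idx 0, 1, 2:  last_used_idx becomes 1, 2, 3; counter stays 1
 *   consume idx 3:        (++last_used_idx & 3) == 0, counter toggles to 0
 *
 * From then on, a descriptor at idx 0 onward only counts as available
 * once the driver has written its AVAIL/USED flags to match the new
 * counter value, so stale descriptors from the previous lap are never
 * re-consumed.
 */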
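
For context on how this path gets exercised: applications keep polling
the guest TX ring through the existing rte_vhost_dequeue_burst() API,
which now branches into the packed-queue path when VIRTIO_F_RING_PACKED
is negotiated. A minimal, hypothetical polling loop (the vid, queue
index and mempool are placeholders; queue index 1 is the guest TX ring
of queue pair 0, which is what the host dequeues from):

#include <rte_mbuf.h>
#include <rte_vhost.h>

static void
poll_guest_tx(int vid, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, n;

	/* Dequeue up to 32 packets sent by the guest. */
	n = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, 32);
	for (i = 0; i < n; i++) {
		/* forward/process pkts[i], then release it */
		rte_pktmbuf_free(pkts[i]);
	}
}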