From patchwork Tue Jul 4 09:49:15 2017
X-Patchwork-Submitter: Maxime Coquelin <maxime.coquelin@redhat.com>
X-Patchwork-Id: 26350
X-Patchwork-Delegate: yuanhan.liu@linux.intel.com
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: mst@redhat.com, vkaplans@redhat.com, jasowang@redhat.com,
	jfreiman@redhat.com, Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Tue, 4 Jul 2017 11:49:15 +0200
Message-Id: <20170704094922.11405-13-maxime.coquelin@redhat.com>
In-Reply-To: <20170704094922.11405-1-maxime.coquelin@redhat.com>
References: <20170704094922.11405-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [RFC 12/19] vhost: use the guest IOVA to host VA helper

Replace the rte_vhost_gpa_to_vva() calls with vhost_iova_to_vva(), which
also requires passing the length of the mapped area and the access
permissions needed on it.
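For context, the difference is not just the helper's name: the new call
must describe the whole access up front. The following self-contained
sketch (illustrative only, not code from this series; every sketch_*
name is invented, with only the VHOST_ACCESS_* spelling echoed from the
real flags) shows why a translation helper that receives the length and
the wanted permissions can reject accesses that an address-only lookup
like rte_vhost_gpa_to_vva() has to accept blindly:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical permission flags, mirroring the VHOST_ACCESS_* values
 * used by this patch (names reused here for readability only). */
#define SKETCH_ACCESS_RO 0x1
#define SKETCH_ACCESS_WO 0x2
#define SKETCH_ACCESS_RW (SKETCH_ACCESS_RO | SKETCH_ACCESS_WO)

/* Minimal stand-in for one translation entry (a guest memory region
 * or an IOTLB entry). */
struct sketch_map_entry {
	uint64_t iova;     /* guest IOVA where the mapping starts */
	uint64_t host_va;  /* corresponding host virtual address */
	uint64_t size;     /* mapping length in bytes */
	uint8_t  perm;     /* permitted access: SKETCH_ACCESS_* */
};

/*
 * Range- and permission-checked translation. An address-only lookup
 * can only tell whether 'iova' itself is mapped; by also receiving
 * 'len' and 'perm', the helper can reject a descriptor whose buffer
 * runs past the end of the mapping, or which asks for an access the
 * mapping does not grant.
 */
static uint64_t
sketch_iova_to_vva(const struct sketch_map_entry *tbl, size_t n,
		   uint64_t iova, uint64_t len, uint8_t perm)
{
	for (size_t i = 0; i < n; i++) {
		const struct sketch_map_entry *e = &tbl[i];

		if (iova < e->iova || iova + len > e->iova + e->size)
			continue; /* not fully covered by this entry */
		if ((e->perm & perm) != perm)
			return 0; /* mapped, but access not permitted */
		return e->host_va + (iova - e->iova);
	}
	return 0; /* translation miss: caller must fail the request */
}

int main(void)
{
	const struct sketch_map_entry tbl[] = {
		{ 0x10000, 0x7f2a00010000ULL, 0x10000, SKETCH_ACCESS_RW },
	};

	/* Fully inside the mapping: translation succeeds. */
	printf("%#llx\n", (unsigned long long)
	       sketch_iova_to_vva(tbl, 1, 0x18000, 0x100, SKETCH_ACCESS_RW));
	/* Buffer overruns the mapping end: rejected, prints 0. */
	printf("%#llx\n", (unsigned long long)
	       sketch_iova_to_vva(tbl, 1, 0x1ff00, 0x200, SKETCH_ACCESS_RO));
	return 0;
}

With such a signature, a buggy or malicious descriptor whose buffer
overruns its mapping fails translation instead of handing the backend
an out-of-bounds host pointer.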
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 71 +++++++++++++++++++++++++++++--------------
 1 file changed, 49 insertions(+), 22 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 726d349..62c231a 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -154,8 +154,9 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 }
 
 static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
-		  struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
+copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		  struct vring_desc *descs, struct rte_mbuf *m,
+		  uint16_t desc_idx, uint32_t size)
 {
 	uint32_t desc_avail, desc_offset;
 	uint32_t mbuf_avail, mbuf_offset;
@@ -166,7 +167,8 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
 	uint16_t nr_desc = 1;
 
 	desc = &descs[desc_idx];
-	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+	desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+				      desc->len, VHOST_ACCESS_RW);
 	/*
 	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
 	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
@@ -205,7 +207,9 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
 			return -1;
 
 		desc = &descs[desc->next];
-		desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+		desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+					      desc->len,
+					      VHOST_ACCESS_RW);
 		if (unlikely(!desc_addr))
 			return -1;
 
@@ -290,8 +294,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 
 		if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
 			descs = (struct vring_desc *)(uintptr_t)
-				rte_vhost_gpa_to_vva(dev->mem,
-					vq->desc[desc_idx].addr);
+				vhost_iova_to_vva(dev,
+						vq, vq->desc[desc_idx].addr,
+						vq->desc[desc_idx].len,
+						VHOST_ACCESS_RO);
 			if (unlikely(!descs)) {
 				count = i;
 				break;
@@ -304,7 +310,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 			sz = vq->size;
 		}
 
-		err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
+		err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz);
 		if (unlikely(err)) {
 			used_idx = (start_idx + i) & (vq->size - 1);
 			vq->used->ring[used_idx].len = dev->vhost_hlen;
@@ -350,7 +356,9 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
 		descs = (struct vring_desc *)(uintptr_t)
-			rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
+			vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
+					  vq->desc[idx].len,
+					  VHOST_ACCESS_RO);
 		if (unlikely(!descs))
 			return -1;
 
@@ -425,8 +433,9 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline int
-copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
-			    struct buf_vector *buf_vec, uint16_t num_buffers)
+copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			    struct rte_mbuf *m, struct buf_vector *buf_vec,
+			    uint16_t num_buffers)
 {
 	uint32_t vec_idx = 0;
 	uint64_t desc_addr;
@@ -439,7 +448,9 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 	if (unlikely(m == NULL))
 		return -1;
 
-	desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
+	desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
+				      buf_vec[vec_idx].buf_len,
+				      VHOST_ACCESS_RW);
 	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
 		return -1;
 
@@ -460,8 +471,11 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 		/* done with current desc buf, get the next one */
 		if (desc_avail == 0) {
 			vec_idx++;
-			desc_addr = rte_vhost_gpa_to_vva(dev->mem,
-					buf_vec[vec_idx].buf_addr);
+			desc_addr =
+				vhost_iova_to_vva(dev, vq,
+						buf_vec[vec_idx].buf_addr,
+						buf_vec[vec_idx].buf_len,
+						VHOST_ACCESS_RW);
 			if (unlikely(!desc_addr))
 				return -1;
 
@@ -558,7 +572,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 			dev->vid, vq->last_avail_idx,
 			vq->last_avail_idx + num_buffers);
 
-		if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx],
+		if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx],
 						buf_vec, num_buffers) < 0) {
 			vq->shadow_used_idx -= num_buffers;
 			break;
@@ -755,8 +769,9 @@ put_zmbuf(struct zcopy_mbuf *zmbuf)
 }
 
 static __rte_always_inline int
-copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
-		  uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
+copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		  struct vring_desc *descs, uint16_t max_desc,
+		  struct rte_mbuf *m, uint16_t desc_idx,
 		  struct rte_mempool *mbuf_pool)
 {
 	struct vring_desc *desc;
@@ -774,7 +789,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 			(desc->flags & VRING_DESC_F_INDIRECT))
 		return -1;
 
-	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+	desc_addr = vhost_iova_to_vva(dev,
+				      vq, desc->addr,
+				      desc->len,
+				      VHOST_ACCESS_RO);
 	if (unlikely(!desc_addr))
 		return -1;
 
@@ -794,7 +812,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 		if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
 			return -1;
 
-		desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+		desc_addr = vhost_iova_to_vva(dev,
+					      vq, desc->addr,
+					      desc->len,
+					      VHOST_ACCESS_RO);
 		if (unlikely(!desc_addr))
 			return -1;
 
@@ -858,7 +879,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 		if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
 			return -1;
 
-		desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+		desc_addr = vhost_iova_to_vva(dev,
+					      vq, desc->addr,
+					      desc->len,
+					      VHOST_ACCESS_RO);
 		if (unlikely(!desc_addr))
 			return -1;
 
@@ -1104,8 +1128,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 		if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
 			desc = (struct vring_desc *)(uintptr_t)
-				rte_vhost_gpa_to_vva(dev->mem,
-					vq->desc[desc_indexes[i]].addr);
+				vhost_iova_to_vva(dev, vq,
+						vq->desc[desc_indexes[i]].addr,
+						sizeof(*desc),
+						VHOST_ACCESS_RO);
 			if (unlikely(!desc))
 				break;
 
@@ -1125,7 +1151,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 			break;
 		}
 
-		err = copy_desc_to_mbuf(dev, desc, sz, pkts[i], idx, mbuf_pool);
+		err = copy_desc_to_mbuf(dev, vq, desc, sz,
+					pkts[i], idx, mbuf_pool);
 		if (unlikely(err)) {
 			rte_pktmbuf_free(pkts[i]);
 			break;
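A note on the permission values chosen above: buffers the host writes
into on the enqueue path are translated with VHOST_ACCESS_RW, while
indirect descriptor tables and the buffers only read on the dequeue
path ask for VHOST_ACCESS_RO. The helper itself is introduced earlier
in this series; as a rough idea of its expected shape (a hedged,
self-contained sketch under invented sketch_* names, not the series'
actual code), one would expect it to fall back to the legacy GPA
lookup when the guest has not negotiated a vIOMMU, since IOVAs then
equal guest-physical addresses:

#include <stdint.h>
#include <stdio.h>

/* Assumed feature gate; virtio defines VIRTIO_F_IOMMU_PLATFORM as
 * feature bit 33. */
#define SKETCH_F_IOMMU_PLATFORM (1ULL << 33)

struct sketch_dev {
	uint64_t features; /* features negotiated with the guest */
};

/* Stub legacy lookup: without a vIOMMU, the IOVA is a guest-physical
 * address and translates through the static memory map. */
static uint64_t
sketch_gpa_to_vva(struct sketch_dev *dev, uint64_t gpa)
{
	(void)dev;
	return 0x7f0000000000ULL + gpa; /* placeholder linear map */
}

/* Stub IOTLB lookup: with a vIOMMU, the translation must be resolved
 * (and, on a miss, requested from the frontend) via the per-virtqueue
 * IOTLB cache; here it always misses, for illustration. */
static uint64_t
sketch_iotlb_lookup(struct sketch_dev *dev, uint64_t iova,
		    uint64_t size, uint8_t perm)
{
	(void)dev; (void)iova; (void)size; (void)perm;
	return 0;
}

/* Dispatch on whether the guest actually uses a vIOMMU, so the
 * legacy path keeps its low cost when the feature is absent. */
static uint64_t
sketch_iova_to_vva(struct sketch_dev *dev, uint64_t iova,
		   uint64_t size, uint8_t perm)
{
	if (!(dev->features & SKETCH_F_IOMMU_PLATFORM))
		return sketch_gpa_to_vva(dev, iova); /* IOVA == GPA */
	return sketch_iotlb_lookup(dev, iova, size, perm);
}

int main(void)
{
	struct sketch_dev dev = { .features = 0 }; /* no vIOMMU */

	printf("%#llx\n", (unsigned long long)
	       sketch_iova_to_vva(&dev, 0x1000, 0x100, 0x1));
	return 0;
}

Keeping the no-vIOMMU path as a plain memory-map lookup means the
extra length and permission arguments cost little when the IOTLB
machinery is not in use.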