From patchwork Thu Oct  6 17:00:41 2016
X-Patchwork-Submitter: Maxime Coquelin
X-Patchwork-Id: 16423
X-Patchwork-Delegate: yuanhan.liu@linux.intel.com
From: Maxime Coquelin
To: yuanhan.liu@linux.intel.com, dev@dpdk.org
Cc: mst@redhat.com, jianfeng.tan@intel.com, olivier.matz@6wind.com,
 stephen@networkplumber.org, Maxime Coquelin
Date: Thu, 6 Oct 2016 19:00:41 +0200
Message-Id: <1475773241-5714-1-git-send-email-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH] vhost: Only access header if offloading is
 supported in dequeue path
List-Id: patches and discussions about DPDK

If offloading features are not negotiated, parsing the virtio header
is not needed.

Micro-benchmark with testpmd shows that the gain is +4% with indirect
descriptors, +1% when using direct descriptors.
Signed-off-by: Maxime Coquelin
---
 lib/librte_vhost/virtio_net.c | 47 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 35 insertions(+), 12 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index a59c39b..5d51693 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -548,6 +548,18 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 	return virtio_dev_rx(dev, queue_id, pkts, count);
 }
 
+static inline bool
+virtio_net_with_host_offload(struct virtio_net *dev)
+{
+	if (dev->features &
+		(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN |
+		 VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
+		 VIRTIO_NET_F_HOST_UFO))
+		return true;
+
+	return false;
+}
+
 static void
 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 {
@@ -600,6 +612,9 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 	void *l4_hdr = NULL;
 	struct tcp_hdr *tcp_hdr = NULL;
 
+	if (hdr->flags == 0 || hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+		return;
+
 	parse_ethernet(m, &l4_proto, &l4_hdr);
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
@@ -684,12 +699,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 		  struct rte_mempool *mbuf_pool)
 {
 	struct vring_desc *desc;
-	uint64_t desc_addr;
+	uint64_t desc_addr = 0;
 	uint32_t desc_avail, desc_offset;
 	uint32_t mbuf_avail, mbuf_offset;
 	uint32_t cpy_len;
 	struct rte_mbuf *cur = m, *prev = m;
-	struct virtio_net_hdr *hdr;
+	struct virtio_net_hdr *hdr = NULL;
 	/* A counter to avoid desc dead loop chain */
 	uint32_t nr_desc = 1;
 
@@ -698,12 +713,14 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 			(desc->flags & VRING_DESC_F_INDIRECT))
 		return -1;
 
-	desc_addr = gpa_to_vva(dev, desc->addr);
-	if (unlikely(!desc_addr))
-		return -1;
+	if (virtio_net_with_host_offload(dev)) {
+		desc_addr = gpa_to_vva(dev, desc->addr);
+		if (unlikely(!desc_addr))
+			return -1;
 
-	hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
-	rte_prefetch0(hdr);
+		hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
+		rte_prefetch0(hdr);
+	}
 
 	/*
 	 * A virtio driver normally uses at least 2 desc buffers
@@ -720,18 +737,24 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 		if (unlikely(!desc_addr))
 			return -1;
 
-		rte_prefetch0((void *)(uintptr_t)desc_addr);
-
 		desc_offset = 0;
 		desc_avail  = desc->len;
 		nr_desc    += 1;
-
-		PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
 	} else {
+		if (!desc_addr) {
+			desc_addr = gpa_to_vva(dev, desc->addr);
+			if (unlikely(!desc_addr))
+				return -1;
+		}
+
 		desc_avail  = desc->len - dev->vhost_hlen;
 		desc_offset = dev->vhost_hlen;
 	}
 
+	rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
+
+	PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+
 	mbuf_offset = 0;
 	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
 	while (1) {
@@ -795,7 +818,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 	prev->data_len = mbuf_offset;
 	m->pkt_len    += mbuf_offset;
 
-	if (hdr->flags != 0 || hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE)
+	if (virtio_net_with_host_offload(dev))
 		vhost_dequeue_offload(hdr, m);
 
 	return 0;
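
For illustration, here is a minimal standalone sketch of the decision the
patch makes in the dequeue path: test the negotiated feature bits once, and
skip mapping and parsing the virtio-net header when no host offload feature
was negotiated. It assumes, as in the virtio spec, that the VIRTIO_NET_F_*
identifiers below are feature bit positions and that the negotiated features
are kept as a bit mask; struct toy_dev and with_host_offload() are
hypothetical stand-ins for this sketch, not DPDK or vhost APIs.

/* Illustrative sketch only, not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Host offload feature bit positions, as numbered by the virtio spec. */
#define VIRTIO_NET_F_CSUM       0
#define VIRTIO_NET_F_HOST_TSO4  11
#define VIRTIO_NET_F_HOST_TSO6  12
#define VIRTIO_NET_F_HOST_ECN   13
#define VIRTIO_NET_F_HOST_UFO   14

struct toy_dev {
	uint64_t features;	/* negotiated feature bits, one bit per feature */
};

/* True if any host offload feature was negotiated for this device. */
static bool
with_host_offload(const struct toy_dev *dev)
{
	const uint64_t offload_mask = (1ULL << VIRTIO_NET_F_CSUM) |
				      (1ULL << VIRTIO_NET_F_HOST_TSO4) |
				      (1ULL << VIRTIO_NET_F_HOST_TSO6) |
				      (1ULL << VIRTIO_NET_F_HOST_ECN) |
				      (1ULL << VIRTIO_NET_F_HOST_UFO);

	return (dev->features & offload_mask) != 0;
}

int
main(void)
{
	struct toy_dev no_offload = { .features = 0 };
	struct toy_dev tso4_only  = { .features = 1ULL << VIRTIO_NET_F_HOST_TSO4 };

	/* Dequeue-path decision: only touch the virtio-net header when needed. */
	printf("no offload : parse header? %s\n",
	       with_host_offload(&no_offload) ? "yes" : "no");
	printf("TSO4 only  : parse header? %s\n",
	       with_host_offload(&tso4_only) ? "yes" : "no");

	return 0;
}

With the check reduced to a single mask test, the dequeue path can skip the
header mapping, the prefetch, and vhost_dequeue_offload() entirely when no
offload was negotiated, which is what the micro-benchmark numbers above
measure.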