From patchwork Wed Sep 25 17:13:15 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59713 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id B73981B994; Wed, 25 Sep 2019 11:32:36 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id B97822AA0 for ; Wed, 25 Sep 2019 11:32:31 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:30 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986134" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:28 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:15 +0800 Message-Id: <20190925171329.63734-2-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 01/15] vhost: add single packet enqueue function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add vhost enqueue function for single packet and meanwhile left space for flush used ring function. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 5b85b832d..520c4c6a8 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -774,6 +774,58 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, return error; } +static __rte_always_inline int +vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mbuf *pkt, struct buf_vector *buf_vec, uint16_t *nr_descs) +{ + uint16_t nr_vec = 0; + uint16_t avail_idx = vq->last_avail_idx; + uint16_t max_tries, tries = 0; + uint16_t buf_id = 0; + uint32_t len = 0; + uint16_t desc_count; + uint32_t size = pkt->pkt_len + dev->vhost_hlen; + uint16_t num_buffers = 0; + + if (rxvq_is_mergeable(dev)) + max_tries = vq->size - 1; + else + max_tries = 1; + + while (size > 0) { + /* + * if we tried all available ring items, and still + * can't get enough buf, it means something abnormal + * happened. 
+ */ + if (unlikely(++tries > max_tries)) + return -1; + + if (unlikely(fill_vec_buf_packed(dev, vq, + avail_idx, &desc_count, + buf_vec, &nr_vec, + &buf_id, &len, + VHOST_ACCESS_RW) < 0)) + return -1; + + len = RTE_MIN(len, size); + size -= len; + + num_buffers += 1; + + *nr_descs += desc_count; + avail_idx += desc_count; + if (avail_idx >= vq->size) + avail_idx -= vq->size; + } + + if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0) + return -1; + + return 0; +} + + static __rte_noinline uint32_t virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf **pkts, uint32_t count) @@ -831,6 +883,36 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, return pkt_idx; } +static __rte_unused int16_t +virtio_dev_rx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mbuf *pkt) +{ + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint16_t nr_descs = 0; + + rte_smp_rmb(); + if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec, + &nr_descs) < 0)) { + VHOST_LOG_DEBUG(VHOST_DATA, + "(%d) failed to get enough desc from vring\n", + dev->vid); + return -1; + } + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", + dev->vid, vq->last_avail_idx, + vq->last_avail_idx + nr_descs); + + vq->last_avail_idx += nr_descs; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + + return 0; +} + + static __rte_noinline uint32_t virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf **pkts, uint32_t count) From patchwork Wed Sep 25 17:13:16 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59714 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 0CC1E1BE82; Wed, 25 Sep 2019 11:32:43 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 8A3D42AA0 for ; Wed, 25 Sep 2019 11:32:32 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:31 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986143" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:30 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:16 +0800 Message-Id: <20190925171329.63734-3-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v3 02/15] vhost: unify unroll pragma parameter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add macro for unifying Clang/ICC/GCC unroll pragma format. 
Batch functions were contained of several small loops which optimized by compiler’s loop unrolling pragma. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile index 8623e91c0..30839a001 100644 --- a/lib/librte_vhost/Makefile +++ b/lib/librte_vhost/Makefile @@ -16,6 +16,24 @@ CFLAGS += -I vhost_user CFLAGS += -fno-strict-aliasing LDLIBS += -lpthread +ifeq ($(RTE_TOOLCHAIN), gcc) +ifeq ($(shell test $(GCC_VERSION) -ge 83 && echo 1), 1) +CFLAGS += -DSUPPORT_GCC_UNROLL_PRAGMA +endif +endif + +ifeq ($(RTE_TOOLCHAIN), clang) +ifeq ($(shell test $(CLANG_MAJOR_VERSION)$(CLANG_MINOR_VERSION) -ge 37 && echo 1), 1) +CFLAGS += -DSUPPORT_CLANG_UNROLL_PRAGMA +endif +endif + +ifeq ($(RTE_TOOLCHAIN), icc) +ifeq ($(shell test $(ICC_MAJOR_VERSION) -ge 16 && echo 1), 1) +CFLAGS += -DSUPPORT_ICC_UNROLL_PRAGMA +endif +endif + ifeq ($(CONFIG_RTE_LIBRTE_VHOST_NUMA),y) LDLIBS += -lnuma endif diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 884befa85..4cba8c5ef 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -39,6 +39,24 @@ #define VHOST_LOG_CACHE_NR 32 +#ifdef SUPPORT_GCC_UNROLL_PRAGMA +#define UNROLL_PRAGMA_PARAM "GCC unroll 4" +#endif + +#ifdef SUPPORT_CLANG_UNROLL_PRAGMA +#define UNROLL_PRAGMA_PARAM "unroll 4" +#endif + +#ifdef SUPPORT_ICC_UNROLL_PRAGMA +#define UNROLL_PRAGMA_PARAM "unroll (4)" +#endif + +#ifdef UNROLL_PRAGMA_PARAM +#define UNROLL_PRAGMA(param) _Pragma(param) +#else +#define UNROLL_PRAGMA(param) do {} while (0); +#endif + /** * Structure contains buffer address, length and descriptor index * from vring to do scatter RX. From patchwork Wed Sep 25 17:13:17 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59715 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id D11C61BE92; Wed, 25 Sep 2019 11:32:45 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 768042BD5 for ; Wed, 25 Sep 2019 11:32:33 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:33 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986154" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:31 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:17 +0800 Message-Id: <20190925171329.63734-4-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 03/15] vhost: add batch enqueue function for packed ring X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Batch enqueue function will first check whether descriptors are cache aligned. 
It will also check prerequisites in the beginning. Batch enqueue function not support chained mbufs, single packet enqueue function will handle it. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 4cba8c5ef..e241436c7 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -39,6 +39,10 @@ #define VHOST_LOG_CACHE_NR 32 +#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \ + sizeof(struct vring_packed_desc)) +#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1) + #ifdef SUPPORT_GCC_UNROLL_PRAGMA #define UNROLL_PRAGMA_PARAM "GCC unroll 4" #endif diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 520c4c6a8..5e08f7d9b 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -883,6 +883,86 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, return pkt_idx; } +static __rte_unused int +virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mbuf **pkts) +{ + bool wrap_counter = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; + uint16_t avail_idx = vq->last_avail_idx; + uint64_t desc_addrs[PACKED_BATCH_SIZE]; + struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE]; + uint32_t buf_offset = dev->vhost_hlen; + uint64_t lens[PACKED_BATCH_SIZE]; + uint16_t i; + + if (unlikely(avail_idx & PACKED_BATCH_MASK)) + return -1; + + if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size)) + return -1; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + if (unlikely(pkts[i]->next != NULL)) + return -1; + if (unlikely(!desc_is_avail(&descs[avail_idx + i], + wrap_counter))) + return -1; + } + + rte_smp_rmb(); + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + lens[i] = descs[avail_idx + i].len; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset))) + return -1; + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + desc_addrs[i] = vhost_iova_to_vva(dev, vq, + descs[avail_idx + i].addr, + &lens[i], + VHOST_ACCESS_RW); + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + if (unlikely(lens[i] != descs[avail_idx + i].len)) + return -1; + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + rte_prefetch0((void *)(uintptr_t)desc_addrs[i]); + hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *) + (uintptr_t)desc_addrs[i]; + lens[i] = pkts[i]->pkt_len + dev->vhost_hlen; + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr); + + vq->last_avail_idx += PACKED_BATCH_SIZE; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset), + rte_pktmbuf_mtod_offset(pkts[i], void *, 0), + pkts[i]->pkt_len); + } + + return 0; +} + static __rte_unused int16_t virtio_dev_rx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf *pkt) From patchwork Wed Sep 25 17:13:18 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59716 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: 
patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 388711BE99; Wed, 25 Sep 2019 11:32:49 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 0F2A22BD5 for ; Wed, 25 Sep 2019 11:32:34 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986167" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:33 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:18 +0800 Message-Id: <20190925171329.63734-5-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 04/15] vhost: add single packet dequeue function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add vhost single packet dequeue function for packed ring and meanwhile left space for shadow used ring update function. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 5e08f7d9b..17aabe8eb 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -1571,6 +1571,60 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, return i; } +static __rte_always_inline int +vhost_dequeue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t *buf_id, + uint16_t *desc_count) +{ + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint32_t dummy_len; + uint16_t nr_vec = 0; + int err; + + if (unlikely(fill_vec_buf_packed(dev, vq, + vq->last_avail_idx, desc_count, + buf_vec, &nr_vec, + buf_id, &dummy_len, + VHOST_ACCESS_RO) < 0)) + return -1; + + *pkts = rte_pktmbuf_alloc(mbuf_pool); + if (unlikely(*pkts == NULL)) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to allocate memory for mbuf.\n"); + return -1; + } + + err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts, + mbuf_pool); + if (unlikely(err)) { + rte_pktmbuf_free(*pkts); + return -1; + } + + return 0; +} + +static __rte_unused int +virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts) +{ + + uint16_t buf_id, desc_count; + + if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id, + &desc_count)) + return -1; + + vq->last_avail_idx += desc_count; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + + return 0; +} + static __rte_noinline uint16_t virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) From patchwork Wed Sep 25 17:13:19 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 
Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59717 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 74CA51BEA6; Wed, 25 Sep 2019 11:32:52 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 7D9E01B203 for ; Wed, 25 Sep 2019 11:32:36 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:36 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986172" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:34 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:19 +0800 Message-Id: <20190925171329.63734-6-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 05/15] vhost: add batch dequeue function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add batch dequeue function like enqueue function for packed ring, batch dequeue function will not support chained descritpors, single packet dequeue function will handle it. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index e241436c7..e50e137ca 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -61,6 +61,8 @@ #define UNROLL_PRAGMA(param) do {} while (0); #endif +#define PACKED_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | VRING_DESC_F_INDIRECT) + /** * Structure contains buffer address, length and descriptor index * from vring to do scatter RX. 
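The PACKED_SINGLE_DEQUEUE_FLAG definition above is what lets the batch path filter out descriptors it cannot handle. As a minimal illustration (not code from the series), the check below assumes the standard virtio packed-ring flag values; the helper name needs_single_dequeue() is ours:

/*
 * Illustrative sketch: a descriptor that is chained (NEXT) or indirect
 * (INDIRECT) must fall back to the single-packet dequeue path.
 * Flag values follow the virtio 1.1 specification.
 */
#include <stdbool.h>
#include <stdint.h>

#define VRING_DESC_F_NEXT	1
#define VRING_DESC_F_INDIRECT	4

#define PACKED_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | VRING_DESC_F_INDIRECT)

/* Hypothetical helper: true if this descriptor must take the single path. */
static inline bool
needs_single_dequeue(uint16_t desc_flags)
{
	return (desc_flags & PACKED_SINGLE_DEQUEUE_FLAG) != 0;
}

The batch dequeue function in the next hunk applies exactly this test to each of the PACKED_BATCH_SIZE descriptors and returns -1 if any of them trips it, so chained packets are handled by the single-packet path instead.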
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 17aabe8eb..2ff7329b2 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -1571,6 +1571,119 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, return i; } +static __rte_always_inline int +vhost_dequeue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, + uint16_t avail_idx, uintptr_t *desc_addrs, uint16_t *ids) +{ + bool wrap_counter = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; + uint64_t lens[PACKED_BATCH_SIZE]; + uint64_t buf_lens[PACKED_BATCH_SIZE]; + uint32_t buf_offset = dev->vhost_hlen; + uint16_t i; + + if (unlikely(avail_idx & PACKED_BATCH_MASK)) + return -1; + if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size)) + return -1; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + if (unlikely(!desc_is_avail(&descs[avail_idx + i], + wrap_counter))) + return -1; + if (unlikely(descs[avail_idx + i].flags & + PACKED_SINGLE_DEQUEUE_FLAG)) + return -1; + } + + rte_smp_rmb(); + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + lens[i] = descs[avail_idx + i].len; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + desc_addrs[i] = vhost_iova_to_vva(dev, vq, + descs[avail_idx + i].addr, + &lens[i], VHOST_ACCESS_RW); + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + if (unlikely((lens[i] != descs[avail_idx + i].len))) + return -1; + } + + if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, PACKED_BATCH_SIZE)) + return -1; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + if (unlikely(buf_lens[i] < (lens[i] - buf_offset))) + goto free_buf; + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset; + pkts[i]->data_len = pkts[i]->pkt_len; + ids[i] = descs[avail_idx + i].id; + } + + return 0; +free_buf: + for (i = 0; i < PACKED_BATCH_SIZE; i++) + rte_pktmbuf_free(pkts[i]); + + return -1; +} + +static __rte_unused int +virtio_dev_tx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts) +{ + uint16_t avail_idx = vq->last_avail_idx; + uint32_t buf_offset = dev->vhost_hlen; + uintptr_t desc_addrs[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; + struct virtio_net_hdr *hdr; + uint16_t i; + + if (vhost_dequeue_batch_packed(dev, vq, mbuf_pool, pkts, avail_idx, + desc_addrs, ids)) + return -1; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + rte_prefetch0((void *)(uintptr_t)desc_addrs[i]); + rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0), + (void *)(uintptr_t)(desc_addrs[i] + buf_offset), + pkts[i]->pkt_len); + } + + if (virtio_net_with_host_offload(dev)) { + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + hdr = (struct virtio_net_hdr *)(desc_addrs[i]); + vhost_dequeue_offload(hdr, pkts[i]); + } + } + + vq->last_avail_idx += PACKED_BATCH_SIZE; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + return 0; +} + static __rte_always_inline int vhost_dequeue_single_packed(struct 
virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t *buf_id, From patchwork Wed Sep 25 17:13:20 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59718 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 629F91BEB2; Wed, 25 Sep 2019 11:32:55 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 17C791B203 for ; Wed, 25 Sep 2019 11:32:37 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:37 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986186" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:36 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:20 +0800 Message-Id: <20190925171329.63734-7-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 06/15] vhost: flush vhost enqueue shadow ring by batch X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Buffer vhost enqueue shadow ring update, flush shadow ring until buffered descriptors number exceed one batch. Thus virtio can receive packets at a faster frequency. 
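As a minimal sketch of the flush policy described above (not code from the patch), the vhost state is reduced to the two counters involved; the helper name buffer_then_flush() is illustrative, the alignment seeding of enqueue_shadow_count from last_used_idx in the real patch is omitted, and PACKED_BATCH_SIZE is 4 assuming 64-byte cache lines and 16-byte packed descriptors:

/*
 * Illustrative only: buffer shadow used-ring updates and flush once the
 * buffered descriptor count reaches one batch.
 */
#include <stdint.h>

#define PACKED_BATCH_SIZE 4

struct shadow_state {
	uint16_t shadow_used_idx;	/* buffered shadow entries      */
	uint16_t enqueue_shadow_count;	/* descriptors covered by them  */
};

/* Hypothetical helper: record one update, flush when a batch is full. */
static inline int
buffer_then_flush(struct shadow_state *s, uint16_t desc_count)
{
	s->shadow_used_idx++;
	s->enqueue_shadow_count += desc_count;

	if (s->enqueue_shadow_count >= PACKED_BATCH_SIZE) {
		/* the real code copies data and writes the used ring here */
		s->shadow_used_idx = 0;
		s->enqueue_shadow_count = 0;
		return 1;	/* flushed */
	}
	return 0;		/* still buffering */
}

Batching the used-ring writes this way reduces how often the shared descriptor cache lines are updated, which is why the guest virtio driver can consume completed buffers at a faster rate.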
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index e50e137ca..18a207fc6 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -163,6 +163,7 @@ struct vhost_virtqueue { struct vring_used_elem_packed *shadow_used_packed; }; uint16_t shadow_used_idx; + uint16_t enqueue_shadow_count; struct vhost_vring_addr ring_addrs; struct batch_copy_elem *batch_copy_elems; diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 2ff7329b2..f85619dc2 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -169,6 +169,24 @@ update_shadow_used_ring_packed(struct vhost_virtqueue *vq, vq->shadow_used_packed[i].count = count; } +static __rte_always_inline void +update_enqueue_shadow_used_ring_packed(struct vhost_virtqueue *vq, + uint16_t desc_idx, uint32_t len, uint16_t count) +{ + /* enqueue shadow flush action aligned with batch num */ + if (!vq->shadow_used_idx) + vq->enqueue_shadow_count = vq->last_used_idx & + PACKED_BATCH_MASK; + + uint16_t i = vq->shadow_used_idx++; + + vq->shadow_used_packed[i].id = desc_idx; + vq->shadow_used_packed[i].len = len; + vq->shadow_used_packed[i].count = count; + + vq->enqueue_shadow_count += count; +} + static inline void do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq) { @@ -198,6 +216,23 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq) vq->batch_copy_nb_elems = 0; } +static __rte_always_inline void +flush_enqueue_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, uint32_t len[], uint16_t id[], + uint16_t count[], uint16_t num_buffers) +{ + int i; + for (i = 0; i < num_buffers; i++) { + update_enqueue_shadow_used_ring_packed(vq, id[i], len[i], + count[i]); + + if (vq->enqueue_shadow_count >= PACKED_BATCH_SIZE) { + do_data_copy_enqueue(dev, vq); + flush_shadow_used_ring_packed(dev, vq); + } + } +} + /* avoid write operation when necessary, to lessen cache issues */ #define ASSIGN_UNLESS_EQUAL(var, val) do { \ if ((var) != (val)) \ @@ -786,6 +821,9 @@ vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t desc_count; uint32_t size = pkt->pkt_len + dev->vhost_hlen; uint16_t num_buffers = 0; + uint32_t buffer_len[vq->size]; + uint16_t buffer_buf_id[vq->size]; + uint16_t buffer_desc_count[vq->size]; if (rxvq_is_mergeable(dev)) max_tries = vq->size - 1; @@ -811,6 +849,9 @@ vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, len = RTE_MIN(len, size); size -= len; + buffer_len[num_buffers] = len; + buffer_buf_id[num_buffers] = buf_id; + buffer_desc_count[num_buffers] = desc_count; num_buffers += 1; *nr_descs += desc_count; @@ -822,6 +863,8 @@ vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0) return -1; + flush_enqueue_packed(dev, vq, buffer_len, buffer_buf_id, + buffer_desc_count, num_buffers); return 0; } From patchwork Wed Sep 25 17:13:21 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59719 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id D6E261BEBA; Wed, 25 Sep 2019 11:32:58 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id B24ED1BE82 for ; 
Wed, 25 Sep 2019 11:32:39 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986202" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:37 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:21 +0800 Message-Id: <20190925171329.63734-8-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 07/15] vhost: add flush function for batch enqueue X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Flush used flags when batched enqueue function is finished. Descriptor's flags are pre-calculated as they will be reset by vhost. Signed-off-by: Marvin Liu Reviewed-by: Gavin Hu diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 18a207fc6..7bf9ff9b7 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -39,6 +39,9 @@ #define VHOST_LOG_CACHE_NR 32 +#define PACKED_RX_USED_FLAG (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED \ + | VRING_DESC_F_WRITE) +#define PACKED_RX_USED_WRAP_FLAG (VRING_DESC_F_WRITE) #define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \ sizeof(struct vring_packed_desc)) #define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1) diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index f85619dc2..a629e66d4 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -169,6 +169,49 @@ update_shadow_used_ring_packed(struct vhost_virtqueue *vq, vq->shadow_used_packed[i].count = count; } +static __rte_always_inline void +flush_used_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t *lens, uint16_t *ids, uint16_t flags) +{ + uint16_t i; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + vq->desc_packed[vq->last_used_idx + i].id = ids[i]; + vq->desc_packed[vq->last_used_idx + i].len = lens[i]; + } + + rte_smp_wmb(); + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + vq->desc_packed[vq->last_used_idx + i].flags = flags; + + vhost_log_cache_used_vring(dev, vq, vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc) * + PACKED_BATCH_SIZE); + vhost_log_cache_sync(dev, vq); + + vq->last_used_idx += PACKED_BATCH_SIZE; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } +} + +static __rte_always_inline void +flush_enqueue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t *lens, uint16_t *ids) +{ + uint16_t flags = 0; + + if (vq->used_wrap_counter) + flags = PACKED_RX_USED_FLAG; + else + flags = PACKED_RX_USED_WRAP_FLAG; + flush_used_batch_packed(dev, vq, lens, ids, flags); +} + static __rte_always_inline void update_enqueue_shadow_used_ring_packed(struct vhost_virtqueue *vq, uint16_t 
desc_idx, uint32_t len, uint16_t count) @@ -937,6 +980,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE]; uint32_t buf_offset = dev->vhost_hlen; uint64_t lens[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; uint16_t i; if (unlikely(avail_idx & PACKED_BATCH_MASK)) @@ -1003,6 +1047,12 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, pkts[i]->pkt_len); } + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + ids[i] = descs[avail_idx + i].id; + + flush_enqueue_batch_packed(dev, vq, lens, ids); + return 0; } From patchwork Wed Sep 25 17:13:22 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59720 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 7DED21BEB8; Wed, 25 Sep 2019 11:33:02 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 4D86E1B94D for ; Wed, 25 Sep 2019 11:32:41 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:41 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986219" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:39 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:22 +0800 Message-Id: <20190925171329.63734-9-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 08/15] vhost: buffer vhost dequeue shadow ring X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Buffer used ring updates as many as possible in vhost dequeue function for coordinating with virtio driver. For supporting buffer, shadow used ring element should contain descriptor index and its wrap counter. First shadowed ring index is recorded for calculating buffered number. 
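The shadow element layout this requires is shown below as a standalone sketch; the field order mirrors the struct vring_used_elem_packed change in this patch, while record_dequeue_head() is only an illustrative helper, not code from the series:

/*
 * Sketch of the extended shadow used-ring element for dequeue buffering.
 */
#include <stdint.h>

struct vring_used_elem_packed {
	uint16_t used_idx;		/* slot in the descriptor ring        */
	uint16_t id;			/* descriptor id to write back        */
	uint32_t len;			/* 0 for dequeue                      */
	uint32_t count;			/* descriptors consumed by this entry */
	uint16_t used_wrap_counter;	/* wrap state when the head is filled */
};

/* Hypothetical helper: only the first (head) used entry is shadowed. */
static inline void
record_dequeue_head(struct vring_used_elem_packed *head,
		uint16_t last_used_idx, uint16_t buf_id,
		uint16_t desc_count, uint16_t wrap)
{
	head->used_idx = last_used_idx;
	head->id = buf_id;
	head->len = 0;
	head->count = desc_count;
	head->used_wrap_counter = wrap;
}

Keeping used_idx and used_wrap_counter in the head entry allows the later flush to write that head descriptor's flags last, after a write barrier, so the guest never observes a partially updated run of used descriptors.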
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 7bf9ff9b7..f62e9ec3f 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -42,6 +42,8 @@ #define PACKED_RX_USED_FLAG (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED \ | VRING_DESC_F_WRITE) #define PACKED_RX_USED_WRAP_FLAG (VRING_DESC_F_WRITE) +#define PACKED_TX_USED_FLAG (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED) +#define PACKED_TX_USED_WRAP_FLAG (0x0) #define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \ sizeof(struct vring_packed_desc)) #define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1) @@ -110,9 +112,11 @@ struct log_cache_entry { }; struct vring_used_elem_packed { + uint16_t used_idx; uint16_t id; uint32_t len; uint32_t count; + uint16_t used_wrap_counter; }; /** @@ -167,6 +171,7 @@ struct vhost_virtqueue { }; uint16_t shadow_used_idx; uint16_t enqueue_shadow_count; + uint16_t dequeue_shadow_head; struct vhost_vring_addr ring_addrs; struct batch_copy_elem *batch_copy_elems; diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index a629e66d4..8f7209f83 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -230,6 +230,41 @@ update_enqueue_shadow_used_ring_packed(struct vhost_virtqueue *vq, vq->enqueue_shadow_count += count; } +static __rte_always_inline void +update_dequeue_shadow_used_ring_packed(struct vhost_virtqueue *vq, + uint16_t buf_id, uint16_t count) +{ + if (!vq->shadow_used_idx) { + vq->dequeue_shadow_head = vq->last_used_idx; + + vq->shadow_used_packed[0].id = buf_id; + vq->shadow_used_packed[0].len = 0; + vq->shadow_used_packed[0].count = count; + vq->shadow_used_packed[0].used_idx = vq->last_used_idx; + vq->shadow_used_packed[0].used_wrap_counter = + vq->used_wrap_counter; + + vq->shadow_used_idx = 1; + } else { + vq->desc_packed[vq->last_used_idx].id = buf_id; + vq->desc_packed[vq->last_used_idx].len = 0; + + if (vq->used_wrap_counter) + vq->desc_packed[vq->last_used_idx].flags = + PACKED_TX_USED_FLAG; + else + vq->desc_packed[vq->last_used_idx].flags = + PACKED_TX_USED_WRAP_FLAG; + } + + vq->last_used_idx += count; + + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } +} + static inline void do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq) { @@ -1822,6 +1857,8 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, &desc_count)) return -1; + update_dequeue_shadow_used_ring_packed(vq, buf_id, desc_count); + vq->last_avail_idx += desc_count; if (vq->last_avail_idx >= vq->size) { vq->last_avail_idx -= vq->size; From patchwork Wed Sep 25 17:13:23 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59721 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E858E1BED7; Wed, 25 Sep 2019 11:33:05 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id A913D1BE8A for ; Wed, 25 Sep 2019 11:32:43 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:43 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; 
d="scan'208";a="213986237" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:41 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:23 +0800 Message-Id: <20190925171329.63734-10-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 09/15] vhost: split enqueue and dequeue flush functions X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Vhost enqueue descriptors are updated by batch number, while vhost dequeue descriptors are buffered. Meanwhile in dequeue function only first descriptor is buffered. Due to these differences, split vhost enqueue and dequeue flush functions. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 8f7209f83..1b0fa2c64 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -92,8 +92,8 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq, } static __rte_always_inline void -flush_shadow_used_ring_packed(struct virtio_net *dev, - struct vhost_virtqueue *vq) +flush_enqueue_shadow_used_ring_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq) { int i; uint16_t used_idx = vq->last_used_idx; @@ -158,6 +158,32 @@ flush_shadow_used_ring_packed(struct virtio_net *dev, vhost_log_cache_sync(dev, vq); } +static __rte_always_inline void +flush_dequeue_shadow_used_ring_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq) +{ + uint16_t head_idx = vq->dequeue_shadow_head; + uint16_t head_flags; + struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0]; + + if (used_elem->used_wrap_counter) + head_flags = PACKED_TX_USED_FLAG; + else + head_flags = PACKED_TX_USED_WRAP_FLAG; + + vq->desc_packed[head_idx].id = used_elem->id; + + rte_smp_wmb(); + vq->desc_packed[head_idx].flags = head_flags; + + vhost_log_cache_used_vring(dev, vq, head_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); + + vq->shadow_used_idx = 0; + vhost_log_cache_sync(dev, vq); +} + static __rte_always_inline void update_shadow_used_ring_packed(struct vhost_virtqueue *vq, uint16_t desc_idx, uint32_t len, uint16_t count) @@ -199,6 +225,47 @@ flush_used_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, } } +static __rte_always_inline void +update_dequeue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint16_t *ids) +{ + uint16_t flags = 0; + uint16_t i; + + if (vq->used_wrap_counter) + flags = PACKED_TX_USED_FLAG; + else + flags = PACKED_TX_USED_WRAP_FLAG; + + if (!vq->shadow_used_idx) { + vq->dequeue_shadow_head = vq->last_used_idx; + vq->shadow_used_packed[0].id = ids[0]; + vq->shadow_used_packed[0].len = 0; + vq->shadow_used_packed[0].count = 1; + vq->shadow_used_packed[0].used_idx = vq->last_used_idx; + vq->shadow_used_packed[0].used_wrap_counter = + vq->used_wrap_counter; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 1; i < PACKED_BATCH_SIZE; i++) + vq->desc_packed[vq->last_used_idx + i].id = ids[i]; + rte_smp_wmb(); + 
UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 1; i < PACKED_BATCH_SIZE; i++) + vq->desc_packed[vq->last_used_idx + i].flags = flags; + + vq->shadow_used_idx = 1; + vq->last_used_idx += PACKED_BATCH_SIZE; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } + } else { + uint64_t lens[PACKED_BATCH_SIZE] = {0}; + flush_used_batch_packed(dev, vq, lens, ids, flags); + } +} + static __rte_always_inline void flush_enqueue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t *lens, uint16_t *ids) @@ -306,11 +373,29 @@ flush_enqueue_packed(struct virtio_net *dev, if (vq->enqueue_shadow_count >= PACKED_BATCH_SIZE) { do_data_copy_enqueue(dev, vq); - flush_shadow_used_ring_packed(dev, vq); + flush_enqueue_shadow_used_ring_packed(dev, vq); } } } +static __rte_unused void +flush_dequeue_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) +{ + if (!vq->shadow_used_idx) + return; + + int16_t shadow_count = vq->last_used_idx - vq->dequeue_shadow_head; + if (shadow_count <= 0) + shadow_count += vq->size; + + /* buffer used descs as many as possible when doing dequeue */ + if ((uint16_t)shadow_count >= (vq->size - MAX_PKT_BURST)) { + do_data_copy_dequeue(vq); + flush_dequeue_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } +} + /* avoid write operation when necessary, to lessen cache issues */ #define ASSIGN_UNLESS_EQUAL(var, val) do { \ if ((var) != (val)) \ @@ -1165,7 +1250,7 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, do_data_copy_enqueue(dev, vq); if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_packed(dev, vq); + flush_enqueue_shadow_used_ring_packed(dev, vq); vhost_vring_call_packed(dev, vq); } @@ -1796,6 +1881,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, pkts[i]->pkt_len); } + update_dequeue_batch_packed(dev, vq, ids); + if (virtio_net_with_host_offload(dev)) { UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) for (i = 0; i < PACKED_BATCH_SIZE; i++) { @@ -1896,7 +1983,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, } if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_packed(dev, vq); + flush_dequeue_shadow_used_ring_packed(dev, vq); vhost_vring_call_packed(dev, vq); } } @@ -1975,7 +2062,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, if (unlikely(i < count)) vq->shadow_used_idx = i; if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_packed(dev, vq); + flush_dequeue_shadow_used_ring_packed(dev, vq); vhost_vring_call_packed(dev, vq); } } From patchwork Wed Sep 25 17:13:24 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59722 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id B4E4E1BEE0; Wed, 25 Sep 2019 11:33:08 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 39F081BE8E for ; Wed, 25 Sep 2019 11:32:45 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:44 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986252" 
Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:43 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:24 +0800 Message-Id: <20190925171329.63734-11-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 10/15] vhost: optimize enqueue function of packed ring X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Optimize vhost device Tx datapath by separate functions. Packets can be filled into one descriptor will be handled by batch and others will be handled one by one as before. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 1b0fa2c64..c485e7f49 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -753,64 +753,6 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return 0; } -/* - * Returns -1 on fail, 0 on success - */ -static inline int -reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint32_t size, struct buf_vector *buf_vec, - uint16_t *nr_vec, uint16_t *num_buffers, - uint16_t *nr_descs) -{ - uint16_t avail_idx; - uint16_t vec_idx = 0; - uint16_t max_tries, tries = 0; - - uint16_t buf_id = 0; - uint32_t len = 0; - uint16_t desc_count; - - *num_buffers = 0; - avail_idx = vq->last_avail_idx; - - if (rxvq_is_mergeable(dev)) - max_tries = vq->size - 1; - else - max_tries = 1; - - while (size > 0) { - /* - * if we tried all available ring items, and still - * can't get enough buf, it means something abnormal - * happened. 
- */ - if (unlikely(++tries > max_tries)) - return -1; - - if (unlikely(fill_vec_buf_packed(dev, vq, - avail_idx, &desc_count, - buf_vec, &vec_idx, - &buf_id, &len, - VHOST_ACCESS_RW) < 0)) - return -1; - - len = RTE_MIN(len, size); - update_shadow_used_ring_packed(vq, buf_id, len, desc_count); - size -= len; - - avail_idx += desc_count; - if (avail_idx >= vq->size) - avail_idx -= vq->size; - - *nr_descs += desc_count; - *num_buffers += 1; - } - - *nr_vec = vec_idx; - - return 0; -} - static __rte_noinline void copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, struct buf_vector *buf_vec, @@ -1089,7 +1031,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, return pkt_idx; } -static __rte_unused int +static __rte_always_inline int virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf **pkts) { @@ -1176,7 +1118,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return 0; } -static __rte_unused int16_t +static __rte_always_inline int16_t virtio_dev_rx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf *pkt) { @@ -1205,52 +1147,36 @@ virtio_dev_rx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return 0; } - static __rte_noinline uint32_t virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf **pkts, uint32_t count) { uint32_t pkt_idx = 0; - uint16_t num_buffers; - struct buf_vector buf_vec[BUF_VECTOR_MAX]; - - for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { - uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; - uint16_t nr_vec = 0; - uint16_t nr_descs = 0; - - if (unlikely(reserve_avail_buf_packed(dev, vq, - pkt_len, buf_vec, &nr_vec, - &num_buffers, &nr_descs) < 0)) { - VHOST_LOG_DEBUG(VHOST_DATA, - "(%d) failed to get enough desc from vring\n", - dev->vid); - vq->shadow_used_idx -= num_buffers; - break; + uint32_t remained = count; + + do { + rte_prefetch0(&vq->desc_packed[vq->last_avail_idx & + (vq->size - 1)]); + if (remained >= PACKED_BATCH_SIZE) { + if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) { + pkt_idx += PACKED_BATCH_SIZE; + remained -= PACKED_BATCH_SIZE; + continue; + } } - VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", - dev->vid, vq->last_avail_idx, - vq->last_avail_idx + num_buffers); - - if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx], - buf_vec, nr_vec, - num_buffers) < 0) { - vq->shadow_used_idx -= num_buffers; + if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx])) break; - } - vq->last_avail_idx += nr_descs; - if (vq->last_avail_idx >= vq->size) { - vq->last_avail_idx -= vq->size; - vq->avail_wrap_counter ^= 1; - } - } - - do_data_copy_enqueue(dev, vq); + pkt_idx++; + remained--; + } while (pkt_idx < count); - if (likely(vq->shadow_used_idx)) { - flush_enqueue_shadow_used_ring_packed(dev, vq); + if (pkt_idx) { + if (vq->shadow_used_idx) { + do_data_copy_enqueue(dev, vq); + flush_enqueue_shadow_used_ring_packed(dev, vq); + } vhost_vring_call_packed(dev, vq); } From patchwork Wed Sep 25 17:13:25 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59724 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 4DEFF1BEF3; Wed, 25 Sep 2019 11:33:13 +0200 (CEST) Received: from 
mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id DEEAB1BEC1 for ; Wed, 25 Sep 2019 11:32:59 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:46 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986267" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:45 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:25 +0800 Message-Id: <20190925171329.63734-12-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 11/15] vhost: add batch and single zero dequeue functions X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Optimize vhost zero copy dequeue path like normal dequeue path. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index c485e7f49..9ab95763a 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -1881,6 +1881,141 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return 0; } +static __rte_unused int +virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts) +{ + struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE]; + uintptr_t desc_addrs[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; + uint16_t i; + + uint16_t avail_idx = vq->last_avail_idx; + + if (vhost_dequeue_batch_packed(dev, vq, mbuf_pool, pkts, avail_idx, + desc_addrs, ids)) + return -1; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + zmbufs[i] = get_zmbuf(vq); + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + if (!zmbufs[i]) + goto free_pkt; + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + zmbufs[i]->mbuf = pkts[i]; + zmbufs[i]->desc_idx = avail_idx + i; + zmbufs[i]->desc_count = 1; + } + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + rte_mbuf_refcnt_update(pkts[i], 1); + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next); + + vq->nr_zmbuf += PACKED_BATCH_SIZE; + vq->last_avail_idx += PACKED_BATCH_SIZE; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + + return 0; + +free_pkt: + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + rte_pktmbuf_free(pkts[i]); + + return -1; +} + +static __rte_unused int +virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev, + struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts) +{ + uint16_t buf_id, desc_count; + struct zcopy_mbuf *zmbuf; + + if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id, + &desc_count)) + 
return -1; + + zmbuf = get_zmbuf(vq); + if (!zmbuf) { + rte_pktmbuf_free(*pkts); + return -1; + } + zmbuf->mbuf = *pkts; + zmbuf->desc_idx = vq->last_avail_idx; + zmbuf->desc_count = desc_count; + + rte_mbuf_refcnt_update(*pkts, 1); + + vq->nr_zmbuf += 1; + TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); + + vq->last_avail_idx += desc_count; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + + return 0; +} + +static __rte_always_inline void +free_zmbuf(struct vhost_virtqueue *vq) +{ + struct zcopy_mbuf *next = NULL; + struct zcopy_mbuf *zmbuf; + + for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); + zmbuf != NULL; zmbuf = next) { + next = TAILQ_NEXT(zmbuf, next); + + uint16_t last_used_idx = vq->last_used_idx; + + if (mbuf_is_consumed(zmbuf->mbuf)) { + uint16_t flags = 0; + + if (vq->used_wrap_counter) + flags = PACKED_TX_USED_FLAG; + else + flags = PACKED_TX_USED_WRAP_FLAG; + + vq->desc_packed[last_used_idx].id = zmbuf->desc_idx; + vq->desc_packed[last_used_idx].len = 0; + + rte_smp_wmb(); + vq->desc_packed[last_used_idx].flags = flags; + + vq->last_used_idx += zmbuf->desc_count; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } + + TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); + restore_mbuf(zmbuf->mbuf); + rte_pktmbuf_free(zmbuf->mbuf); + put_zmbuf(zmbuf); + vq->nr_zmbuf -= 1; + } + } +} + static __rte_noinline uint16_t virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) From patchwork Wed Sep 25 17:13:26 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59723 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id BF1EC1BEE7; Wed, 25 Sep 2019 11:33:10 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id DD7BB1BEC0 for ; Wed, 25 Sep 2019 11:32:59 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:48 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986289" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:46 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:26 +0800 Message-Id: <20190925171329.63734-13-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 12/15] vhost: optimize dequeue function of packed ring X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Optimize vhost device Rx datapath by separate functions. 
No-chained and direct descriptors will be handled by batch and other will be handled one by one as before. Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 9ab95763a..20624efdc 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -184,17 +184,6 @@ flush_dequeue_shadow_used_ring_packed(struct virtio_net *dev, vhost_log_cache_sync(dev, vq); } -static __rte_always_inline void -update_shadow_used_ring_packed(struct vhost_virtqueue *vq, - uint16_t desc_idx, uint32_t len, uint16_t count) -{ - uint16_t i = vq->shadow_used_idx++; - - vq->shadow_used_packed[i].id = desc_idx; - vq->shadow_used_packed[i].len = len; - vq->shadow_used_packed[i].count = count; -} - static __rte_always_inline void flush_used_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t *lens, uint16_t *ids, uint16_t flags) @@ -378,7 +367,7 @@ flush_enqueue_packed(struct virtio_net *dev, } } -static __rte_unused void +static __rte_always_inline void flush_dequeue_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) { if (!vq->shadow_used_idx) @@ -1784,7 +1773,7 @@ vhost_dequeue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return -1; } -static __rte_unused int +static __rte_always_inline int virtio_dev_tx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts) { @@ -1859,7 +1848,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return 0; } -static __rte_unused int +static __rte_always_inline int virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts) { @@ -1881,7 +1870,7 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return 0; } -static __rte_unused int +static __rte_always_inline int virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, @@ -1940,7 +1929,7 @@ virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev, return -1; } -static __rte_unused int +static __rte_always_inline int virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts) @@ -2017,118 +2006,74 @@ free_zmbuf(struct vhost_virtqueue *vq) } static __rte_noinline uint16_t -virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, - struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +virtio_dev_tx_packed_zmbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint32_t count) { - uint16_t i; - - if (unlikely(dev->dequeue_zero_copy)) { - struct zcopy_mbuf *zmbuf, *next; - - for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); - zmbuf != NULL; zmbuf = next) { - next = TAILQ_NEXT(zmbuf, next); + uint32_t pkt_idx = 0; + uint32_t remained = count; - if (mbuf_is_consumed(zmbuf->mbuf)) { - update_shadow_used_ring_packed(vq, - zmbuf->desc_idx, - 0, - zmbuf->desc_count); + free_zmbuf(vq); - TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); - restore_mbuf(zmbuf->mbuf); - rte_pktmbuf_free(zmbuf->mbuf); - put_zmbuf(zmbuf); - vq->nr_zmbuf -= 1; + do { + if (remained >= PACKED_BATCH_SIZE) { + if (virtio_dev_tx_batch_packed_zmbuf(dev, vq, + mbuf_pool, + &pkts[pkt_idx])) { + pkt_idx += PACKED_BATCH_SIZE; + remained -= PACKED_BATCH_SIZE; + continue; } } + if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool, + 
&pkts[pkt_idx])) + break; - if (likely(vq->shadow_used_idx)) { - flush_dequeue_shadow_used_ring_packed(dev, vq); - vhost_vring_call_packed(dev, vq); - } - } - - VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + pkt_idx++; + remained--; + } while (remained); - count = RTE_MIN(count, MAX_PKT_BURST); - VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", - dev->vid, count); + if (pkt_idx) + vhost_vring_call_packed(dev, vq); - for (i = 0; i < count; i++) { - struct buf_vector buf_vec[BUF_VECTOR_MAX]; - uint16_t buf_id; - uint32_t dummy_len; - uint16_t desc_count, nr_vec = 0; - int err; + return pkt_idx; +} - if (unlikely(fill_vec_buf_packed(dev, vq, - vq->last_avail_idx, &desc_count, - buf_vec, &nr_vec, - &buf_id, &dummy_len, - VHOST_ACCESS_RO) < 0)) - break; +static __rte_noinline uint16_t +virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint32_t count) +{ + uint32_t pkt_idx = 0; + uint32_t remained = count; - if (likely(dev->dequeue_zero_copy == 0)) - update_shadow_used_ring_packed(vq, buf_id, 0, - desc_count); + do { + rte_prefetch0(&vq->desc_packed[vq->last_avail_idx & + (vq->size - 1)]); - pkts[i] = rte_pktmbuf_alloc(mbuf_pool); - if (unlikely(pkts[i] == NULL)) { - RTE_LOG(ERR, VHOST_DATA, - "Failed to allocate memory for mbuf.\n"); - break; + if (remained >= PACKED_BATCH_SIZE) { + if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool, + &pkts[pkt_idx])) { + flush_dequeue_packed(dev, vq); + pkt_idx += PACKED_BATCH_SIZE; + remained -= PACKED_BATCH_SIZE; + continue; + } } - err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i], - mbuf_pool); - if (unlikely(err)) { - rte_pktmbuf_free(pkts[i]); + if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool, + &pkts[pkt_idx])) break; - } - - if (unlikely(dev->dequeue_zero_copy)) { - struct zcopy_mbuf *zmbuf; - - zmbuf = get_zmbuf(vq); - if (!zmbuf) { - rte_pktmbuf_free(pkts[i]); - break; - } - zmbuf->mbuf = pkts[i]; - zmbuf->desc_idx = buf_id; - zmbuf->desc_count = desc_count; - /* - * Pin lock the mbuf; we will check later to see - * whether the mbuf is freed (when we are the last - * user) or not. If that's the case, we then could - * update the used ring safely. 
- */ - rte_mbuf_refcnt_update(pkts[i], 1); - - vq->nr_zmbuf += 1; - TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); - } - - vq->last_avail_idx += desc_count; - if (vq->last_avail_idx >= vq->size) { - vq->last_avail_idx -= vq->size; - vq->avail_wrap_counter ^= 1; - } - } + pkt_idx++; + remained--; + flush_dequeue_packed(dev, vq); + } while (remained); - if (likely(dev->dequeue_zero_copy == 0)) { - do_data_copy_dequeue(vq); - if (unlikely(i < count)) - vq->shadow_used_idx = i; - if (likely(vq->shadow_used_idx)) { - flush_dequeue_shadow_used_ring_packed(dev, vq); - vhost_vring_call_packed(dev, vq); - } + if (pkt_idx) { + if (vq->shadow_used_idx) + do_data_copy_dequeue(vq); } - return i; + return pkt_idx; } uint16_t @@ -2204,9 +2149,14 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, count -= 1; } - if (vq_is_packed(dev)) - count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count); - else + if (vq_is_packed(dev)) { + if (unlikely(dev->dequeue_zero_copy)) + count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool, + pkts, count); + else + count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, + count); + } else count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count); out: From patchwork Wed Sep 25 17:13:27 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59725 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 87BF21BEFE; Wed, 25 Sep 2019 11:33:16 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id CE3171BEB1 for ; Wed, 25 Sep 2019 11:33:00 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:49 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986301" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:48 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:27 +0800 Message-Id: <20190925171329.63734-14-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 13/15] vhost: cache address translation result X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Cache address translation result and use it in next translation. Due to limited regions are supported, buffers are most likely in same region when doing data transmission. 
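To illustrate the scheme outside of the library code, here is a minimal sketch of a single-entry translation cache with simplified type and field names (stand-ins, not the librte_vhost definitions): the last region that matched is remembered, so consecutive buffers that fall into the same region skip the linear scan over all regions.

/*
 * Single-entry region cache: remember the last hit so the common case
 * (the next buffer lives in the same region) avoids rescanning every
 * region. Names and types are simplified for illustration.
 */
#include <stdint.h>

struct region {
    uint64_t gpa_start;
    uint64_t gpa_end;    /* gpa_start + size */
    uint64_t hva_start;
};

struct region_cache {
    uint64_t gpa_start;
    uint64_t gpa_end;
    int64_t  hva_offset; /* hva = gpa - hva_offset */
};

static uint64_t
gpa_to_hva(const struct region *regions, uint32_t nregions,
           struct region_cache *cache, uint64_t gpa)
{
    uint32_t i;

    /* Fast path: the region cached by the previous translation. */
    if (gpa >= cache->gpa_start && gpa < cache->gpa_end)
        return gpa - cache->hva_offset;

    /* Slow path: linear scan, then refill the cache. */
    for (i = 0; i < nregions; i++) {
        const struct region *r = &regions[i];

        if (gpa >= r->gpa_start && gpa < r->gpa_end) {
            cache->gpa_start = r->gpa_start;
            cache->gpa_end = r->gpa_end;
            cache->hva_offset = (int64_t)(r->gpa_start - r->hva_start);
            return gpa - cache->hva_offset;
        }
    }

    return 0; /* no mapping found */
}

One point any such cache implies: it must be reset whenever the guest memory table is updated, otherwise a translation could be served from a region that no longer exists.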
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h index 7fb172912..d90235cd6 100644 --- a/lib/librte_vhost/rte_vhost.h +++ b/lib/librte_vhost/rte_vhost.h @@ -91,10 +91,18 @@ struct rte_vhost_mem_region { int fd; }; +struct rte_vhost_mem_region_cache { + uint64_t guest_phys_addr; + uint64_t guest_phys_addr_end; + int64_t host_user_addr_offset; + uint64_t size; +}; + /** * Memory structure includes region and mapping information. */ struct rte_vhost_memory { + struct rte_vhost_mem_region_cache cache_region; uint32_t nregions; struct rte_vhost_mem_region regions[]; }; @@ -232,11 +240,30 @@ rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem, struct rte_vhost_mem_region *r; uint32_t i; + struct rte_vhost_mem_region_cache *r_cache; + /* check with cached region */ + r_cache = &mem->cache_region; + if (likely(gpa >= r_cache->guest_phys_addr && gpa < + r_cache->guest_phys_addr_end)) { + if (unlikely(*len > r_cache->guest_phys_addr_end - gpa)) + *len = r_cache->guest_phys_addr_end - gpa; + + return gpa - r_cache->host_user_addr_offset; + } + + for (i = 0; i < mem->nregions; i++) { r = &mem->regions[i]; if (gpa >= r->guest_phys_addr && gpa < r->guest_phys_addr + r->size) { + r_cache->guest_phys_addr = r->guest_phys_addr; + r_cache->guest_phys_addr_end = r->guest_phys_addr + + r->size; + r_cache->size = r->size; + r_cache->host_user_addr_offset = r->guest_phys_addr - + r->host_user_addr; + if (unlikely(*len > r->guest_phys_addr + r->size - gpa)) *len = r->guest_phys_addr + r->size - gpa; From patchwork Wed Sep 25 17:13:28 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59726 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 71E1F1BF03; Wed, 25 Sep 2019 11:33:20 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id CD41A1BE98 for ; Wed, 25 Sep 2019 11:33:00 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:51 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986315" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:49 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:28 +0800 Message-Id: <20190925171329.63734-15-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 14/15] vhost: check whether disable software pre-fetch X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Disable software pre-fetch actions on Skylake and later platforms. Hardware can fetch needed data for vhost, additional software pre-fetch will impact performance. 
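The mechanism is a plain compile-time switch: the build probe checks the native target for AVX-512 support and, when found, defines DISABLE_SWPREFETCH, and the datapath prefetches are wrapped in #ifndef DISABLE_SWPREFETCH guards. As a sketch, a hypothetical helper expressing the same switch in one place (the patch itself keeps the guard inline at each call site):

/*
 * Hypothetical wrapper around the DISABLE_SWPREFETCH switch; with the
 * macro defined, the explicit prefetch compiles away and the hardware
 * prefetcher is relied upon instead.
 */
#include <rte_prefetch.h>

static inline void
vhost_sw_prefetch0(const volatile void *p)
{
#ifndef DISABLE_SWPREFETCH
    rte_prefetch0(p);
#else
    (void)p; /* rely on the hardware prefetcher */
#endif
}

Whether the guard is inlined or wrapped, the effect is the same: on builds where the probe fires, the descriptor and payload prefetches in the enqueue and dequeue loops disappear entirely.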
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile index 30839a001..5f3b42e56 100644 --- a/lib/librte_vhost/Makefile +++ b/lib/librte_vhost/Makefile @@ -16,6 +16,12 @@ CFLAGS += -I vhost_user CFLAGS += -fno-strict-aliasing LDLIBS += -lpthread +AVX512_SUPPORT=$(shell $(CC) -march=native -dM -E - pkt_len + dev->vhost_hlen; @@ -1144,8 +1146,10 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t remained = count; do { +#ifndef DISABLE_SWPREFETCH rte_prefetch0(&vq->desc_packed[vq->last_avail_idx & (vq->size - 1)]); +#endif if (remained >= PACKED_BATCH_SIZE) { if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) { pkt_idx += PACKED_BATCH_SIZE; @@ -1790,7 +1794,9 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) for (i = 0; i < PACKED_BATCH_SIZE; i++) { +#ifndef DISABLE_SWPREFETCH rte_prefetch0((void *)(uintptr_t)desc_addrs[i]); +#endif rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0), (void *)(uintptr_t)(desc_addrs[i] + buf_offset), pkts[i]->pkt_len); @@ -2046,8 +2052,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t remained = count; do { +#ifndef DISABLE_SWPREFETCH rte_prefetch0(&vq->desc_packed[vq->last_avail_idx & (vq->size - 1)]); +#endif if (remained >= PACKED_BATCH_SIZE) { if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool, From patchwork Wed Sep 25 17:13:29 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 59727 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 7A9511BF0E; Wed, 25 Sep 2019 11:33:24 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 347081BEC2 for ; Wed, 25 Sep 2019 11:33:01 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 02:32:52 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,547,1559545200"; d="scan'208";a="213986327" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga004.fm.intel.com with ESMTP; 25 Sep 2019 02:32:51 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Thu, 26 Sep 2019 01:13:29 +0800 Message-Id: <20190925171329.63734-16-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190925171329.63734-1-yong.liu@intel.com> References: <20190919163643.24130-2-yong.liu@intel.com> <20190925171329.63734-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v3 15/15] vhost: optimize packed ring dequeue when in-order X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" When VIRTIO_F_IN_ORDER feature is negotiated, vhost can optimize dequeue function by only update first used descriptor. 
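The decision is a single feature-bit test; a minimal sketch follows, taking the negotiated feature word directly rather than a struct virtio_net pointer, and assuming the bit number defined by the virtio specification:

#include <stdbool.h>
#include <stdint.h>

/* Feature bit number from the virtio specification. */
#define VIRTIO_F_IN_ORDER 35

static inline bool
virtio_net_is_inorder(uint64_t negotiated_features)
{
    /*
     * With in-order negotiated, buffers are consumed in the order they
     * were made available, so writing back only the first used
     * descriptor is enough for the driver to reclaim the whole range.
     */
    return (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) != 0;
}

When the test passes, the in-order helpers in the diff below keep a single shadow used entry and update it in place, instead of queueing one shadow entry per dequeued buffer.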
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index e3872e384..1e113fb3a 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -31,6 +31,12 @@ rxvq_is_mergeable(struct virtio_net *dev) return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF); } +static __rte_always_inline bool +virtio_net_is_inorder(struct virtio_net *dev) +{ + return dev->features & (1ULL << VIRTIO_F_IN_ORDER); +} + static bool is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring) { @@ -214,6 +220,29 @@ flush_used_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, } } +static __rte_always_inline void +update_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq, uint16_t id) +{ + vq->shadow_used_packed[0].id = id; + + if (!vq->shadow_used_idx) { + vq->dequeue_shadow_head = vq->last_used_idx; + vq->shadow_used_packed[0].len = 0; + vq->shadow_used_packed[0].count = 1; + vq->shadow_used_packed[0].used_idx = vq->last_used_idx; + vq->shadow_used_packed[0].used_wrap_counter = + vq->used_wrap_counter; + + vq->shadow_used_idx = 1; + } + + vq->last_used_idx += PACKED_BATCH_SIZE; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } +} + static __rte_always_inline void update_dequeue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t *ids) @@ -321,6 +350,32 @@ update_dequeue_shadow_used_ring_packed(struct vhost_virtqueue *vq, } } +static __rte_always_inline void +update_dequeue_shadow_used_ring_packed_inorder(struct vhost_virtqueue *vq, + uint16_t buf_id, uint16_t count) +{ + vq->shadow_used_packed[0].id = buf_id; + + if (!vq->shadow_used_idx) { + vq->dequeue_shadow_head = vq->last_used_idx; + + vq->shadow_used_packed[0].len = 0; + vq->shadow_used_packed[0].count = count; + vq->shadow_used_packed[0].used_idx = vq->last_used_idx; + vq->shadow_used_packed[0].used_wrap_counter = + vq->used_wrap_counter; + + vq->shadow_used_idx = 1; + } + + vq->last_used_idx += count; + + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } +} + static inline void do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq) { @@ -1801,8 +1856,12 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, (void *)(uintptr_t)(desc_addrs[i] + buf_offset), pkts[i]->pkt_len); } + if (virtio_net_is_inorder(dev)) + update_dequeue_batch_packed_inorder(vq, + ids[PACKED_BATCH_MASK]); + else + update_dequeue_batch_packed(dev, vq, ids); - update_dequeue_batch_packed(dev, vq, ids); if (virtio_net_with_host_offload(dev)) { UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) @@ -1865,7 +1924,11 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, &desc_count)) return -1; - update_dequeue_shadow_used_ring_packed(vq, buf_id, desc_count); + if (virtio_net_is_inorder(dev)) + update_dequeue_shadow_used_ring_packed_inorder(vq, buf_id, + desc_count); + else + update_dequeue_shadow_used_ring_packed(vq, buf_id, desc_count); vq->last_avail_idx += desc_count; if (vq->last_avail_idx >= vq->size) {