From patchwork Wed Oct 9 13:38:42 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Marvin Liu X-Patchwork-Id: 60740 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 295571C1A7; Wed, 9 Oct 2019 07:59:43 +0200 (CEST) Received: from mga12.intel.com (mga12.intel.com [192.55.52.136]) by dpdk.org (Postfix) with ESMTP id 9090F1C124 for ; Wed, 9 Oct 2019 07:59:35 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 08 Oct 2019 22:59:36 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.67,273,1566889200"; d="scan'208";a="223473399" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga002.fm.intel.com with ESMTP; 08 Oct 2019 22:59:33 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com, stephen@networkplumber.org, gavin.hu@arm.com Cc: dev@dpdk.org, Marvin Liu Date: Wed, 9 Oct 2019 21:38:42 +0800 Message-Id: <20191009133849.69002-8-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20191009133849.69002-1-yong.liu@intel.com> References: <20190925171329.63734-1-yong.liu@intel.com> <20191009133849.69002-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v4 07/14] vhost: add flush function for batch enqueue X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Flush used flags when the batched enqueue function is finished. Descriptors' flags are pre-calculated as they will be reset by vhost. 
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 18a207fc6..7bf9ff9b7 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -39,6 +39,9 @@ #define VHOST_LOG_CACHE_NR 32 +#define PACKED_RX_USED_FLAG (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED \ + | VRING_DESC_F_WRITE) +#define PACKED_RX_USED_WRAP_FLAG (VRING_DESC_F_WRITE) #define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \ sizeof(struct vring_packed_desc)) #define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1) diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index f85619dc2..a629e66d4 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -169,6 +169,49 @@ update_shadow_used_ring_packed(struct vhost_virtqueue *vq, vq->shadow_used_packed[i].count = count; } +static __rte_always_inline void +flush_used_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t *lens, uint16_t *ids, uint16_t flags) +{ + uint16_t i; + + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) { + vq->desc_packed[vq->last_used_idx + i].id = ids[i]; + vq->desc_packed[vq->last_used_idx + i].len = lens[i]; + } + + rte_smp_wmb(); + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + vq->desc_packed[vq->last_used_idx + i].flags = flags; + + vhost_log_cache_used_vring(dev, vq, vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc) * + PACKED_BATCH_SIZE); + vhost_log_cache_sync(dev, vq); + + vq->last_used_idx += PACKED_BATCH_SIZE; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } +} + +static __rte_always_inline void +flush_enqueue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t *lens, uint16_t *ids) +{ + uint16_t flags = 0; + + if (vq->used_wrap_counter) + flags = PACKED_RX_USED_FLAG; + else + flags = PACKED_RX_USED_WRAP_FLAG; + 
flush_used_batch_packed(dev, vq, lens, ids, flags); +} + static __rte_always_inline void update_enqueue_shadow_used_ring_packed(struct vhost_virtqueue *vq, uint16_t desc_idx, uint32_t len, uint16_t count) @@ -937,6 +980,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE]; uint32_t buf_offset = dev->vhost_hlen; uint64_t lens[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; uint16_t i; if (unlikely(avail_idx & PACKED_BATCH_MASK)) @@ -1003,6 +1047,12 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, pkts[i]->pkt_len); } + UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM) + for (i = 0; i < PACKED_BATCH_SIZE; i++) + ids[i] = descs[avail_idx + i].id; + + flush_enqueue_batch_packed(dev, vq, lens, ids); + return 0; }