From patchwork Sun Dec 13 23:35:54 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Huawei Xie X-Patchwork-Id: 9542 Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id 2F272919D; Mon, 14 Dec 2015 16:30:55 +0100 (CET) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id BEB01918F for ; Mon, 14 Dec 2015 16:30:52 +0100 (CET) Received: from orsmga003.jf.intel.com ([10.7.209.27]) by fmsmga103.fm.intel.com with ESMTP; 14 Dec 2015 07:30:52 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.20,427,1444719600"; d="scan'208";a="707075320" Received: from dpdk15.sh.intel.com ([10.239.129.25]) by orsmga003.jf.intel.com with ESMTP; 14 Dec 2015 07:30:51 -0800 From: Huawei Xie To: dev@dpdk.org Date: Mon, 14 Dec 2015 07:35:54 +0800 Message-Id: <1450049754-33635-3-git-send-email-huawei.xie@intel.com> X-Mailer: git-send-email 1.8.1.4 In-Reply-To: <1450049754-33635-1-git-send-email-huawei.xie@intel.com> References: <1450049754-33635-1-git-send-email-huawei.xie@intel.com> Subject: [dpdk-dev] [PATCH 2/2] vhost: call rte_pktmbuf_alloc_bulk in vhost dequeue X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" pre-allocate a bulk of mbufs instead of allocating one mbuf a time on demand Signed-off-by: Gerald Rogers Signed-off-by: Huawei Xie Acked-by: Konstantin Ananyev --- lib/librte_vhost/vhost_rxtx.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c index bbf3fac..0faae58 100644 --- a/lib/librte_vhost/vhost_rxtx.c +++ b/lib/librte_vhost/vhost_rxtx.c @@ -576,6 +576,8 
@@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, uint32_t i; uint16_t free_entries, entry_success = 0; uint16_t avail_idx; + uint8_t alloc_err = 0; + uint8_t seg_num; if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) { RTE_LOG(ERR, VHOST_DATA, @@ -609,6 +611,14 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries); + + if (unlikely(rte_pktmbuf_alloc_bulk(mbuf_pool, + pkts, free_entries) < 0)) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to bulk allocate %d mbufs\n", free_entries); + return 0; + } + /* Retrieve all of the head indexes first to avoid caching issues. */ for (i = 0; i < free_entries; i++) head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)]; @@ -621,9 +631,9 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, uint32_t vb_avail, vb_offset; uint32_t seg_avail, seg_offset; uint32_t cpy_len; - uint32_t seg_num = 0; + seg_num = 0; struct rte_mbuf *cur; - uint8_t alloc_err = 0; + desc = &vq->desc[head[entry_success]]; @@ -654,13 +664,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, vq->used->ring[used_idx].id = head[entry_success]; vq->used->ring[used_idx].len = 0; - /* Allocate an mbuf and populate the structure. 
*/ - m = rte_pktmbuf_alloc(mbuf_pool); - if (unlikely(m == NULL)) { - RTE_LOG(ERR, VHOST_DATA, - "Failed to allocate memory for mbuf.\n"); - break; - } + prev = cur = m = pkts[entry_success]; seg_offset = 0; seg_avail = m->buf_len - RTE_PKTMBUF_HEADROOM; cpy_len = RTE_MIN(vb_avail, seg_avail); @@ -668,8 +672,6 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0); seg_num++; - cur = m; - prev = m; while (cpy_len != 0) { rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, seg_offset), (void *)((uintptr_t)(vb_addr + vb_offset)), @@ -761,16 +763,23 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, cpy_len = RTE_MIN(vb_avail, seg_avail); } - if (unlikely(alloc_err == 1)) + if (unlikely(alloc_err)) break; m->nb_segs = seg_num; - pkts[entry_success] = m; vq->last_used_idx++; entry_success++; } + if (unlikely(alloc_err)) { + uint16_t i = entry_success; + + m->nb_segs = seg_num; + for (; i < free_entries; i++) + rte_pktmbuf_free(pkts[i]); + } + rte_compiler_barrier(); vq->used->idx += entry_success; /* Kick guest if required. */