From patchwork Mon Jun 20 16:10:32 2016
X-Patchwork-Submitter: Nélio Laranjeiro
X-Patchwork-Id: 14105
X-Patchwork-Delegate: bruce.richardson@intel.com
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org
Cc: Ferruh Yigit, Adrien Mazarguil
Date: Mon, 20 Jun 2016 18:10:32 +0200
Message-Id: <1466439037-14095-21-git-send-email-nelio.laranjeiro@6wind.com>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1466439037-14095-1-git-send-email-nelio.laranjeiro@6wind.com>
References: <1465379291-25310-1-git-send-email-nelio.laranjeiro@6wind.com>
 <1466439037-14095-1-git-send-email-nelio.laranjeiro@6wind.com>
Subject: [dpdk-dev] [PATCH v2 20/25] mlx5: check remaining space while processing Tx burst

From: Adrien Mazarguil

The space necessary to store segmented packets cannot be known in advance
and must be verified for each of them.
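To illustrate the idea (a minimal standalone sketch of the new per-packet
space check, with a made-up ring size and made-up index values, not the
driver code itself): instead of clamping the loop bound before entering the
burst loop, every iteration now verifies that the ring still has room for
the current packet while keeping one entry unused, the usual way to tell a
full ring apart from an empty one when only head and tail indexes are kept.

#include <stdio.h>

#define ELTS_N 8u /* ring size, a power of two like txq->elts_n */

int
main(void)
{
	unsigned int elts_head = 5; /* hypothetical producer index */
	unsigned int elts_tail = 7; /* hypothetical consumer index */
	unsigned int pkts_n = 6;    /* packets requested by the caller */
	unsigned int max;
	unsigned int i = 0;

	/* Free entries; unsigned wraparound covers elts_head < elts_tail. */
	max = ELTS_N - (elts_head - elts_tail);
	if (max > ELTS_N)
		max -= ELTS_N;
	do {
		/* Room for this packet plus the one entry kept unused? */
		if (max < 1 + 1)
			break;
		--max;
		--pkts_n;
		elts_head = (elts_head + 1) & (ELTS_N - 1);
		++i;
	} while (pkts_n);
	/* With 6 of 8 entries in use, only 1 of the 6 packets fits. */
	printf("enqueued %u packet(s), head now %u\n", i, elts_head);
	return 0;
}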
Signed-off-by: Adrien Mazarguil
---
 drivers/net/mlx5/mlx5_rxtx.c | 136 ++++++++++++++++++++++---------------------
 1 file changed, 70 insertions(+), 66 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2ee504d..7097713 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -583,50 +583,49 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	volatile union mlx5_wqe *wqe;
-	struct rte_mbuf *buf;
 
 	if (unlikely(!pkts_n))
 		return 0;
-	buf = pkts[0];
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_cqe(txq, txq->cq_ci + 1);
-	rte_prefetch0(buf);
+	rte_prefetch0(*pkts);
 	/* Start processing. */
 	txq_complete(txq);
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+	do {
+		struct rte_mbuf *buf;
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t lkey;
 
+		/* Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused. */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
 		rte_prefetch0(wqe);
-		if (i + 1 < max)
-			rte_prefetch0(pkts[i + 1]);
+		if (pkts_n)
+			rte_prefetch0(*pkts);
 		/* Retrieve buffer information. */
 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
 		length = DATA_LEN(buf);
 		/* Update element. */
 		(*txq->elts)[elts_head] = buf;
 		/* Prefetch next buffer data. */
-		if (i + 1 < max)
-			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
+		if (pkts_n)
+			rte_prefetch0(rte_pktmbuf_mtod(*pkts,
 						       volatile void *));
 		/* Retrieve Memory Region key for this memory pool. */
 		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
@@ -649,8 +648,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		txq->stats.obytes += length;
 #endif
 		elts_head = elts_head_next;
-		buf = pkts[i + 1];
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
@@ -693,44 +692,43 @@ mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	volatile union mlx5_wqe *wqe;
-	struct rte_mbuf *buf;
 	unsigned int max_inline = txq->max_inline;
 
 	if (unlikely(!pkts_n))
 		return 0;
-	buf = pkts[0];
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_cqe(txq, txq->cq_ci + 1);
-	rte_prefetch0(buf);
+	rte_prefetch0(*pkts);
 	/* Start processing. */
 	txq_complete(txq);
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+	do {
+		struct rte_mbuf *buf;
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t lkey;
 
+		/* Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused. */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
 		tx_prefetch_wqe(txq, txq->wqe_ci);
 		tx_prefetch_wqe(txq, txq->wqe_ci + 1);
-		if (i + 1 < max)
-			rte_prefetch0(pkts[i + 1]);
+		if (pkts_n)
+			rte_prefetch0(*pkts);
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
@@ -745,8 +743,8 @@ mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Update element. */
 		(*txq->elts)[elts_head] = buf;
 		/* Prefetch next buffer data. */
-		if (i + 1 < max)
-			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
+		if (pkts_n)
+			rte_prefetch0(rte_pktmbuf_mtod(*pkts,
 						       volatile void *));
 		if (length <= max_inline) {
 			if (buf->ol_flags & PKT_TX_VLAN_PKT)
@@ -766,12 +764,12 @@ mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		wqe->inl.ctrl.data[2] = 0;
 		elts_head = elts_head_next;
-		buf = pkts[i + 1];
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += length;
 #endif
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
@@ -879,13 +877,15 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	struct mlx5_mpw mpw = {
 		.state = MLX5_MPW_STATE_CLOSED,
 	};
 
+	if (unlikely(!pkts_n))
+		return 0;
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_wqe(txq, txq->wqe_ci);
@@ -895,22 +895,22 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		struct rte_mbuf *buf = pkts[i];
+	do {
+		struct rte_mbuf *buf;
 		volatile struct mlx5_wqe_data_seg *dseg;
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t cs_flags = 0;
 
+		/* Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused. */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
@@ -943,7 +943,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += length;
 #endif
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
@@ -1048,7 +1049,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	unsigned int inline_room = txq->max_inline;
@@ -1056,6 +1057,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		.state = MLX5_MPW_STATE_CLOSED,
 	};
 
+	if (unlikely(!pkts_n))
+		return 0;
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_wqe(txq, txq->wqe_ci);
@@ -1065,21 +1068,21 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		struct rte_mbuf *buf = pkts[i];
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+	do {
+		struct rte_mbuf *buf;
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t cs_flags = 0;
 
+		/* Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused. */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
@@ -1165,7 +1168,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += length;
 #endif
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
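For context (a caller-side note, not part of the patch): rte_eth_tx_burst()
may legitimately send fewer packets than requested, which is what makes the
early break above correct; the loop counter i counts the packets actually
posted and ends up as the burst function's return value. A typical retry
pattern on the application side looks like this sketch (send_all() and its
parameters are hypothetical; the signature matches the DPDK 16.07-era API,
where port IDs are 8-bit):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical helper: keep calling the burst function until all packets
 * are posted. Real code would bound the retries or drop on timeout, since
 * this spins if the queue never drains. */
static inline void
send_all(uint8_t port, uint16_t queue, struct rte_mbuf **pkts,
	 uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port, queue, &pkts[sent],
					 nb_pkts - sent);
}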