From patchwork Mon Oct 23 10:04:22 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Ophir Munk <ophirmu@mellanox.com>
X-Patchwork-Id: 30696
X-Patchwork-Delegate: ferruh.yigit@amd.com
Return-Path:
X-Original-To: patchwork@dpdk.org
Delivered-To: patchwork@dpdk.org
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id AF9E91B5D9;
	Mon, 23 Oct 2017 12:04:56 +0200 (CEST)
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
	by dpdk.org (Postfix) with ESMTP id F0E0C1B3D6
	for ; Mon, 23 Oct 2017 12:04:55 +0200 (CEST)
Received: from Internal Mail-Server by MTLPINE1
	(envelope-from ophirmu@mellanox.com) with ESMTPS (AES256-SHA encrypted);
	23 Oct 2017 12:04:52 +0200
Received: from pegasus05.mtr.labs.mlnx (pegasus05.mtr.labs.mlnx [10.210.16.100])
	by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id v9NA4qF5027885;
	Mon, 23 Oct 2017 13:04:52 +0300
Received: from pegasus05.mtr.labs.mlnx (localhost [127.0.0.1])
	by pegasus05.mtr.labs.mlnx (8.14.7/8.14.7) with ESMTP id v9NA4qff030704;
	Mon, 23 Oct 2017 10:04:52 GMT
Received: (from root@localhost)
	by pegasus05.mtr.labs.mlnx (8.14.7/8.14.7/Submit) id v9NA4qOa030703;
	Mon, 23 Oct 2017 10:04:52 GMT
From: Ophir Munk <ophirmu@mellanox.com>
To: Adrien Mazarguil
Cc: dev@dpdk.org, Thomas Monjalon, Olga Shern, Matan Azrad, Ophir Munk
Date: Mon, 23 Oct 2017 10:04:22 +0000
Message-Id: <1508753067-30643-3-git-send-email-ophirmu@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1508753067-30643-1-git-send-email-ophirmu@mellanox.com>
References: <1508753067-30643-1-git-send-email-ophirmu@mellanox.com>
Subject: [dpdk-dev] [PATCH v1 2/7] net/mlx4: inline more Tx functions
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe:
List-Archive:
List-Post:
List-Help:
List-Subscribe:
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

Change functions to inline on the Tx fast path in order to improve
performance.

Inside the inline function, call other functions to handle "unlikely"
cases, so that the inline function's code footprint remains small.

Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
---
 drivers/net/mlx4/mlx4_rxtx.c | 43 ++++++------------------------------
 drivers/net/mlx4/mlx4_rxtx.h | 52 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 58 insertions(+), 37 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 011ea79..ae37f9b 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -220,54 +220,25 @@ mlx4_txq_complete(struct txq *txq)
 	return 0;
 }

-/**
- * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
- *
- * @param buf
- *   Pointer to mbuf.
- *
- * @return
- *   Memory pool where data is located for given mbuf.
- */
-static struct rte_mempool *
-mlx4_txq_mb2mp(struct rte_mbuf *buf)
-{
-	if (unlikely(RTE_MBUF_INDIRECT(buf)))
-		return rte_mbuf_from_indirect(buf)->pool;
-	return buf->pool;
-}

 /**
- * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
- * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
- * remove an entry first.
+ * Add memory region (MR) <-> memory pool (MP) association to txq->mp2mr[].
+ * If mp2mr[] is full, remove an entry first.
  *
  * @param txq
  *   Pointer to Tx queue structure.
  * @param[in] mp
- *   Memory pool for which a memory region lkey must be returned.
+ *   Memory pool for which a memory region lkey must be added
+ * @param[in] i
+ *   Index in memory pool (MP) where to add memory region (MR)
  *
  * @return
- *   mr->lkey on success, (uint32_t)-1 on failure.
+ *   Added mr->lkey on success, (uint32_t)-1 on failure.
  */
-uint32_t
-mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i)
 {
-	unsigned int i;
 	struct ibv_mr *mr;

-	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-		if (unlikely(txq->mp2mr[i].mp == NULL)) {
-			/* Unknown MP, add a new MR for it. */
-			break;
-		}
-		if (txq->mp2mr[i].mp == mp) {
-			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
-			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
-			return txq->mp2mr[i].lkey;
-		}
-	}
 	/* Add a new entry, register MR first. */
 	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
 	      (void *)txq, mp->name, (void *)mp);
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index e10bbca..719ef45 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -53,6 +53,7 @@

 #include "mlx4.h"
 #include "mlx4_prm.h"
+#include "mlx4_utils.h"

 /** Rx queue counters. */
 struct mlx4_rxq_stats {
@@ -160,7 +161,6 @@ void mlx4_rx_queue_release(void *dpdk_rxq);

 /* mlx4_rxtx.c */

-uint32_t mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp);
 uint16_t mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
 		       uint16_t pkts_n);
 uint16_t mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
@@ -169,6 +169,8 @@ uint16_t mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts,
 			       uint16_t pkts_n);
 uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
 			       uint16_t pkts_n);
+uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp,
+			 unsigned int i);

 /* mlx4_txq.c */

@@ -177,4 +179,52 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 			const struct rte_eth_txconf *conf);
 void mlx4_tx_queue_release(void *dpdk_txq);

+/**
+ * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
+ * the cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Memory pool where data is located for given mbuf.
+ */
+static __rte_always_inline struct rte_mempool *
+mlx4_txq_mb2mp(struct rte_mbuf *buf)
+{
+	if (unlikely(RTE_MBUF_INDIRECT(buf)))
+		return rte_mbuf_from_indirect(buf)->pool;
+	return buf->pool;
+}
+
+/**
+ * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
+ * Call mlx4_txq_add_mr() if MP is not registered yet.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param[in] mp
+ *   Memory pool for which a memory region lkey must be returned.
+ *
+ * @return
+ *   mr->lkey on success, (uint32_t)-1 on failure.
+ */
+static __rte_always_inline uint32_t
+mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+{
+	unsigned int i;
+
+	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
+		if (unlikely(txq->mp2mr[i].mp == NULL)) {
+			/* Unknown MP, add a new MR for it. */
+			break;
+		}
+		if (txq->mp2mr[i].mp == mp) {
+			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
+			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
+			return txq->mp2mr[i].lkey;
+		}
+	}
+	return mlx4_txq_add_mr(txq, mp, i);
+}
 #endif /* MLX4_RXTX_H_ */
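
The structure of the patch is the usual fast-path/slow-path split: the
per-packet lookup (mlx4_txq_mp2mr()) is made small enough to be inlined into
the Tx burst code, while the rare miss handling (mlx4_txq_add_mr(): MR
registration and table eviction) stays out of line in the .c file. The
standalone sketch below only illustrates that split; every name in it
(lkey_cache, cache_lookup, cache_add_slow) is hypothetical, and it uses plain
static inline instead of the mlx4 driver's real types or DPDK's
__rte_always_inline macro.

/*
 * Illustrative sketch only: none of these names belong to the mlx4 PMD
 * or to DPDK; they merely mirror the shape of the change above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 8

struct lkey_cache {
	const void *key[CACHE_SIZE]; /* e.g. a mempool pointer */
	uint32_t val[CACHE_SIZE];    /* e.g. the MR lkey */
};

/* Slow path: rarely taken, kept out of line so callers stay small. */
static uint32_t
cache_add_slow(struct lkey_cache *c, const void *key, unsigned int i)
{
	uint32_t val;

	if (i == CACHE_SIZE)
		i = 0; /* cache full: overwrite the first entry (toy policy) */
	val = (uint32_t)(uintptr_t)key; /* stand-in for real registration work */
	c->key[i] = key;
	c->val[i] = val;
	return val;
}

/* Fast path: tiny body meant to be inlined at every call site. */
static inline uint32_t
cache_lookup(struct lkey_cache *c, const void *key)
{
	unsigned int i;

	for (i = 0; i != CACHE_SIZE; ++i) {
		if (c->key[i] == NULL)
			break; /* unknown key: leave the fast path */
		if (c->key[i] == key)
			return c->val[i]; /* hit: the common case */
	}
	return cache_add_slow(c, key, i);
}

int
main(void)
{
	static struct lkey_cache cache;
	static int dummy_pool;

	/* First call misses and registers; second call hits the cache. */
	printf("lkey=%" PRIu32 "\n", cache_lookup(&cache, &dummy_pool));
	printf("lkey=%" PRIu32 "\n", cache_lookup(&cache, &dummy_pool));
	return 0;
}

In this shape the hot loop returns directly on a hit, and only the unlikely
miss pays for an out-of-line call, which is the code-footprint argument made
in the commit message.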