From patchwork Tue Oct 27 12:27:17 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 82309
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
Cc: dev@dpdk.org
Date: Tue, 27 Oct 2020 20:27:17 +0800
Message-Id: <1603801650-442376-23-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
 <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 22/34] net/mlx5: optimize shared RSS list operation

When a shared RSS hrxq is created, the hrxq is always created directly
and no existing hrxq is reused. In this case, adding the shared RSS
hrxq to the queue list is redundant, and it also hurts the generic
queue lookup.

This commit avoids adding the shared RSS hrxq to the queue list.

Signed-off-by: Suanming Mou
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5.h      |  2 +-
 drivers/net/mlx5/mlx5_rxq.c  | 57 +++++++++++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_rxtx.h |  5 ++--
 3 files changed, 39 insertions(+), 25 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ffc8b38..417e111 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -729,7 +729,7 @@ struct mlx5_ind_table_obj {
 struct mlx5_hrxq {
 	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
-	uint32_t shared:1; /* This object used in shared action. */
+	uint32_t standalone:1; /* This object used in shared action. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
 	union {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ddd5df7..9c9f8c4 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1930,13 +1930,16 @@ struct mlx5_ind_table_obj *
  *   Pointer to Ethernet device.
  * @param ind_table
  *   Indirection table to release.
+ * @param standalone
+ *   Indirection table for Standalone queue.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 int
 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
-			   struct mlx5_ind_table_obj *ind_tbl)
+			   struct mlx5_ind_table_obj *ind_tbl,
+			   bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
@@ -1946,7 +1949,8 @@ struct mlx5_ind_table_obj *
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
-		LIST_REMOVE(ind_tbl, next);
+		if (!standalone)
+			LIST_REMOVE(ind_tbl, next);
 		mlx5_free(ind_tbl);
 		return 0;
 	}
@@ -1987,13 +1991,15 @@ struct mlx5_ind_table_obj *
  *   Queues entering in the indirection table.
  * @param queues_n
  *   Number of queues in the array.
+ * @param standalone
+ *   Indirection table for Standalone queue.
  *
  * @return
  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
  */
 static struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
-		       uint32_t queues_n)
+		       uint32_t queues_n, bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_obj *ind_tbl;
@@ -2020,7 +2026,8 @@ struct mlx5_ind_table_obj *
 	if (ret < 0)
 		goto error;
 	rte_atomic32_inc(&ind_tbl->refcnt);
-	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+	if (!standalone)
+		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
 	return ind_tbl;
 error:
 	ret = rte_errno;
@@ -2063,8 +2070,6 @@ struct mlx5_ind_table_obj *
 		      hrxq, next) {
 		struct mlx5_ind_table_obj *ind_tbl;
 
-		if (hrxq->shared)
-			continue;
 		if (hrxq->rss_key_len != rss_key_len)
 			continue;
 		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
@@ -2075,7 +2080,8 @@ struct mlx5_ind_table_obj *
 		if (!ind_tbl)
 			continue;
 		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_obj_release(dev, ind_tbl);
+			mlx5_ind_table_obj_release(dev, ind_tbl,
+						   hrxq->standalone);
 			continue;
 		}
 		rte_atomic32_inc(&hrxq->refcnt);
@@ -2136,7 +2142,8 @@ struct mlx5_ind_table_obj *
 	} else {
 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 		if (!ind_tbl)
-			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+							 hrxq->standalone);
 	}
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
@@ -2150,7 +2157,8 @@ struct mlx5_ind_table_obj *
 		goto error;
 	}
 	if (ind_tbl != hrxq->ind_table) {
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+					   hrxq->standalone);
 		hrxq->ind_table = ind_tbl;
 	}
 	hrxq->hash_fields = hash_fields;
@@ -2159,7 +2167,7 @@ struct mlx5_ind_table_obj *
 error:
 	err = rte_errno;
 	if (ind_tbl != hrxq->ind_table)
-		mlx5_ind_table_obj_release(dev, ind_tbl);
+		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
 	rte_errno = err;
 	return -rte_errno;
 }
@@ -2189,13 +2197,16 @@ struct mlx5_ind_table_obj *
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 		priv->obj_ops.hrxq_destroy(hrxq);
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
-			     hrxq_idx, hrxq, next);
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+					   hrxq->standalone);
+		if (!hrxq->standalone)
+			ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+				     &priv->hrxqs, hrxq_idx, hrxq, next);
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 		return 0;
 	}
-	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
+	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+						 hrxq->standalone));
 	return 1;
 }
 
@@ -2217,8 +2228,8 @@ struct mlx5_ind_table_obj *
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
- * @param shared
- *   If true new object of Rx Hash queue will be used in shared action.
+ * @param standalone
+ *   Object of Rx Hash queue will be used in standalone shared action or not.
  *
  * @return
  *   The DevX object initialized index, 0 otherwise and rte_errno is set.
@@ -2228,7 +2239,7 @@ struct mlx5_ind_table_obj *
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
 	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel, bool shared)
+	      int tunnel, bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq = NULL;
@@ -2239,7 +2250,8 @@ struct mlx5_ind_table_obj *
 	queues_n = hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
-		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+						 standalone);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return 0;
@@ -2247,7 +2259,7 @@ struct mlx5_ind_table_obj *
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
-	hrxq->shared = !!shared;
+	hrxq->standalone = !!standalone;
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
 	hrxq->hash_fields = hash_fields;
@@ -2258,12 +2270,13 @@ struct mlx5_ind_table_obj *
 		goto error;
 	}
 	rte_atomic32_inc(&hrxq->refcnt);
-	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
-		     hrxq, next);
+	if (!hrxq->standalone)
+		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
+			     hrxq_idx, hrxq, next);
 	return hrxq_idx;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_ind_table_obj_release(dev, ind_tbl);
+	mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	rte_errno = ret; /* Restore rte_errno. */
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1b5fba4..8fe0473 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -347,12 +347,13 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
 						  const uint16_t *queues,
 						  uint32_t queues_n);
 int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
-			       struct mlx5_ind_table_obj *ind_tbl);
+			       struct mlx5_ind_table_obj *ind_tbl,
+			       bool standalone);
 uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
 		       const uint8_t *rss_key, uint32_t rss_key_len,
 		       uint64_t hash_fields,
 		       const uint16_t *queues, uint32_t queues_n,
-		       int tunnel, bool shared);
+		       int tunnel, bool standalone);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 		       const uint8_t *rss_key, uint32_t rss_key_len,
 		       uint64_t hash_fields,
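
The core of the change is that list insertion and list removal are both gated
on the same standalone flag, so shared-action objects never appear in the
lookup list and releases never touch a link that was never created. Below is a
minimal, self-contained sketch of that pattern. It is not the mlx5 driver code;
all names (example_hrxq, hrxq_list, example_hrxq_new/lookup/release) are
invented for illustration, and plain sys/queue.h LIST macros stand in for the
driver's indexed-pool ILIST.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct example_hrxq {
	LIST_ENTRY(example_hrxq) next; /* Linkage; unused when standalone. */
	bool standalone;               /* Skip list registration when set. */
	unsigned int key;              /* Stand-in for RSS key/queues. */
};

static LIST_HEAD(, example_hrxq) hrxq_list = LIST_HEAD_INITIALIZER(hrxq_list);

/* Create an object; only shareable (non-standalone) ones enter the list. */
static struct example_hrxq *
example_hrxq_new(unsigned int key, bool standalone)
{
	struct example_hrxq *obj = calloc(1, sizeof(*obj));

	if (obj == NULL)
		return NULL;
	obj->key = key;
	obj->standalone = standalone;
	if (!obj->standalone)
		LIST_INSERT_HEAD(&hrxq_list, obj, next);
	return obj;
}

/* Generic lookup only ever walks shareable objects. */
static struct example_hrxq *
example_hrxq_lookup(unsigned int key)
{
	struct example_hrxq *obj;

	LIST_FOREACH(obj, &hrxq_list, next)
		if (obj->key == key)
			return obj;
	return NULL;
}

/* Release must skip the list removal for standalone objects. */
static void
example_hrxq_release(struct example_hrxq *obj)
{
	if (!obj->standalone)
		LIST_REMOVE(obj, next);
	free(obj);
}

int
main(void)
{
	struct example_hrxq *shared = example_hrxq_new(1, true);
	struct example_hrxq *regular = example_hrxq_new(2, false);

	printf("lookup(1): %s\n", example_hrxq_lookup(1) ? "found" : "not found");
	printf("lookup(2): %s\n", example_hrxq_lookup(2) ? "found" : "not found");
	example_hrxq_release(shared);
	example_hrxq_release(regular);
	return 0;
}

Because standalone objects are never linked, the lookup list stays limited to
entries that can actually be reused, and the release path must mirror the
creation path by skipping LIST_REMOVE for them.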