From patchwork Tue Oct 27 12:26:56 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 82288
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko
Cc: dev@dpdk.org
Date: Tue, 27 Oct 2020 20:26:56 +0800
Message-Id: <1603801650-442376-2-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 01/34] net/mlx5: use thread safe index pool for flow objects

As the mlx5 PMD is changed to be thread safe, all the flow-related sub-objects inside the PMD should be thread safe as well. This commit enables the lock in the index memory pools' configuration, which makes the index pools thread safe.
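For illustration, a minimal sketch of how a lock-enabled index pool is configured and used (not part of the patch; the field names follow the diff below, and the mlx5_ipool_create()/mlx5_ipool_destroy() prototypes are assumed from drivers/net/mlx5/mlx5_utils.h):

#include "mlx5_utils.h"

static struct mlx5_indexed_pool_config cfg = {
	.size = sizeof(struct rte_flow),
	.trunk_size = 64,
	.grow_trunk = 3,
	.grow_shift = 2,
	.need_lock = 1, /* Serialize alloc/free across threads. */
	.release_mem_en = 1,
	.malloc = mlx5_malloc,
	.free = mlx5_free,
	.type = "rte_flow_ipool",
};

static void
ipool_usage_sketch(void)
{
	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
	uint32_t idx;
	void *entry;

	if (pool == NULL)
		return;
	/* With need_lock = 1 these calls are safe from any thread. */
	entry = mlx5_ipool_zmalloc(pool, &idx);
	if (entry != NULL)
		mlx5_ipool_free(pool, idx);
	mlx5_ipool_destroy(pool);
}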
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 91aaee3..a812b51 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -191,7 +191,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -202,7 +202,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -213,7 +213,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -224,7 +224,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -235,7 +235,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -246,7 +246,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -257,7 +257,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -269,7 +269,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -280,7 +280,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -291,7 +291,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, @@ -306,7 +306,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, - .need_lock = 0, + .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, .free = mlx5_free, From patchwork Tue Oct 27 12:26:57 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82289 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D82C2A04B5; Tue, 27 Oct 2020 13:28:15 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 458E52E8B; Tue, 27 Oct 2020 13:27:58 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) 
with ESMTP id 070F72C27 for ; Tue, 27 Oct 2020 13:27:42 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:39 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ70024637; Tue, 27 Oct 2020 14:27:38 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:26:57 +0800 Message-Id: <1603801650-442376-3-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 02/34] net/mlx5: use thread specific flow workspace X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li As part of multi-thread flow support, this patch moves flow intermediate data to thread specific, makes them a flow workspace. The workspace is allocated per thread, destroyed along with thread life-cycle. Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 5 -- drivers/net/mlx5/mlx5.c | 2 - drivers/net/mlx5/mlx5.h | 6 -- drivers/net/mlx5/mlx5_flow.c | 168 ++++++++++++++++++++++++++----------- drivers/net/mlx5/mlx5_flow.h | 15 +++- drivers/net/mlx5/mlx5_flow_dv.c | 40 +++++---- drivers/net/mlx5/mlx5_flow_verbs.c | 24 +++--- 7 files changed, 171 insertions(+), 89 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index c890998..4ba6d8e 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1449,11 +1449,6 @@ err = ENOTSUP; goto error; } - /* - * Allocate the buffer for flow creating, just once. - * The allocation must be done before any flow creating. - */ - mlx5_flow_alloc_intermediate(eth_dev); /* Query availability of metadata reg_c's. */ err = mlx5_flow_discover_mreg_c(eth_dev); if (err < 0) { diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index a812b51..f78cbb0 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1403,8 +1403,6 @@ struct mlx5_dev_ctx_shared * mlx5_flow_list_flush(dev, &priv->flows, true); mlx5_shared_action_flush(dev); mlx5_flow_meter_flush(dev, NULL); - /* Free the intermediate buffers for flow creation. */ - mlx5_flow_free_intermediate(dev); /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 7429811..ca2c70f 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -859,10 +859,6 @@ struct mlx5_priv { struct mlx5_drop drop_queue; /* Flow drop queues. */ uint32_t flows; /* RTE Flow rules. */ uint32_t ctrl_flows; /* Control flow rules. */ - void *inter_flows; /* Intermediate resources for flow creation. */ - void *rss_desc; /* Intermediate rss description resources. */ - int flow_idx; /* Intermediate device flow index. */ - int flow_nested_idx; /* Intermediate device flow index, nested. */ struct mlx5_obj_ops obj_ops; /* HW objects operations. */ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. 
*/ LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */ @@ -1109,8 +1105,6 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list); int mlx5_flow_start_default(struct rte_eth_dev *dev); void mlx5_flow_stop_default(struct rte_eth_dev *dev); -void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev); -void mlx5_flow_free_intermediate(struct rte_eth_dev *dev); int mlx5_flow_verify(struct rte_eth_dev *dev); int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue); int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 02e19e8..0e958c7 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -832,6 +832,13 @@ struct mlx5_flow_tunnel_info { }, }; +/* Key of thread specific flow workspace data. */ +static pthread_key_t key_workspace; + +/* Thread specific flow workspace data once initialization data. */ +static pthread_once_t key_workspace_init; + + /** * Translate tag ID to register. * @@ -5523,6 +5530,38 @@ struct tunnel_default_miss_ctx { } /** + * Adjust flow RSS workspace if needed. + * + * @param wks + * Pointer to thread flow work space. + * @param rss_desc + * Pointer to RSS descriptor. + * @param[in] nrssq_num + * New RSS queue number. + * + * @return + * 0 on success, -1 otherwise and rte_errno is set. + */ +static int +flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks, + struct mlx5_flow_rss_desc *rss_desc, + uint32_t nrssq_num) +{ + bool fidx = !!wks->flow_idx; + + if (likely(nrssq_num <= wks->rssq_num[fidx])) + return 0; + rss_desc->queue = realloc(rss_desc->queue, + sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2)); + if (!rss_desc->queue) { + rte_errno = ENOMEM; + return -1; + } + wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2); + return 0; +} + +/** * Create a flow and add it to @p list. * * @param dev @@ -5577,8 +5616,7 @@ struct tunnel_default_miss_ctx { uint8_t buffer[2048]; } items_tx; struct mlx5_flow_expand_rss *buf = &expand_buffer.buf; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc)[!!priv->flow_idx]; + struct mlx5_flow_rss_desc *rss_desc; const struct rte_flow_action *p_actions_rx; uint32_t i; uint32_t idx = 0; @@ -5590,11 +5628,16 @@ struct tunnel_default_miss_ctx { struct rte_flow_action *translated_actions = NULL; struct mlx5_flow_tunnel *tunnel; struct tunnel_default_miss_ctx default_miss_ctx = { 0, }; - int ret = flow_shared_actions_translate(original_actions, - shared_actions, - &shared_actions_n, - &translated_actions, error); + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + bool fidx = !!wks->flow_idx; + int ret; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[fidx]; + ret = flow_shared_actions_translate(original_actions, + shared_actions, + &shared_actions_n, + &translated_actions, error); if (ret < 0) { MLX5_ASSERT(translated_actions == NULL); return 0; @@ -5627,9 +5670,11 @@ struct tunnel_default_miss_ctx { flow->hairpin_flow_id = hairpin_id; MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); - memset(rss_desc, 0, sizeof(*rss_desc)); + memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); rss = flow_get_rss_action(p_actions_rx); if (rss) { + if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) + return 0; /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. 
@@ -5659,9 +5704,9 @@ struct tunnel_default_miss_ctx { * need to be translated before another calling. * No need to use ping-pong buffer to save memory here. */ - if (priv->flow_idx) { - MLX5_ASSERT(!priv->flow_nested_idx); - priv->flow_nested_idx = priv->flow_idx; + if (fidx) { + MLX5_ASSERT(!wks->flow_nested_idx); + wks->flow_nested_idx = fidx; } for (i = 0; i < buf->entries; ++i) { /* @@ -5740,9 +5785,9 @@ struct tunnel_default_miss_ctx { flow_rxq_flags_set(dev, flow); rte_free(translated_actions); /* Nested flow creation index recovery. */ - priv->flow_idx = priv->flow_nested_idx; - if (priv->flow_nested_idx) - priv->flow_nested_idx = 0; + wks->flow_idx = wks->flow_nested_idx; + if (wks->flow_nested_idx) + wks->flow_nested_idx = 0; tunnel = flow_tunnel_from_rule(dev, attr, items, actions); if (tunnel) { flow->tunnel = 1; @@ -5764,9 +5809,9 @@ struct tunnel_default_miss_ctx { mlx5_flow_id_release(priv->sh->flow_id_pool, hairpin_id); rte_errno = ret; - priv->flow_idx = priv->flow_nested_idx; - if (priv->flow_nested_idx) - priv->flow_nested_idx = 0; + wks->flow_idx = wks->flow_nested_idx; + if (wks->flow_nested_idx) + wks->flow_nested_idx = 0; error_before_hairpin_split: rte_free(translated_actions); return 0; @@ -6072,48 +6117,75 @@ struct rte_flow * } /** - * Allocate intermediate resources for flow creation. - * - * @param dev - * Pointer to Ethernet device. + * Release key of thread specific flow workspace data. */ -void -mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) +static void +flow_release_workspace(void *data) { - struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = data; - if (!priv->inter_flows) { - priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO, - MLX5_NUM_MAX_DEV_FLOWS * - sizeof(struct mlx5_flow) + - (sizeof(struct mlx5_flow_rss_desc) + - sizeof(uint16_t) * UINT16_MAX) * 2, 0, - SOCKET_ID_ANY); - if (!priv->inter_flows) { - DRV_LOG(ERR, "can't allocate intermediate memory."); - return; - } - } - priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows) - [MLX5_NUM_MAX_DEV_FLOWS]; - /* Reset the index. */ - priv->flow_idx = 0; - priv->flow_nested_idx = 0; + if (!wks) + return; + free(wks->rss_desc[0].queue); + free(wks->rss_desc[1].queue); + free(wks); } /** - * Free intermediate resources for flows. + * Initialize key of thread specific flow workspace data. + */ +static void +flow_alloc_workspace(void) +{ + if (pthread_key_create(&key_workspace, flow_release_workspace)) + DRV_LOG(ERR, "Can't create flow workspace data thread key."); +} + +/** + * Get thread specific flow workspace. * - * @param dev - * Pointer to Ethernet device. + * @return pointer to thread specific flowworkspace data, NULL on error. 
*/ -void -mlx5_flow_free_intermediate(struct rte_eth_dev *dev) +struct mlx5_flow_workspace* +mlx5_flow_get_thread_workspace(void) { - struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *data; - mlx5_free(priv->inter_flows); - priv->inter_flows = NULL; + if (pthread_once(&key_workspace_init, flow_alloc_workspace)) { + DRV_LOG(ERR, "Failed to init flow workspace data thread key."); + return NULL; + } + data = pthread_getspecific(key_workspace); + if (!data) { + data = calloc(1, sizeof(*data)); + if (!data) { + DRV_LOG(ERR, "Failed to allocate flow workspace " + "memory."); + return NULL; + } + data->rss_desc[0].queue = calloc(1, + sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM); + if (!data->rss_desc[0].queue) + goto err; + data->rss_desc[1].queue = calloc(1, + sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM); + if (!data->rss_desc[1].queue) + goto err; + data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM; + data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM; + if (pthread_setspecific(key_workspace, data)) { + DRV_LOG(ERR, "Failed to set flow workspace to thread."); + goto err; + } + } + return data; +err: + if (data->rss_desc[0].queue) + free(data->rss_desc[0].queue); + if (data->rss_desc[1].queue) + free(data->rss_desc[1].queue); + free(data); + return NULL; } /** diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 8b5a93f..4f9e091 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -76,6 +76,9 @@ enum mlx5_feature_name { MLX5_MTR_SFX, }; +/* Default queue number. */ +#define MLX5_RSSQ_DEFAULT_NUM 16 + #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2) @@ -604,7 +607,7 @@ struct mlx5_flow_rss_desc { uint32_t queue_num; /**< Number of entries in @p queue. */ uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ - uint16_t queue[]; /**< Destination queues to redirect traffic to. */ + uint16_t *queue; /**< Destination queues. */ }; /* PMD flow priority for tunnel */ @@ -1103,6 +1106,15 @@ struct rte_flow_shared_action { }; }; +/* Thread specific flow workspace intermediate data. */ +struct mlx5_flow_workspace { + struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS]; + struct mlx5_flow_rss_desc rss_desc[2]; + uint32_t rssq_num[2]; /* Allocated queue num in rss_desc. */ + int flow_idx; /* Intermediate device flow index. */ + int flow_nested_idx; /* Intermediate device flow index, nested. */ +}; + typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], @@ -1200,6 +1212,7 @@ struct mlx5_flow_driver_ops { /* mlx5_flow.c */ +struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id); void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool); uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id); diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index dafe07f..c7d1441 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -6248,9 +6248,11 @@ struct field_modify_info modify_tcp[] = { struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); /* In case of corrupting the memory. 
*/ - if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not free temporary device flow"); @@ -6264,8 +6266,8 @@ struct field_modify_info modify_tcp[] = { "not enough memory to create flow handle"); return NULL; } - /* No multi-thread supporting. */ - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; /* @@ -8935,11 +8937,12 @@ struct field_modify_info modify_tcp[] = { const struct rte_flow_action_queue *queue; struct mlx5_flow_sub_actions_list *sample_act; struct mlx5_flow_sub_actions_idx *sample_idx; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc) - [!!priv->flow_nested_idx]; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; uint64_t action_flags = 0; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; sample_act = &res->sample_act; sample_idx = &res->sample_idx; sample_action = (const struct rte_flow_action_sample *)action->conf; @@ -9141,18 +9144,18 @@ struct field_modify_info modify_tcp[] = { uint64_t action_flags, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; /* update normal path action resource into last index of array */ uint32_t dest_index = MLX5_MAX_DEST_NUM - 1; struct mlx5_flow_sub_actions_list *sample_act = &mdest_res->sample_act[dest_index]; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc) - [!!priv->flow_nested_idx]; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; uint32_t normal_idx = 0; struct mlx5_hrxq *hrxq; uint32_t hrxq_idx; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; if (num_of_dest > 1) { if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) { /* Handle QP action for mirroring */ @@ -9242,9 +9245,8 @@ struct field_modify_info modify_tcp[] = { struct mlx5_dev_config *dev_conf = &priv->config; struct rte_flow *flow = dev_flow->flow; struct mlx5_flow_handle *handle = dev_flow->handle; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc) - [!!priv->flow_nested_idx]; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; uint64_t item_flags = 0; uint64_t last_item = 0; uint64_t action_flags = 0; @@ -9290,6 +9292,8 @@ struct field_modify_info modify_tcp[] = { .fdb_def_rule = !!priv->fdb_def_rule, }; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource)); memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource)); mhdr_res->ft_type = attr->egress ? 
MLX5DV_FLOW_TABLE_TYPE_NIC_TX : @@ -10273,6 +10277,7 @@ struct field_modify_info modify_tcp[] = { struct mlx5_hrxq **hrxq) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); uint32_t hrxq_idx; if (flow->shared_rss) { @@ -10287,8 +10292,7 @@ struct field_modify_info modify_tcp[] = { } } else { struct mlx5_flow_rss_desc *rss_desc = - &((struct mlx5_flow_rss_desc *)priv->rss_desc) - [!!priv->flow_nested_idx]; + &wks->rss_desc[!!wks->flow_nested_idx]; MLX5_ASSERT(rss_desc->queue_num); hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, @@ -10339,9 +10343,11 @@ struct field_modify_info modify_tcp[] = { int n; int err; int idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); - for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; + MLX5_ASSERT(wks); + for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) { + dev_flow = &wks->flows[idx]; dv = &dev_flow->dv; dh = dev_flow->handle; dv_h = &dh->dvh; diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index f0e1bca..052cfd4 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -1623,7 +1623,9 @@ struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); size += flow_verbs_get_actions_size(actions); size += flow_verbs_get_items_size(items); if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) { @@ -1633,7 +1635,7 @@ return NULL; } /* In case of corrupting the memory. */ - if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not free temporary device flow"); @@ -1647,8 +1649,8 @@ "not enough memory to create flow handle"); return NULL; } - /* No multi-thread supporting. */ - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; /* Memcpy is used, only size needs to be cleared to 0. 
 */
@@ -1692,10 +1694,11 @@
 	uint64_t priority = attr->priority;
 	uint32_t subpriority = 0;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
-					      priv->rss_desc)
-					      [!!priv->flow_nested_idx];
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct mlx5_flow_rss_desc *rss_desc;
+	MLX5_ASSERT(wks);
+	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
 	if (priority == MLX5_FLOW_PRIO_RSVD)
 		priority = priv->config.flow_prio - 1;
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -1951,9 +1954,11 @@
 	uint32_t dev_handles;
 	int err;
 	int idx;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
-	for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
-		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
+	MLX5_ASSERT(wks);
+	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
+		dev_flow = &wks->flows[idx];
 		handle = dev_flow->handle;
 		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
 			hrxq = mlx5_drop_action_create(dev);
@@ -1967,8 +1972,7 @@
 		} else {
 			uint32_t hrxq_idx;
 			struct mlx5_flow_rss_desc *rss_desc =
-				&((struct mlx5_flow_rss_desc *)priv->rss_desc)
-				[!!priv->flow_nested_idx];
+				&wks->rss_desc[!!wks->flow_nested_idx];
 			MLX5_ASSERT(rss_desc->queue_num);
 			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,

From patchwork Tue Oct 27 12:26:58 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 82292
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko
Cc: dev@dpdk.org, Xueming Li
Date: Tue, 27 Oct 2020 20:26:58 +0800
Message-Id: <1603801650-442376-4-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 03/34] net/mlx5: reuse flow Id as hairpin Id

From: Xueming Li

Hairpin flow matching requires a unique flow ID for the match. This patch reuses the flow ID as the hairpin flow ID, which removes the code that generated a separate hairpin ID and also saves flow memory by dropping the hairpin ID field.
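A condensed sketch of the new allocation order (simplified from the mlx5_flow.c hunks below, not a self-contained function): the rte_flow is allocated from its index pool first, and that pool index itself is passed to the hairpin split as the match tag.

	uint32_t idx = 0;
	struct rte_flow *flow;

	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
	if (!flow) {
		rte_errno = ENOMEM;
		goto error_before_hairpin_split;
	}
	if (hairpin_flow > 0)
		/* The flow's own pool index doubles as the hairpin flow ID. */
		flow_hairpin_split(dev, actions, actions_rx.actions,
				   actions_hairpin_tx.actions,
				   items_tx.items, idx);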
Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5.c | 11 ----------- drivers/net/mlx5/mlx5.h | 1 - drivers/net/mlx5/mlx5_flow.c | 30 +++++++++--------------------- drivers/net/mlx5/mlx5_flow.h | 6 +----- 4 files changed, 10 insertions(+), 38 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index f78cbb0..19c7035 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -979,13 +979,6 @@ struct mlx5_dev_ctx_shared * MLX5_ASSERT(sh->devx_rx_uar); MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar)); } - sh->flow_id_pool = mlx5_flow_id_pool_alloc - ((1 << HAIRPIN_FLOW_ID_BITS) - 1); - if (!sh->flow_id_pool) { - DRV_LOG(ERR, "can't create flow id pool"); - err = ENOMEM; - goto error; - } #ifndef RTE_ARCH_64 /* Initialize UAR access locks for 32bit implementations. */ rte_spinlock_init(&sh->uar_lock_cq); @@ -1047,8 +1040,6 @@ struct mlx5_dev_ctx_shared * claim_zero(mlx5_glue->dealloc_pd(sh->pd)); if (sh->ctx) claim_zero(mlx5_glue->close_device(sh->ctx)); - if (sh->flow_id_pool) - mlx5_flow_id_pool_release(sh->flow_id_pool); mlx5_free(sh); MLX5_ASSERT(err > 0); rte_errno = err; @@ -1119,8 +1110,6 @@ struct mlx5_dev_ctx_shared * mlx5_glue->devx_free_uar(sh->devx_rx_uar); if (sh->ctx) claim_zero(mlx5_glue->close_device(sh->ctx)); - if (sh->flow_id_pool) - mlx5_flow_id_pool_release(sh->flow_id_pool); pthread_mutex_destroy(&sh->txpp.mutex); mlx5_free(sh); return; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index ca2c70f..5ba14df 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -672,7 +672,6 @@ struct mlx5_dev_ctx_shared { void *devx_comp; /* DEVX async comp obj. */ struct mlx5_devx_obj *tis; /* TIS object. */ struct mlx5_devx_obj *td; /* Transport domain. */ - struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */ void *tx_uar; /* Tx/packet pacing shared UAR. */ struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX]; /* Flex parser profiles information. */ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 0e958c7..7ca6f16 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -4145,9 +4145,8 @@ struct rte_flow_shared_action * struct rte_flow_action actions_rx[], struct rte_flow_action actions_tx[], struct rte_flow_item pattern_tx[], - uint32_t *flow_id) + uint32_t flow_id) { - struct mlx5_priv *priv = dev->data->dev_private; const struct rte_flow_action_raw_encap *raw_encap; const struct rte_flow_action_raw_decap *raw_decap; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -4157,7 +4156,6 @@ struct rte_flow_shared_action * char *addr; int encap = 0; - mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: @@ -4226,7 +4224,7 @@ struct rte_flow_shared_action * set_tag = (void *)actions_rx; set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); MLX5_ASSERT(set_tag->id > REG_NON); - set_tag->data = *flow_id; + set_tag->data = flow_id; tag_action->conf = set_tag; /* Create Tx item list. 
*/ rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); @@ -4235,7 +4233,7 @@ struct rte_flow_shared_action * item->type = (enum rte_flow_item_type) MLX5_RTE_FLOW_ITEM_TYPE_TAG; tag_item = (void *)addr; - tag_item->data = *flow_id; + tag_item->data = flow_id; tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); MLX5_ASSERT(set_tag->id > REG_NON); item->spec = tag_item; @@ -5621,7 +5619,6 @@ struct tunnel_default_miss_ctx { uint32_t i; uint32_t idx = 0; int hairpin_flow; - uint32_t hairpin_id = 0; struct rte_flow_attr attr_tx = { .priority = 0 }; struct rte_flow_attr attr_factor = {0}; const struct rte_flow_action *actions; @@ -5650,6 +5647,11 @@ struct tunnel_default_miss_ctx { external, hairpin_flow, error); if (ret < 0) goto error_before_hairpin_split; + flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); + if (!flow) { + rte_errno = ENOMEM; + goto error_before_hairpin_split; + } if (hairpin_flow > 0) { if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { rte_errno = EINVAL; @@ -5657,17 +5659,10 @@ struct tunnel_default_miss_ctx { } flow_hairpin_split(dev, actions, actions_rx.actions, actions_hairpin_tx.actions, items_tx.items, - &hairpin_id); + idx); p_actions_rx = actions_rx.actions; } - flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); - if (!flow) { - rte_errno = ENOMEM; - goto error_before_flow; - } flow->drv_type = flow_get_drv_type(dev, &attr_factor); - if (hairpin_id != 0) - flow->hairpin_flow_id = hairpin_id; MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); @@ -5803,11 +5798,7 @@ struct tunnel_default_miss_ctx { flow_drv_destroy(dev, flow); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); rte_errno = ret; /* Restore rte_errno. */ -error_before_flow: ret = rte_errno; - if (hairpin_id) - mlx5_flow_id_release(priv->sh->flow_id_pool, - hairpin_id); rte_errno = ret; wks->flow_idx = wks->flow_nested_idx; if (wks->flow_nested_idx) @@ -5965,9 +5956,6 @@ struct rte_flow * */ if (dev->data->dev_started) flow_rxq_flags_trim(dev, flow); - if (flow->hairpin_flow_id) - mlx5_flow_id_release(priv->sh->flow_id_pool, - flow->hairpin_flow_id); flow_drv_destroy(dev, flow); if (list) ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 4f9e091..6ca9d0c 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -923,8 +923,6 @@ struct mlx5_fdir_flow { uint32_t rix_flow; /* Index to flow. */ }; -#define HAIRPIN_FLOW_ID_BITS 28 - #define MLX5_MAX_TUNNELS 256 #define MLX5_TNL_MISS_RULE_PRIORITY 3 #define MLX5_TNL_MISS_FDB_JUMP_GRP 0x1234faac @@ -1040,14 +1038,12 @@ struct rte_flow { uint32_t drv_type:2; /**< Driver type. */ uint32_t fdir:1; /**< Identifier of associated FDIR if any. */ uint32_t tunnel:1; - uint32_t hairpin_flow_id:HAIRPIN_FLOW_ID_BITS; - /**< The flow id used for hairpin. */ uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */ + uint32_t meter:16; /**< Holds flow meter id. */ uint32_t rix_mreg_copy; /**< Index to metadata register copy table resource. */ uint32_t counter; /**< Holds flow counter. */ uint32_t tunnel_id; /**< Tunnel id */ - uint16_t meter; /**< Holds flow meter id. 
 */
} __rte_packed;
/*

From patchwork Tue Oct 27 12:26:59 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 82290
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko
Cc: dev@dpdk.org, Xueming Li
Date: Tue, 27 Oct 2020 20:26:59 +0800
Message-Id: <1603801650-442376-5-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 04/34] net/mlx5: indexed pool supports zero size entry

From: Xueming Li

To make the indexed pool usable as an ID generator, this patch allows the entry size to be zero.
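For illustration, a sketch of what a zero-size entry enables (the names follow the ipool API used throughout this series; treat the exact prototypes as assumed): a pool created with .size = 0 keeps no per-entry payload, so an allocation is nothing more than handing out a unique non-zero index.

static struct mlx5_indexed_pool_config id_cfg = {
	.size = 0, /* No per-entry memory; indices only. */
	.need_lock = 1, /* IDs may be requested from several threads. */
	.type = "id_generator_ipool",
};

static uint32_t
id_alloc_sketch(struct mlx5_indexed_pool *pool)
{
	uint32_t id = 0;

	/*
	 * The returned pointer is meaningless for size 0; the index
	 * written to &id is the generated ID. Indices start at 1,
	 * so 0 signals allocation failure.
	 */
	(void)mlx5_ipool_malloc(pool, &id);
	return id;
}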
Signed-off-by: Xueming Li
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_utils.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 9a54fda..3a1f87a 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -230,7 +230,7 @@ struct mlx5_indexed_pool *
 	struct mlx5_indexed_pool *pool;
 	uint32_t i;
-	if (!cfg || !cfg->size || (!cfg->malloc ^ !cfg->free) ||
+	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
 	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
 	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
 		return NULL;
@@ -391,7 +391,7 @@ struct mlx5_indexed_pool *
 {
 	void *entry = mlx5_ipool_malloc(pool, idx);
-	if (entry)
+	if (entry && pool->cfg.size)
 		memset(entry, 0, pool->cfg.size);
 	return entry;
 }

From patchwork Tue Oct 27 12:27:00 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 82291
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko
Cc: dev@dpdk.org, Xueming Li
Date: Tue, 27 Oct 2020 20:27:00 +0800
Message-Id: <1603801650-442376-6-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 05/34] net/mlx5: use indexed pool as ID generator

From: Xueming Li

The ID generation API used an integer pool to save released IDs. To support multiple flows, it has to be enhanced to be thread safe. The indexed pool can be used to generate unique IDs by setting the pool entry size to zero. Since a bitmap is used, an extra benefit is saving memory: about one bit per entry. Furthermore, the indexed pool is thread safe when its lock is enabled. This patch leverages the indexed pool to generate IDs and removes the now unused ID generating API.
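The allocation pattern this patch introduces, condensed from the meter split hunk in mlx5_flow.c below: IDs come from a zero-size ipool, and a caller that can consume only a limited number of ID bits (metering shares register bits with the color) range-checks the ID and returns it on overflow.

	uint32_t tag_id = 0;

	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
			  &tag_id);
	if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
		/* The ID does not fit beside the meter color bits. */
		mlx5_ipool_free(priv->sh->ipool
				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
		return 0;
	} else if (!tag_id) {
		return 0; /* Pool exhausted. */
	}
	set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;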
Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 13 ---- drivers/net/mlx5/mlx5.c | 136 +++++---------------------------------- drivers/net/mlx5/mlx5.h | 4 +- drivers/net/mlx5/mlx5_flow.c | 116 +++++++++++++++------------------ drivers/net/mlx5/mlx5_flow.h | 7 -- drivers/net/mlx5/mlx5_flow_dv.c | 4 +- drivers/net/mlx5/mlx5_utils.c | 5 ++ 7 files changed, 78 insertions(+), 207 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 4ba6d8e..0b59e74 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1395,17 +1395,6 @@ err = mlx5_alloc_shared_dr(priv); if (err) goto error; - /* - * RSS id is shared with meter flow id. Meter flow id can only - * use the 24 MSB of the register. - */ - priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >> - MLX5_MTR_COLOR_BITS); - if (!priv->qrss_id_pool) { - DRV_LOG(ERR, "can't create flow id pool"); - err = ENOMEM; - goto error; - } } if (config->devx && config->dv_flow_en && config->dest_tir) { priv->obj_ops = devx_obj_ops; @@ -1492,8 +1481,6 @@ close(priv->nl_socket_rdma); if (priv->vmwa_context) mlx5_vlan_vmwa_exit(priv->vmwa_context); - if (priv->qrss_id_pool) - mlx5_flow_id_pool_release(priv->qrss_id_pool); if (own_domain_id) claim_zero(rte_eth_switch_domain_free(priv->domain_id)); mlx5_free(priv); diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 19c7035..db0243f 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -321,6 +321,21 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = .free = mlx5_free, .type = "rte_flow_ipool", }, + { + .size = 0, + .need_lock = 1, + .type = "mlx5_flow_rss_id_ipool", + }, + { + .size = 0, + .need_lock = 1, + .type = "mlx5_flow_tnl_flow_ipool", + }, + { + .size = 0, + .need_lock = 1, + .type = "mlx5_flow_tnl_tbl_ipool", + }, }; @@ -330,127 +345,6 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = #define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096 /** - * Allocate ID pool structure. - * - * @param[in] max_id - * The maximum id can be allocated from the pool. - * - * @return - * Pointer to pool object, NULL value otherwise. - */ -struct mlx5_flow_id_pool * -mlx5_flow_id_pool_alloc(uint32_t max_id) -{ - struct mlx5_flow_id_pool *pool; - void *mem; - - pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), - RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); - if (!pool) { - DRV_LOG(ERR, "can't allocate id pool"); - rte_errno = ENOMEM; - return NULL; - } - mem = mlx5_malloc(MLX5_MEM_ZERO, - MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t), - RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); - if (!mem) { - DRV_LOG(ERR, "can't allocate mem for id pool"); - rte_errno = ENOMEM; - goto error; - } - pool->free_arr = mem; - pool->curr = pool->free_arr; - pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE; - pool->base_index = 0; - pool->max_id = max_id; - return pool; -error: - mlx5_free(pool); - return NULL; -} - -/** - * Release ID pool structure. - * - * @param[in] pool - * Pointer to flow id pool object to free. - */ -void -mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) -{ - mlx5_free(pool->free_arr); - mlx5_free(pool); -} - -/** - * Generate ID. - * - * @param[in] pool - * Pointer to flow id pool. - * @param[out] id - * The generated ID. - * - * @return - * 0 on success, error value otherwise. 
- */ -uint32_t -mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) -{ - if (pool->curr == pool->free_arr) { - if (pool->base_index == pool->max_id) { - rte_errno = ENOMEM; - DRV_LOG(ERR, "no free id"); - return -rte_errno; - } - *id = ++pool->base_index; - return 0; - } - *id = *(--pool->curr); - return 0; -} - -/** - * Release ID. - * - * @param[in] pool - * Pointer to flow id pool. - * @param[out] id - * The generated ID. - * - * @return - * 0 on success, error value otherwise. - */ -uint32_t -mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id) -{ - uint32_t size; - uint32_t size2; - void *mem; - - if (pool->curr == pool->last) { - size = pool->curr - pool->free_arr; - size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR; - MLX5_ASSERT(size2 > size); - mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0, - SOCKET_ID_ANY); - if (!mem) { - DRV_LOG(ERR, "can't allocate mem for id pool"); - rte_errno = ENOMEM; - return -rte_errno; - } - memcpy(mem, pool->free_arr, size * sizeof(uint32_t)); - mlx5_free(pool->free_arr); - pool->free_arr = mem; - pool->curr = pool->free_arr + size; - pool->last = pool->free_arr + size2; - } - *pool->curr = id; - pool->curr++; - return 0; -} - -/** * Initialize the shared aging list information per port. * * @param[in] sh diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 5ba14df..126f0a3 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -47,6 +47,9 @@ enum mlx5_ipool_index { MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */ MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */ MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */ + MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */ + MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */ + MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */ MLX5_IPOOL_MAX, }; @@ -882,7 +885,6 @@ struct mlx5_priv { int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ struct mlx5_dbr_page_list dbrpgs; /* Door-bell pages. */ struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */ - struct mlx5_flow_id_pool *qrss_id_pool; struct mlx5_hlist *mreg_cp_tbl; /* Hash table of Rx metadata register copy table. */ uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 7ca6f16..93ca6cb 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -2935,30 +2935,6 @@ struct mlx5_flow_tunnel_info { MLX5_ITEM_RANGE_NOT_ACCEPTED, error); } -/* Allocate unique ID for the split Q/RSS subflows. */ -static uint32_t -flow_qrss_get_id(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - uint32_t qrss_id, ret; - - ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); - if (ret) - return 0; - MLX5_ASSERT(qrss_id); - return qrss_id; -} - -/* Free unique ID for the split Q/RSS subflows. */ -static void -flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) -{ - struct mlx5_priv *priv = dev->data->dev_private; - - if (qrss_id) - mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); -} - /** * Release resource related QUEUE/RSS action split. 
* @@ -2978,7 +2954,9 @@ struct mlx5_flow_tunnel_info { SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dev_handle, next) if (dev_handle->split_flow_id) - flow_qrss_free_id(dev, dev_handle->split_flow_id); + mlx5_ipool_free(priv->sh->ipool + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], + dev_handle->split_flow_id); } static int @@ -4490,6 +4468,7 @@ struct tunnel_default_miss_ctx { struct rte_flow_action actions_sfx[], struct rte_flow_action actions_pre[]) { + struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_action *tag_action = NULL; struct rte_flow_item *tag_item; struct mlx5_rte_flow_action_set_tag *set_tag; @@ -4498,7 +4477,7 @@ struct tunnel_default_miss_ctx { const struct rte_flow_action_raw_decap *raw_decap; struct mlx5_rte_flow_item_tag *tag_spec; struct mlx5_rte_flow_item_tag *tag_mask; - uint32_t tag_id; + uint32_t tag_id = 0; bool copy_vlan = false; /* Prepare the actions for prefix and suffix flow. */ @@ -4547,10 +4526,17 @@ struct tunnel_default_miss_ctx { /* Set the tag. */ set_tag = (void *)actions_pre; set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); - /* - * Get the id from the qrss_pool to make qrss share the id with meter. - */ - tag_id = flow_qrss_get_id(dev); + mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], + &tag_id); + if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) { + DRV_LOG(ERR, "Port %u meter flow id exceed max limit.", + dev->data->port_id); + mlx5_ipool_free(priv->sh->ipool + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id); + return 0; + } else if (!tag_id) { + return 0; + } set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; assert(tag_action); tag_action->conf = set_tag; @@ -4643,6 +4629,7 @@ struct tunnel_default_miss_ctx { const struct rte_flow_action *qrss, int actions_n, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; struct rte_flow_action_jump *jump; const int qrss_idx = qrss - actions; @@ -4674,7 +4661,8 @@ struct tunnel_default_miss_ctx { * representors) domain even if they have coinciding * IDs. */ - flow_id = flow_qrss_get_id(dev); + mlx5_ipool_malloc(priv->sh->ipool + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id); if (!flow_id) return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, @@ -4886,6 +4874,7 @@ struct tunnel_default_miss_ctx { int qrss_action_pos, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_action_set_tag *set_tag; struct mlx5_rte_flow_item_tag *tag_spec; struct mlx5_rte_flow_item_tag *tag_mask; @@ -4905,7 +4894,8 @@ struct tunnel_default_miss_ctx { if (ret < 0) return ret; set_tag->id = ret; - tag_id = flow_qrss_get_id(dev); + mlx5_ipool_malloc(priv->sh->ipool + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id); set_tag->data = tag_id; /* Prepare the suffix subflow items. */ tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM); @@ -5200,7 +5190,8 @@ struct tunnel_default_miss_ctx { * These ones are included into parent flow list and will be destroyed * by flow_drv_destroy. 
*/ - flow_qrss_free_id(dev, qrss_id); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], + qrss_id); mlx5_free(ext_actions); return ret; } @@ -7567,6 +7558,7 @@ struct mlx5_meter_domains_infos * uint32_t group, uint32_t *table, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hlist_entry *he; struct tunnel_tbl_entry *tte; union tunnel_tbl_key key = { @@ -7579,16 +7571,21 @@ struct mlx5_meter_domains_infos * group_hash = tunnel ? tunnel->groups : thub->groups; he = mlx5_hlist_lookup(group_hash, key.val); if (!he) { - int ret; tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tte), 0, SOCKET_ID_ANY); if (!tte) goto err; tte->hash.key = key.val; - ret = mlx5_flow_id_get(thub->table_ids, &tte->flow_table); - if (ret) { - mlx5_free(tte); + mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + &tte->flow_table); + if (tte->flow_table >= MLX5_MAX_TABLES) { + DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", + tte->flow_table); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tte->flow_table); + goto err; + } else if (!tte->flow_table) { goto err; } tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); @@ -7602,6 +7599,8 @@ struct mlx5_meter_domains_infos * return 0; err: + if (tte) + mlx5_free(tte); return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, "tunnel group index not supported"); } @@ -8069,14 +8068,14 @@ struct mlx5_meter_domains_infos * mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel) { - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids; + struct mlx5_priv *priv = dev->data->dev_private; DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", dev->data->port_id, tunnel->tunnel_id); RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED)); LIST_REMOVE(tunnel, chain); - mlx5_flow_id_release(id_pool, tunnel->tunnel_id); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], + tunnel->tunnel_id); mlx5_hlist_destroy(tunnel->groups, NULL, NULL); mlx5_free(tunnel); } @@ -8099,15 +8098,20 @@ struct mlx5_meter_domains_infos * mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, const struct rte_flow_tunnel *app_tunnel) { - int ret; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_tunnel *tunnel; - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_id_pool *id_pool = thub->tunnel_ids; uint32_t id; - ret = mlx5_flow_id_get(id_pool, &id); - if (ret) + mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], + &id); + if (id >= MLX5_MAX_TUNNELS) { + mlx5_ipool_free(priv->sh->ipool + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); + DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id); + return NULL; + } else if (!id) { return NULL; + } /** * mlx5 flow tunnel is an auxlilary data structure * It's not part of IO. 
No need to allocate it from @@ -8116,12 +8120,14 @@ struct mlx5_meter_domains_infos * tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel), 0, SOCKET_ID_ANY); if (!tunnel) { - mlx5_flow_id_pool_release(id_pool); + mlx5_ipool_free(priv->sh->ipool + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); return NULL; } tunnel->groups = mlx5_hlist_create("tunnel groups", 1024); if (!tunnel->groups) { - mlx5_flow_id_pool_release(id_pool); + mlx5_ipool_free(priv->sh->ipool + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); mlx5_free(tunnel); return NULL; } @@ -8183,8 +8189,6 @@ void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id) return; if (!LIST_EMPTY(&thub->tunnels)) DRV_LOG(WARNING, "port %u tunnels present\n", port_id); - mlx5_flow_id_pool_release(thub->tunnel_ids); - mlx5_flow_id_pool_release(thub->table_ids); mlx5_hlist_destroy(thub->groups, NULL, NULL); mlx5_free(thub); } @@ -8199,16 +8203,6 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) if (!thub) return -ENOMEM; LIST_INIT(&thub->tunnels); - thub->tunnel_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TUNNELS); - if (!thub->tunnel_ids) { - err = -rte_errno; - goto err; - } - thub->table_ids = mlx5_flow_id_pool_alloc(MLX5_MAX_TABLES); - if (!thub->table_ids) { - err = -rte_errno; - goto err; - } thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES); if (!thub->groups) { err = -rte_errno; @@ -8221,10 +8215,6 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) err: if (thub->groups) mlx5_hlist_destroy(thub->groups, NULL, NULL); - if (thub->table_ids) - mlx5_flow_id_pool_release(thub->table_ids); - if (thub->tunnel_ids) - mlx5_flow_id_pool_release(thub->tunnel_ids); if (thub) mlx5_free(thub); return err; diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 6ca9d0c..b1cb4b2 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -952,8 +952,6 @@ struct mlx5_flow_tunnel { /** PMD tunnel related context */ struct mlx5_flow_tunnel_hub { LIST_HEAD(, mlx5_flow_tunnel) tunnels; - struct mlx5_flow_id_pool *tunnel_ids; - struct mlx5_flow_id_pool *table_ids; struct mlx5_hlist *groups; /** non tunnel groups */ }; @@ -1209,11 +1207,6 @@ struct mlx5_flow_driver_ops { /* mlx5_flow.c */ struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); -struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id); -void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool); -uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id); -uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, - uint32_t id); __extension__ struct flow_grp_info { uint64_t external:1; diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index c7d1441..896a9e8 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8127,8 +8127,8 @@ struct field_modify_info modify_tcp[] = { mlx5_hlist_remove(tunnel_grp_hash, he); mlx5_free(tte); } - mlx5_flow_id_release(mlx5_tunnel_hub(dev)->table_ids, - tunnel_flow_tbl_to_id(table_id)); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tunnel_flow_tbl_to_id(table_id)); DRV_LOG(DEBUG, "port %u release table_id %#x tunnel %u group %u", dev->data->port_id, table_id, diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c index 3a1f87a..7a6b0c6 100644 --- a/drivers/net/mlx5/mlx5_utils.c +++ b/drivers/net/mlx5/mlx5_utils.c @@ -362,6 +362,11 @@ struct mlx5_indexed_pool * MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx)); 
 	rte_bitmap_clear(trunk->bmp, iidx);
 	p = &trunk->data[iidx * pool->cfg.size];
+	/*
+	 * The ipool index should grow continuously from small to big;
+	 * some features, such as metering, only accept a limited number
+	 * of index bits. A random index with the MSB set may be rejected.
+	 */
 	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
 	iidx += 1; /* non-zero index. */
 	trunk->free--;

From patchwork Tue Oct 27 12:27:01 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 82293
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko
Cc: dev@dpdk.org, Xueming Li
Date: Tue, 27 Oct 2020 20:27:01 +0800
Message-Id: <1603801650-442376-7-git-send-email-suanmingm@nvidia.com>
In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 06/34] net/mlx5: make rte flow list thread safe

From: Xueming Li

To support multi-thread flow operations, this patch introduces a list lock for the rte_flow list, which manages all the rte_flow handlers.

Signed-off-by: Xueming Li
Acked-by: Matan Azrad
---
 drivers/net/mlx5/linux/mlx5_os.c | 1 +
 drivers/net/mlx5/mlx5.h | 1 +
 drivers/net/mlx5/mlx5_flow.c | 10 ++++++++--
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 0b59e74..a579dde 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1358,6 +1358,7 @@
 			       MLX5_MAX_MAC_ADDRESSES);
 	priv->flows = 0;
 	priv->ctrl_flows = 0;
+	rte_spinlock_init(&priv->flow_list_lock);
 	TAILQ_INIT(&priv->flow_meters);
 	TAILQ_INIT(&priv->flow_meter_profiles);
 	/* Hint libmlx5 to use PMD allocator for data plane resources */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 126f0a3..4446be2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -861,6 +861,7 @@ struct mlx5_priv {
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
 	uint32_t flows; /* RTE Flow rules. */
 	uint32_t ctrl_flows; /* Control flow rules. */
+	rte_spinlock_t flow_list_lock;
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues.
*/ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 93ca6cb..441fe4b 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -5765,9 +5765,12 @@ struct tunnel_default_miss_ctx { if (ret < 0) goto error; } - if (list) + if (list) { + rte_spinlock_lock(&priv->flow_list_lock); ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx, flow, next); + rte_spinlock_unlock(&priv->flow_list_lock); + } flow_rxq_flags_set(dev, flow); rte_free(translated_actions); /* Nested flow creation index recovery. */ @@ -5948,9 +5951,12 @@ struct rte_flow * if (dev->data->dev_started) flow_rxq_flags_trim(dev, flow); flow_drv_destroy(dev, flow); - if (list) + if (list) { + rte_spinlock_lock(&priv->flow_list_lock); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, flow_idx, flow, next); + rte_spinlock_unlock(&priv->flow_list_lock); + } flow_mreg_del_copy_action(dev, flow); if (flow->fdir) { LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { From patchwork Tue Oct 27 12:27:02 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82295 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id A4AC7A04B5; Tue, 27 Oct 2020 13:30:35 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id BDD8D4F9C; Tue, 27 Oct 2020 13:28:09 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 0D4B82C1A for ; Tue, 27 Oct 2020 13:27:52 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:49 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ75024637; Tue, 27 Oct 2020 14:27:48 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:02 +0800 Message-Id: <1603801650-442376-8-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 07/34] net/mlx5: make meter action thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit adds the spinlock for the meter action to make it be thread safe. Atomic reference counter in all is not enough as the meter action should be created synchronized with reference counter increment. With only atomic reference counter, even the counter is increased, the action may still not be created. 
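To illustrate why the lock must cover creation as well as the counter, here is a minimal sketch of the attach/detach pattern this patch adopts. The types and helper names (meter_sketch, meter_attach, meter_detach and the create/destroy callbacks) are hypothetical stand-ins, and the attribute-match checks of the real code are omitted; only the locking shape is shown.

#include <stdint.h>
#include <rte_spinlock.h>

/*
 * Hypothetical reduced meter object. With a bare atomic refcount, a
 * second thread may observe ref_cnt > 0 while the first thread has not
 * finished creating the action yet. Holding the spinlock across
 * "check, create, publish" closes that window.
 */
struct meter_sketch {
	rte_spinlock_t sl;
	uint32_t ref_cnt;
	void *action; /* created lazily on first attach */
};

static void *
meter_attach(struct meter_sketch *m, void *(*create)(void))
{
	void *act = NULL;

	rte_spinlock_lock(&m->sl);
	if (m->action != NULL) {
		/* Action already exists and is fully constructed. */
		m->ref_cnt++;
		act = m->action;
	} else {
		/* First user creates the action under the same lock. */
		m->action = create();
		if (m->action != NULL) {
			m->ref_cnt = 1;
			act = m->action;
		}
	}
	rte_spinlock_unlock(&m->sl);
	return act;
}

static void
meter_detach(struct meter_sketch *m, void (*destroy)(void *act))
{
	rte_spinlock_lock(&m->sl);
	if (m->ref_cnt > 0 && --m->ref_cnt == 0) {
		destroy(m->action);
		m->action = NULL;
	}
	rte_spinlock_unlock(&m->sl);
}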
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow.h | 2 ++ drivers/net/mlx5/mlx5_flow_meter.c | 72 ++++++++++++++++++++------------------ 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index b1cb4b2..be6b9bb 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -846,6 +846,8 @@ struct mlx5_flow_meter { struct mlx5_flow_meter_profile *profile; /**< Meter profile parameters. */ + rte_spinlock_t sl; /**< Meter action spinlock. */ + /** Policer actions (per meter output color). */ enum rte_mtr_policer_action action[RTE_COLORS]; diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c index b36bc7b..03a5e79 100644 --- a/drivers/net/mlx5/mlx5_flow_meter.c +++ b/drivers/net/mlx5/mlx5_flow_meter.c @@ -679,6 +679,7 @@ fm->shared = !!shared; fm->policer_stats.stats_mask = params->stats_mask; fm->profile->ref_cnt++; + rte_spinlock_init(&fm->sl); return 0; error: mlx5_flow_destroy_policer_rules(dev, fm, &attr); @@ -1167,49 +1168,49 @@ struct mlx5_flow_meter * struct rte_flow_error *error) { struct mlx5_flow_meter *fm; + int ret = 0; fm = mlx5_flow_meter_find(priv, meter_id); if (fm == NULL) { rte_flow_error_set(error, ENOENT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Meter object id not valid"); - goto error; - } - if (!fm->shared && fm->ref_cnt) { - DRV_LOG(ERR, "Cannot share a non-shared meter."); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Meter can't be shared"); - goto error; + return fm; } - if (!fm->ref_cnt++) { - MLX5_ASSERT(!fm->mfts->meter_action); + rte_spinlock_lock(&fm->sl); + if (fm->mfts->meter_action) { + if (fm->shared && + attr->transfer == fm->transfer && + attr->ingress == fm->ingress && + attr->egress == fm->egress) + fm->ref_cnt++; + else + ret = -1; + } else { fm->ingress = attr->ingress; fm->egress = attr->egress; fm->transfer = attr->transfer; + fm->ref_cnt = 1; /* This also creates the meter object. */ fm->mfts->meter_action = mlx5_flow_meter_action_create(priv, fm); - if (!fm->mfts->meter_action) - goto error_detach; - } else { - MLX5_ASSERT(fm->mfts->meter_action); - if (attr->transfer != fm->transfer || - attr->ingress != fm->ingress || - attr->egress != fm->egress) { - DRV_LOG(ERR, "meter I/O attributes do not " - "match flow I/O attributes."); - goto error_detach; + if (!fm->mfts->meter_action) { + fm->ref_cnt = 0; + fm->ingress = 0; + fm->egress = 0; + fm->transfer = 0; + ret = -1; + DRV_LOG(ERR, "Meter action create failed."); } } - return fm; -error_detach: - mlx5_flow_meter_detach(fm); - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - fm->mfts->meter_action ? "Meter attr not match" : - "Meter action create failed"); -error: - return NULL; + rte_spinlock_unlock(&fm->sl); + if (ret) + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + fm->mfts->meter_action ? + "Meter attr not match" : + "Meter action create failed"); + return ret ? 
NULL : fm; } /** @@ -1222,15 +1223,16 @@ struct mlx5_flow_meter * mlx5_flow_meter_detach(struct mlx5_flow_meter *fm) { #ifdef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER + rte_spinlock_lock(&fm->sl); MLX5_ASSERT(fm->ref_cnt); - if (--fm->ref_cnt) - return; - if (fm->mfts->meter_action) + if (--fm->ref_cnt == 0) { mlx5_glue->destroy_flow_action(fm->mfts->meter_action); - fm->mfts->meter_action = NULL; - fm->ingress = 0; - fm->egress = 0; - fm->transfer = 0; + fm->mfts->meter_action = NULL; + fm->ingress = 0; + fm->egress = 0; + fm->transfer = 0; + } + rte_spinlock_unlock(&fm->sl); #else (void)fm; #endif From patchwork Tue Oct 27 12:27:03 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82294 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id EC525A04B5; Tue, 27 Oct 2020 13:30:07 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 78B044C9B; Tue, 27 Oct 2020 13:28:07 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 1228F2C27 for ; Tue, 27 Oct 2020 13:27:53 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:51 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ76024637; Tue, 27 Oct 2020 14:27:49 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:03 +0800 Message-Id: <1603801650-442376-9-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 08/34] net/mlx5: make VLAN network interface thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit protects the VLAN VM workaround area using a spinlock in multiple-thread flow insertion to make it thread safe. 
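The protected region can be sketched as follows; the reduced types below (vmwa_sketch, vlan_acquire, create_if) are illustrative only and leave out the Netlink details, keeping just the refcount-plus-create critical section that the spinlock serializes.

#include <stdint.h>
#include <rte_spinlock.h>

/* Hypothetical reduced workaround context: one refcounted entry per tag. */
struct vlan_dev_sketch {
	uint32_t refcnt;
	uint32_t ifindex; /* 0 while no VLAN interface exists */
};

struct vmwa_sketch {
	rte_spinlock_t sl;
	struct vlan_dev_sketch vlan_dev[4096];
};

/*
 * Both the refcnt test and the interface creation must sit inside the
 * same critical section; otherwise two threads inserting flows with the
 * same VLAN tag could each try to create an interface for that tag.
 */
static uint32_t
vlan_acquire(struct vmwa_sketch *w, uint16_t tag,
	     uint32_t (*create_if)(uint16_t tag))
{
	uint32_t ifindex;

	rte_spinlock_lock(&w->sl);
	if (w->vlan_dev[tag].refcnt == 0)
		w->vlan_dev[tag].ifindex = create_if(tag);
	if (w->vlan_dev[tag].ifindex != 0)
		w->vlan_dev[tag].refcnt++;
	ifindex = w->vlan_dev[tag].ifindex;
	rte_spinlock_unlock(&w->sl);
	return ifindex;
}

The release path mirrors this: decrement under the same lock and delete the interface only when the counter reaches zero.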
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/common/mlx5/linux/mlx5_nl.h | 1 + drivers/net/mlx5/linux/mlx5_vlan_os.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/drivers/common/mlx5/linux/mlx5_nl.h b/drivers/common/mlx5/linux/mlx5_nl.h index 53021e1..15129ff 100644 --- a/drivers/common/mlx5/linux/mlx5_nl.h +++ b/drivers/common/mlx5/linux/mlx5_nl.h @@ -25,6 +25,7 @@ struct mlx5_nl_vlan_dev { struct mlx5_nl_vlan_vmwa_context { int nl_socket; uint32_t vf_ifindex; + rte_spinlock_t sl; struct mlx5_nl_vlan_dev vlan_dev[4096]; }; diff --git a/drivers/net/mlx5/linux/mlx5_vlan_os.c b/drivers/net/mlx5/linux/mlx5_vlan_os.c index 92fc17d..40e895e 100644 --- a/drivers/net/mlx5/linux/mlx5_vlan_os.c +++ b/drivers/net/mlx5/linux/mlx5_vlan_os.c @@ -44,12 +44,14 @@ if (!vlan->created || !vmwa) return; vlan->created = 0; + rte_spinlock_lock(&vmwa->sl); MLX5_ASSERT(vlan_dev[vlan->tag].refcnt); if (--vlan_dev[vlan->tag].refcnt == 0 && vlan_dev[vlan->tag].ifindex) { mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex); vlan_dev[vlan->tag].ifindex = 0; } + rte_spinlock_unlock(&vmwa->sl); } /** @@ -72,6 +74,7 @@ MLX5_ASSERT(priv->vmwa_context); if (vlan->created || !vmwa) return; + rte_spinlock_lock(&vmwa->sl); if (vlan_dev[vlan->tag].refcnt == 0) { MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex); vlan_dev[vlan->tag].ifindex = @@ -82,6 +85,7 @@ vlan_dev[vlan->tag].refcnt++; vlan->created = 1; } + rte_spinlock_unlock(&vmwa->sl); } /* @@ -131,6 +135,7 @@ " for VLAN workaround context"); return NULL; } + rte_spinlock_init(&vmwa->sl); vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE); if (vmwa->nl_socket < 0) { DRV_LOG(WARNING, From patchwork Tue Oct 27 12:27:04 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82297 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8DF87A04B5; Tue, 27 Oct 2020 13:31:31 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 74352593A; Tue, 27 Oct 2020 13:29:03 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 1984F2D41 for ; Tue, 27 Oct 2020 13:27:58 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:52 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ77024637; Tue, 27 Oct 2020 14:27:51 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:04 +0800 Message-Id: <1603801650-442376-10-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 09/34] net/mlx5: create global jump action X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit changes the jump action in table to be created with table creation in advanced. 
In this case, the jump action is safe to be used in multiple thread. The jump action will be destroyed when table is not used anymore and released. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow.h | 2 -- drivers/net/mlx5/mlx5_flow_dv.c | 54 ++++++++++++----------------------------- 2 files changed, 16 insertions(+), 40 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index be6b9bb..22349df 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -485,8 +485,6 @@ struct mlx5_flow_dv_modify_hdr_resource { /* Jump action resource structure. */ struct mlx5_flow_dv_jump_tbl_resource { - rte_atomic32_t refcnt; /**< Reference counter. */ - uint8_t ft_type; /**< Flow table type, Rx or Tx. */ void *action; /**< Pointer to the rdma core action. */ }; diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 896a9e8..767e580 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -2937,31 +2937,13 @@ struct field_modify_info modify_tcp[] = { (struct rte_eth_dev *dev __rte_unused, struct mlx5_flow_tbl_resource *tbl, struct mlx5_flow *dev_flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct mlx5_flow_tbl_data_entry *tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); - int cnt, ret; MLX5_ASSERT(tbl); - cnt = rte_atomic32_read(&tbl_data->jump.refcnt); - if (!cnt) { - ret = mlx5_flow_os_create_flow_action_dest_flow_tbl - (tbl->obj, &tbl_data->jump.action); - if (ret) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create jump action"); - DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++", - (void *)&tbl_data->jump, cnt); - } else { - /* old jump should not make the table ref++. */ - flow_dv_tbl_resource_release(dev, &tbl_data->tbl); - MLX5_ASSERT(tbl_data->jump.action); - DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++", - (void *)&tbl_data->jump, cnt); - } - rte_atomic32_inc(&tbl_data->jump.refcnt); + MLX5_ASSERT(tbl_data->jump.action); dev_flow->handle->rix_jump = tbl_data->idx; dev_flow->dv.jump = &tbl_data->jump; return 0; @@ -8059,8 +8041,19 @@ struct field_modify_info modify_tcp[] = { * count before insert it into the hash list. */ rte_atomic32_init(&tbl->refcnt); - /* Jump action reference count is initialized here. 
*/ - rte_atomic32_init(&tbl_data->jump.refcnt); + if (table_id) { + ret = mlx5_flow_os_create_flow_action_dest_flow_tbl + (tbl->obj, &tbl_data->jump.action); + if (ret) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot create flow jump action"); + mlx5_flow_os_destroy_flow_tbl(tbl->obj); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + return NULL; + } + } pos->key = table_key.v64; ret = mlx5_hlist_insert(sh->flow_tbls, pos); if (ret < 0) { @@ -10539,28 +10532,13 @@ struct field_modify_info modify_tcp[] = { struct mlx5_flow_handle *handle) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_dv_jump_tbl_resource *cache_resource; struct mlx5_flow_tbl_data_entry *tbl_data; tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP], handle->rix_jump); if (!tbl_data) return 0; - cache_resource = &tbl_data->jump; - MLX5_ASSERT(cache_resource->action); - DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - /* jump action memory free is inside the table release. */ - flow_dv_tbl_resource_release(dev, &tbl_data->tbl); - DRV_LOG(DEBUG, "jump table resource %p: removed", - (void *)cache_resource); - return 0; - } - return 1; + return flow_dv_tbl_resource_release(dev, &tbl_data->tbl); } /** From patchwork Tue Oct 27 12:27:05 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82298 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1A3B0A04B5; Tue, 27 Oct 2020 13:31:51 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id F30CB5958; Tue, 27 Oct 2020 13:29:04 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 2B4272DCC for ; Tue, 27 Oct 2020 13:27:58 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:54 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ78024637; Tue, 27 Oct 2020 14:27:53 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:05 +0800 Message-Id: <1603801650-442376-11-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 10/34] net/mlx5: create global default miss action X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit creates the global default miss action instead of maintain it in flow insertion time. This makes the action to be thread safe. 
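The resulting pattern, create once at initialization and reference without synchronization afterwards, can be sketched as below; shared_ctx_sketch and the callbacks are hypothetical stand-ins, not the driver structures.

#include <stddef.h>

/* Hypothetical shared context holding the immutable global action. */
struct shared_ctx_sketch {
	void *default_miss_action; /* created at init, freed at close */
};

static int
shared_ctx_init(struct shared_ctx_sketch *sh, void *(*create_miss)(void))
{
	/* Single-threaded init path: no locking needed here. */
	sh->default_miss_action = create_miss();
	return sh->default_miss_action != NULL ? 0 : -1;
}

/*
 * Flow insertion threads only read the pointer. Because the action is
 * never modified after init, no lock or reference counter is required.
 */
static int
flow_apply_miss(struct shared_ctx_sketch *sh, void **actions, int n)
{
	if (sh->default_miss_action == NULL)
		return -1; /* action unsupported on this device */
	actions[n] = sh->default_miss_action;
	return n + 1;
}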
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 7 ++++ drivers/net/mlx5/mlx5.h | 9 +--- drivers/net/mlx5/mlx5_flow_dv.c | 88 +++------------------------------------- 3 files changed, 13 insertions(+), 91 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index a579dde..ae735a3 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -305,6 +305,10 @@ } sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan(); #endif /* HAVE_MLX5DV_DR */ + sh->default_miss_action = + mlx5_glue->dr_create_flow_action_default_miss(); + if (!sh->default_miss_action) + DRV_LOG(WARNING, "Default miss action is not supported."); return 0; error: /* Rollback the created objects. */ @@ -388,6 +392,9 @@ } pthread_mutex_destroy(&sh->dv_mutex); #endif /* HAVE_MLX5DV_DR */ + if (sh->default_miss_action) + mlx5_glue->destroy_flow_action + (sh->default_miss_action); if (sh->encaps_decaps) { mlx5_hlist_destroy(sh->encaps_decaps, NULL, NULL); sh->encaps_decaps = NULL; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 4446be2..057a761 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -464,12 +464,6 @@ struct mlx5_flow_counter_mng { LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws; }; -/* Default miss action resource structure. */ -struct mlx5_flow_default_miss_resource { - void *action; /* Pointer to the rdma-core action. */ - rte_atomic32_t refcnt; /* Default miss action reference counter. */ -}; - #define MLX5_AGE_EVENT_NEW 1 #define MLX5_AGE_TRIGGER 2 #define MLX5_AGE_SET(age_info, BIT) \ @@ -664,8 +658,7 @@ struct mlx5_dev_ctx_shared { uint32_t sample_action_list; /* List of sample actions. */ uint32_t dest_array_list; /* List of destination array actions. */ struct mlx5_flow_counter_mng cmng; /* Counters management structure. */ - struct mlx5_flow_default_miss_resource default_miss; - /* Default miss action resource structure. */ + void *default_miss_action; /* Default miss action. */ struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX]; /* Memory Pool for mlx5 flow resources. */ struct mlx5_l3t_tbl *cnt_id_tbl; /* Shared counter lookup table. */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 767e580..b28cc6d 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -74,9 +74,6 @@ struct mlx5_flow_tbl_resource *tbl); static int -flow_dv_default_miss_resource_release(struct rte_eth_dev *dev); - -static int flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, uint32_t encap_decap_idx); @@ -2950,42 +2947,6 @@ struct field_modify_info modify_tcp[] = { } /** - * Find existing default miss resource or create and register a new one. - * - * @param[in, out] dev - * Pointer to rte_eth_dev structure. - * @param[out] error - * pointer to error structure. - * - * @return - * 0 on success otherwise -errno and errno is set. 
- */ -static int -flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, - struct rte_flow_error *error) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_default_miss_resource *cache_resource = - &sh->default_miss; - int cnt = rte_atomic32_read(&cache_resource->refcnt); - - if (!cnt) { - MLX5_ASSERT(cache_resource->action); - cache_resource->action = - mlx5_glue->dr_create_flow_action_default_miss(); - if (!cache_resource->action) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot create default miss action"); - DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++", - (void *)cache_resource->action, cnt); - } - rte_atomic32_inc(&cache_resource->refcnt); - return 0; -} - -/** * Find existing table port ID resource or create and register a new one. * * @param[in, out] dev @@ -10384,16 +10345,14 @@ struct field_modify_info modify_tcp[] = { dh->rix_hrxq = hrxq_idx; dv->actions[n++] = hrxq->action; } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { - if (flow_dv_default_miss_resource_register - (dev, error)) { + if (!priv->sh->default_miss_action) { rte_flow_error_set (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot create default miss resource"); - goto error_default_miss; + "default miss action not be created."); + goto error; } - dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS; - dv->actions[n++] = priv->sh->default_miss.action; + dv->actions[n++] = priv->sh->default_miss_action; } err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, (void *)&dv->value, n, @@ -10418,9 +10377,6 @@ struct field_modify_info modify_tcp[] = { } return 0; error: - if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) - flow_dv_default_miss_resource_release(dev); -error_default_miss: err = rte_errno; /* Save rte_errno before cleanup. */ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dh, next) { @@ -10542,36 +10498,6 @@ struct field_modify_info modify_tcp[] = { } /** - * Release a default miss resource. - * - * @param dev - * Pointer to Ethernet device. - * @return - * 1 while a reference on it exists, 0 when freed. - */ -static int -flow_dv_default_miss_resource_release(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_default_miss_resource *cache_resource = - &sh->default_miss; - - MLX5_ASSERT(cache_resource->action); - DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--", - (void *)cache_resource->action, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->action)); - DRV_LOG(DEBUG, "default miss resource %p: removed", - (void *)cache_resource->action); - return 0; - } - return 1; -} - -/** * Release a modify-header resource. 
* * @param dev @@ -10717,9 +10643,6 @@ struct field_modify_info modify_tcp[] = { flow_dv_port_id_action_resource_release(dev, handle->rix_port_id_action); break; - case MLX5_FLOW_FATE_DEFAULT_MISS: - flow_dv_default_miss_resource_release(dev); - break; default: DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); break; @@ -10890,8 +10813,7 @@ struct field_modify_info modify_tcp[] = { dh->drv_flow = NULL; } if (dh->fate_action == MLX5_FLOW_FATE_DROP || - dh->fate_action == MLX5_FLOW_FATE_QUEUE || - dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) + dh->fate_action == MLX5_FLOW_FATE_QUEUE) flow_dv_fate_resource_release(dev, dh); if (dh->vf_vlan.tag && dh->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); From patchwork Tue Oct 27 12:27:06 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82296 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 972BBA04B5; Tue, 27 Oct 2020 13:30:56 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 0083B2C55; Tue, 27 Oct 2020 13:29:01 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 8BB553253 for ; Tue, 27 Oct 2020 13:27:58 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:56 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ79024637; Tue, 27 Oct 2020 14:27:55 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:06 +0800 Message-Id: <1603801650-442376-12-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 11/34] net/mlx5: create global drop action X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit creates the global drop action for flows instead of maintain it in flow insertion time. The uniqueu global drop action makes it thread safe. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 5 +++++ drivers/net/mlx5/mlx5.c | 2 ++ drivers/net/mlx5/mlx5_flow_dv.c | 38 +++++++---------------------------- drivers/net/mlx5/mlx5_flow_verbs.c | 41 +++++++++++--------------------------- 4 files changed, 26 insertions(+), 60 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index ae735a3..b12d1d5 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1424,6 +1424,9 @@ } else { priv->obj_ops = ibv_obj_ops; } + priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev); + if (!priv->drop_queue.hrxq) + goto error; /* Supported Verbs flow priority number detection. 
*/ err = mlx5_flow_discover_priorities(eth_dev); if (err < 0) { @@ -1489,6 +1492,8 @@ close(priv->nl_socket_rdma); if (priv->vmwa_context) mlx5_vlan_vmwa_exit(priv->vmwa_context); + if (eth_dev && priv->drop_queue.hrxq) + mlx5_drop_action_destroy(eth_dev); if (own_domain_id) claim_zero(rte_eth_switch_domain_free(priv->domain_id)); mlx5_free(priv); diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index db0243f..307d279 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1311,6 +1311,8 @@ struct mlx5_dev_ctx_shared * priv->txqs = NULL; } mlx5_proc_priv_uninit(dev); + if (priv->drop_queue.hrxq) + mlx5_drop_action_destroy(dev); if (priv->mreg_cp_tbl) mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); mlx5_mprq_free_mp(dev); diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index b28cc6d..50e8ff4 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -10310,24 +10310,9 @@ struct field_modify_info modify_tcp[] = { if (dv->transfer) { dv->actions[n++] = priv->sh->esw_drop_action; } else { - struct mlx5_hrxq *drop_hrxq; - drop_hrxq = mlx5_drop_action_create(dev); - if (!drop_hrxq) { - rte_flow_error_set - (error, errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot get drop hash queue"); - goto error; - } - /* - * Drop queues will be released by the specify - * mlx5_drop_action_destroy() function. Assign - * the special index to hrxq to mark the queue - * has been allocated. - */ - dh->rix_hrxq = UINT32_MAX; - dv->actions[n++] = drop_hrxq->action; + MLX5_ASSERT(priv->drop_queue.hrxq); + dv->actions[n++] = + priv->drop_queue.hrxq->action; } } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && !dv_h->rix_sample && !dv_h->rix_dest_array) { @@ -10381,14 +10366,9 @@ struct field_modify_info modify_tcp[] = { SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dh, next) { /* hrxq is union, don't clear it if the flag is not set. 
*/ - if (dh->rix_hrxq) { - if (dh->fate_action == MLX5_FLOW_FATE_DROP) { - mlx5_drop_action_destroy(dev); - dh->rix_hrxq = 0; - } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { - mlx5_hrxq_release(dev, dh->rix_hrxq); - dh->rix_hrxq = 0; - } + if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { + mlx5_hrxq_release(dev, dh->rix_hrxq); + dh->rix_hrxq = 0; } if (dh->vf_vlan.tag && dh->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); @@ -10630,9 +10610,6 @@ struct field_modify_info modify_tcp[] = { if (!handle->rix_fate) return; switch (handle->fate_action) { - case MLX5_FLOW_FATE_DROP: - mlx5_drop_action_destroy(dev); - break; case MLX5_FLOW_FATE_QUEUE: mlx5_hrxq_release(dev, handle->rix_hrxq); break; @@ -10812,8 +10789,7 @@ struct field_modify_info modify_tcp[] = { claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow)); dh->drv_flow = NULL; } - if (dh->fate_action == MLX5_FLOW_FATE_DROP || - dh->fate_action == MLX5_FLOW_FATE_QUEUE) + if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) flow_dv_fate_resource_release(dev, dh); if (dh->vf_vlan.tag && dh->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index 052cfd4..ba6731a 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -72,12 +72,12 @@ }, }; struct ibv_flow *flow; - struct mlx5_hrxq *drop = mlx5_drop_action_create(dev); + struct mlx5_hrxq *drop = priv->drop_queue.hrxq; uint16_t vprio[] = { 8, 16 }; int i; int priority = 0; - if (!drop) { + if (!drop->qp) { rte_errno = ENOTSUP; return -rte_errno; } @@ -89,7 +89,6 @@ claim_zero(mlx5_glue->destroy_flow(flow)); priority = vprio[i]; } - mlx5_drop_action_destroy(dev); switch (priority) { case 8: priority = RTE_DIM(priority_map_3); @@ -1881,15 +1880,10 @@ handle->drv_flow = NULL; } /* hrxq is union, don't touch it only the flag is set. */ - if (handle->rix_hrxq) { - if (handle->fate_action == MLX5_FLOW_FATE_DROP) { - mlx5_drop_action_destroy(dev); - handle->rix_hrxq = 0; - } else if (handle->fate_action == - MLX5_FLOW_FATE_QUEUE) { - mlx5_hrxq_release(dev, handle->rix_hrxq); - handle->rix_hrxq = 0; - } + if (handle->rix_hrxq && + handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; } if (handle->vf_vlan.tag && handle->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); @@ -1961,14 +1955,8 @@ dev_flow = &wks->flows[idx]; handle = dev_flow->handle; if (handle->fate_action == MLX5_FLOW_FATE_DROP) { - hrxq = mlx5_drop_action_create(dev); - if (!hrxq) { - rte_flow_error_set - (error, errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot get drop hash queue"); - goto error; - } + MLX5_ASSERT(priv->drop_queue.hrxq); + hrxq = priv->drop_queue.hrxq; } else { uint32_t hrxq_idx; struct mlx5_flow_rss_desc *rss_desc = @@ -2028,15 +2016,10 @@ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, dev_handles, handle, next) { /* hrxq is union, don't touch it only the flag is set. 
*/ - if (handle->rix_hrxq) { - if (handle->fate_action == MLX5_FLOW_FATE_DROP) { - mlx5_drop_action_destroy(dev); - handle->rix_hrxq = 0; - } else if (handle->fate_action == - MLX5_FLOW_FATE_QUEUE) { - mlx5_hrxq_release(dev, handle->rix_hrxq); - handle->rix_hrxq = 0; - } + if (handle->rix_hrxq && + handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; } if (handle->vf_vlan.tag && handle->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); From patchwork Tue Oct 27 12:27:07 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82299 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 868C4A04B5; Tue, 27 Oct 2020 13:32:14 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 8CADD5AA4; Tue, 27 Oct 2020 13:29:06 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 69B0D493D for ; Tue, 27 Oct 2020 13:28:03 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:58 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7A024637; Tue, 27 Oct 2020 14:27:56 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:07 +0800 Message-Id: <1603801650-442376-13-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 12/34] net/mlx5: support concurrent access for hash list X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li In order to support hash list concurrent access, adding next: 1. List level read/write lock. 2. Entry reference counter. 3. Entry create/match/remove callback. 4. Remove insert/lookup/remove function which are not thread safe. 5. Add register/unregister function to support entry reuse. For better performance, lookup function uses read lock to allow concurrent lookup from different thread, all other hash list modification functions uses write lock which blocks concurrent modification and lookups from other thread. The exact objects change will be applied in the next patches. 
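A reduced sketch of the registration path described above follows; the bucket search and entry types are abstracted behind hypothetical callbacks so that only the read/write-lock split and the generation-counter re-check are shown.

#include <stdint.h>
#include <rte_rwlock.h>

/* Hypothetical reduced hash list head. */
struct hlist_sketch {
	rte_rwlock_t lock;
	uint32_t gen_cnt; /* incremented on every insertion */
	/* ... bucket heads ... */
};

/*
 * lookup() walks a bucket without taking locks itself and takes a
 * reference on a hit; create() allocates the entry, inserts it into
 * its bucket and sets its reference count to 1.
 */
static void *
hlist_register_sketch(struct hlist_sketch *h, uint64_t key,
		      void *(*lookup)(struct hlist_sketch *h, uint64_t key),
		      void *(*create)(struct hlist_sketch *h, uint64_t key))
{
	uint32_t gen;
	void *e;

	/* Fast path: concurrent readers may search in parallel. */
	gen = __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE);
	rte_rwlock_read_lock(&h->lock);
	e = lookup(h, key);
	rte_rwlock_read_unlock(&h->lock);
	if (e != NULL)
		return e;
	rte_rwlock_write_lock(&h->lock);
	/* Re-check only if another thread modified the list meanwhile. */
	if (gen != __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE))
		e = lookup(h, key);
	if (e == NULL) {
		e = create(h, key);
		if (e != NULL)
			__atomic_add_fetch(&h->gen_cnt, 1, __ATOMIC_ACQ_REL);
	}
	rte_rwlock_write_unlock(&h->lock);
	return e;
}

For a write-most list the read-locked attempt is skipped entirely and the function goes straight to the write lock, since the fast path would almost always miss.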
Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 27 ++++--- drivers/net/mlx5/mlx5.c | 13 ++-- drivers/net/mlx5/mlx5_flow.c | 23 +++--- drivers/net/mlx5/mlx5_flow_dv.c | 9 ++- drivers/net/mlx5/mlx5_utils.c | 154 ++++++++++++++++++++++++++++++++------- drivers/net/mlx5/mlx5_utils.h | 149 ++++++++++++++++++++++++++++++------- 6 files changed, 287 insertions(+), 88 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index b12d1d5..4db5d33 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -236,14 +236,16 @@ return err; /* Create tags hash list table. */ snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); - sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE); + sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0, + 0, NULL, NULL, NULL); if (!sh->tag_table) { DRV_LOG(ERR, "tags with hash creation failed."); err = ENOMEM; goto error; } snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name); - sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ); + sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ, + 0, 0, NULL, NULL, NULL); if (!sh->modify_cmds) { DRV_LOG(ERR, "hdr modify hash creation failed"); err = ENOMEM; @@ -251,7 +253,8 @@ } snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name); sh->encaps_decaps = mlx5_hlist_create(s, - MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ); + MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ, + 0, 0, NULL, NULL, NULL); if (!sh->encaps_decaps) { DRV_LOG(ERR, "encap decap hash creation failed"); err = ENOMEM; @@ -333,16 +336,16 @@ sh->pop_vlan_action = NULL; } if (sh->encaps_decaps) { - mlx5_hlist_destroy(sh->encaps_decaps, NULL, NULL); + mlx5_hlist_destroy(sh->encaps_decaps); sh->encaps_decaps = NULL; } if (sh->modify_cmds) { - mlx5_hlist_destroy(sh->modify_cmds, NULL, NULL); + mlx5_hlist_destroy(sh->modify_cmds); sh->modify_cmds = NULL; } if (sh->tag_table) { /* tags should be destroyed with flow before. */ - mlx5_hlist_destroy(sh->tag_table, NULL, NULL); + mlx5_hlist_destroy(sh->tag_table); sh->tag_table = NULL; } if (sh->tunnel_hub) { @@ -396,16 +399,16 @@ mlx5_glue->destroy_flow_action (sh->default_miss_action); if (sh->encaps_decaps) { - mlx5_hlist_destroy(sh->encaps_decaps, NULL, NULL); + mlx5_hlist_destroy(sh->encaps_decaps); sh->encaps_decaps = NULL; } if (sh->modify_cmds) { - mlx5_hlist_destroy(sh->modify_cmds, NULL, NULL); + mlx5_hlist_destroy(sh->modify_cmds); sh->modify_cmds = NULL; } if (sh->tag_table) { /* tags should be destroyed with flow before. 
*/ - mlx5_hlist_destroy(sh->tag_table, NULL, NULL); + mlx5_hlist_destroy(sh->tag_table); sh->tag_table = NULL; } if (sh->tunnel_hub) { @@ -1472,7 +1475,9 @@ mlx5_flow_ext_mreg_supported(eth_dev) && priv->sh->dv_regc0_mask) { priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, - MLX5_FLOW_MREG_HTABLE_SZ); + MLX5_FLOW_MREG_HTABLE_SZ, + 0, 0, + NULL, NULL, NULL); if (!priv->mreg_cp_tbl) { err = ENOMEM; goto error; @@ -1483,7 +1488,7 @@ error: if (priv) { if (priv->mreg_cp_tbl) - mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); + mlx5_hlist_destroy(priv->mreg_cp_tbl); if (priv->sh) mlx5_os_free_shared_dr(priv); if (priv->nl_socket_route >= 0) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 307d279..42ab40b 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1034,7 +1034,7 @@ struct mlx5_dev_ctx_shared * if (!sh->flow_tbls) return; - pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); + pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); if (pos) { tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, entry); @@ -1043,7 +1043,7 @@ struct mlx5_dev_ctx_shared * mlx5_free(tbl_data); } table_key.direction = 1; - pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); + pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); if (pos) { tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, entry); @@ -1053,7 +1053,7 @@ struct mlx5_dev_ctx_shared * } table_key.direction = 0; table_key.domain = 1; - pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); + pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); if (pos) { tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, entry); @@ -1061,7 +1061,7 @@ struct mlx5_dev_ctx_shared * mlx5_hlist_remove(sh->flow_tbls, pos); mlx5_free(tbl_data); } - mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL); + mlx5_hlist_destroy(sh->flow_tbls); } /** @@ -1083,7 +1083,8 @@ struct mlx5_dev_ctx_shared * MLX5_ASSERT(sh); snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); - sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE); + sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE, + 0, 0, NULL, NULL, NULL); if (!sh->flow_tbls) { DRV_LOG(ERR, "flow tables with hash creation failed."); err = ENOMEM; @@ -1314,7 +1315,7 @@ struct mlx5_dev_ctx_shared * if (priv->drop_queue.hrxq) mlx5_drop_action_destroy(dev); if (priv->mreg_cp_tbl) - mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); + mlx5_hlist_destroy(priv->mreg_cp_tbl); mlx5_mprq_free_mp(dev); mlx5_os_free_shared_dr(priv); if (priv->rss_conf.rss_key != NULL) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 441fe4b..95258ab 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -3738,7 +3738,7 @@ struct rte_flow_shared_action * cp_mreg.src = ret; /* Check if already registered. */ MLX5_ASSERT(priv->mreg_cp_tbl); - mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); + mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, NULL); if (mcp_res) { /* For non-default rule. 
*/ if (mark_id != MLX5_DEFAULT_COPY_ID) @@ -3815,8 +3815,7 @@ struct rte_flow_shared_action * goto error; mcp_res->refcnt++; mcp_res->hlist_ent.key = mark_id; - ret = mlx5_hlist_insert(priv->mreg_cp_tbl, - &mcp_res->hlist_ent); + ret = !mlx5_hlist_insert(priv->mreg_cp_tbl, &mcp_res->hlist_ent); MLX5_ASSERT(!ret); if (ret) goto error; @@ -3966,7 +3965,7 @@ struct rte_flow_shared_action * if (!priv->mreg_cp_tbl) return; mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, - MLX5_DEFAULT_COPY_ID); + MLX5_DEFAULT_COPY_ID, NULL); if (!mcp_res) return; MLX5_ASSERT(mcp_res->rix_flow); @@ -7553,7 +7552,7 @@ struct mlx5_meter_domains_infos * .direction = 0, } }; - he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); + he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); return he ? container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; } @@ -7575,7 +7574,7 @@ struct mlx5_meter_domains_infos * struct mlx5_hlist *group_hash; group_hash = tunnel ? tunnel->groups : thub->groups; - he = mlx5_hlist_lookup(group_hash, key.val); + he = mlx5_hlist_lookup(group_hash, key.val, NULL); if (!he) { tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tte), 0, @@ -8082,7 +8081,7 @@ struct mlx5_meter_domains_infos * LIST_REMOVE(tunnel, chain); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], tunnel->tunnel_id); - mlx5_hlist_destroy(tunnel->groups, NULL, NULL); + mlx5_hlist_destroy(tunnel->groups); mlx5_free(tunnel); } @@ -8130,7 +8129,8 @@ struct mlx5_meter_domains_infos * [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); return NULL; } - tunnel->groups = mlx5_hlist_create("tunnel groups", 1024); + tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0, + NULL, NULL, NULL); if (!tunnel->groups) { mlx5_ipool_free(priv->sh->ipool [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); @@ -8195,7 +8195,7 @@ void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id) return; if (!LIST_EMPTY(&thub->tunnels)) DRV_LOG(WARNING, "port %u tunnels present\n", port_id); - mlx5_hlist_destroy(thub->groups, NULL, NULL); + mlx5_hlist_destroy(thub->groups); mlx5_free(thub); } @@ -8209,7 +8209,8 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) if (!thub) return -ENOMEM; LIST_INIT(&thub->tunnels); - thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES); + thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0, + 0, NULL, NULL, NULL); if (!thub->groups) { err = -rte_errno; goto err; @@ -8220,7 +8221,7 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) err: if (thub->groups) - mlx5_hlist_destroy(thub->groups, NULL, NULL); + mlx5_hlist_destroy(thub->groups); if (thub) mlx5_free(thub); return err; diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 50e8ff4..3ae5a95 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -7956,7 +7956,7 @@ struct field_modify_info modify_tcp[] = { } }; struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls, - table_key.v64); + table_key.v64, NULL); struct mlx5_flow_tbl_data_entry *tbl_data; uint32_t idx = 0; int ret; @@ -8016,7 +8016,7 @@ struct field_modify_info modify_tcp[] = { } } pos->key = table_key.v64; - ret = mlx5_hlist_insert(sh->flow_tbls, pos); + ret = !mlx5_hlist_insert(sh->flow_tbls, pos); if (ret < 0) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -8073,7 +8073,8 @@ struct field_modify_info modify_tcp[] = { tunnel_grp_hash = tbl_data->tunnel ? 
tbl_data->tunnel->groups : thub->groups; - he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val); + he = mlx5_hlist_lookup(tunnel_grp_hash, + tunnel_key.val, NULL); if (he) { struct tunnel_tbl_entry *tte; tte = container_of(he, typeof(*tte), hash); @@ -8231,7 +8232,7 @@ struct field_modify_info modify_tcp[] = { int ret; /* Lookup a matching resource from cache. */ - entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24); + entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24, NULL); if (entry) { cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c index 7a6b0c6..d041b07 100644 --- a/drivers/net/mlx5/mlx5_utils.c +++ b/drivers/net/mlx5/mlx5_utils.c @@ -9,14 +9,40 @@ #include "mlx5_utils.h" +/********************* Hash List **********************/ + +static struct mlx5_hlist_entry * +mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused, + void *ctx __rte_unused) +{ + return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY); +} + +static void +mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused, + struct mlx5_hlist_entry *entry) +{ + mlx5_free(entry); +} + +static int +mlx5_hlist_default_match_cb(struct mlx5_hlist *h __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key, void *ctx __rte_unused) +{ + return entry->key != key; +} + struct mlx5_hlist * -mlx5_hlist_create(const char *name, uint32_t size) +mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size, + uint32_t flags, mlx5_hlist_create_cb cb_create, + mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove) { struct mlx5_hlist *h; uint32_t act_size; uint32_t alloc_size; - if (!size) + if (!size || (!cb_create ^ !cb_remove)) return NULL; /* Align to the next power of 2, 32bits integer is enough now. */ if (!rte_is_power_of_2(size)) { @@ -40,45 +66,108 @@ struct mlx5_hlist * snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name); h->table_sz = act_size; h->mask = act_size - 1; + h->entry_sz = entry_size; + h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY); + h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST); + h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb; + h->cb_match = cb_match ? cb_match : mlx5_hlist_default_match_cb; + h->cb_remove = cb_remove ? 
cb_remove : mlx5_hlist_default_remove_cb; + rte_rwlock_init(&h->lock); DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.", h->name, act_size); return h; } -struct mlx5_hlist_entry * -mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key) +static struct mlx5_hlist_entry * +__hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse) { uint32_t idx; struct mlx5_hlist_head *first; struct mlx5_hlist_entry *node; MLX5_ASSERT(h); - idx = rte_hash_crc_8byte(key, 0) & h->mask; + if (h->direct_key) + idx = (uint32_t)(key & h->mask); + else + idx = rte_hash_crc_8byte(key, 0) & h->mask; first = &h->heads[idx]; LIST_FOREACH(node, first, next) { - if (node->key == key) - return node; + if (!h->cb_match(h, node, key, ctx)) { + if (reuse) { + __atomic_add_fetch(&node->ref_cnt, 1, + __ATOMIC_RELAXED); + DRV_LOG(DEBUG, "Hash list %s entry %p " + "reuse: %u.", + h->name, (void *)node, node->ref_cnt); + } + break; + } } - return NULL; + return node; } -int -mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry) +static struct mlx5_hlist_entry * +hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse) +{ + struct mlx5_hlist_entry *node; + + MLX5_ASSERT(h); + rte_rwlock_read_lock(&h->lock); + node = __hlist_lookup(h, key, ctx, reuse); + rte_rwlock_read_unlock(&h->lock); + return node; +} + +struct mlx5_hlist_entry * +mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx) +{ + return hlist_lookup(h, key, ctx, false); +} + +struct mlx5_hlist_entry* +mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx) { uint32_t idx; struct mlx5_hlist_head *first; - struct mlx5_hlist_entry *node; + struct mlx5_hlist_entry *entry; + uint32_t prev_gen_cnt = 0; MLX5_ASSERT(h && entry); - idx = rte_hash_crc_8byte(entry->key, 0) & h->mask; + /* Use write lock directly for write-most list. */ + if (!h->write_most) { + prev_gen_cnt = __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE); + entry = hlist_lookup(h, key, ctx, true); + if (entry) + return entry; + } + rte_rwlock_write_lock(&h->lock); + /* Check if the list changed by other threads. */ + if (h->write_most || + prev_gen_cnt != __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE)) { + entry = __hlist_lookup(h, key, ctx, true); + if (entry) + goto done; + } + if (h->direct_key) + idx = (uint32_t)(key & h->mask); + else + idx = rte_hash_crc_8byte(key, 0) & h->mask; first = &h->heads[idx]; - /* No need to reuse the lookup function. 
*/ - LIST_FOREACH(node, first, next) { - if (node->key == entry->key) - return -EEXIST; + entry = h->cb_create(h, key, ctx); + if (!entry) { + rte_errno = ENOMEM; + DRV_LOG(ERR, "Can't allocate hash list %s entry.", h->name); + goto done; } + entry->key = key; + entry->ref_cnt = 1; LIST_INSERT_HEAD(first, entry, next); - return 0; + __atomic_add_fetch(&h->gen_cnt, 1, __ATOMIC_ACQ_REL); + DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.", + h->name, (void *)entry, entry->ref_cnt); +done: + rte_rwlock_write_unlock(&h->lock); + return entry; } struct mlx5_hlist_entry * @@ -119,26 +208,36 @@ struct mlx5_hlist_entry * return 0; } -void -mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused, - struct mlx5_hlist_entry *entry) +int +mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry) { - MLX5_ASSERT(entry && entry->next.le_prev); + rte_rwlock_write_lock(&h->lock); + MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev); + DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.", + h->name, (void *)entry, entry->ref_cnt); + if (--entry->ref_cnt) { + rte_rwlock_write_unlock(&h->lock); + return 1; + } LIST_REMOVE(entry, next); /* Set to NULL to get rid of removing action for more than once. */ entry->next.le_prev = NULL; + h->cb_remove(h, entry); + rte_rwlock_write_unlock(&h->lock); + DRV_LOG(DEBUG, "Hash list %s entry %p removed.", + h->name, (void *)entry); + return 0; } void -mlx5_hlist_destroy(struct mlx5_hlist *h, - mlx5_hlist_destroy_callback_fn cb, void *ctx) +mlx5_hlist_destroy(struct mlx5_hlist *h) { uint32_t idx; struct mlx5_hlist_entry *entry; MLX5_ASSERT(h); for (idx = 0; idx < h->table_sz; ++idx) { - /* no LIST_FOREACH_SAFE, using while instead */ + /* No LIST_FOREACH_SAFE, using while instead. */ while (!LIST_EMPTY(&h->heads[idx])) { entry = LIST_FIRST(&h->heads[idx]); LIST_REMOVE(entry, next); @@ -150,15 +249,14 @@ struct mlx5_hlist_entry * * the beginning). Or else the default free function * will be used. */ - if (cb) - cb(entry, ctx); - else - mlx5_free(entry); + h->cb_remove(h, entry); } } mlx5_free(h); } +/********************* Indexed pool **********************/ + static inline void mlx5_ipool_lock(struct mlx5_indexed_pool *pool) { diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h index ca9bb76..c665558 100644 --- a/drivers/net/mlx5/mlx5_utils.h +++ b/drivers/net/mlx5/mlx5_utils.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -20,6 +21,11 @@ #include "mlx5_defs.h" +#define mlx5_hlist_remove(h, e) \ + mlx5_hlist_unregister(h, e) + +#define mlx5_hlist_insert(h, e) \ + mlx5_hlist_register(h, 0, e) /* Convert a bit number to the corresponding 64-bit mask */ #define MLX5_BITSHIFT(v) (UINT64_C(1) << (v)) @@ -259,9 +265,14 @@ struct mlx5_indexed_pool { return l + r; } +#define MLX5_HLIST_DIRECT_KEY 0x0001 /* Use the key directly as hash index. */ +#define MLX5_HLIST_WRITE_MOST 0x0002 /* List mostly used for append new. */ + /** Maximum size of string for naming the hlist table. */ #define MLX5_HLIST_NAMESIZE 32 +struct mlx5_hlist; + /** * Structure of the entry in the hash list, user should define its own struct * that contains this in order to store the data. The 'key' is 64-bits right @@ -270,6 +281,7 @@ struct mlx5_indexed_pool { struct mlx5_hlist_entry { LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. */ uint64_t key; /* user defined 'key', could be the hash signature. */ + uint32_t ref_cnt; /* Reference count. */ }; /** Structure for hash head. 
*/ @@ -292,13 +304,77 @@ struct mlx5_hlist_entry { typedef int (*mlx5_hlist_match_callback_fn)(struct mlx5_hlist_entry *entry, void *ctx); -/** hash list table structure */ +/** + * Type of callback function for entry removal. + * + * @param list + * The hash list. + * @param entry + * The entry in the list. + */ +typedef void (*mlx5_hlist_remove_cb)(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry); + +/** + * Type of function for user defined matching. + * + * @param list + * The hash list. + * @param entry + * The entry in the list. + * @param key + * The new entry key. + * @param ctx + * The pointer to new entry context. + * + * @return + * 0 if matching, non-zero number otherwise. + */ +typedef int (*mlx5_hlist_match_cb)(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry, + uint64_t key, void *ctx); + +/** + * Type of function for user defined hash list entry creation. + * + * @param list + * The hash list. + * @param key + * The key of the new entry. + * @param ctx + * The pointer to new entry context. + * + * @return + * Pointer to allocated entry on success, NULL otherwise. + */ +typedef struct mlx5_hlist_entry *(*mlx5_hlist_create_cb) + (struct mlx5_hlist *list, + uint64_t key, void *ctx); + +/** + * Hash list table structure + * + * Entry in hash list could be reused if entry already exists, reference + * count will increase and the existing entry returns. + * + * When destroy an entry from list, decrease reference count and only + * destroy when no further reference. + */ struct mlx5_hlist { char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */ /**< number of heads, need to be power of 2. */ uint32_t table_sz; + uint32_t entry_sz; /**< Size of entry, used to allocate entry. */ /**< mask to get the index of the list heads. */ uint32_t mask; + rte_rwlock_t lock; + uint32_t gen_cnt; /* List modification will update generation count. */ + bool direct_key; /* Use the new entry key directly as hash index. */ + bool write_most; /* List mostly used for append new or destroy. */ + void *ctx; + mlx5_hlist_create_cb cb_create; /**< entry create callback. */ + mlx5_hlist_match_cb cb_match; /**< entry match callback. */ + mlx5_hlist_remove_cb cb_remove; /**< entry remove callback. */ struct mlx5_hlist_head heads[]; /**< list head arrays. */ }; @@ -314,40 +390,43 @@ struct mlx5_hlist { * Name of the hash list(optional). * @param size * Heads array size of the hash list. - * + * @param entry_size + * Entry size to allocate if cb_create not specified. + * @param flags + * The hash list attribute flags. + * @param cb_create + * Callback function for entry create. + * @param cb_match + * Callback function for entry match. + * @param cb_destroy + * Callback function for entry destroy. * @return * Pointer of the hash list table created, NULL on failure. */ -struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size); +struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size, + uint32_t entry_size, uint32_t flags, + mlx5_hlist_create_cb cb_create, + mlx5_hlist_match_cb cb_match, + mlx5_hlist_remove_cb cb_destroy); /** * Search an entry matching the key. * + * Result returned might be destroyed by other thread, must use + * this function only in main thread. + * * @param h * Pointer to the hast list table. * @param key * Key for the searching entry. + * @param ctx + * Common context parameter used by entry callback function. * * @return * Pointer of the hlist entry if found, NULL otherwise. 
*/ -struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key); - -/** - * Insert an entry to the hash list table, the entry is only part of whole data - * element and a 64B key is used for matching. User should construct the key or - * give a calculated hash signature and guarantee there is no collision. - * - * @param h - * Pointer to the hast list table. - * @param entry - * Entry to be inserted into the hash list table. - * - * @return - * - zero for success. - * - -EEXIST if the entry is already inserted. - */ -int mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry); +struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, + void *ctx); /** * Extended routine to search an entry matching the context with @@ -393,6 +472,24 @@ int mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry, mlx5_hlist_match_callback_fn cb, void *ctx); /** + * Insert an entry to the hash list table, the entry is only part of whole data + * element and a 64B key is used for matching. User should construct the key or + * give a calculated hash signature and guarantee there is no collision. + * + * @param h + * Pointer to the hast list table. + * @param entry + * Entry to be inserted into the hash list table. + * @param ctx + * Common context parameter used by callback function. + * + * @return + * registered entry on success, NULL otherwise + */ +struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, + void *ctx); + +/** * Remove an entry from the hash list table. User should guarantee the validity * of the entry. * @@ -400,9 +497,10 @@ int mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry, * Pointer to the hast list table. (not used) * @param entry * Entry to be removed from the hash list table. + * @return + * 0 on entry removed, 1 on entry still referenced. */ -void mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused, - struct mlx5_hlist_entry *entry); +int mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry); /** * Destroy the hash list table, all the entries already inserted into the lists @@ -411,13 +509,8 @@ void mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused, * * @param h * Pointer to the hast list table. - * @param cb - * Callback function for each inserted entry when destroying the hash list. - * @param ctx - * Common context parameter used by callback function for each entry. */ -void mlx5_hlist_destroy(struct mlx5_hlist *h, - mlx5_hlist_destroy_callback_fn cb, void *ctx); +void mlx5_hlist_destroy(struct mlx5_hlist *h); /** * This function allocates non-initialized memory entry from pool. 
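Putting the reworked interface together, a caller would now look roughly like the following sketch. The my_entry structure and the callbacks are illustrative; the mlx5_hlist_* signatures are the ones introduced by this patch.

#include <stdint.h>

struct my_entry {
	struct mlx5_hlist_entry entry; /* must be the first member */
	uint32_t payload;
};

static struct mlx5_hlist_entry *
my_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
{
	struct my_entry *e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e), 0,
					 SOCKET_ID_ANY);

	(void)list;
	(void)key; /* key and ref_cnt are filled in by mlx5_hlist_register(). */
	if (e == NULL)
		return NULL;
	e->payload = *(uint32_t *)ctx;
	return &e->entry;
}

static int
my_match_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry,
	    uint64_t key, void *ctx)
{
	(void)list;
	(void)ctx;
	return entry->key != key; /* 0 means "match" */
}

static void
my_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
{
	(void)list;
	mlx5_free(entry);
}

static void
my_usage(void)
{
	struct mlx5_hlist *h;
	struct mlx5_hlist_entry *e;
	uint32_t ctx = 42;

	h = mlx5_hlist_create("example", 64, sizeof(struct my_entry), 0,
			      my_create_cb, my_match_cb, my_remove_cb);
	if (h == NULL)
		return;
	e = mlx5_hlist_register(h, 1, &ctx); /* creates on first use */
	if (e != NULL)
		mlx5_hlist_unregister(h, e); /* frees on last dereference */
	mlx5_hlist_destroy(h);
}

Note that every register must be paired with an unregister: entries are reference counted and the remove callback only runs when the last reference is dropped.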
From patchwork Tue Oct 27 12:27:08 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82300 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 96D2AA04B5; Tue, 27 Oct 2020 13:32:37 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 1D35B5AB9; Tue, 27 Oct 2020 13:29:08 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 51DC037B0 for ; Tue, 27 Oct 2020 13:28:03 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:27:59 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7B024637; Tue, 27 Oct 2020 14:27:58 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:08 +0800 Message-Id: <1603801650-442376-14-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 13/34] net/mlx5: add flow table tunnel offload attribute X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" As the flow table is shared between the ports in the same shared IB device, a flow table may be created by one port and released by another port. Currently, the tunnel offloading active check in flow table release is based on the port which releases the flow table. Since the creating port and the releasing port may have different tunnel offloading configurations, this can cause an invalid tunnel offloading release or a tunnel offloading resource leak. Add a tunnel offloading attribute to the flow table to indicate whether the flow table owns a tunnel offloading resource, so that wrong tunnel offloading operations are avoided. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow.h | 1 + drivers/net/mlx5/mlx5_flow_dv.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 22349df..eaf8221 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -539,6 +539,7 @@ struct mlx5_flow_tbl_data_entry { const struct mlx5_flow_tunnel *tunnel; uint32_t group_id; bool external; + bool tunnel_offload; /* Tunnel offload table or not. */ }; /* Sub rdma-core actions list. 
*/ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 3ae5a95..e43e88c 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -7981,6 +7981,7 @@ struct field_modify_info modify_tcp[] = { tbl_data->tunnel = tunnel; tbl_data->group_id = group_id; tbl_data->external = external; + tbl_data->tunnel_offload = is_tunnel_offload_active(dev); tbl = &tbl_data->tbl; pos = &tbl_data->entry; if (transfer) @@ -8055,7 +8056,7 @@ struct field_modify_info modify_tcp[] = { mlx5_flow_os_destroy_flow_tbl(tbl->obj); tbl->obj = NULL; - if (is_tunnel_offload_active(dev) && tbl_data->external) { + if (tbl_data->tunnel_offload && tbl_data->external) { struct mlx5_hlist_entry *he; struct mlx5_hlist *tunnel_grp_hash; struct mlx5_flow_tunnel_hub *thub = From patchwork Tue Oct 27 12:27:09 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82302 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 951E1A04B5; Tue, 27 Oct 2020 13:33:25 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id A9D7E6883; Tue, 27 Oct 2020 13:29:11 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 450574F9C for ; Tue, 27 Oct 2020 13:28:08 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:02 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7C024637; Tue, 27 Oct 2020 14:28:00 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:09 +0800 Message-Id: <1603801650-442376-15-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 14/34] net/mlx5: make flow table cache thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion/removal, this patch uses thread safe hash list API for flow table cache hash list. Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5.c | 102 +++------------- drivers/net/mlx5/mlx5.h | 2 +- drivers/net/mlx5/mlx5_flow.c | 2 +- drivers/net/mlx5/mlx5_flow.h | 25 ++++ drivers/net/mlx5/mlx5_flow_dv.c | 262 +++++++++++++++++++++------------------- 5 files changed, 179 insertions(+), 214 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 42ab40b..1d25a8e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1012,7 +1012,7 @@ struct mlx5_dev_ctx_shared * } /** - * Destroy table hash list and all the root entries per domain. + * Destroy table hash list. * * @param[in] priv * Pointer to the private device data structure. 
@@ -1021,46 +1021,9 @@ struct mlx5_dev_ctx_shared * mlx5_free_table_hash_list(struct mlx5_priv *priv) { struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_tbl_data_entry *tbl_data; - union mlx5_flow_tbl_key table_key = { - { - .table_id = 0, - .reserved = 0, - .domain = 0, - .direction = 0, - } - }; - struct mlx5_hlist_entry *pos; if (!sh->flow_tbls) return; - pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); - if (pos) { - tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, - entry); - MLX5_ASSERT(tbl_data); - mlx5_hlist_remove(sh->flow_tbls, pos); - mlx5_free(tbl_data); - } - table_key.direction = 1; - pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); - if (pos) { - tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, - entry); - MLX5_ASSERT(tbl_data); - mlx5_hlist_remove(sh->flow_tbls, pos); - mlx5_free(tbl_data); - } - table_key.direction = 0; - table_key.domain = 1; - pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); - if (pos) { - tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, - entry); - MLX5_ASSERT(tbl_data); - mlx5_hlist_remove(sh->flow_tbls, pos); - mlx5_free(tbl_data); - } mlx5_hlist_destroy(sh->flow_tbls); } @@ -1075,80 +1038,45 @@ struct mlx5_dev_ctx_shared * * Zero on success, positive error code otherwise. */ int -mlx5_alloc_table_hash_list(struct mlx5_priv *priv) +mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused) { + int err = 0; + /* Tables are only used in DV and DR modes. */ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT struct mlx5_dev_ctx_shared *sh = priv->sh; char s[MLX5_HLIST_NAMESIZE]; - int err = 0; MLX5_ASSERT(sh); snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE, - 0, 0, NULL, NULL, NULL); + 0, 0, flow_dv_tbl_create_cb, NULL, + flow_dv_tbl_remove_cb); if (!sh->flow_tbls) { DRV_LOG(ERR, "flow tables with hash creation failed."); err = ENOMEM; return err; } + sh->flow_tbls->ctx = sh; #ifndef HAVE_MLX5DV_DR + struct rte_flow_error error; + struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id]; + /* * In case we have not DR support, the zero tables should be created * because DV expect to see them even if they cannot be created by * RDMA-CORE. 
*/ - union mlx5_flow_tbl_key table_key = { - { - .table_id = 0, - .reserved = 0, - .domain = 0, - .direction = 0, - } - }; - struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO, - sizeof(*tbl_data), 0, - SOCKET_ID_ANY); - - if (!tbl_data) { - err = ENOMEM; - goto error; - } - tbl_data->entry.key = table_key.v64; - err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); - if (err) - goto error; - rte_atomic32_init(&tbl_data->tbl.refcnt); - rte_atomic32_inc(&tbl_data->tbl.refcnt); - table_key.direction = 1; - tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0, - SOCKET_ID_ANY); - if (!tbl_data) { + if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, NULL, 0, 1, &error) || + !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, NULL, 0, 1, &error) || + !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, NULL, 0, 1, &error)) { err = ENOMEM; goto error; } - tbl_data->entry.key = table_key.v64; - err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); - if (err) - goto error; - rte_atomic32_init(&tbl_data->tbl.refcnt); - rte_atomic32_inc(&tbl_data->tbl.refcnt); - table_key.direction = 0; - table_key.domain = 1; - tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0, - SOCKET_ID_ANY); - if (!tbl_data) { - err = ENOMEM; - goto error; - } - tbl_data->entry.key = table_key.v64; - err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); - if (err) - goto error; - rte_atomic32_init(&tbl_data->tbl.refcnt); - rte_atomic32_inc(&tbl_data->tbl.refcnt); return err; error: mlx5_free_table_hash_list(priv); #endif /* HAVE_MLX5DV_DR */ +#endif return err; } diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 057a761..b5a6c95 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -502,7 +502,7 @@ struct mlx5_dev_shared_port { struct { /* Table ID should be at the lowest address. */ uint32_t table_id; /**< ID of the table. */ - uint16_t reserved; /**< must be zero for comparison. */ + uint16_t dummy; /**< Dummy table for DV API. */ uint8_t domain; /**< 1 - FDB, 0 - NIC TX/RX. */ uint8_t direction; /**< 1 - egress, 0 - ingress. */ }; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 95258ab..35e6771 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -7547,7 +7547,7 @@ struct mlx5_meter_domains_infos * union mlx5_flow_tbl_key table_key = { { .table_id = tunnel_id_to_flow_tbl(mbits.table_id), - .reserved = 0, + .dummy = 0, .domain = !!mbits.transfer, .direction = 0, } diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index eaf8221..2e1e0c1 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -385,6 +385,13 @@ enum mlx5_flow_fate_type { MLX5_FLOW_FATE_MAX, }; +/* Hash list callback context */ +struct mlx5_flow_cb_ctx { + struct rte_eth_dev *dev; + struct rte_flow_error *error; + void *data; +}; + /* Matcher PRM representation */ struct mlx5_flow_dv_match_params { size_t size; @@ -524,6 +531,13 @@ struct mlx5_flow_mreg_copy_resource { uint32_t rix_flow; /* Built flow for copy. */ }; +/* Table tunnel parameter. */ +struct mlx5_flow_tbl_tunnel_prm { + const struct mlx5_flow_tunnel *tunnel; + uint32_t group_id; + bool external; +}; + /* Table data structure of the hash organization. 
*/ struct mlx5_flow_tbl_data_entry { struct mlx5_hlist_entry entry; @@ -1397,4 +1411,15 @@ int mlx5_flow_meter_flush(struct rte_eth_dev *dev, int mlx5_shared_action_flush(struct rte_eth_dev *dev); void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id); int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh); + +/* Hash list callbacks for flow tables: */ +struct mlx5_hlist_entry *flow_dv_tbl_create_cb(struct mlx5_hlist *list, + uint64_t key, void *entry_ctx); +void flow_dv_tbl_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry); +struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev, + uint32_t table_id, uint8_t egress, uint8_t transfer, + bool external, const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, uint8_t dummy, struct rte_flow_error *error); + #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index e43e88c..6b1eeb4 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -7917,58 +7917,21 @@ struct field_modify_info modify_tcp[] = { return match_criteria_enable; } - -/** - * Get a flow table. - * - * @param[in, out] dev - * Pointer to rte_eth_dev structure. - * @param[in] table_id - * Table id to use. - * @param[in] egress - * Direction of the table. - * @param[in] transfer - * E-Switch or NIC flow. - * @param[out] error - * pointer to error structure. - * - * @return - * Returns tables resource based on the index, NULL in case of failed. - */ -static struct mlx5_flow_tbl_resource * -flow_dv_tbl_resource_get(struct rte_eth_dev *dev, - uint32_t table_id, uint8_t egress, - uint8_t transfer, - bool external, - const struct mlx5_flow_tunnel *tunnel, - uint32_t group_id, - struct rte_flow_error *error) +struct mlx5_hlist_entry * +flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_tbl_resource *tbl; - union mlx5_flow_tbl_key table_key = { - { - .table_id = table_id, - .reserved = 0, - .domain = !!transfer, - .direction = !!egress, - } - }; - struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls, - table_key.v64, NULL); + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; struct mlx5_flow_tbl_data_entry *tbl_data; + struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data; + struct rte_flow_error *error = ctx->error; + union mlx5_flow_tbl_key key = { .v64 = key64 }; + struct mlx5_flow_tbl_resource *tbl; + void *domain; uint32_t idx = 0; int ret; - void *domain; - if (pos) { - tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, - entry); - tbl = &tbl_data->tbl; - rte_atomic32_inc(&tbl->refcnt); - return tbl; - } tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); if (!tbl_data) { rte_flow_error_set(error, ENOMEM, @@ -7978,19 +7941,20 @@ struct field_modify_info modify_tcp[] = { return NULL; } tbl_data->idx = idx; - tbl_data->tunnel = tunnel; - tbl_data->group_id = group_id; - tbl_data->external = external; + tbl_data->tunnel = tt_prm->tunnel; + tbl_data->group_id = tt_prm->group_id; + tbl_data->external = tt_prm->external; tbl_data->tunnel_offload = is_tunnel_offload_active(dev); tbl = &tbl_data->tbl; - pos = &tbl_data->entry; - if (transfer) + if (key.dummy) + return &tbl_data->entry; + if (key.domain) domain = sh->fdb_domain; - else if (egress) + else if (key.direction) domain = 
sh->tx_domain; else domain = sh->rx_domain; - ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj); + ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj); if (ret) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -7998,12 +7962,7 @@ struct field_modify_info modify_tcp[] = { mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); return NULL; } - /* - * No multi-threads now, but still better to initialize the reference - * count before insert it into the hash list. - */ - rte_atomic32_init(&tbl->refcnt); - if (table_id) { + if (key.table_id) { ret = mlx5_flow_os_create_flow_action_dest_flow_tbl (tbl->obj, &tbl_data->jump.action); if (ret) { @@ -8016,17 +7975,116 @@ struct field_modify_info modify_tcp[] = { return NULL; } } - pos->key = table_key.v64; - ret = !mlx5_hlist_insert(sh->flow_tbls, pos); - if (ret < 0) { - rte_flow_error_set(error, -ret, + return &tbl_data->entry; +} + +/** + * Get a flow table. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in] table_id + * Table id to use. + * @param[in] egress + * Direction of the table. + * @param[in] transfer + * E-Switch or NIC flow. + * @param[in] dummy + * Dummy entry for dv API. + * @param[out] error + * pointer to error structure. + * + * @return + * Returns tables resource based on the index, NULL in case of failed. + */ +struct mlx5_flow_tbl_resource * +flow_dv_tbl_resource_get(struct rte_eth_dev *dev, + uint32_t table_id, uint8_t egress, + uint8_t transfer, + bool external, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, uint8_t dummy, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + union mlx5_flow_tbl_key table_key = { + { + .table_id = table_id, + .dummy = dummy, + .domain = !!transfer, + .direction = !!egress, + } + }; + struct mlx5_flow_tbl_tunnel_prm tt_prm = { + .tunnel = tunnel, + .group_id = group_id, + .external = external, + }; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + .data = &tt_prm, + }; + struct mlx5_hlist_entry *entry; + struct mlx5_flow_tbl_data_entry *tbl_data; + + entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx); + if (!entry) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot insert flow table data entry"); - mlx5_flow_os_destroy_flow_tbl(tbl->obj); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + "cannot get table"); + return NULL; } - rte_atomic32_inc(&tbl->refcnt); - return tbl; + tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + return &tbl_data->tbl; +} + +void +flow_dv_tbl_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_tbl_data_entry *tbl_data = + container_of(entry, struct mlx5_flow_tbl_data_entry, entry); + + MLX5_ASSERT(entry && sh); + if (tbl_data->tbl.obj) + mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj); + if (tbl_data->tunnel_offload && tbl_data->external) { + struct mlx5_hlist_entry *he; + struct mlx5_hlist *tunnel_grp_hash; + struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub; + union tunnel_tbl_key tunnel_key = { + .tunnel_id = tbl_data->tunnel ? + tbl_data->tunnel->tunnel_id : 0, + .group = tbl_data->group_id + }; + union mlx5_flow_tbl_key table_key = { + .v64 = entry->key + }; + uint32_t table_id = table_key.table_id; + + tunnel_grp_hash = tbl_data->tunnel ? 
+ tbl_data->tunnel->groups : + thub->groups; + he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL); + if (he) { + struct tunnel_tbl_entry *tte; + tte = container_of(he, typeof(*tte), hash); + MLX5_ASSERT(tte->flow_table == table_id); + mlx5_hlist_remove(tunnel_grp_hash, he); + mlx5_free(tte); + } + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tunnel_flow_tbl_to_id(table_id)); + DRV_LOG(DEBUG, + "Table_id %#x tunnel %u group %u released.", + table_id, + tbl_data->tunnel ? + tbl_data->tunnel->tunnel_id : 0, + tbl_data->group_id); + } + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); } /** @@ -8051,54 +8109,7 @@ struct field_modify_info modify_tcp[] = { if (!tbl) return 0; - if (rte_atomic32_dec_and_test(&tbl->refcnt)) { - struct mlx5_hlist_entry *pos = &tbl_data->entry; - - mlx5_flow_os_destroy_flow_tbl(tbl->obj); - tbl->obj = NULL; - if (tbl_data->tunnel_offload && tbl_data->external) { - struct mlx5_hlist_entry *he; - struct mlx5_hlist *tunnel_grp_hash; - struct mlx5_flow_tunnel_hub *thub = - mlx5_tunnel_hub(dev); - union tunnel_tbl_key tunnel_key = { - .tunnel_id = tbl_data->tunnel ? - tbl_data->tunnel->tunnel_id : 0, - .group = tbl_data->group_id - }; - union mlx5_flow_tbl_key table_key = { - .v64 = pos->key - }; - uint32_t table_id = table_key.table_id; - - tunnel_grp_hash = tbl_data->tunnel ? - tbl_data->tunnel->groups : - thub->groups; - he = mlx5_hlist_lookup(tunnel_grp_hash, - tunnel_key.val, NULL); - if (he) { - struct tunnel_tbl_entry *tte; - tte = container_of(he, typeof(*tte), hash); - MLX5_ASSERT(tte->flow_table == table_id); - mlx5_hlist_remove(tunnel_grp_hash, he); - mlx5_free(tte); - } - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - tunnel_flow_tbl_to_id(table_id)); - DRV_LOG(DEBUG, - "port %u release table_id %#x tunnel %u group %u", - dev->data->port_id, table_id, - tbl_data->tunnel ? - tbl_data->tunnel->tunnel_id : 0, - tbl_data->group_id); - } - /* remove the entry from the hash list and free memory. 
*/ - mlx5_hlist_remove(sh->flow_tbls, pos); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], - tbl_data->idx); - return 0; - } - return 1; + return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry); } /** @@ -8137,7 +8148,7 @@ struct field_modify_info modify_tcp[] = { int ret; tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, - key->domain, false, NULL, 0, error); + key->domain, false, NULL, 0, 0, error); if (!tbl) return -rte_errno; /* No need to refill the error info */ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); @@ -8629,7 +8640,7 @@ struct field_modify_info modify_tcp[] = { /* Create normal path table level */ tbl = flow_dv_tbl_resource_get(dev, next_ft_id, attr->egress, attr->transfer, - dev_flow->external, NULL, 0, error); + dev_flow->external, NULL, 0, 0, error); if (!tbl) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -9309,7 +9320,7 @@ struct field_modify_info modify_tcp[] = { tbl = flow_dv_tbl_resource_get(dev, table, attr->egress, attr->transfer, !!dev_flow->external, tunnel, - attr->group, error); + attr->group, 0, error); if (!tbl) return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -9591,7 +9602,7 @@ struct field_modify_info modify_tcp[] = { tbl = flow_dv_tbl_resource_get(dev, table, attr->egress, attr->transfer, !!dev_flow->external, - tunnel, jump_group, + tunnel, jump_group, 0, error); if (!tbl) return rte_flow_error_set @@ -11541,7 +11552,7 @@ struct field_modify_info modify_tcp[] = { /* Create the meter table with METER level. */ dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER, egress, transfer, false, NULL, 0, - &error); + 0, &error); if (!dtb->tbl) { DRV_LOG(ERR, "Failed to create meter policer table."); return -1; @@ -11550,7 +11561,7 @@ struct field_modify_info modify_tcp[] = { dtb->sfx_tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_SUFFIX, egress, transfer, false, NULL, 0, - &error); + 0, &error); if (!dtb->sfx_tbl) { DRV_LOG(ERR, "Failed to create meter suffix table."); return -1; @@ -11869,10 +11880,11 @@ struct field_modify_info modify_tcp[] = { void *flow = NULL; int i, ret = -1; - tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, NULL); + tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL); if (!tbl) goto err; - dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, NULL); + dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, + NULL, 0, 0, NULL); if (!dest_tbl) goto err; dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); From patchwork Tue Oct 27 12:27:10 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82301 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 57FE4A04B5; Tue, 27 Oct 2020 13:33:03 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E96142E8D; Tue, 27 Oct 2020 13:29:09 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 44F814F96 for ; Tue, 27 Oct 2020 13:28:08 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:04 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx 
(8.13.8/8.13.8) with ESMTP id 09RCRZ7D024637; Tue, 27 Oct 2020 14:28:02 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, stable@dpdk.org Date: Tue, 27 Oct 2020 20:27:10 +0800 Message-Id: <1603801650-442376-16-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 15/34] net/mlx5: fix redundant Direct Verbs resources allocate X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Flow table, tag, header modify and header reformat resources are supported only in DV mode. For OFED versions that do not support these, creating the related redundant DV resources wastes memory. Wrap the code section in the HAVE_IBV_FLOW_DV_SUPPORT macro to avoid the redundant resource allocation. Fixes: 2eb4d0107acc ("net/mlx5: refactor PCI probing on Linux") Cc: stable@dpdk.org Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 4db5d33..db32b39 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -225,7 +225,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) { struct mlx5_dev_ctx_shared *sh = priv->sh; - char s[MLX5_HLIST_NAMESIZE]; + char s[MLX5_HLIST_NAMESIZE] __rte_unused; int err; MLX5_ASSERT(sh && sh->refcnt); @@ -233,7 +233,9 @@ return 0; err = mlx5_alloc_table_hash_list(priv); if (err) - return err; + goto error; + /* The resources below are only valid with DV support. */ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT /* Create tags hash list table. 
*/ snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0, @@ -260,6 +262,7 @@ err = ENOMEM; goto error; } +#endif #ifdef HAVE_MLX5DV_DR void *domain; From patchwork Tue Oct 27 12:27:11 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82303 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id C6091A04B5; Tue, 27 Oct 2020 13:33:45 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 63E6369FA; Tue, 27 Oct 2020 13:29:13 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 8477E54AE for ; Tue, 27 Oct 2020 13:28:07 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:06 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7E024637; Tue, 27 Oct 2020 14:28:04 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:11 +0800 Message-Id: <1603801650-442376-17-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 16/34] net/mlx5: make flow tag list thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion, this patch updates flow tag list to use thread safe hash list with write-most mode. Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 5 ++- drivers/net/mlx5/mlx5_flow.h | 5 +++ drivers/net/mlx5/mlx5_flow_dv.c | 97 +++++++++++++++++++--------------------- 3 files changed, 56 insertions(+), 51 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index db32b39..540e1a7 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -239,12 +239,15 @@ /* Create tags hash list table. 
*/ snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0, - 0, NULL, NULL, NULL); + MLX5_HLIST_WRITE_MOST, + flow_dv_tag_create_cb, NULL, + flow_dv_tag_remove_cb); if (!sh->tag_table) { DRV_LOG(ERR, "tags with hash creation failed."); err = ENOMEM; goto error; } + sh->tag_table->ctx = sh; snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name); sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ, 0, 0, NULL, NULL, NULL); diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 2e1e0c1..007dd31 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -1422,4 +1422,9 @@ struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev, bool external, const struct mlx5_flow_tunnel *tunnel, uint32_t group_id, uint8_t dummy, struct rte_flow_error *error); +struct mlx5_hlist_entry *flow_dv_tag_create_cb(struct mlx5_hlist *list, + uint64_t key, void *cb_ctx); +void flow_dv_tag_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry); + #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 6b1eeb4..578019a 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8215,6 +8215,35 @@ struct mlx5_flow_tbl_resource * return 0; } +struct mlx5_hlist_entry * +flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct rte_flow_error *error = ctx; + struct mlx5_flow_dv_tag_resource *entry; + uint32_t idx = 0; + int ret; + + entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx); + if (!entry) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + return NULL; + } + entry->idx = idx; + ret = mlx5_flow_os_create_flow_action_tag(key, + &entry->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx); + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + return NULL; + } + return &entry->entry; +} + /** * Find existing tag resource or create and register a new one. * @@ -8238,54 +8267,32 @@ struct mlx5_flow_tbl_resource * struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_tag_resource *cache_resource; struct mlx5_hlist_entry *entry; - int ret; - /* Lookup a matching resource from cache. */ - entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24, NULL); + entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error); if (entry) { cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); - rte_atomic32_inc(&cache_resource->refcnt); dev_flow->handle->dvh.rix_tag = cache_resource->idx; dev_flow->dv.tag_resource = cache_resource; - DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); return 0; } - /* Register new resource. 
*/ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], - &dev_flow->handle->dvh.rix_tag); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - cache_resource->entry.key = (uint64_t)tag_be24; - ret = mlx5_flow_os_create_flow_action_tag(tag_be24, - &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { - mlx5_flow_os_destroy_flow_action(cache_resource->action); - mlx5_free(cache_resource); - return rte_flow_error_set(error, EEXIST, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot insert tag"); - } - dev_flow->dv.tag_resource = cache_resource; - DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - return 0; + return -rte_errno; +} + +void +flow_dv_tag_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_dv_tag_resource *tag = + container_of(entry, struct mlx5_flow_dv_tag_resource, entry); + + MLX5_ASSERT(tag && sh && tag->action); + claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); + DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx); } /** @@ -8304,24 +8311,14 @@ struct mlx5_flow_tbl_resource * uint32_t tag_idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_tag_resource *tag; tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); if (!tag) return 0; DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", - dev->data->port_id, (void *)tag, - rte_atomic32_read(&tag->refcnt)); - if (rte_atomic32_dec_and_test(&tag->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); - mlx5_hlist_remove(sh->tag_table, &tag->entry); - DRV_LOG(DEBUG, "port %u tag %p: removed", - dev->data->port_id, (void *)tag); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); - return 0; - } - return 1; + dev->data->port_id, (void *)tag, tag->entry.ref_cnt); + return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry); } /** From patchwork Tue Oct 27 12:27:12 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82305 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5EE34A04B5; Tue, 27 Oct 2020 13:34:39 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 66D6A6CA1; Tue, 27 Oct 2020 13:29:16 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 8A57858CD for ; Tue, 27 Oct 2020 13:28:13 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:08 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7F024637; Tue, 27 Oct 2020 14:28:06 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , 
Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:12 +0800 Message-Id: <1603801650-442376-18-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 17/34] net/mlx5: make flow modify action list thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion, this patch updates flow modify action list to use thread safe hash list with write-most mode. Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 7 +- drivers/net/mlx5/mlx5_flow.h | 14 ++- drivers/net/mlx5/mlx5_flow_dv.c | 194 +++++++++++++++++---------------------- 3 files changed, 102 insertions(+), 113 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 540e1a7..eb63bcd 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -250,12 +250,17 @@ sh->tag_table->ctx = sh; snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name); sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ, - 0, 0, NULL, NULL, NULL); + 0, MLX5_HLIST_WRITE_MOST | + MLX5_HLIST_DIRECT_KEY, + flow_dv_modify_create_cb, + flow_dv_modify_match_cb, + flow_dv_modify_remove_cb); if (!sh->modify_cmds) { DRV_LOG(ERR, "hdr modify hash creation failed"); err = ENOMEM; goto error; } + sh->modify_cmds->ctx = sh; snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name); sh->encaps_decaps = mlx5_hlist_create(s, MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ, diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 007dd31..d54739f 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -468,10 +468,8 @@ struct mlx5_flow_dv_tag_resource { /* Modify resource structure */ struct mlx5_flow_dv_modify_hdr_resource { struct mlx5_hlist_entry entry; - /* Pointer to next element. */ - rte_atomic32_t refcnt; /**< Reference counter. */ - void *action; - /**< Modify header action object. */ + void *action; /**< Modify header action object. */ + /* Key area for hash list matching: */ uint8_t ft_type; /**< Flow table type, Rx or Tx. */ uint32_t actions_num; /**< Number of modification actions. */ uint64_t flags; /**< Flags for RDMA API. */ @@ -1427,4 +1425,12 @@ struct mlx5_hlist_entry *flow_dv_tag_create_cb(struct mlx5_hlist *list, void flow_dv_tag_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry); +int flow_dv_modify_match_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry, + uint64_t key, void *cb_ctx); +struct mlx5_hlist_entry *flow_dv_modify_create_cb(struct mlx5_hlist *list, + uint64_t key, void *ctx); +void flow_dv_modify_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry); + #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 578019a..3274a3b 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -4214,35 +4214,75 @@ struct field_modify_info modify_tcp[] = { /** * Match modify-header resource. * + * @param list + * Pointer to the hash list. 
* @param entry * Pointer to exist resource entry object. + * @param key + * Key of the new entry. * @param ctx * Pointer to new modify-header resource. * * @return - * 0 on matching, -1 otherwise. + * 0 on matching, non-zero otherwise. */ -static int -flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx) +int +flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key __rte_unused, void *cb_ctx) { - struct mlx5_flow_dv_modify_hdr_resource *resource; - struct mlx5_flow_dv_modify_hdr_resource *cache_resource; - uint32_t actions_len; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data; + struct mlx5_flow_dv_modify_hdr_resource *resource = + container_of(entry, typeof(*resource), entry); + uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type); - resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx; - cache_resource = container_of(entry, - struct mlx5_flow_dv_modify_hdr_resource, - entry); - actions_len = resource->actions_num * sizeof(resource->actions[0]); - if (resource->entry.key == cache_resource->entry.key && - resource->ft_type == cache_resource->ft_type && - resource->actions_num == cache_resource->actions_num && - resource->flags == cache_resource->flags && - !memcmp((const void *)resource->actions, - (const void *)cache_resource->actions, - actions_len)) - return 0; - return -1; + key_len += ref->actions_num * sizeof(ref->actions[0]); + return ref->actions_num != resource->actions_num || + memcmp(&ref->ft_type, &resource->ft_type, key_len); +} + +struct mlx5_hlist_entry * +flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5dv_dr_domain *ns; + struct mlx5_flow_dv_modify_hdr_resource *entry; + struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data; + int ret; + uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]); + uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type); + + entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0, + SOCKET_ID_ANY); + if (!entry) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + return NULL; + } + rte_memcpy(&entry->ft_type, + RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), + key_len + data_len); + if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + ns = sh->fdb_domain; + else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) + ns = sh->tx_domain; + else + ns = sh->rx_domain; + ret = mlx5_flow_os_create_flow_action_modify_header + (sh->ctx, ns, entry, + data_len, &entry->action); + if (ret) { + mlx5_free(entry); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create modification action"); + return NULL; + } + return &entry->entry; } /** @@ -4453,19 +4493,14 @@ struct field_modify_info modify_tcp[] = { { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_modify_hdr_resource *cache_resource; - struct mlx5dv_dr_domain *ns; - uint32_t actions_len; + uint32_t key_len = sizeof(*resource) - + offsetof(typeof(*resource), ft_type) + + resource->actions_num * sizeof(resource->actions[0]); struct mlx5_hlist_entry *entry; - union mlx5_flow_modify_hdr_key hdr_mod_key = { - { - .ft_type = resource->ft_type, - .actions_num = resource->actions_num, - .group = 
dev_flow->dv.group, - .cksum = 0, - } + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, }; - int ret; resource->flags = dev_flow->dv.group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; @@ -4474,66 +4509,12 @@ struct field_modify_info modify_tcp[] = { return rte_flow_error_set(error, EOVERFLOW, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "too many modify header items"); - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - ns = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) - ns = sh->tx_domain; - else - ns = sh->rx_domain; - /* Lookup a matching resource from cache. */ - actions_len = resource->actions_num * sizeof(resource->actions[0]); - hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0); - resource->entry.key = hdr_mod_key.v64; - entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key, - flow_dv_modify_hdr_resource_match, - (void *)resource); - if (entry) { - cache_resource = container_of(entry, - struct mlx5_flow_dv_modify_hdr_resource, - entry); - DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.modify_hdr = cache_resource; - return 0; - - } - /* Register new modify-header resource. */ - cache_resource = mlx5_malloc(MLX5_MEM_ZERO, - sizeof(*cache_resource) + actions_len, 0, - SOCKET_ID_ANY); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - rte_memcpy(cache_resource->actions, resource->actions, actions_len); - ret = mlx5_flow_os_create_flow_action_modify_header - (sh->ctx, ns, cache_resource, - actions_len, &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry, - flow_dv_modify_hdr_resource_match, - (void *)cache_resource)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - mlx5_free(cache_resource); - return rte_flow_error_set(error, EEXIST, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "action exist"); - } - dev_flow->handle->dvh.modify_hdr = cache_resource; - DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0); + entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx); + if (!entry) + return -rte_errno; + resource = container_of(entry, typeof(*resource), entry); + dev_flow->handle->dvh.modify_hdr = resource; return 0; } @@ -10487,6 +10468,17 @@ struct mlx5_hlist_entry * return flow_dv_tbl_resource_release(dev, &tbl_data->tbl); } +void +flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_flow_dv_modify_hdr_resource *res = + container_of(entry, typeof(*res), entry); + + claim_zero(mlx5_flow_os_destroy_flow_action(res->action)); + mlx5_free(entry); +} + /** * Release a modify-header resource. 
* @@ -10503,24 +10495,10 @@ struct mlx5_hlist_entry * struct mlx5_flow_handle *handle) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_dv_modify_hdr_resource *cache_resource = - handle->dvh.modify_hdr; + struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr; - MLX5_ASSERT(cache_resource->action); - DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - mlx5_hlist_remove(priv->sh->modify_cmds, - &cache_resource->entry); - mlx5_free(cache_resource); - DRV_LOG(DEBUG, "modify-header resource %p: removed", - (void *)cache_resource); - return 0; - } - return 1; + MLX5_ASSERT(entry->action); + return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry); } /** From patchwork Tue Oct 27 12:27:13 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82304 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0AB26A04B5; Tue, 27 Oct 2020 13:34:18 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E5D626A16; Tue, 27 Oct 2020 13:29:14 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 62A6B58C4 for ; Tue, 27 Oct 2020 13:28:12 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:09 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7G024637; Tue, 27 Oct 2020 14:28:08 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:13 +0800 Message-Id: <1603801650-442376-19-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 18/34] net/mlx5: remove unused mreg copy code X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" After the non-cache mode feature was implemented, flows can only be created when the port is started. There is no need to check whether the mreg flows are created while the port is stopped, and applying the mreg flows after port start will never happen either. This commit removes the now unused mreg copy code. 
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5.h | 2 - drivers/net/mlx5/mlx5_flow.c | 185 ------------------------------------------- drivers/net/mlx5/mlx5_flow.h | 2 - 3 files changed, 189 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index b5a6c95..ffc8b38 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -1096,8 +1096,6 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg); -int mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list); -void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list); int mlx5_flow_start_default(struct rte_eth_dev *dev); void mlx5_flow_stop_default(struct rte_eth_dev *dev); int mlx5_flow_verify(struct rte_eth_dev *dev); diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 35e6771..c3dbf3e 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -3230,28 +3230,6 @@ struct mlx5_flow_tunnel_info { } /** - * Flow driver remove API. This abstracts calling driver specific functions. - * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow - * on device. All the resources of the flow should be freed by calling - * flow_drv_destroy(). - * - * @param[in] dev - * Pointer to Ethernet device. - * @param[in, out] flow - * Pointer to flow structure. - */ -static inline void -flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) -{ - const struct mlx5_flow_driver_ops *fops; - enum mlx5_flow_drv_type type = flow->drv_type; - - MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); - fops = flow_get_drv_ops(type); - fops->remove(dev, flow); -} - -/** * Flow driver destroy API. This abstracts calling driver specific functions. * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow * on device and releases resources of the flow. @@ -3848,19 +3826,6 @@ struct rte_flow_shared_action * flow->rix_mreg_copy); if (!mcp_res || !priv->mreg_cp_tbl) return; - if (flow->copy_applied) { - MLX5_ASSERT(mcp_res->appcnt); - flow->copy_applied = 0; - --mcp_res->appcnt; - if (!mcp_res->appcnt) { - struct rte_flow *mcp_flow = mlx5_ipool_get - (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], - mcp_res->rix_flow); - - if (mcp_flow) - flow_drv_remove(dev, mcp_flow); - } - } /* * We do not check availability of metadata registers here, * because copy resources are not allocated in this case. @@ -3875,81 +3840,6 @@ struct rte_flow_shared_action * } /** - * Start flow in RX_CP_TBL. - * - * @param dev - * Pointer to Ethernet device. - * @flow - * Parent flow for wich copying is provided. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ -static int -flow_mreg_start_copy_action(struct rte_eth_dev *dev, - struct rte_flow *flow) -{ - struct mlx5_flow_mreg_copy_resource *mcp_res; - struct mlx5_priv *priv = dev->data->dev_private; - int ret; - - if (!flow->rix_mreg_copy || flow->copy_applied) - return 0; - mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], - flow->rix_mreg_copy); - if (!mcp_res) - return 0; - if (!mcp_res->appcnt) { - struct rte_flow *mcp_flow = mlx5_ipool_get - (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], - mcp_res->rix_flow); - - if (mcp_flow) { - ret = flow_drv_apply(dev, mcp_flow, NULL); - if (ret) - return ret; - } - } - ++mcp_res->appcnt; - flow->copy_applied = 1; - return 0; -} - -/** - * Stop flow in RX_CP_TBL. - * - * @param dev - * Pointer to Ethernet device. 
- * @flow - * Parent flow for wich copying is provided. - */ -static void -flow_mreg_stop_copy_action(struct rte_eth_dev *dev, - struct rte_flow *flow) -{ - struct mlx5_flow_mreg_copy_resource *mcp_res; - struct mlx5_priv *priv = dev->data->dev_private; - - if (!flow->rix_mreg_copy || !flow->copy_applied) - return; - mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], - flow->rix_mreg_copy); - if (!mcp_res) - return; - MLX5_ASSERT(mcp_res->appcnt); - --mcp_res->appcnt; - flow->copy_applied = 0; - if (!mcp_res->appcnt) { - struct rte_flow *mcp_flow = mlx5_ipool_get - (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], - mcp_res->rix_flow); - - if (mcp_flow) - flow_drv_remove(dev, mcp_flow); - } -} - -/** * Remove the default copy action from RX_CP_TBL. * * @param dev @@ -4064,10 +3954,6 @@ struct rte_flow_shared_action * if (!mcp_res) return -rte_errno; flow->rix_mreg_copy = mcp_res->idx; - if (dev->data->dev_started) { - mcp_res->appcnt++; - flow->copy_applied = 1; - } return 0; case RTE_FLOW_ACTION_TYPE_MARK: mark = (const struct rte_flow_action_mark *) @@ -4077,10 +3963,6 @@ struct rte_flow_shared_action * if (!mcp_res) return -rte_errno; flow->rix_mreg_copy = mcp_res->idx; - if (dev->data->dev_started) { - mcp_res->appcnt++; - flow->copy_applied = 1; - } return 0; default: break; @@ -6004,73 +5886,6 @@ struct rte_flow * } /** - * Remove all flows. - * - * @param dev - * Pointer to Ethernet device. - * @param list - * Pointer to the Indexed flow list. - */ -void -mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = NULL; - uint32_t idx; - - ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, - flow, next) { - flow_drv_remove(dev, flow); - flow_mreg_stop_copy_action(dev, flow); - } - flow_mreg_del_default_copy_action(dev); - flow_rxq_flags_clear(dev); -} - -/** - * Add all flows. - * - * @param dev - * Pointer to Ethernet device. - * @param list - * Pointer to the Indexed flow list. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ -int -mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = NULL; - struct rte_flow_error error; - uint32_t idx; - int ret = 0; - - /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ - ret = flow_mreg_add_default_copy_action(dev, &error); - if (ret < 0) - return -rte_errno; - /* Apply Flows created by application. */ - ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, - flow, next) { - ret = flow_mreg_start_copy_action(dev, flow); - if (ret < 0) - goto error; - ret = flow_drv_apply(dev, flow, &error); - if (ret < 0) - goto error; - flow_rxq_flags_set(dev, flow); - } - return 0; -error: - ret = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_flow_stop(dev, list); - rte_errno = ret; /* Restore rte_errno. */ - return -rte_errno; -} - -/** * Stop all default actions for flows. * * @param dev diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index d54739f..11bdb72 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -524,7 +524,6 @@ struct mlx5_flow_mreg_copy_resource { LIST_ENTRY(mlx5_flow_mreg_copy_resource) next; /* List entry for device flows. */ uint32_t refcnt; /* Reference counter. */ - uint32_t appcnt; /* Apply/Remove counter. */ uint32_t idx; uint32_t rix_flow; /* Built flow for copy. 
*/ }; @@ -1049,7 +1048,6 @@ struct rte_flow { uint32_t drv_type:2; /**< Driver type. */ uint32_t fdir:1; /**< Identifier of associated FDIR if any. */ uint32_t tunnel:1; - uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */ uint32_t meter:16; /**< Holds flow meter id. */ uint32_t rix_mreg_copy; /**< Index to metadata register copy table resource. */ From patchwork Tue Oct 27 12:27:14 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82307 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7B50FA04B5; Tue, 27 Oct 2020 13:35:22 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 6FB9872E3; Tue, 27 Oct 2020 13:29:19 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 674912BFF for ; Tue, 27 Oct 2020 13:28:17 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:12 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7H024637; Tue, 27 Oct 2020 14:28:10 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:14 +0800 Message-Id: <1603801650-442376-20-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 19/34] net/mlx5: make metadata copy flow list thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion, this patch updates metadata copy flow list to use thread safe hash list. Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 5 +- drivers/net/mlx5/mlx5_flow.c | 162 ++++++++++++++++++++++----------------- drivers/net/mlx5/mlx5_flow.h | 6 +- 3 files changed, 99 insertions(+), 74 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index eb63bcd..46dbc18 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1488,11 +1488,14 @@ priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, MLX5_FLOW_MREG_HTABLE_SZ, 0, 0, - NULL, NULL, NULL); + flow_dv_mreg_create_cb, + NULL, + flow_dv_mreg_remove_cb); if (!priv->mreg_cp_tbl) { err = ENOMEM; goto error; } + priv->mreg_cp_tbl->ctx = eth_dev; } mlx5_flow_counter_mode_config(eth_dev); return eth_dev; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index c3dbf3e..f7f4faa 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -3648,36 +3648,18 @@ struct rte_flow_shared_action * flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, uint32_t flow_idx); -/** - * Add a flow of copying flow metadata registers in RX_CP_TBL. 
- * - * As mark_id is unique, if there's already a registered flow for the mark_id, - * return by increasing the reference counter of the resource. Otherwise, create - * the resource (mcp_res) and flow. - * - * Flow looks like, - * - If ingress port is ANY and reg_c[1] is mark_id, - * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. - * - * For default flow (zero mark_id), flow is like, - * - If ingress port is ANY, - * reg_b := reg_c[0] and jump to RX_ACT_TBL. - * - * @param dev - * Pointer to Ethernet device. - * @param mark_id - * ID of MARK action, zero means default flow for META. - * @param[out] error - * Perform verbose error reporting if not NULL. - * - * @return - * Associated resource on success, NULL otherwise and rte_errno is set. - */ -static struct mlx5_flow_mreg_copy_resource * -flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, - struct rte_flow_error *error) +struct mlx5_hlist_entry * +flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key, + void *cb_ctx) { + struct rte_eth_dev *dev = list->ctx; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct rte_flow_error *error = ctx->error; + uint32_t idx = 0; + int ret; + uint32_t mark_id = key; struct rte_flow_attr attr = { .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, .ingress = 1, @@ -3701,9 +3683,6 @@ struct rte_flow_shared_action * struct rte_flow_action actions[] = { [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; - struct mlx5_flow_mreg_copy_resource *mcp_res; - uint32_t idx = 0; - int ret; /* Fill the register fileds in the flow. */ ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); @@ -3714,17 +3693,6 @@ struct rte_flow_shared_action * if (ret < 0) return NULL; cp_mreg.src = ret; - /* Check if already registered. */ - MLX5_ASSERT(priv->mreg_cp_tbl); - mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, NULL); - if (mcp_res) { - /* For non-default rule. */ - if (mark_id != MLX5_DEFAULT_COPY_ID) - mcp_res->refcnt++; - MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID || - mcp_res->refcnt == 1); - return mcp_res; - } /* Provide the full width of FLAG specific value. */ if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) tag_spec.data = MLX5_FLOW_MARK_DEFAULT; @@ -3789,20 +3757,69 @@ struct rte_flow_shared_action * */ mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items, actions, false, error); - if (!mcp_res->rix_flow) - goto error; - mcp_res->refcnt++; - mcp_res->hlist_ent.key = mark_id; - ret = !mlx5_hlist_insert(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - MLX5_ASSERT(!ret); - if (ret) - goto error; - return mcp_res; -error: - if (mcp_res->rix_flow) - flow_list_destroy(dev, NULL, mcp_res->rix_flow); + if (!mcp_res->rix_flow) { + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx); + return NULL; + } + return &mcp_res->hlist_ent; +} + +/** + * Add a flow of copying flow metadata registers in RX_CP_TBL. + * + * As mark_id is unique, if there's already a registered flow for the mark_id, + * return by increasing the reference counter of the resource. Otherwise, create + * the resource (mcp_res) and flow. + * + * Flow looks like, + * - If ingress port is ANY and reg_c[1] is mark_id, + * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * For default flow (zero mark_id), flow is like, + * - If ingress port is ANY, + * reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * @param dev + * Pointer to Ethernet device. 
+ * @param mark_id + * ID of MARK action, zero means default flow for META. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * Associated resource on success, NULL otherwise and rte_errno is set. + */ +static struct mlx5_flow_mreg_copy_resource * +flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hlist_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + }; + + /* Check if already registered. */ + MLX5_ASSERT(priv->mreg_cp_tbl); + entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx); + if (!entry) + return NULL; + return container_of(entry, struct mlx5_flow_mreg_copy_resource, + hlist_ent); +} + +void +flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = + container_of(entry, typeof(*mcp_res), hlist_ent); + struct rte_eth_dev *dev = list->ctx; + struct mlx5_priv *priv = dev->data->dev_private; + + MLX5_ASSERT(mcp_res->rix_flow); + flow_list_destroy(dev, NULL, mcp_res->rix_flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); - return NULL; } /** @@ -3826,47 +3843,42 @@ struct rte_flow_shared_action * flow->rix_mreg_copy); if (!mcp_res || !priv->mreg_cp_tbl) return; - /* - * We do not check availability of metadata registers here, - * because copy resources are not allocated in this case. - */ - if (--mcp_res->refcnt) - return; MLX5_ASSERT(mcp_res->rix_flow); - flow_list_destroy(dev, NULL, mcp_res->rix_flow); - mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); + mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent); flow->rix_mreg_copy = 0; } /** * Remove the default copy action from RX_CP_TBL. * + * This function is called from mlx5_dev_start(). Thread safety + * is not guaranteed. + * * @param dev * Pointer to Ethernet device. */ static void flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) { - struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_hlist_entry *entry; struct mlx5_priv *priv = dev->data->dev_private; /* Check if default flow is registered. */ if (!priv->mreg_cp_tbl) return; - mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, - MLX5_DEFAULT_COPY_ID, NULL); - if (!mcp_res) + entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, + MLX5_DEFAULT_COPY_ID, NULL); + if (!entry) return; - MLX5_ASSERT(mcp_res->rix_flow); - flow_list_destroy(dev, NULL, mcp_res->rix_flow); - mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); + mlx5_hlist_unregister(priv->mreg_cp_tbl, entry); } /** * Add the default copy action in RX_CP_TBL. * + * This function is called from mlx5_dev_start(). Thread safety + * is not guaranteed. + * + * @param dev * Pointer to Ethernet device. * @param[out] error @@ -3888,6 +3900,12 @@ struct rte_flow_shared_action * !mlx5_flow_ext_mreg_supported(dev) || !priv->sh->dv_regc0_mask) return 0; + /* + * Adding the default mreg copy flow may happen multiple times, but + * the removal in stop happens only once. Avoid registering it twice.
+ */ + if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL)) + return 0; mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); if (!mcp_res) return -rte_errno; diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 11bdb72..b44789a 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -523,7 +523,6 @@ struct mlx5_flow_mreg_copy_resource { struct mlx5_hlist_entry hlist_ent; LIST_ENTRY(mlx5_flow_mreg_copy_resource) next; /* List entry for device flows. */ - uint32_t refcnt; /* Reference counter. */ uint32_t idx; uint32_t rix_flow; /* Built flow for copy. */ }; @@ -1431,4 +1430,9 @@ struct mlx5_hlist_entry *flow_dv_modify_create_cb(struct mlx5_hlist *list, void flow_dv_modify_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry); +struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list, + uint64_t key, void *ctx); +void flow_dv_mreg_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry); + #endif /* RTE_PMD_MLX5_FLOW_H_ */ From patchwork Tue Oct 27 12:27:15 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82308 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 13AE9A04B5; Tue, 27 Oct 2020 13:35:49 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id B2C1672EC; Tue, 27 Oct 2020 13:29:20 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 6FACC4C6B for ; Tue, 27 Oct 2020 13:28:16 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:13 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7I024637; Tue, 27 Oct 2020 14:28:12 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:15 +0800 Message-Id: <1603801650-442376-21-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 20/34] net/mlx5: make header reformat action thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" To support multi-thread flow insertion, this patch updates flow header reformat action list to use thread safe hash list with write-most mode. 
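The write-most pattern referenced here works as follows: lookups take a read lock so concurrent readers proceed in parallel, and only a miss escalates to the write lock, where the lookup is repeated before the creation callback runs, so racing inserters of the same key converge on one entry. The sketch below is a minimal, self-contained model of that register operation; it is not the driver's mlx5_hlist implementation, the ex_list/ex_entry types and function names are invented for illustration, and the convention that the match callback returns 0 on a hit is taken from the callbacks in this series.

#include <stdint.h>
#include <sys/queue.h>
#include <rte_rwlock.h>

/* Illustrative entry and list types; not the driver's mlx5_hlist. */
struct ex_entry {
	LIST_ENTRY(ex_entry) next;
	uint64_t key;
	uint32_t ref_cnt;
};

struct ex_list {
	rte_rwlock_t lock;
	LIST_HEAD(, ex_entry) head;
	/* Same contract as the series' callbacks: match returns 0 on a hit. */
	int (*cb_match)(struct ex_entry *entry, uint64_t key, void *ctx);
	struct ex_entry *(*cb_create)(uint64_t key, void *ctx);
};

static struct ex_entry *
ex_lookup(struct ex_list *list, uint64_t key, void *ctx)
{
	struct ex_entry *entry;

	LIST_FOREACH(entry, &list->head, next)
		if (!list->cb_match(entry, key, ctx)) {
			/* Atomic: concurrent readers may take refs in parallel. */
			__atomic_add_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED);
			return entry;
		}
	return NULL;
}

static struct ex_entry *
ex_register(struct ex_list *list, uint64_t key, void *ctx)
{
	struct ex_entry *entry;

	/* Fast path: shared read lock, no writer serialization. */
	rte_rwlock_read_lock(&list->lock);
	entry = ex_lookup(list, key, ctx);
	rte_rwlock_read_unlock(&list->lock);
	if (entry)
		return entry;
	/* Slow path: exclusive lock, then re-check, since another thread
	 * may have inserted the same key while the lock was dropped. */
	rte_rwlock_write_lock(&list->lock);
	entry = ex_lookup(list, key, ctx);
	if (!entry) {
		entry = list->cb_create(key, ctx);
		if (entry) {
			entry->key = key;
			entry->ref_cnt = 1;
			LIST_INSERT_HEAD(&list->head, entry, next);
		}
	}
	rte_rwlock_write_unlock(&list->lock);
	return entry;
}

The double lookup is the price of dropping the read lock before escalating; it stays cheap for the short, read-mostly tables these patches target.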
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 7 +- drivers/net/mlx5/mlx5_flow.h | 7 ++ drivers/net/mlx5/mlx5_flow_dv.c | 184 ++++++++++++++++++++++----------------- 3 files changed, 116 insertions(+), 82 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 46dbc18..d017c23 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -264,12 +264,17 @@ snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name); sh->encaps_decaps = mlx5_hlist_create(s, MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ, - 0, 0, NULL, NULL, NULL); + 0, MLX5_HLIST_DIRECT_KEY | + MLX5_HLIST_WRITE_MOST, + flow_dv_encap_decap_create_cb, + flow_dv_encap_decap_match_cb, + flow_dv_encap_decap_remove_cb); if (!sh->encaps_decaps) { DRV_LOG(ERR, "encap decap hash creation failed"); err = ENOMEM; goto error; } + sh->encaps_decaps->ctx = sh; #endif #ifdef HAVE_MLX5DV_DR void *domain; diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index b44789a..4bc540b 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -1435,4 +1435,11 @@ struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list, void flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry); +int flow_dv_encap_decap_match_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry, + uint64_t key, void *cb_ctx); +struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list, + uint64_t key, void *cb_ctx); +void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry); #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 3274a3b..1bcbe38 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -2783,21 +2783,27 @@ struct field_modify_info modify_tcp[] = { /** * Match encap_decap resource. * + * @param list + * Pointer to the hash list. * @param entry * Pointer to exist resource entry object. - * @param ctx + * @param key + * Key of the new entry. + * @param cb_ctx + * Pointer to new encap_decap resource. * * @return - * 0 on matching, -1 otherwise. + * 0 on matching, non-zero otherwise. */ -static int -flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx) +int +flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused, + struct mlx5_hlist_entry *entry, + uint64_t key __rte_unused, void *cb_ctx) { - struct mlx5_flow_dv_encap_decap_resource *resource; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data; struct mlx5_flow_dv_encap_decap_resource *cache_resource; - resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx; cache_resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource, entry); @@ -2814,6 +2820,63 @@ struct field_modify_info modify_tcp[] = { } /** + * Allocate encap_decap resource. + * + * @param list + * Pointer to the hash list. + * @param key + * Key of the new entry. + * @param cb_ctx + * Pointer to new encap_decap resource. + * + * @return + * Pointer to the new entry on success, NULL otherwise.
+ */ +struct mlx5_hlist_entry * +flow_dv_encap_decap_create_cb(struct mlx5_hlist *list, + uint64_t key __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5dv_dr_domain *domain; + struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data; + struct mlx5_flow_dv_encap_decap_resource *cache_resource; + uint32_t idx; + int ret; + + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + domain = sh->fdb_domain; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) + domain = sh->rx_domain; + else + domain = sh->tx_domain; + /* Register new encap/decap resource. */ + cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], + &idx); + if (!cache_resource) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + return NULL; + } + *cache_resource = *resource; + cache_resource->idx = idx; + ret = mlx5_flow_os_create_flow_action_packet_reformat + (sh->ctx, domain, cache_resource, + &cache_resource->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + return NULL; + } + + return &cache_resource->entry; +} + +/** * Find existing encap/decap resource or create and register a new one. * * @param[in, out] dev @@ -2837,8 +2900,6 @@ struct field_modify_info modify_tcp[] = { { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_encap_decap_resource *cache_resource; - struct mlx5dv_dr_domain *domain; struct mlx5_hlist_entry *entry; union mlx5_flow_encap_decap_key encap_decap_key = { { @@ -2849,68 +2910,22 @@ struct field_modify_info modify_tcp[] = { .cksum = 0, } }; - int ret; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; resource->flags = dev_flow->dv.group ? 0 : 1; - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - domain = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) - domain = sh->rx_domain; - else - domain = sh->tx_domain; encap_decap_key.cksum = __rte_raw_cksum(resource->buf, resource->size, 0); resource->entry.key = encap_decap_key.v64; - /* Lookup a matching resource from cache. */ - entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key, - flow_dv_encap_decap_resource_match, - (void *)resource); - if (entry) { - cache_resource = container_of(entry, - struct mlx5_flow_dv_encap_decap_resource, entry); - DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx; - dev_flow->dv.encap_decap = cache_resource; - return 0; - } - /* Register new encap/decap resource. 
*/ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], - &dev_flow->handle->dvh.rix_encap_decap); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap; - ret = mlx5_flow_os_create_flow_action_packet_reformat - (sh->ctx, domain, cache_resource, - &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry, - flow_dv_encap_decap_resource_match, - (void *)cache_resource)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], - cache_resource->idx); - return rte_flow_error_set(error, EEXIST, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "action exist"); - } - dev_flow->dv.encap_decap = cache_resource; - DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key, + &ctx); + if (!entry) + return -rte_errno; + resource = container_of(entry, typeof(*resource), entry); + dev_flow->dv.encap_decap = resource; + dev_flow->handle->dvh.rix_encap_decap = resource->idx; return 0; } @@ -10404,6 +10419,26 @@ struct mlx5_hlist_entry * } /** + * Release encap_decap resource. + * + * @param list + * Pointer to the hash list. + * @param entry + * Pointer to exist resource entry object. + */ +void +flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_dv_encap_decap_resource *res = + container_of(entry, typeof(*res), entry); + + claim_zero(mlx5_flow_os_destroy_flow_action(res->action)); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx); +} + +/** * Release an encap/decap resource. 
* * @param dev @@ -10419,28 +10454,15 @@ struct mlx5_hlist_entry * uint32_t encap_decap_idx) { struct mlx5_priv *priv = dev->data->dev_private; - uint32_t idx = encap_decap_idx; struct mlx5_flow_dv_encap_decap_resource *cache_resource; cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], - idx); + encap_decap_idx); if (!cache_resource) return 0; MLX5_ASSERT(cache_resource->action); - DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - mlx5_hlist_remove(priv->sh->encaps_decaps, - &cache_resource->entry); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); - DRV_LOG(DEBUG, "encap/decap resource %p: removed", - (void *)cache_resource); - return 0; - } - return 1; + return mlx5_hlist_unregister(priv->sh->encaps_decaps, + &cache_resource->entry); } /** From patchwork Tue Oct 27 12:27:16 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82306 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6B99EA04B5; Tue, 27 Oct 2020 13:35:05 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id D565C72D9; Tue, 27 Oct 2020 13:29:17 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 6F80C3253 for ; Tue, 27 Oct 2020 13:28:16 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:15 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7J024637; Tue, 27 Oct 2020 14:28:14 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:16 +0800 Message-Id: <1603801650-442376-22-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 21/34] net/mlx5: introduce thread safe linked list cache X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li New API of linked list for cache: - Optimized for caches with a small number of entries. - Optimized for read-mostly lists. - Thread safe. - Since the number of entries is limited, entries are allocated by the API. - For a dynamic entry size, pass 0 as the entry size; the creation callback then allocates the entry. - Since the number of entries is limited, there is no need to use an indexed pool for the memory; the API removes an entry and frees it with mlx5_free. - The search API is not supposed to be used from multiple threads. A short usage sketch of the new API follows below.
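To make the new API concrete, here is a hypothetical consumer built only from the declarations this patch adds (mlx5_cache_list_init, mlx5_cache_register, mlx5_cache_unregister and the embedded mlx5_cache_entry). The my_action structure and its callbacks are illustrative, not part of the patch.

#include "mlx5_utils.h" /* Declarations added by this patch. */

/* Example entry: embeds mlx5_cache_entry so container_of() can recover it. */
struct my_action {
	struct mlx5_cache_entry entry; /* Must be the list-managed member. */
	uint32_t id;                   /* Key used for matching. */
};

static int
my_action_match_cb(struct mlx5_cache_list *list __rte_unused,
		   struct mlx5_cache_entry *entry, void *ctx)
{
	struct my_action *act = container_of(entry, struct my_action, entry);

	/* Per the API contract, 0 means "match". */
	return act->id != *(uint32_t *)ctx;
}

static struct mlx5_cache_entry *
my_action_create_cb(struct mlx5_cache_list *list __rte_unused,
		    struct mlx5_cache_entry *entry __rte_unused, void *ctx)
{
	/* entry_size was 0, so the callback allocates the entry itself. */
	struct my_action *act = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*act), 0,
					    SOCKET_ID_ANY);

	if (!act)
		return NULL;
	act->id = *(uint32_t *)ctx;
	return &act->entry;
}

static void
my_action_remove_cb(struct mlx5_cache_list *list __rte_unused,
		    struct mlx5_cache_entry *entry)
{
	mlx5_free(container_of(entry, struct my_action, entry));
}

/* Reuse an existing entry for @id or create one; NULL on failure.
 * The returned entry holds a reference until mlx5_cache_unregister(). */
static struct my_action *
my_action_acquire(struct mlx5_cache_list *list, uint32_t id)
{
	struct mlx5_cache_entry *entry = mlx5_cache_register(list, &id);

	return entry ? container_of(entry, struct my_action, entry) : NULL;
}

Such a list would be set up once with mlx5_cache_list_init(&list, "my_actions", 0, NULL, my_action_create_cb, my_action_match_cb, my_action_remove_cb), a reference dropped with mlx5_cache_unregister(&list, &act->entry), and the whole list torn down with mlx5_cache_list_destroy(&list).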
Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_utils.c | 160 ++++++++++++++++++++++++++++++++++++ drivers/net/mlx5/mlx5_utils.h | 183 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 343 insertions(+) diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c index d041b07..df15f63 100644 --- a/drivers/net/mlx5/mlx5_utils.c +++ b/drivers/net/mlx5/mlx5_utils.c @@ -255,6 +255,166 @@ struct mlx5_hlist_entry * mlx5_free(h); } +/********************* Cache list ************************/ + +static struct mlx5_cache_entry * +mlx5_clist_default_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *ctx __rte_unused) +{ + return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY); +} + +static void +mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry) +{ + mlx5_free(entry); +} + +int +mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name, + uint32_t entry_size, void *ctx, + mlx5_cache_create_cb cb_create, + mlx5_cache_match_cb cb_match, + mlx5_cache_remove_cb cb_remove) +{ + MLX5_ASSERT(list); + if (!cb_match || (!cb_create ^ !cb_remove)) + return -1; + if (name) + snprintf(list->name, sizeof(list->name), "%s", name); + list->entry_sz = entry_size; + list->ctx = ctx; + list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb; + list->cb_match = cb_match; + list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb; + rte_rwlock_init(&list->lock); + DRV_LOG(DEBUG, "Cache list %s initialized.", list->name); + LIST_INIT(&list->head); + return 0; +} + +static struct mlx5_cache_entry * +__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse) +{ + struct mlx5_cache_entry *entry; + + LIST_FOREACH(entry, &list->head, next) { + if (list->cb_match(list, entry, ctx)) + continue; + if (reuse) { + __atomic_add_fetch(&entry->ref_cnt, 1, + __ATOMIC_RELAXED); + DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.", + list->name, (void *)entry, entry->ref_cnt); + } + break; + } + return entry; +} + +static struct mlx5_cache_entry * +cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse) +{ + struct mlx5_cache_entry *entry; + + rte_rwlock_read_lock(&list->lock); + entry = __cache_lookup(list, ctx, reuse); + rte_rwlock_read_unlock(&list->lock); + return entry; +} + +struct mlx5_cache_entry * +mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx) +{ + return cache_lookup(list, ctx, false); +} + +struct mlx5_cache_entry * +mlx5_cache_register(struct mlx5_cache_list *list, void *ctx) +{ + struct mlx5_cache_entry *entry; + uint32_t prev_gen_cnt = 0; + + MLX5_ASSERT(list); + prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE); + /* Lookup with read lock, reuse if found. */ + entry = cache_lookup(list, ctx, true); + if (entry) + return entry; + /* Not found, append with write lock - block read from other threads. */ + rte_rwlock_write_lock(&list->lock); + /* If list changed by other threads before lock, search again. */ + if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) { + /* Lookup and reuse w/o read lock. 
*/ + entry = __cache_lookup(list, ctx, true); + if (entry) + goto done; + } + entry = list->cb_create(list, entry, ctx); + if (!entry) { + DRV_LOG(ERR, "Failed to init cache list %s entry %p.", + list->name, (void *)entry); + goto done; + } + entry->ref_cnt = 1; + LIST_INSERT_HEAD(&list->head, entry, next); + __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE); + __atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE); + DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.", + list->name, (void *)entry, entry->ref_cnt); +done: + rte_rwlock_write_unlock(&list->lock); + return entry; +} + +int +mlx5_cache_unregister(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry) +{ + rte_rwlock_write_lock(&list->lock); + MLX5_ASSERT(entry && entry->next.le_prev); + DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.", + list->name, (void *)entry, entry->ref_cnt); + if (--entry->ref_cnt) { + rte_rwlock_write_unlock(&list->lock); + return 1; + } + __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE); + __atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE); + LIST_REMOVE(entry, next); + list->cb_remove(list, entry); + rte_rwlock_write_unlock(&list->lock); + DRV_LOG(DEBUG, "Cache list %s entry %p removed.", + list->name, (void *)entry); + return 0; +} + +void +mlx5_cache_list_destroy(struct mlx5_cache_list *list) +{ + struct mlx5_cache_entry *entry; + + MLX5_ASSERT(list); + /* no LIST_FOREACH_SAFE, using while instead */ + while (!LIST_EMPTY(&list->head)) { + entry = LIST_FIRST(&list->head); + LIST_REMOVE(entry, next); + list->cb_remove(list, entry); + DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.", + list->name, (void *)entry); + } + memset(list, 0, sizeof(*list)); +} + +uint32_t +mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list) +{ + MLX5_ASSERT(list); + return __atomic_load_n(&list->count, __ATOMIC_RELAXED); +} + /********************* Indexed pool **********************/ static inline void diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h index c665558..b00789c 100644 --- a/drivers/net/mlx5/mlx5_utils.h +++ b/drivers/net/mlx5/mlx5_utils.h @@ -512,6 +512,189 @@ struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, */ void mlx5_hlist_destroy(struct mlx5_hlist *h); +/************************ cache list *****************************/ + +/** Maximum size of string for naming. */ +#define MLX5_NAME_SIZE 32 + +struct mlx5_cache_list; + +/** + * Structure of the entry in the cache list, user should define its own struct + * that contains this in order to store the data. + */ +struct mlx5_cache_entry { + LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */ + uint32_t ref_cnt; /* Reference count. */ +}; + +/** + * Type of callback function for entry removal. + * + * @param list + * The cache list. + * @param entry + * The entry in the list. + */ +typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); + +/** + * Type of function for user defined matching. + * + * @param list + * The cache list. + * @param entry + * The entry in the list. + * @param ctx + * The pointer to new entry context. + * + * @return + * 0 if matching, non-zero number otherwise. + */ +typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *ctx); + +/** + * Type of function for user defined cache list entry creation. + * + * @param list + * The cache list. 
+ * @param entry + * The new allocated entry, NULL if list entry size unspecified, + * New entry has to be allocated in callback and return. + * @param ctx + * The pointer to new entry context. + * + * @return + * Pointer of entry on success, NULL otherwise. + */ +typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb) + (struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, + void *ctx); + +/** + * Linked cache list structure. + * + * Entry in cache list could be reused if entry already exists, + * reference count will increase and the existing entry returns. + * + * When destroy an entry from list, decrease reference count and only + * destroy when no further reference. + * + * Linked list cache is designed for limited number of entries cache, + * read mostly, less modification. + * + * For huge amount of entries cache, please consider hash list cache. + * + */ +struct mlx5_cache_list { + char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */ + uint32_t entry_sz; /**< Entry size, 0: use create callback. */ + rte_rwlock_t lock; /* read/write lock. */ + uint32_t gen_cnt; /* List modification will update generation count. */ + uint32_t count; /* number of entries in list. */ + void *ctx; /* user objects target to callback. */ + mlx5_cache_create_cb cb_create; /**< entry create callback. */ + mlx5_cache_match_cb cb_match; /**< entry match callback. */ + mlx5_cache_remove_cb cb_remove; /**< entry remove callback. */ + LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head; +}; + +/** + * Initialize a cache list. + * + * @param list + * Pointer to the hast list table. + * @param name + * Name of the cache list. + * @param entry_size + * Entry size to allocate, 0 to allocate by creation callback. + * @param ctx + * Pointer to the list context data. + * @param cb_create + * Callback function for entry create. + * @param cb_match + * Callback function for entry match. + * @param cb_remove + * Callback function for entry remove. + * @return + * 0 on success, otherwise failure. + */ +int mlx5_cache_list_init(struct mlx5_cache_list *list, + const char *name, uint32_t entry_size, void *ctx, + mlx5_cache_create_cb cb_create, + mlx5_cache_match_cb cb_match, + mlx5_cache_remove_cb cb_remove); + +/** + * Search an entry matching the key. + * + * Result returned might be destroyed by other thread, must use + * this function only in main thread. + * + * @param list + * Pointer to the cache list. + * @param ctx + * Common context parameter used by entry callback function. + * + * @return + * Pointer of the cache entry if found, NULL otherwise. + */ +struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list, + void *ctx); + +/** + * Reuse or create an entry to the cache list. + * + * @param list + * Pointer to the hast list table. + * @param ctx + * Common context parameter used by callback function. + * + * @return + * registered entry on success, NULL otherwise + */ +struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list, + void *ctx); + +/** + * Remove an entry from the cache list. + * + * User should guarantee the validity of the entry. + * + * @param list + * Pointer to the hast list. + * @param entry + * Entry to be removed from the cache list table. + * @return + * 0 on entry removed, 1 on entry still referenced. + */ +int mlx5_cache_unregister(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); + +/** + * Destroy the cache list. + * + * @param list + * Pointer to the cache list. 
+ */ +void mlx5_cache_list_destroy(struct mlx5_cache_list *list); + +/** + * Get entry number from the cache list. + * + * @param list + * Pointer to the hast list. + * @return + * Cache list entry number. + */ +uint32_t +mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list); + +/********************************* indexed pool *************************/ + /** * This function allocates non-initialized memory entry from pool. * In NUMA systems, the memory entry allocated resides on the same From patchwork Tue Oct 27 12:27:17 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82309 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 769EAA04B5; Tue, 27 Oct 2020 13:36:11 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 1DB809AEB; Tue, 27 Oct 2020 13:29:22 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 7A11B2BFF for ; Tue, 27 Oct 2020 13:28:22 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:17 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7K024637; Tue, 27 Oct 2020 14:28:16 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:17 +0800 Message-Id: <1603801650-442376-23-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 22/34] net/mlx5: optimize shared RSS list operation X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" When create shared RSS hrxq, the hrxq will be created directly, no hrxq will be reused. In this case, add the shared RSS hrxq to the queue list is redundant. And it also hurts the generic queue lookup. This commit avoids add the shared RSS hrxq to the queue list. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5.h | 2 +- drivers/net/mlx5/mlx5_rxq.c | 57 +++++++++++++++++++++++++++----------------- drivers/net/mlx5/mlx5_rxtx.h | 5 ++-- 3 files changed, 39 insertions(+), 25 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index ffc8b38..417e111 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -729,7 +729,7 @@ struct mlx5_ind_table_obj { struct mlx5_hrxq { ILIST_ENTRY(uint32_t)next; /* Index to the next element. */ rte_atomic32_t refcnt; /* Reference counter. */ - uint32_t shared:1; /* This object used in shared action. */ + uint32_t standalone:1; /* This object used in shared action. */ struct mlx5_ind_table_obj *ind_table; /* Indirection table. 
*/ RTE_STD_C11 union { diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index ddd5df7..9c9f8c4 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1930,13 +1930,16 @@ struct mlx5_ind_table_obj * * Pointer to Ethernet device. * @param ind_table * Indirection table to release. + * @param standalone + * Indirection table for Standalone queue. * * @return * 1 while a reference on it exists, 0 when freed. */ int mlx5_ind_table_obj_release(struct rte_eth_dev *dev, - struct mlx5_ind_table_obj *ind_tbl) + struct mlx5_ind_table_obj *ind_tbl, + bool standalone) { struct mlx5_priv *priv = dev->data->dev_private; unsigned int i; @@ -1946,7 +1949,8 @@ struct mlx5_ind_table_obj * for (i = 0; i != ind_tbl->queues_n; ++i) claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i])); if (!rte_atomic32_read(&ind_tbl->refcnt)) { - LIST_REMOVE(ind_tbl, next); + if (!standalone) + LIST_REMOVE(ind_tbl, next); mlx5_free(ind_tbl); return 0; } @@ -1987,13 +1991,15 @@ struct mlx5_ind_table_obj * * Queues entering in the indirection table. * @param queues_n * Number of queues in the array. + * @param standalone + * Indirection table for Standalone queue. * * @return * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set. */ static struct mlx5_ind_table_obj * mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues, - uint32_t queues_n) + uint32_t queues_n, bool standalone) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_ind_table_obj *ind_tbl; @@ -2020,7 +2026,8 @@ struct mlx5_ind_table_obj * if (ret < 0) goto error; rte_atomic32_inc(&ind_tbl->refcnt); - LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); + if (!standalone) + LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); return ind_tbl; error: ret = rte_errno; @@ -2063,8 +2070,6 @@ struct mlx5_ind_table_obj * hrxq, next) { struct mlx5_ind_table_obj *ind_tbl; - if (hrxq->shared) - continue; if (hrxq->rss_key_len != rss_key_len) continue; if (memcmp(hrxq->rss_key, rss_key, rss_key_len)) @@ -2075,7 +2080,8 @@ struct mlx5_ind_table_obj * if (!ind_tbl) continue; if (ind_tbl != hrxq->ind_table) { - mlx5_ind_table_obj_release(dev, ind_tbl); + mlx5_ind_table_obj_release(dev, ind_tbl, + hrxq->standalone); continue; } rte_atomic32_inc(&hrxq->refcnt); @@ -2136,7 +2142,8 @@ struct mlx5_ind_table_obj * } else { ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); if (!ind_tbl) - ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n); + ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, + hrxq->standalone); } if (!ind_tbl) { rte_errno = ENOMEM; @@ -2150,7 +2157,8 @@ struct mlx5_ind_table_obj * goto error; } if (ind_tbl != hrxq->ind_table) { - mlx5_ind_table_obj_release(dev, hrxq->ind_table); + mlx5_ind_table_obj_release(dev, hrxq->ind_table, + hrxq->standalone); hrxq->ind_table = ind_tbl; } hrxq->hash_fields = hash_fields; @@ -2159,7 +2167,7 @@ struct mlx5_ind_table_obj * error: err = rte_errno; if (ind_tbl != hrxq->ind_table) - mlx5_ind_table_obj_release(dev, ind_tbl); + mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone); rte_errno = err; return -rte_errno; } @@ -2189,13 +2197,16 @@ struct mlx5_ind_table_obj * mlx5_glue->destroy_flow_action(hrxq->action); #endif priv->obj_ops.hrxq_destroy(hrxq); - mlx5_ind_table_obj_release(dev, hrxq->ind_table); - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, - hrxq_idx, hrxq, next); + mlx5_ind_table_obj_release(dev, hrxq->ind_table, + hrxq->standalone); + if (!hrxq->standalone) + 
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], + &priv->hrxqs, hrxq_idx, hrxq, next); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); return 0; } - claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table)); + claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table, + hrxq->standalone)); return 1; } @@ -2217,8 +2228,8 @@ struct mlx5_ind_table_obj * * Number of queues. * @param tunnel * Tunnel type. - * @param shared - * If true new object of Rx Hash queue will be used in shared action. + * @param standalone + * Object of Rx Hash queue will be used in standalone shared action or not. * * @return * The DevX object initialized index, 0 otherwise and rte_errno is set. @@ -2228,7 +2239,7 @@ struct mlx5_ind_table_obj * const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, const uint16_t *queues, uint32_t queues_n, - int tunnel, bool shared) + int tunnel, bool standalone) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq = NULL; @@ -2239,7 +2250,8 @@ struct mlx5_ind_table_obj * queues_n = hash_fields ? queues_n : 1; ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); if (!ind_tbl) - ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n); + ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, + standalone); if (!ind_tbl) { rte_errno = ENOMEM; return 0; @@ -2247,7 +2259,7 @@ struct mlx5_ind_table_obj * hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx); if (!hrxq) goto error; - hrxq->shared = !!shared; + hrxq->standalone = !!standalone; hrxq->ind_table = ind_tbl; hrxq->rss_key_len = rss_key_len; hrxq->hash_fields = hash_fields; @@ -2258,12 +2270,13 @@ struct mlx5_ind_table_obj * goto error; } rte_atomic32_inc(&hrxq->refcnt); - ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx, - hrxq, next); + if (!hrxq->standalone) + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, + hrxq_idx, hrxq, next); return hrxq_idx; error: ret = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_ind_table_obj_release(dev, ind_tbl); + mlx5_ind_table_obj_release(dev, ind_tbl, standalone); if (hrxq) mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); rte_errno = ret; /* Restore rte_errno. 
*/ diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 1b5fba4..8fe0473 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -347,12 +347,13 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues, uint32_t queues_n); int mlx5_ind_table_obj_release(struct rte_eth_dev *dev, - struct mlx5_ind_table_obj *ind_tbl); + struct mlx5_ind_table_obj *ind_tbl, + bool standalone); uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, const uint16_t *queues, uint32_t queues_n, - int tunnel, bool shared); + int tunnel, bool standalone); uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, From patchwork Tue Oct 27 12:27:18 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82310 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id B08CCA04B5; Tue, 27 Oct 2020 13:36:34 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C0A39A932; Tue, 27 Oct 2020 13:29:23 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 91CA53253 for ; Tue, 27 Oct 2020 13:28:22 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:19 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7L024637; Tue, 27 Oct 2020 14:28:18 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:18 +0800 Message-Id: <1603801650-442376-24-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 23/34] net/mlx5: make Rx queue thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit applies the cache linked list to Rx queue to make it thread safe. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 5 + drivers/net/mlx5/mlx5.c | 1 + drivers/net/mlx5/mlx5.h | 28 +++- drivers/net/mlx5/mlx5_flow.h | 16 --- drivers/net/mlx5/mlx5_flow_dv.c | 74 ++++------ drivers/net/mlx5/mlx5_flow_verbs.c | 21 +-- drivers/net/mlx5/mlx5_rxq.c | 281 ++++++++++++++++++++----------------- drivers/net/mlx5/mlx5_rxtx.h | 20 +-- 8 files changed, 228 insertions(+), 218 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index d017c23..10fc7c5 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1468,6 +1468,10 @@ err = ENOTSUP; goto error; } + mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev, + mlx5_hrxq_create_cb, + mlx5_hrxq_match_cb, + mlx5_hrxq_remove_cb); /* Query availability of metadata reg_c's. 
*/ err = mlx5_flow_discover_mreg_c(eth_dev); if (err < 0) { @@ -1520,6 +1524,7 @@ mlx5_drop_action_destroy(eth_dev); if (own_domain_id) claim_zero(rte_eth_switch_domain_free(priv->domain_id)); + mlx5_cache_list_destroy(&priv->hrxqs); mlx5_free(priv); if (eth_dev != NULL) eth_dev->data->dev_private = NULL; diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 1d25a8e..862bd40 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1286,6 +1286,7 @@ struct mlx5_dev_ctx_shared * if (ret) DRV_LOG(WARNING, "port %u some flows still remain", dev->data->port_id); + mlx5_cache_list_destroy(&priv->hrxqs); /* * Free the shared context in last turn, because the cleanup * routines above may use some shared fields, like diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 417e111..99dfcd7 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -65,6 +65,13 @@ enum mlx5_reclaim_mem_mode { MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */ }; +/* Hash and cache list callback context. */ +struct mlx5_flow_cb_ctx { + struct rte_eth_dev *dev; + struct rte_flow_error *error; + void *data; +}; + /* Device attributes used in mlx5 PMD */ struct mlx5_dev_attr { uint64_t device_cap_flags_ex; @@ -688,6 +695,22 @@ struct mlx5_proc_priv { /* MTR list. */ TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter); +/* RSS description. */ +struct mlx5_flow_rss_desc { + uint32_t level; + uint32_t queue_num; /**< Number of entries in @p queue. */ + uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */ + uint64_t hash_fields; /* Verbs Hash fields. */ + uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ + uint32_t key_len; /**< RSS hash key len. */ + uint32_t tunnel; /**< Queue in tunnel. */ + union { + uint16_t *queue; /**< Destination queues. */ + const uint16_t *const_q; /**< Const pointer convert. */ + }; + bool standalone; /**< Queue is standalone or not. */ +}; + #define MLX5_PROC_PRIV(port_id) \ ((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private) @@ -727,7 +750,7 @@ struct mlx5_ind_table_obj { /* Hash Rx queue. */ __extension__ struct mlx5_hrxq { - ILIST_ENTRY(uint32_t)next; /* Index to the next element. */ + struct mlx5_cache_entry entry; /* Cache entry. */ rte_atomic32_t refcnt; /* Reference counter. */ uint32_t standalone:1; /* This object used in shared action. */ struct mlx5_ind_table_obj *ind_table; /* Indirection table. */ @@ -741,6 +764,7 @@ struct mlx5_hrxq { #endif uint64_t hash_fields; /* Verbs Hash fields. */ uint32_t rss_key_len; /* Hash key length in bytes. */ + uint32_t idx; /* Hash Rx queue index. */ uint8_t rss_key[]; /* Hash key. */ }; @@ -858,7 +882,7 @@ struct mlx5_priv { struct mlx5_obj_ops obj_ops; /* HW objects operations. */ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */ LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */ - uint32_t hrxqs; /* Verbs Hash Rx queues. */ + struct mlx5_cache_list hrxqs; /* Hash Rx queues. */ LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */ LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */ /* Indirection tables. 
*/ diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 4bc540b..6b706e7 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -385,13 +385,6 @@ enum mlx5_flow_fate_type { MLX5_FLOW_FATE_MAX, }; -/* Hash list callback context */ -struct mlx5_flow_cb_ctx { - struct rte_eth_dev *dev; - struct rte_flow_error *error; - void *data; -}; - /* Matcher PRM representation */ struct mlx5_flow_dv_match_params { size_t size; @@ -610,15 +603,6 @@ struct ibv_spec_header { uint16_t size; }; -/* RSS description. */ -struct mlx5_flow_rss_desc { - uint32_t level; - uint32_t queue_num; /**< Number of entries in @p queue. */ - uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */ - uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ - uint16_t *queue; /**< Destination queues. */ -}; - /* PMD flow priority for tunnel */ #define MLX5_TUNNEL_PRIO_GET(rss_desc) \ ((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4) diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 1bcbe38..47dea4a 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8521,7 +8521,7 @@ struct mlx5_hlist_entry * } /** - * Create an Rx Hash queue. + * Prepare an Rx Hash queue. * * @param dev * Pointer to Ethernet device. @@ -8536,29 +8536,23 @@ struct mlx5_hlist_entry * * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. */ static struct mlx5_hrxq * -flow_dv_handle_rx_queue(struct rte_eth_dev *dev, - struct mlx5_flow *dev_flow, - struct mlx5_flow_rss_desc *rss_desc, - uint32_t *hrxq_idx) +flow_dv_hrxq_prepare(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + struct mlx5_flow_rss_desc *rss_desc, + uint32_t *hrxq_idx) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *dh = dev_flow->handle; struct mlx5_hrxq *hrxq; MLX5_ASSERT(rss_desc->queue_num); - *hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, rss_desc->queue_num); - if (!*hrxq_idx) { - *hrxq_idx = mlx5_hrxq_new - (dev, rss_desc->key, MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, rss_desc->queue_num, - !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL), - false); - if (!*hrxq_idx) - return NULL; - } + rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc->hash_fields = dev_flow->hash_fields; + rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL); + rss_desc->standalone = false; + *hrxq_idx = mlx5_hrxq_get(dev, rss_desc); + if (!*hrxq_idx) + return NULL; hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], *hrxq_idx); return hrxq; @@ -8921,8 +8915,8 @@ struct mlx5_hlist_entry * queue = sub_actions->conf; rss_desc->queue_num = 1; rss_desc->queue[0] = queue->index; - hrxq = flow_dv_handle_rx_queue(dev, dev_flow, - rss_desc, &hrxq_idx); + hrxq = flow_dv_hrxq_prepare(dev, dev_flow, + rss_desc, &hrxq_idx); if (!hrxq) return rte_flow_error_set (error, rte_errno, @@ -9119,8 +9113,8 @@ struct mlx5_hlist_entry * if (num_of_dest > 1) { if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) { /* Handle QP action for mirroring */ - hrxq = flow_dv_handle_rx_queue(dev, dev_flow, - rss_desc, &hrxq_idx); + hrxq = flow_dv_hrxq_prepare(dev, dev_flow, + rss_desc, &hrxq_idx); if (!hrxq) return rte_flow_error_set (error, rte_errno, @@ -10254,24 +10248,8 @@ struct mlx5_hlist_entry * struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; - MLX5_ASSERT(rss_desc->queue_num); - hrxq_idx = mlx5_hrxq_get(dev, 
rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, rss_desc->queue_num); - if (!hrxq_idx) { - hrxq_idx = mlx5_hrxq_new(dev, - rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, - rss_desc->queue_num, - !!(dev_flow->handle->layers & - MLX5_FLOW_LAYER_TUNNEL), - false); - } - *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], - hrxq_idx); + *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, + &hrxq_idx); } return hrxq_idx; } @@ -10325,7 +10303,6 @@ struct mlx5_hlist_entry * struct mlx5_hrxq *hrxq = NULL; uint32_t hrxq_idx = __flow_dv_rss_get_hrxq (dev, flow, dev_flow, &hrxq); - if (!hrxq) { rte_flow_error_set (error, rte_errno, @@ -10945,21 +10922,24 @@ struct mlx5_hlist_entry * struct mlx5_shared_action_rss *action, struct rte_flow_error *error) { + struct mlx5_flow_rss_desc rss_desc = { 0 }; size_t i; int err; + memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN); + rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc.const_q = action->origin.queue; + rss_desc.queue_num = action->origin.queue_num; + rss_desc.standalone = true; for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) { uint32_t hrxq_idx; uint64_t hash_fields = mlx5_rss_hash_fields[i]; int tunnel; for (tunnel = 0; tunnel < 2; tunnel++) { - hrxq_idx = mlx5_hrxq_new(dev, action->origin.key, - MLX5_RSS_HASH_KEY_LEN, - hash_fields, - action->origin.queue, - action->origin.queue_num, - tunnel, true); + rss_desc.tunnel = tunnel; + rss_desc.hash_fields = hash_fields; + hrxq_idx = mlx5_hrxq_get(dev, &rss_desc); if (!hrxq_idx) { rte_flow_error_set (error, rte_errno, diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index ba6731a..9afa563 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -1963,21 +1963,12 @@ &wks->rss_desc[!!wks->flow_nested_idx]; MLX5_ASSERT(rss_desc->queue_num); - hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, - rss_desc->queue_num); - if (!hrxq_idx) - hrxq_idx = mlx5_hrxq_new - (dev, rss_desc->key, - MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, - rss_desc->queue, - rss_desc->queue_num, - !!(handle->layers & - MLX5_FLOW_LAYER_TUNNEL), - false); + rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc->hash_fields = dev_flow->hash_fields; + rss_desc->tunnel = !!(handle->layers & + MLX5_FLOW_LAYER_TUNNEL); + rss_desc->standalone = false; + hrxq_idx = mlx5_hrxq_get(dev, rss_desc); hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); if (!hrxq) { diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 9c9f8c4..0b0bdcc 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -2040,54 +2040,38 @@ struct mlx5_ind_table_obj * } /** - * Get an Rx Hash queue. + * Match an Rx Hash queue. * - * @param dev - * Pointer to Ethernet device. - * @param rss_conf - * RSS configuration for the Rx hash queue. - * @param queues - * Queues entering in hash queue. In case of empty hash_fields only the - * first queue index will be taken for the indirection table. - * @param queues_n - * Number of queues. + * @param list + * Cache list pointer. + * @param entry + * Hash queue entry pointer. + * @param cb_ctx + * Context of the callback function. * * @return - * An hash Rx queue index on success. + * 0 if match, none zero if not match. 
*/ -uint32_t -mlx5_hrxq_get(struct rte_eth_dev *dev, - const uint8_t *rss_key, uint32_t rss_key_len, - uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n) +int +mlx5_hrxq_match_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, + void *cb_ctx) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_hrxq *hrxq; - uint32_t idx; - - queues_n = hash_fields ? queues_n : 1; - ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx, - hrxq, next) { - struct mlx5_ind_table_obj *ind_tbl; + struct rte_eth_dev *dev = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_rss_desc *rss_desc = ctx->data; + struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry); + struct mlx5_ind_table_obj *ind_tbl; - if (hrxq->rss_key_len != rss_key_len) - continue; - if (memcmp(hrxq->rss_key, rss_key, rss_key_len)) - continue; - if (hrxq->hash_fields != hash_fields) - continue; - ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); - if (!ind_tbl) - continue; - if (ind_tbl != hrxq->ind_table) { - mlx5_ind_table_obj_release(dev, ind_tbl, - hrxq->standalone); - continue; - } - rte_atomic32_inc(&hrxq->refcnt); - return idx; - } - return 0; + if (hrxq->rss_key_len != rss_desc->key_len || + memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) || + hrxq->hash_fields != rss_desc->hash_fields) + return 1; + ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue, + rss_desc->queue_num); + if (ind_tbl) + mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone); + return ind_tbl != hrxq->ind_table; } /** @@ -2172,114 +2156,163 @@ struct mlx5_ind_table_obj * return -rte_errno; } -/** - * Release the hash Rx queue. - * - * @param dev - * Pointer to Ethernet device. - * @param hrxq - * Index to Hash Rx queue to release. - * - * @return - * 1 while a reference on it exists, 0 when freed. - */ -int -mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx) +static void +__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_hrxq *hrxq; - hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); - if (!hrxq) - return 0; - if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { #ifdef HAVE_IBV_FLOW_DV_SUPPORT - mlx5_glue->destroy_flow_action(hrxq->action); + mlx5_glue->destroy_flow_action(hrxq->action); #endif - priv->obj_ops.hrxq_destroy(hrxq); - mlx5_ind_table_obj_release(dev, hrxq->ind_table, - hrxq->standalone); - if (!hrxq->standalone) - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], - &priv->hrxqs, hrxq_idx, hrxq, next); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); - return 0; - } - claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table, - hrxq->standalone)); - return 1; + priv->obj_ops.hrxq_destroy(hrxq); + mlx5_ind_table_obj_release(dev, hrxq->ind_table, hrxq->standalone); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx); } /** - * Create an Rx Hash queue. + * Release the hash Rx queue. * * @param dev * Pointer to Ethernet device. - * @param rss_key - * RSS key for the Rx hash queue. - * @param rss_key_len - * RSS key length. - * @param hash_fields - * Verbs protocol hash field to make the RSS on. - * @param queues - * Queues entering in hash queue. In case of empty hash_fields only the - * first queue index will be taken for the indirection table. - * @param queues_n - * Number of queues. - * @param tunnel - * Tunnel type. - * @param standalone - * Object of Rx Hash queue will be used in standalone shared action or not. 
+ * @param hrxq + * Index to Hash Rx queue to release. * - * @return - * The DevX object initialized index, 0 otherwise and rte_errno is set. + * @param list + * Cache list pointer. + * @param entry + * Hash queue entry pointer. */ -uint32_t -mlx5_hrxq_new(struct rte_eth_dev *dev, - const uint8_t *rss_key, uint32_t rss_key_len, - uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n, - int tunnel, bool standalone) +void +mlx5_hrxq_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry) +{ + struct rte_eth_dev *dev = list->ctx; + struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry); + + __mlx5_hrxq_remove(dev, hrxq); +} + +static struct mlx5_hrxq * +__mlx5_hrxq_create(struct rte_eth_dev *dev, + struct mlx5_flow_rss_desc *rss_desc) { struct mlx5_priv *priv = dev->data->dev_private; + const uint8_t *rss_key = rss_desc->key; + uint32_t rss_key_len = rss_desc->key_len; + const uint16_t *queues = + rss_desc->standalone ? rss_desc->const_q : rss_desc->queue; + uint32_t queues_n = rss_desc->queue_num; struct mlx5_hrxq *hrxq = NULL; uint32_t hrxq_idx = 0; struct mlx5_ind_table_obj *ind_tbl; int ret; - queues_n = hash_fields ? queues_n : 1; + queues_n = rss_desc->hash_fields ? queues_n : 1; ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); if (!ind_tbl) ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, - standalone); - if (!ind_tbl) { - rte_errno = ENOMEM; - return 0; - } + rss_desc->standalone); + if (!ind_tbl) + return NULL; hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx); if (!hrxq) goto error; - hrxq->standalone = !!standalone; + hrxq->standalone = rss_desc->standalone; + hrxq->idx = hrxq_idx; hrxq->ind_table = ind_tbl; hrxq->rss_key_len = rss_key_len; - hrxq->hash_fields = hash_fields; + hrxq->hash_fields = rss_desc->hash_fields; memcpy(hrxq->rss_key, rss_key, rss_key_len); - ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel); - if (ret < 0) { - rte_errno = errno; + ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel); + if (ret < 0) goto error; - } - rte_atomic32_inc(&hrxq->refcnt); - if (!hrxq->standalone) - ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, - hrxq_idx, hrxq, next); - return hrxq_idx; + return hrxq; error: - ret = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_ind_table_obj_release(dev, ind_tbl, standalone); + mlx5_ind_table_obj_release(dev, ind_tbl, rss_desc->standalone); if (hrxq) mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); - rte_errno = ret; /* Restore rte_errno. */ + return NULL; +} + +/** + * Create an Rx Hash queue. + * + * @param list + * Cache list pointer. + * @param entry + * Hash queue entry pointer. + * @param cb_ctx + * Context of the callback function. + * + * @return + * queue entry on success, NULL otherwise. + */ +struct mlx5_cache_entry * +mlx5_hrxq_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct rte_eth_dev *dev = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_rss_desc *rss_desc = ctx->data; + struct mlx5_hrxq *hrxq; + + hrxq = __mlx5_hrxq_create(dev, rss_desc); + return hrxq ? &hrxq->entry : NULL; +} + +/** + * Get an Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * @param rss_desc + * RSS configuration for the Rx hash queue. + * + * @return + * An hash Rx queue index on success. 
+ */ +uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, + struct mlx5_flow_rss_desc *rss_desc) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq; + struct mlx5_cache_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .data = rss_desc, + }; + + if (rss_desc->standalone) { + hrxq = __mlx5_hrxq_create(dev, rss_desc); + } else { + entry = mlx5_cache_register(&priv->hrxqs, &ctx); + if (!entry) + return 0; + hrxq = container_of(entry, typeof(*hrxq), entry); + } + return hrxq->idx; +} + +/** + * Release the hash Rx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param hrxq_idx + * Index to Hash Rx queue to release. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); + if (!hrxq->standalone) + return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry); + __mlx5_hrxq_remove(dev, hrxq); return 0; } @@ -2364,22 +2397,12 @@ struct mlx5_hrxq * * @return * The number of object not released. */ -int +uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_hrxq *hrxq; - uint32_t idx; - int ret = 0; - ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx, - hrxq, next) { - DRV_LOG(DEBUG, - "port %u hash Rx queue %p still referenced", - dev->data->port_id, (void *)hrxq); - ++ret; - } - return ret; + return mlx5_cache_list_get_entry_num(&priv->hrxqs); } /** diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 8fe0473..c361bbd 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -349,17 +349,19 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev, int mlx5_ind_table_obj_release(struct rte_eth_dev *dev, struct mlx5_ind_table_obj *ind_tbl, bool standalone); -uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev, - const uint8_t *rss_key, uint32_t rss_key_len, - uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n, - int tunnel, bool standalone); +struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx); +int mlx5_hrxq_match_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, + void *cb_ctx); +void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, - const uint8_t *rss_key, uint32_t rss_key_len, - uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n); + struct mlx5_flow_rss_desc *rss_desc); int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx); -int mlx5_hrxq_verify(struct rte_eth_dev *dev); +uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev); + + enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx); const struct rte_eth_hairpin_conf *mlx5_rxq_get_hairpin_conf (struct rte_eth_dev *dev, uint16_t idx); From patchwork Tue Oct 27 12:27:19 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82311 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id CD038A04B5; Tue, 27 Oct 2020 13:36:58 +0100 (CET) Received: 
from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 8B10CAAB7; Tue, 27 Oct 2020 13:29:25 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 9BDB13253 for ; Tue, 27 Oct 2020 13:28:26 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:21 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7M024637; Tue, 27 Oct 2020 14:28:19 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:19 +0800 Message-Id: <1603801650-442376-25-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 24/34] net/mlx5: make matcher list thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion, this patch converts the matcher list to use the thread safe cache list API. Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5.h | 3 + drivers/net/mlx5/mlx5_flow.h | 15 ++- drivers/net/mlx5/mlx5_flow_dv.c | 214 +++++++++++++++++++++------------------- 3 files changed, 129 insertions(+), 103 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 99dfcd7..7e5518a 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -32,6 +32,9 @@ #include "mlx5_os.h" #include "mlx5_autoconf.h" + +#define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh) + enum mlx5_ipool_index { #ifdef HAVE_IBV_FLOW_DV_SUPPORT MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */ diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 6b706e7..c7b9fb9 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -395,11 +395,9 @@ struct mlx5_flow_dv_match_params { /* Matcher structure. */ struct mlx5_flow_dv_matcher { - LIST_ENTRY(mlx5_flow_dv_matcher) next; - /**< Pointer to the next element. */ + struct mlx5_cache_entry entry; /**< Pointer to the next element. */ struct mlx5_flow_tbl_resource *tbl; /**< Pointer to the table(group) the matcher associated with. */ - rte_atomic32_t refcnt; /**< Reference counter. */ void *matcher_object; /**< Pointer to DV matcher */ uint16_t crc; /**< CRC of key. */ uint16_t priority; /**< Priority of matcher. */ @@ -533,7 +531,7 @@ struct mlx5_flow_tbl_data_entry { /**< hash list entry, 64-bits key inside. */ struct mlx5_flow_tbl_resource tbl; /**< flow table resource. */ - LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers; + struct mlx5_cache_list matchers; /**< matchers' header associated with the flow table. */ struct mlx5_flow_dv_jump_tbl_resource jump; /**< jump resource, at most one for each table created. */ @@ -543,6 +541,7 @@ struct mlx5_flow_tbl_data_entry { uint32_t group_id; bool external; bool tunnel_offload; /* Tunnel offlod table or not. */ + bool is_egress; /**< Egress table. */ }; /* Sub rdma-core actions list.
*/ @@ -1426,4 +1425,12 @@ struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list, uint64_t key, void *cb_ctx); void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry); + +int flow_dv_matcher_match_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *ctx); +struct mlx5_cache_entry *flow_dv_matcher_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *ctx); +void flow_dv_matcher_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); + #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 47dea4a..646e2b0 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -70,7 +70,7 @@ }; static int -flow_dv_tbl_resource_release(struct rte_eth_dev *dev, +flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, struct mlx5_flow_tbl_resource *tbl); static int @@ -7941,6 +7941,7 @@ struct mlx5_hlist_entry * tbl_data->group_id = tt_prm->group_id; tbl_data->external = tt_prm->external; tbl_data->tunnel_offload = is_tunnel_offload_active(dev); + tbl_data->is_egress = !!key.direction; tbl = &tbl_data->tbl; if (key.dummy) return &tbl_data->entry; @@ -7971,6 +7972,13 @@ struct mlx5_hlist_entry * return NULL; } } + MKSTR(matcher_name, "%s_%s_%u_matcher_cache", + key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress", + key.table_id); + mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh, + flow_dv_matcher_create_cb, + flow_dv_matcher_match_cb, + flow_dv_matcher_remove_cb); return &tbl_data->entry; } @@ -8080,14 +8088,15 @@ struct mlx5_flow_tbl_resource * tbl_data->tunnel->tunnel_id : 0, tbl_data->group_id); } + mlx5_cache_list_destroy(&tbl_data->matchers); mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); } /** * Release a flow table. * - * @param[in] dev - * Pointer to rte_eth_dev structure. + * @param[in] sh + * Pointer to device shared structure. * @param[in] tbl * Table resource to be released. 
* @@ -8095,11 +8104,9 @@ struct mlx5_flow_tbl_resource * * Returns 0 if table was released, else return 1; */ static int -flow_dv_tbl_resource_release(struct rte_eth_dev *dev, +flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, struct mlx5_flow_tbl_resource *tbl) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_tbl_data_entry *tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); @@ -8108,6 +8115,63 @@ struct mlx5_flow_tbl_resource * return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry); } +int +flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_matcher *ref = ctx->data; + struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur), + entry); + + return cur->crc != ref->crc || + cur->priority != ref->priority || + memcmp((const void *)cur->mask.buf, + (const void *)ref->mask.buf, ref->mask.size); +} + +struct mlx5_cache_entry * +flow_dv_matcher_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_matcher *ref = ctx->data; + struct mlx5_flow_dv_matcher *cache; + struct mlx5dv_flow_matcher_attr dv_attr = { + .type = IBV_FLOW_ATTR_NORMAL, + .match_mask = (void *)&ref->mask, + }; + struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl, + typeof(*tbl), tbl); + int ret; + + cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create matcher"); + return NULL; + } + *cache = *ref; + dv_attr.match_criteria_enable = + flow_dv_matcher_enable(cache->mask.buf); + dv_attr.priority = ref->priority; + if (tbl->is_egress) + dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; + ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj, + &cache->matcher_object); + if (ret) { + mlx5_free(cache); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create matcher"); + return NULL; + } + return &cache->entry; +} + /** * Register the flow matcher. * @@ -8127,87 +8191,35 @@ struct mlx5_flow_tbl_resource * */ static int flow_dv_matcher_register(struct rte_eth_dev *dev, - struct mlx5_flow_dv_matcher *matcher, + struct mlx5_flow_dv_matcher *ref, union mlx5_flow_tbl_key *key, struct mlx5_flow *dev_flow, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_matcher *cache_matcher; - struct mlx5dv_flow_matcher_attr dv_attr = { - .type = IBV_FLOW_ATTR_NORMAL, - .match_mask = (void *)&matcher->mask, - }; + struct mlx5_cache_entry *entry; + struct mlx5_flow_dv_matcher *cache; struct mlx5_flow_tbl_resource *tbl; struct mlx5_flow_tbl_data_entry *tbl_data; - int ret; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = ref, + }; tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, key->domain, false, NULL, 0, 0, error); if (!tbl) return -rte_errno; /* No need to refill the error info */ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); - /* Lookup from cache. 
*/ - LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) { - if (matcher->crc == cache_matcher->crc && - matcher->priority == cache_matcher->priority && - !memcmp((const void *)matcher->mask.buf, - (const void *)cache_matcher->mask.buf, - cache_matcher->mask.size)) { - DRV_LOG(DEBUG, - "%s group %u priority %hd use %s " - "matcher %p: refcnt %d++", - key->domain ? "FDB" : "NIC", key->table_id, - cache_matcher->priority, - key->direction ? "tx" : "rx", - (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); - rte_atomic32_inc(&cache_matcher->refcnt); - dev_flow->handle->dvh.matcher = cache_matcher; - /* old matcher should not make the table ref++. */ - flow_dv_tbl_resource_release(dev, tbl); - return 0; - } - } - /* Register new matcher. */ - cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0, - SOCKET_ID_ANY); - if (!cache_matcher) { - flow_dv_tbl_resource_release(dev, tbl); + ref->tbl = tbl; + entry = mlx5_cache_register(&tbl_data->matchers, &ctx); + if (!entry) { + flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate matcher memory"); + "cannot allocate ref memory"); } - *cache_matcher = *matcher; - dv_attr.match_criteria_enable = - flow_dv_matcher_enable(cache_matcher->mask.buf); - dv_attr.priority = matcher->priority; - if (key->direction) - dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; - ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj, - &cache_matcher->matcher_object); - if (ret) { - mlx5_free(cache_matcher); -#ifdef HAVE_MLX5DV_DR - flow_dv_tbl_resource_release(dev, tbl); -#endif - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create matcher"); - } - /* Save the table information */ - cache_matcher->tbl = tbl; - rte_atomic32_init(&cache_matcher->refcnt); - /* only matcher ref++, table ref++ already done above in get API. */ - rte_atomic32_inc(&cache_matcher->refcnt); - LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); - dev_flow->handle->dvh.matcher = cache_matcher; - DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", - key->domain ? "FDB" : "NIC", key->table_id, - cache_matcher->priority, - key->direction ? "tx" : "rx", (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); + cache = container_of(entry, typeof(*cache), entry); + dev_flow->handle->dvh.matcher = cache; return 0; } @@ -8696,7 +8708,7 @@ struct mlx5_hlist_entry * } } if (cache_resource->normal_path_tbl) - flow_dv_tbl_resource_release(dev, + flow_dv_tbl_resource_release(MLX5_SH(dev), cache_resource->normal_path_tbl); mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], dev_flow->handle->dvh.rix_sample); @@ -9599,7 +9611,7 @@ struct mlx5_hlist_entry * "cannot create jump action."); if (flow_dv_jump_tbl_resource_register (dev, tbl, dev_flow, error)) { - flow_dv_tbl_resource_release(dev, tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); return rte_flow_error_set (error, errno, RTE_FLOW_ERROR_TYPE_ACTION, @@ -10360,6 +10372,17 @@ struct mlx5_hlist_entry * return -rte_errno; } +void +flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry) +{ + struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache), + entry); + + claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object)); + mlx5_free(cache); +} + /** * Release the flow matcher. 
* @@ -10376,23 +10399,14 @@ struct mlx5_hlist_entry * struct mlx5_flow_handle *handle) { struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher; + struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl, + typeof(*tbl), tbl); + int ret; MLX5_ASSERT(matcher->matcher_object); - DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", - dev->data->port_id, (void *)matcher, - rte_atomic32_read(&matcher->refcnt)); - if (rte_atomic32_dec_and_test(&matcher->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_matcher - (matcher->matcher_object)); - LIST_REMOVE(matcher, next); - /* table ref-- in release interface. */ - flow_dv_tbl_resource_release(dev, matcher->tbl); - mlx5_free(matcher); - DRV_LOG(DEBUG, "port %u matcher %p: removed", - dev->data->port_id, (void *)matcher); - return 0; - } - return 1; + ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry); + flow_dv_tbl_resource_release(MLX5_SH(dev), matcher->tbl); + return ret; } /** @@ -10464,7 +10478,7 @@ struct mlx5_hlist_entry * handle->rix_jump); if (!tbl_data) return 0; - return flow_dv_tbl_resource_release(dev, &tbl_data->tbl); + return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl); } void @@ -10652,7 +10666,7 @@ struct mlx5_hlist_entry * (cache_resource->default_miss)); } if (cache_resource->normal_path_tbl) - flow_dv_tbl_resource_release(dev, + flow_dv_tbl_resource_release(MLX5_SH(dev), cache_resource->normal_path_tbl); } if (cache_resource->sample_idx.rix_hrxq && @@ -11445,9 +11459,9 @@ struct mlx5_hlist_entry * claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->egress.any_matcher)); if (mtd->egress.tbl) - flow_dv_tbl_resource_release(dev, mtd->egress.tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl); if (mtd->egress.sfx_tbl) - flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl); if (mtd->ingress.color_matcher) claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->ingress.color_matcher)); @@ -11455,9 +11469,10 @@ struct mlx5_hlist_entry * claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->ingress.any_matcher)); if (mtd->ingress.tbl) - flow_dv_tbl_resource_release(dev, mtd->ingress.tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl); if (mtd->ingress.sfx_tbl) - flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), + mtd->ingress.sfx_tbl); if (mtd->transfer.color_matcher) claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->transfer.color_matcher)); @@ -11465,9 +11480,10 @@ struct mlx5_hlist_entry * claim_zero(mlx5_flow_os_destroy_flow_matcher (mtd->transfer.any_matcher)); if (mtd->transfer.tbl) - flow_dv_tbl_resource_release(dev, mtd->transfer.tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl); if (mtd->transfer.sfx_tbl) - flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), + mtd->transfer.sfx_tbl); if (mtd->drop_actn) claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn)); mlx5_free(mtd); @@ -11911,9 +11927,9 @@ struct mlx5_hlist_entry * if (matcher) claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher)); if (tbl) - flow_dv_tbl_resource_release(dev, tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), tbl); if (dest_tbl) - flow_dv_tbl_resource_release(dev, dest_tbl); + flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl); if (dcs) claim_zero(mlx5_devx_cmd_destroy(dcs)); return ret; From patchwork Tue Oct 27 12:27:20 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 
Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82313 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8EEA7A04B5; Tue, 27 Oct 2020 13:37:49 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 64C02BBA4; Tue, 27 Oct 2020 13:29:28 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id A6568354D for ; Tue, 27 Oct 2020 13:28:27 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:23 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7N024637; Tue, 27 Oct 2020 14:28:21 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:20 +0800 Message-Id: <1603801650-442376-26-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 25/34] net/mlx5: make port ID action cache thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion, this patch converts the port ID action cache list to a thread safe cache list. Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 7 ++ drivers/net/mlx5/mlx5.h | 2 +- drivers/net/mlx5/mlx5_flow.h | 15 +++- drivers/net/mlx5/mlx5_flow_dv.c | 141 +++++++++++++++++++++------------------ 4 files changed, 94 insertions(+), 71 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 10fc7c5..3d2bf57 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -236,6 +236,12 @@ goto error; /* The resources below are only valid with DV support. */ #ifdef HAVE_IBV_FLOW_DV_SUPPORT + /* Init port id action cache list. */ + snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name); + mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh, + flow_dv_port_id_create_cb, + flow_dv_port_id_match_cb, + flow_dv_port_id_remove_cb); /* Create tags hash list table. */ snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0, @@ -431,6 +437,7 @@ mlx5_release_tunnel_hub(sh, priv->dev_port); sh->tunnel_hub = NULL; } + mlx5_cache_list_destroy(&sh->port_id_action_list); mlx5_free_table_hash_list(priv); } diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 7e5518a..ed606ee 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -663,7 +663,7 @@ struct mlx5_dev_ctx_shared { struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */ struct mlx5_hlist *modify_cmds; struct mlx5_hlist *tag_table; - uint32_t port_id_action_list; /* List of port ID actions. */ + struct mlx5_cache_list port_id_action_list; /* Port ID action cache.
*/ uint32_t push_vlan_action_list; /* List of push VLAN actions. */ uint32_t sample_action_list; /* List of sample actions. */ uint32_t dest_array_list; /* List of destination array actions. */ diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index c7b9fb9..1a6521f 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -486,12 +486,10 @@ struct mlx5_flow_dv_jump_tbl_resource { /* Port ID resource structure. */ struct mlx5_flow_dv_port_id_action_resource { - ILIST_ENTRY(uint32_t)next; - /* Pointer to next element. */ - rte_atomic32_t refcnt; /**< Reference counter. */ - void *action; - /**< Action object. */ + struct mlx5_cache_entry entry; + void *action; /**< Action object. */ uint32_t port_id; /**< Port ID value. */ + uint32_t idx; /**< Indexed pool memory index. */ }; /* Push VLAN action resource structure */ @@ -1433,4 +1431,11 @@ struct mlx5_cache_entry *flow_dv_matcher_create_cb(struct mlx5_cache_list *list, void flow_dv_matcher_remove_cb(struct mlx5_cache_list *list, struct mlx5_cache_entry *entry); +int flow_dv_port_id_match_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +struct mlx5_cache_entry *flow_dv_port_id_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +void flow_dv_port_id_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); + #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 646e2b0..c3c3405 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -2961,6 +2961,52 @@ struct mlx5_hlist_entry * return 0; } +int +flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; + struct mlx5_flow_dv_port_id_action_resource *res = + container_of(entry, typeof(*res), entry); + + return ref->port_id != res->port_id; +} + +struct mlx5_cache_entry * +flow_dv_port_id_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data; + struct mlx5_flow_dv_port_id_action_resource *cache; + uint32_t idx; + int ret; + + /* Register new port id action resource. */ + cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate port_id action cache memory"); + return NULL; + } + *cache = *ref; + ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain, + ref->port_id, + &cache->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create action"); + return NULL; + } + return &cache->entry; +} + /** * Find existing table port ID resource or create and register a new one. 
* @@ -2984,51 +3030,19 @@ struct mlx5_hlist_entry * struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_port_id_action_resource *cache_resource; - uint32_t idx = 0; - int ret; + struct mlx5_cache_entry *entry; + struct mlx5_flow_dv_port_id_action_resource *cache; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list, - idx, cache_resource, next) { - if (resource->port_id == cache_resource->port_id) { - DRV_LOG(DEBUG, "port id action resource resource %p: " - "refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->rix_port_id_action = idx; - dev_flow->dv.port_id_action = cache_resource; - return 0; - } - } - /* Register new port id action resource. */ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], - &dev_flow->handle->rix_port_id_action); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - ret = mlx5_flow_os_create_flow_action_dest_port - (priv->sh->fdb_domain, resource->port_id, - &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list, - dev_flow->handle->rix_port_id_action, cache_resource, - next); - dev_flow->dv.port_id_action = cache_resource; - DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx); + if (!entry) + return -rte_errno; + cache = container_of(entry, typeof(*cache), entry); + dev_flow->dv.port_id_action = cache; + dev_flow->handle->rix_port_id_action = cache->idx; return 0; } @@ -10514,6 +10528,18 @@ struct mlx5_hlist_entry * return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry); } +void +flow_dv_port_id_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_dv_port_id_action_resource *cache = + container_of(entry, typeof(*cache), entry); + + claim_zero(mlx5_flow_os_destroy_flow_action(cache->action)); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx); +} + /** * Release port ID action resource. 
* @@ -10530,29 +10556,14 @@ struct mlx5_hlist_entry * uint32_t port_id) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_dv_port_id_action_resource *cache_resource; - uint32_t idx = port_id; + struct mlx5_flow_dv_port_id_action_resource *cache; - cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], - idx); - if (!cache_resource) + cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id); + if (!cache) return 0; - MLX5_ASSERT(cache_resource->action); - DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID], - &priv->sh->port_id_action_list, idx, - cache_resource, next); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx); - DRV_LOG(DEBUG, "port id action resource %p: removed", - (void *)cache_resource); - return 0; - } - return 1; + MLX5_ASSERT(cache->action); + return mlx5_cache_unregister(&priv->sh->port_id_action_list, + &cache->entry); } /** From patchwork Tue Oct 27 12:27:21 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82312 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 3DF0CA04B5; Tue, 27 Oct 2020 13:37:24 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E0F4CACA2; Tue, 27 Oct 2020 13:29:26 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id A8BD94C6B for ; Tue, 27 Oct 2020 13:28:27 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:25 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7O024637; Tue, 27 Oct 2020 14:28:23 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:21 +0800 Message-Id: <1603801650-442376-27-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 26/34] net/mlx5: make push VLAN action cache thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion, this patch converts push VLAN action cache list to thread safe cache list. 
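All of these conversions lean on the same mlx5_cache_list semantics: look up an entry under a shared lock via the match callback, take a reference on a hit, and fall back to the create callback under an exclusive lock on a miss. The cache list implementation itself lives in the driver's common utility code and is not part of this patch; the snippet below is only a minimal sketch of that lookup-or-create pattern, with simplified stand-in types (cache_list/cache_entry here are illustrative, not the driver's definitions):

	#include <pthread.h>
	#include <sys/queue.h>
	#include <stdint.h>
	#include <stddef.h>

	struct cache_entry {
		LIST_ENTRY(cache_entry) next;
		uint32_t ref_cnt;
	};

	struct cache_list {
		LIST_HEAD(, cache_entry) head;
		pthread_rwlock_t lock;	/* Initialized by the list init routine. */
		void *ctx;
		/* Comparator: return 0 on match, nonzero otherwise. */
		int (*match_cb)(struct cache_list *, struct cache_entry *, void *);
		struct cache_entry *(*create_cb)(struct cache_list *,
						 struct cache_entry *, void *);
	};

	/* Caller holds the list lock; takes a reference on a hit. */
	static struct cache_entry *
	cache_lookup(struct cache_list *list, void *cb_ctx)
	{
		struct cache_entry *entry;

		LIST_FOREACH(entry, &list->head, next)
			if (!list->match_cb(list, entry, cb_ctx)) {
				__atomic_fetch_add(&entry->ref_cnt, 1,
						   __ATOMIC_RELAXED);
				return entry;
			}
		return NULL;
	}

	static struct cache_entry *
	cache_register(struct cache_list *list, void *cb_ctx)
	{
		struct cache_entry *entry;

		/* Fast path: shared lock, reuse a matching entry. */
		pthread_rwlock_rdlock(&list->lock);
		entry = cache_lookup(list, cb_ctx);
		pthread_rwlock_unlock(&list->lock);
		if (entry)
			return entry;
		/* Slow path: exclusive lock, re-check, then create and insert. */
		pthread_rwlock_wrlock(&list->lock);
		entry = cache_lookup(list, cb_ctx);
		if (!entry) {
			entry = list->create_cb(list, NULL, cb_ctx);
			if (entry) {
				entry->ref_cnt = 1;
				LIST_INSERT_HEAD(&list->head, entry, next);
			}
		}
		pthread_rwlock_unlock(&list->lock);
		return entry;
	}

The match callback acting as a comparator (zero means "reuse this entry") is why the match callbacks in this series, such as flow_dv_port_id_match_cb and flow_dv_push_vlan_match_cb, return the result of an inequality test rather than a boolean match flag.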
Signed-off-by: Xueming Li Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 7 ++ drivers/net/mlx5/mlx5.h | 2 +- drivers/net/mlx5/mlx5_flow.h | 13 +++- drivers/net/mlx5/mlx5_flow_dv.c | 157 +++++++++++++++++++++------------------ 4 files changed, 102 insertions(+), 77 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 3d2bf57..9ab3ba9 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -242,6 +242,12 @@ flow_dv_port_id_create_cb, flow_dv_port_id_match_cb, flow_dv_port_id_remove_cb); + /* Init push vlan action cache list. */ + snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name); + mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh, + flow_dv_push_vlan_create_cb, + flow_dv_push_vlan_match_cb, + flow_dv_push_vlan_remove_cb); /* Create tags hash list table. */ snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0, @@ -438,6 +444,7 @@ sh->tunnel_hub = NULL; } mlx5_cache_list_destroy(&sh->port_id_action_list); + mlx5_cache_list_destroy(&sh->push_vlan_action_list); mlx5_free_table_hash_list(priv); } diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index ed606ee..2aa669b 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -664,7 +664,7 @@ struct mlx5_dev_ctx_shared { struct mlx5_hlist *modify_cmds; struct mlx5_hlist *tag_table; struct mlx5_cache_list port_id_action_list; /* Port ID action cache. */ - uint32_t push_vlan_action_list; /* List of push VLAN actions. */ + struct mlx5_cache_list push_vlan_action_list; /* Push VLAN actions. */ uint32_t sample_action_list; /* List of sample actions. */ uint32_t dest_array_list; /* List of destination array actions. */ struct mlx5_flow_counter_mng cmng; /* Counters management structure. */ diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 1a6521f..e6e6cc4 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -494,12 +494,11 @@ struct mlx5_flow_dv_port_id_action_resource { /* Push VLAN action resource structure */ struct mlx5_flow_dv_push_vlan_action_resource { - ILIST_ENTRY(uint32_t)next; - /* Pointer to next element. */ - rte_atomic32_t refcnt; /**< Reference counter. */ + struct mlx5_cache_entry entry; /* Cache entry. */ void *action; /**< Action object. */ uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */ rte_be32_t vlan_tag; /**< VLAN tag value. */ + uint32_t idx; /**< Indexed pool memory index. */ }; /* Metadata register copy table entry. 
*/ @@ -1438,4 +1437,12 @@ struct mlx5_cache_entry *flow_dv_port_id_create_cb(struct mlx5_cache_list *list, void flow_dv_port_id_remove_cb(struct mlx5_cache_list *list, struct mlx5_cache_entry *entry); +int flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +struct mlx5_cache_entry *flow_dv_push_vlan_create_cb + (struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); + #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index c3c3405..fff5534 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -3046,6 +3046,58 @@ struct mlx5_cache_entry * return 0; } +int +flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; + struct mlx5_flow_dv_push_vlan_action_resource *res = + container_of(entry, typeof(*res), entry); + + return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type; +} + +struct mlx5_cache_entry * +flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data; + struct mlx5_flow_dv_push_vlan_action_resource *cache; + struct mlx5dv_dr_domain *domain; + uint32_t idx; + int ret; + + /* Register new port id action resource. */ + cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx); + if (!cache) { + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate push_vlan action cache memory"); + return NULL; + } + *cache = *ref; + if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + domain = sh->fdb_domain; + else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) + domain = sh->rx_domain; + else + domain = sh->tx_domain; + ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag, + &cache->action); + if (ret) { + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); + rte_flow_error_set(ctx->error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create push vlan action"); + return NULL; + } + return &cache->entry; +} + /** * Find existing push vlan resource or create and register a new one. * @@ -3069,62 +3121,23 @@ struct mlx5_cache_entry * struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; - struct mlx5dv_dr_domain *domain; - uint32_t idx = 0; - int ret; + struct mlx5_flow_dv_push_vlan_action_resource *cache; + struct mlx5_cache_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .error = error, + .data = resource, + }; - /* Lookup a matching resource from cache. 
*/ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN], - sh->push_vlan_action_list, idx, cache_resource, next) { - if (resource->vlan_tag == cache_resource->vlan_tag && - resource->ft_type == cache_resource->ft_type) { - DRV_LOG(DEBUG, "push-VLAN action resource resource %p: " - "refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->handle->dvh.rix_push_vlan = idx; - dev_flow->dv.push_vlan_res = cache_resource; - return 0; - } - } - /* Register new push_vlan action resource. */ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], - &dev_flow->handle->dvh.rix_push_vlan); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate resource memory"); - *cache_resource = *resource; - if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) - domain = sh->fdb_domain; - else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) - domain = sh->rx_domain; - else - domain = sh->tx_domain; - ret = mlx5_flow_os_create_flow_action_push_vlan - (domain, resource->vlan_tag, - &cache_resource->action); - if (ret) { - mlx5_free(cache_resource); - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create action"); - } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN], - &sh->push_vlan_action_list, - dev_flow->handle->dvh.rix_push_vlan, - cache_resource, next); - dev_flow->dv.push_vlan_res = cache_resource; - DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx); + if (!entry) + return -rte_errno; + cache = container_of(entry, typeof(*cache), entry); + + dev_flow->handle->dvh.rix_push_vlan = cache->idx; + dev_flow->dv.push_vlan_res = cache; return 0; } + /** * Get the size of specific rte_flow_item_type hdr size * @@ -10566,6 +10579,18 @@ struct mlx5_hlist_entry * &cache->entry); } +void +flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct mlx5_flow_dv_push_vlan_action_resource *cache = + container_of(entry, typeof(*cache), entry); + + claim_zero(mlx5_flow_os_destroy_flow_action(cache->action)); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx); +} + /** * Release push vlan action resource. 
* @@ -10582,29 +10607,15 @@ struct mlx5_hlist_entry * struct mlx5_flow_handle *handle) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_dv_push_vlan_action_resource *cache; uint32_t idx = handle->dvh.rix_push_vlan; - struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; - cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], - idx); - if (!cache_resource) - return 0; - MLX5_ASSERT(cache_resource->action); - DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--", - (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); - if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_flow_os_destroy_flow_action - (cache_resource->action)); - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], - &priv->sh->push_vlan_action_list, idx, - cache_resource, next); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); - DRV_LOG(DEBUG, "push vlan action resource %p: removed", - (void *)cache_resource); + cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); + if (!cache) return 0; - } - return 1; + MLX5_ASSERT(cache->action); + return mlx5_cache_unregister(&priv->sh->push_vlan_action_list, + &cache->entry); } /** From patchwork Tue Oct 27 12:27:22 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82316 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D86E4A04B5; Tue, 27 Oct 2020 13:38:53 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 55440BE4B; Tue, 27 Oct 2020 13:29:32 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 33A583253 for ; Tue, 27 Oct 2020 13:28:33 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:26 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7P024637; Tue, 27 Oct 2020 14:28:25 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:22 +0800 Message-Id: <1603801650-442376-28-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 27/34] net/mlx5: simplify sample attributes X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Currently, the sample action resource already has ft_type to indicate the action domain attribute, so the extra flow attributes parameter can be dropped. This commit uses the action resource ft_type as the domain attribute instead of the flow attribute.
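The whole change reduces to one mapping on the registration path, condensed here from the hunks in this patch (names as in the diff):

	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;

	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;	/* FDB table: transfer (E-Switch) domain. */
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;		/* NIC Tx table: egress domain. */
	/* MLX5DV_FLOW_TABLE_TYPE_NIC_RX: ingress domain, both flags stay 0. */

The dest-array registration applies the same idea directly, selecting sh->fdb_domain, sh->rx_domain or sh->tx_domain from the resource ft_type rather than from attr->transfer/attr->ingress.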
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow_dv.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index fff5534..0527223 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8602,8 +8602,6 @@ struct mlx5_hlist_entry * * * @param[in, out] dev * Pointer to rte_eth_dev structure. - * @param[in] attr - * Attributes of flow that includes this item. * @param[in] resource * Pointer to sample resource. * @parm[in, out] dev_flow @@ -8618,7 +8616,6 @@ struct mlx5_hlist_entry * */ static int flow_dv_sample_resource_register(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, struct mlx5_flow_dv_sample_resource *resource, struct mlx5_flow *dev_flow, void **sample_dv_actions, @@ -8632,6 +8629,8 @@ struct mlx5_hlist_entry * uint32_t idx = 0; const uint32_t next_ft_step = 1; uint32_t next_ft_id = resource->ft_id + next_ft_step; + uint8_t is_egress = 0; + uint8_t is_transfer = 0; /* Lookup a matching resource from cache. */ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list, @@ -8664,8 +8663,12 @@ struct mlx5_hlist_entry * "cannot allocate resource memory"); *cache_resource = *resource; /* Create normal path table level */ + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + is_transfer = 1; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) + is_egress = 1; tbl = flow_dv_tbl_resource_get(dev, next_ft_id, - attr->egress, attr->transfer, + is_egress, is_transfer, dev_flow->external, NULL, 0, 0, error); if (!tbl) { rte_flow_error_set(error, ENOMEM, @@ -8748,8 +8751,6 @@ struct mlx5_hlist_entry * * * @param[in, out] dev * Pointer to rte_eth_dev structure. - * @param[in] attr - * Attributes of flow that includes this item. * @param[in] resource * Pointer to destination array resource. * @parm[in, out] dev_flow @@ -8762,7 +8763,6 @@ struct mlx5_hlist_entry * */ static int flow_dv_dest_array_resource_register(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, struct mlx5_flow_dv_dest_array_resource *resource, struct mlx5_flow *dev_flow, struct rte_flow_error *error) @@ -8806,9 +8806,9 @@ struct mlx5_hlist_entry * NULL, "cannot allocate resource memory"); *cache_resource = *resource; - if (attr->transfer) + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) domain = sh->fdb_domain; - else if (attr->ingress) + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) domain = sh->rx_domain; else domain = sh->tx_domain; @@ -9097,6 +9097,8 @@ struct mlx5_hlist_entry * res->set_action = action_ctx.set_action; } else if (attr->ingress) { res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; + } else { + res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX; } return 0; } @@ -9108,8 +9110,6 @@ struct mlx5_hlist_entry * * Pointer to rte_eth_dev structure. * @param[in, out] dev_flow * Pointer to the mlx5_flow. - * @param[in] attr - * Pointer to the flow attributes. * @param[in] num_of_dest * The num of destination. 
* @param[in, out] res @@ -9129,7 +9129,6 @@ struct mlx5_hlist_entry * static int flow_dv_create_action_sample(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, - const struct rte_flow_attr *attr, uint32_t num_of_dest, struct mlx5_flow_dv_sample_resource *res, struct mlx5_flow_dv_dest_array_resource *mdest_res, @@ -9189,14 +9188,14 @@ struct mlx5_hlist_entry * memcpy(&mdest_res->sample_act[0], &res->sample_act, sizeof(struct mlx5_flow_sub_actions_list)); mdest_res->num_of_dest = num_of_dest; - if (flow_dv_dest_array_resource_register(dev, attr, mdest_res, + if (flow_dv_dest_array_resource_register(dev, mdest_res, dev_flow, error)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "can't create sample " "action"); } else { - if (flow_dv_sample_resource_register(dev, attr, res, dev_flow, + if (flow_dv_sample_resource_register(dev, res, dev_flow, sample_actions, error)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -9836,7 +9835,7 @@ struct mlx5_hlist_entry * } if (action_flags & MLX5_FLOW_ACTION_SAMPLE) { ret = flow_dv_create_action_sample(dev, - dev_flow, attr, + dev_flow, num_of_dest, &sample_res, &mdest_res, From patchwork Tue Oct 27 12:27:23 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82315 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 78E57A04B5; Tue, 27 Oct 2020 13:38:33 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 0FB0EBE3F; Tue, 27 Oct 2020 13:29:31 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 33C17354D for ; Tue, 27 Oct 2020 13:28:33 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:28 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7Q024637; Tue, 27 Oct 2020 14:28:27 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:23 +0800 Message-Id: <1603801650-442376-29-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 28/34] net/mlx5: fix sample register error flow X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Currently, a sample flow needs to prepare and register its sub-actions before the sample action itself is created. If an identical sample action already exists, the sub-actions registered by the second flow must be released, or they will leak, since the existing sample action only releases its own sub-actions when it is released itself. When an identical sample action already exists, call the sub-action release function for the later flow to release the redundantly prepared sub-actions.
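The fix itself is the cache-hit branch of the register functions, condensed here from the hunks below (the refcount handling follows the atomics introduced earlier in the series):

	/* Same sample action found in the cache: reuse it... */
	__atomic_fetch_add(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	dev_flow->handle->dvh.rix_sample = idx;
	dev_flow->dv.sample_res = cache_resource;
	/*
	 * ...and drop the references taken while preparing the now redundant
	 * sub-actions (hrxq, encap/decap, port ID action, tag, counter),
	 * which would otherwise leak.
	 */
	flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
	return 0;

The new flow_dv_sample_sub_actions_release() helper is also reused on the error path and in the sample and dest-array release functions, replacing the hand-rolled per-field cleanup there.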
Fixes: 0756228b2704 ("net/mlx5: update translate function for sample action") Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow_dv.c | 117 ++++++++++++++++++++-------------------- 1 file changed, 60 insertions(+), 57 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 0527223..2d283ab 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8598,6 +8598,42 @@ struct mlx5_hlist_entry * } /** + * Release sample sub action resource. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in] act_res + * Pointer to sample sub action resource. + */ +static void +flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev, + struct mlx5_flow_sub_actions_idx *act_res) +{ + if (act_res->rix_hrxq) { + mlx5_hrxq_release(dev, act_res->rix_hrxq); + act_res->rix_hrxq = 0; + } + if (act_res->rix_encap_decap) { + flow_dv_encap_decap_resource_release(dev, + act_res->rix_encap_decap); + act_res->rix_encap_decap = 0; + } + if (act_res->rix_port_id_action) { + flow_dv_port_id_action_resource_release(dev, + act_res->rix_port_id_action); + act_res->rix_port_id_action = 0; + } + if (act_res->rix_tag) { + flow_dv_tag_release(dev, act_res->rix_tag); + act_res->rix_tag = 0; + } + if (act_res->cnt) { + flow_dv_counter_release(dev, act_res->cnt); + act_res->cnt = 0; + } +} + +/** * Find existing sample resource or create and register a new one. * * @param[in, out] dev @@ -8650,6 +8686,12 @@ struct mlx5_hlist_entry * __ATOMIC_RELAXED); dev_flow->handle->dvh.rix_sample = idx; dev_flow->dv.sample_res = cache_resource; + /* + * Existing smaple action should release the prepared + * sub-actions reference counter. + */ + flow_dv_sample_sub_actions_release(dev, + &resource->sample_idx); return 0; } } @@ -8718,25 +8760,13 @@ struct mlx5_hlist_entry * __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); return 0; error: - if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { - if (cache_resource->default_miss) - claim_zero(mlx5_glue->destroy_flow_action + if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB && + cache_resource->default_miss) + claim_zero(mlx5_glue->destroy_flow_action (cache_resource->default_miss)); - } else { - if (cache_resource->sample_idx.rix_hrxq && - !mlx5_hrxq_release(dev, - cache_resource->sample_idx.rix_hrxq)) - cache_resource->sample_idx.rix_hrxq = 0; - if (cache_resource->sample_idx.rix_tag && - !flow_dv_tag_release(dev, - cache_resource->sample_idx.rix_tag)) - cache_resource->sample_idx.rix_tag = 0; - if (cache_resource->sample_idx.cnt) { - flow_dv_counter_release(dev, - cache_resource->sample_idx.cnt); - cache_resource->sample_idx.cnt = 0; - } - } + else + flow_dv_sample_sub_actions_release(dev, + &cache_resource->sample_idx); if (cache_resource->normal_path_tbl) flow_dv_tbl_resource_release(MLX5_SH(dev), cache_resource->normal_path_tbl); @@ -8794,6 +8824,13 @@ struct mlx5_hlist_entry * __ATOMIC_RELAXED); dev_flow->handle->dvh.rix_dest_array = idx; dev_flow->dv.dest_array_res = cache_resource; + /* + * Existing smaple action should release the prepared + * sub-actions reference counter. 
+ */ + for (idx = 0; idx < resource->num_of_dest; idx++) + flow_dv_sample_sub_actions_release(dev, + &resource->sample_idx[idx]); return 0; } } @@ -10689,21 +10726,8 @@ struct mlx5_hlist_entry * if (cache_resource->normal_path_tbl) flow_dv_tbl_resource_release(MLX5_SH(dev), cache_resource->normal_path_tbl); - } - if (cache_resource->sample_idx.rix_hrxq && - !mlx5_hrxq_release(dev, - cache_resource->sample_idx.rix_hrxq)) - cache_resource->sample_idx.rix_hrxq = 0; - if (cache_resource->sample_idx.rix_tag && - !flow_dv_tag_release(dev, - cache_resource->sample_idx.rix_tag)) - cache_resource->sample_idx.rix_tag = 0; - if (cache_resource->sample_idx.cnt) { - flow_dv_counter_release(dev, - cache_resource->sample_idx.cnt); - cache_resource->sample_idx.cnt = 0; - } - if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) { + flow_dv_sample_sub_actions_release(dev, + &cache_resource->sample_idx); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE], &priv->sh->sample_action_list, idx, cache_resource, next); @@ -10732,7 +10756,6 @@ struct mlx5_hlist_entry * { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_dv_dest_array_resource *cache_resource; - struct mlx5_flow_sub_actions_idx *mdest_act_res; uint32_t idx = handle->dvh.rix_dest_array; uint32_t i = 0; @@ -10749,29 +10772,9 @@ struct mlx5_hlist_entry * if (cache_resource->action) claim_zero(mlx5_glue->destroy_flow_action (cache_resource->action)); - for (; i < cache_resource->num_of_dest; i++) { - mdest_act_res = &cache_resource->sample_idx[i]; - if (mdest_act_res->rix_hrxq) { - mlx5_hrxq_release(dev, - mdest_act_res->rix_hrxq); - mdest_act_res->rix_hrxq = 0; - } - if (mdest_act_res->rix_encap_decap) { - flow_dv_encap_decap_resource_release(dev, - mdest_act_res->rix_encap_decap); - mdest_act_res->rix_encap_decap = 0; - } - if (mdest_act_res->rix_port_id_action) { - flow_dv_port_id_action_resource_release(dev, - mdest_act_res->rix_port_id_action); - mdest_act_res->rix_port_id_action = 0; - } - if (mdest_act_res->rix_tag) { - flow_dv_tag_release(dev, - mdest_act_res->rix_tag); - mdest_act_res->rix_tag = 0; - } - } + for (; i < cache_resource->num_of_dest; i++) + flow_dv_sample_sub_actions_release(dev, + &cache_resource->sample_idx[i]); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], &priv->sh->dest_array_list, idx, cache_resource, next); From patchwork Tue Oct 27 12:27:24 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82314 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id A4B6EA04B5; Tue, 27 Oct 2020 13:38:11 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id BC72ABC64; Tue, 27 Oct 2020 13:29:29 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id CA55C2BFA for ; Tue, 27 Oct 2020 13:28:31 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:30 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7R024637; Tue, 27 Oct 2020 14:28:29 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:24 +0800 Message-Id: 
<1603801650-442376-30-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 29/34] net/mlx5: make sample and mirror action thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit uses the cache list to make the sample and mirror actions thread safe. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 14 ++ drivers/net/mlx5/mlx5.h | 5 +- drivers/net/mlx5/mlx5_flow.h | 28 ++- drivers/net/mlx5/mlx5_flow_dv.c | 396 ++++++++++++++++++++++----------------- 4 files changed, 260 insertions(+), 183 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 9ab3ba9..5856981 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -248,6 +248,20 @@ flow_dv_push_vlan_create_cb, flow_dv_push_vlan_match_cb, flow_dv_push_vlan_remove_cb); + /* Init sample action cache list. */ + snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name); + mlx5_cache_list_init(&sh->sample_action_list, s, 0, + &rte_eth_devices[priv->dev_data->port_id], + flow_dv_sample_create_cb, + flow_dv_sample_match_cb, + flow_dv_sample_remove_cb); + /* Init dest array action cache list. */ + snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name); + mlx5_cache_list_init(&sh->dest_array_list, s, 0, + &rte_eth_devices[priv->dev_data->port_id], + flow_dv_dest_array_create_cb, + flow_dv_dest_array_match_cb, + flow_dv_dest_array_remove_cb); /* Create tags hash list table. */ snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0, diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 2aa669b..a830945 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -665,8 +665,9 @@ struct mlx5_dev_ctx_shared { struct mlx5_hlist *tag_table; struct mlx5_cache_list port_id_action_list; /* Port ID action cache. */ struct mlx5_cache_list push_vlan_action_list; /* Push VLAN actions. */ - uint32_t sample_action_list; /* List of sample actions. */ - uint32_t dest_array_list; /* List of destination array actions. */ + struct mlx5_cache_list sample_action_list; /* List of sample actions. */ + struct mlx5_cache_list dest_array_list; + /* List of destination array actions. */ struct mlx5_flow_counter_mng cmng; /* Counters management structure. */ void *default_miss_action; /* Default miss action. */ struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX]; diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index e6e6cc4..2de8988 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -563,9 +563,12 @@ struct mlx5_flow_sub_actions_idx { /* Sample action resource structure. */ struct mlx5_flow_dv_sample_resource { - ILIST_ENTRY(uint32_t)next; /**< Pointer to next element. */ - uint32_t refcnt; /**< Reference counter. */ - void *verbs_action; /**< Verbs sample action object. */ + struct mlx5_cache_entry entry; /**< Cache entry. */ + union { + void *verbs_action; /**< Verbs sample action object. */ + void **sub_actions; /**< Sample sub-action array. */ + }; + uint32_t idx; /** Sample object index.
*/ uint8_t ft_type; /** Flow Table Type */ uint32_t ft_id; /** Flow Table Level */ uint32_t ratio; /** Sample Ratio */ @@ -582,8 +585,8 @@ struct mlx5_flow_dv_sample_resource { /* Destination array action resource structure. */ struct mlx5_flow_dv_dest_array_resource { - ILIST_ENTRY(uint32_t)next; /**< Pointer to next element. */ - uint32_t refcnt; /**< Reference counter. */ + struct mlx5_cache_entry entry; /**< Cache entry. */ + uint32_t idx; /** Destination array action object index. */ uint8_t ft_type; /** Flow Table Type */ uint8_t num_of_dest; /**< Number of destination actions. */ void *action; /**< Pointer to the rdma core action. */ @@ -1445,4 +1448,19 @@ struct mlx5_cache_entry *flow_dv_push_vlan_create_cb void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list, struct mlx5_cache_entry *entry); +int flow_dv_sample_match_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +struct mlx5_cache_entry *flow_dv_sample_create_cb + (struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +void flow_dv_sample_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); + +int flow_dv_dest_array_match_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +struct mlx5_cache_entry *flow_dv_dest_array_create_cb + (struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry, void *cb_ctx); +void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry); #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 2d283ab..9f7ac2e 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8633,30 +8633,43 @@ struct mlx5_hlist_entry * } } -/** - * Find existing sample resource or create and register a new one. - * - * @param[in, out] dev - * Pointer to rte_eth_dev structure. - * @param[in] resource - * Pointer to sample resource. - * @param[in, out] dev_flow - * Pointer to the dev_flow. - * @param[in, out] sample_dv_actions - * Pointer to sample actions list. - * @param[out] error - * pointer to error structure. - * - * @return - * 0 on success otherwise -errno and errno is set. - */ -static int -flow_dv_sample_resource_register(struct rte_eth_dev *dev, - struct mlx5_flow_dv_sample_resource *resource, - struct mlx5_flow *dev_flow, - void **sample_dv_actions, - struct rte_flow_error *error) +int +flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) { + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_sample_resource *resource = ctx->data; + struct mlx5_flow_dv_sample_resource *cache_resource = + container_of(entry, typeof(*cache_resource), entry); + + if (resource->ratio == cache_resource->ratio && + resource->ft_type == cache_resource->ft_type && + resource->ft_id == cache_resource->ft_id && + resource->set_action == cache_resource->set_action && + !memcmp((void *)&resource->sample_act, + (void *)&cache_resource->sample_act, + sizeof(struct mlx5_flow_sub_actions_list))) { + /* + * Existing sample action should release the prepared + * sub-actions reference counter.
+ */ + flow_dv_sample_sub_actions_release(dev, + &resource->sample_idx); + return 0; + } + return 1; +} + +struct mlx5_cache_entry * +flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_sample_resource *resource = ctx->data; + void **sample_dv_actions = resource->sub_actions; struct mlx5_flow_dv_sample_resource *cache_resource; struct mlx5dv_dr_flow_sampler_attr sampler_attr; struct mlx5_priv *priv = dev->data->dev_private; @@ -8667,42 +8680,17 @@ struct mlx5_hlist_entry * uint32_t next_ft_id = resource->ft_id + next_ft_step; uint8_t is_egress = 0; uint8_t is_transfer = 0; + struct rte_flow_error *error = ctx->error; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list, - idx, cache_resource, next) { - if (resource->ratio == cache_resource->ratio && - resource->ft_type == cache_resource->ft_type && - resource->ft_id == cache_resource->ft_id && - resource->set_action == cache_resource->set_action && - !memcmp((void *)&resource->sample_act, - (void *)&cache_resource->sample_act, - sizeof(struct mlx5_flow_sub_actions_list))) { - DRV_LOG(DEBUG, "sample resource %p: refcnt %d++", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, - __ATOMIC_RELAXED)); - __atomic_fetch_add(&cache_resource->refcnt, 1, - __ATOMIC_RELAXED); - dev_flow->handle->dvh.rix_sample = idx; - dev_flow->dv.sample_res = cache_resource; - /* - * Existing sample action should release the prepared - * sub-actions reference counter. - */ - flow_dv_sample_sub_actions_release(dev, - &resource->sample_idx); - return 0; - } - } /* Register new sample resource.
*/ - cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], - &dev_flow->handle->dvh.rix_sample); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, + cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx); + if (!cache_resource) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); + return NULL; + } *cache_resource = *resource; /* Create normal path table level */ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) @@ -8711,7 +8699,7 @@ struct mlx5_hlist_entry * is_egress = 1; tbl = flow_dv_tbl_resource_get(dev, next_ft_id, is_egress, is_transfer, - dev_flow->external, NULL, 0, 0, error); + true, NULL, 0, 0, error); if (!tbl) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -8750,15 +8738,8 @@ struct mlx5_hlist_entry * NULL, "cannot create sample action"); goto error; } - __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list, - dev_flow->handle->dvh.rix_sample, cache_resource, - next); - dev_flow->dv.sample_res = cache_resource; - DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); - return 0; + cache_resource->idx = idx; + return &cache_resource->entry; error: if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB && cache_resource->default_miss) claim_zero(mlx5_glue->destroy_flow_action (cache_resource->default_miss)); @@ -8770,19 +8751,18 @@ struct mlx5_hlist_entry * if (cache_resource->normal_path_tbl) flow_dv_tbl_resource_release(MLX5_SH(dev), cache_resource->normal_path_tbl); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], - dev_flow->handle->dvh.rix_sample); - dev_flow->handle->dvh.rix_sample = 0; - return -rte_errno; + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx); + return NULL; + } /** - * Find existing destination array resource or create and register a new one. + * Find existing sample resource or create and register a new one. * * @param[in, out] dev * Pointer to rte_eth_dev structure. * @param[in] resource - * Pointer to destination array resource. + * Pointer to sample resource. * @param[in, out] dev_flow * Pointer to the dev_flow. * @param[out] error * pointer to error structure. * * @return * 0 on success otherwise -errno and errno is set.
*/ static int -flow_dv_dest_array_resource_register(struct rte_eth_dev *dev, - struct mlx5_flow_dv_dest_array_resource *resource, +flow_dv_sample_resource_register(struct rte_eth_dev *dev, + struct mlx5_flow_dv_sample_resource *resource, struct mlx5_flow *dev_flow, struct rte_flow_error *error) { + struct mlx5_flow_dv_sample_resource *cache_resource; + struct mlx5_cache_entry *entry; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + .data = resource, + }; + + entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx); + if (!entry) + return -rte_errno; + cache_resource = container_of(entry, typeof(*cache_resource), entry); + dev_flow->handle->dvh.rix_sample = cache_resource->idx; + dev_flow->dv.sample_res = cache_resource; + return 0; +} + +int +flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry, void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_dv_dest_array_resource *resource = ctx->data; + struct rte_eth_dev *dev = ctx->dev; + struct mlx5_flow_dv_dest_array_resource *cache_resource = + container_of(entry, typeof(*cache_resource), entry); + uint32_t idx = 0; + + if (resource->num_of_dest == cache_resource->num_of_dest && + resource->ft_type == cache_resource->ft_type && + !memcmp((void *)cache_resource->sample_act, + (void *)resource->sample_act, + (resource->num_of_dest * + sizeof(struct mlx5_flow_sub_actions_list)))) { + /* + * Existing sample action should release the prepared + * sub-actions reference counter. + */ + for (idx = 0; idx < resource->num_of_dest; idx++) + flow_dv_sample_sub_actions_release(dev, + &resource->sample_idx[idx]); + return 0; + } + return 1; +} + +struct mlx5_cache_entry * +flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct rte_eth_dev *dev = ctx->dev; struct mlx5_flow_dv_dest_array_resource *cache_resource; + struct mlx5_flow_dv_dest_array_resource *resource = ctx->data; struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 }; struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM]; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_sub_actions_list *sample_act; struct mlx5dv_dr_domain *domain; - uint32_t idx = 0; + uint32_t idx = 0, res_idx = 0; + struct rte_flow_error *error = ctx->error; - /* Lookup a matching resource from cache. */ - ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY], - sh->dest_array_list, - idx, cache_resource, next) { - if (resource->num_of_dest == cache_resource->num_of_dest && - resource->ft_type == cache_resource->ft_type && - !memcmp((void *)cache_resource->sample_act, - (void *)resource->sample_act, - (resource->num_of_dest * - sizeof(struct mlx5_flow_sub_actions_list)))) { - DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, - __ATOMIC_RELAXED)); - __atomic_fetch_add(&cache_resource->refcnt, 1, - __ATOMIC_RELAXED); - dev_flow->handle->dvh.rix_dest_array = idx; - dev_flow->dv.dest_array_res = cache_resource; - /* - * Existing sample action should release the prepared - * sub-actions reference counter.
- */ - for (idx = 0; idx < resource->num_of_dest; idx++) - flow_dv_sample_sub_actions_release(dev, - &resource->sample_idx[idx]); - return 0; - } - } /* Register new destination array resource. */ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY], - &dev_flow->handle->dvh.rix_dest_array); - if (!cache_resource) - return rte_flow_error_set(error, ENOMEM, + &res_idx); + if (!cache_resource) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); + return NULL; + } *cache_resource = *resource; if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) domain = sh->fdb_domain; @@ -8890,18 +8900,10 @@ struct mlx5_hlist_entry * "cannot create destination array action"); goto error; } - __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); - ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY], - &sh->dest_array_list, - dev_flow->handle->dvh.rix_dest_array, cache_resource, - next); - dev_flow->dv.dest_array_res = cache_resource; - DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); + cache_resource->idx = res_idx; for (idx = 0; idx < resource->num_of_dest; idx++) mlx5_free(dest_attr[idx]); - return 0; + return &cache_resource->entry; error: for (idx = 0; idx < resource->num_of_dest; idx++) { struct mlx5_flow_sub_actions_idx *act_res = @@ -8922,10 +8924,47 @@ struct mlx5_hlist_entry * mlx5_free(dest_attr[idx]); } - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], - dev_flow->handle->dvh.rix_dest_array); - dev_flow->handle->dvh.rix_dest_array = 0; - return -rte_errno; + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx); + return NULL; +} + +/** + * Find existing destination array resource or create and register a new one. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in] resource + * Pointer to destination array resource. + * @param[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set.
+ */ +static int +flow_dv_dest_array_resource_register(struct rte_eth_dev *dev, + struct mlx5_flow_dv_dest_array_resource *resource, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_flow_dv_dest_array_resource *cache_resource; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_cache_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .dev = dev, + .error = error, + .data = resource, + }; + + entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx); + if (!entry) + return -rte_errno; + cache_resource = container_of(entry, typeof(*cache_resource), entry); + dev_flow->handle->dvh.rix_dest_array = cache_resource->idx; + dev_flow->dv.dest_array_res = cache_resource; + return 0; } /** @@ -9232,8 +9271,8 @@ struct mlx5_hlist_entry * NULL, "can't create sample " "action"); } else { - if (flow_dv_sample_resource_register(dev, res, dev_flow, - sample_actions, error)) + res->sub_actions = sample_actions; + if (flow_dv_sample_resource_register(dev, res, dev_flow, error)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -10686,6 +10725,34 @@ struct mlx5_hlist_entry * handle->rix_fate = 0; } +void +flow_dv_sample_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry) +{ + struct rte_eth_dev *dev = list->ctx; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_dv_sample_resource *cache_resource = + container_of(entry, typeof(*cache_resource), entry); + + if (cache_resource->verbs_action) + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->verbs_action)); + if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { + if (cache_resource->default_miss) + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->default_miss)); + } + if (cache_resource->normal_path_tbl) + flow_dv_tbl_resource_release(MLX5_SH(dev), + cache_resource->normal_path_tbl); + flow_dv_sample_sub_actions_release(dev, + &cache_resource->sample_idx); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], + cache_resource->idx); + DRV_LOG(DEBUG, "sample resource %p: removed", + (void *)cache_resource); +} + /** * Release a sample resource.
* @@ -10702,41 +10769,38 @@ struct mlx5_hlist_entry * struct mlx5_flow_handle *handle) { struct mlx5_priv *priv = dev->data->dev_private; - uint32_t idx = handle->dvh.rix_sample; struct mlx5_flow_dv_sample_resource *cache_resource; cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE], - idx); + handle->dvh.rix_sample); if (!cache_resource) return 0; MLX5_ASSERT(cache_resource->verbs_action); - DRV_LOG(DEBUG, "sample resource %p: refcnt %d--", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); - if (__atomic_sub_fetch(&cache_resource->refcnt, 1, - __ATOMIC_RELAXED) == 0) { - if (cache_resource->verbs_action) - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->verbs_action)); - if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) { - if (cache_resource->default_miss) - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->default_miss)); - } - if (cache_resource->normal_path_tbl) - flow_dv_tbl_resource_release(MLX5_SH(dev), - cache_resource->normal_path_tbl); + return mlx5_cache_unregister(&priv->sh->sample_action_list, + &cache_resource->entry); +} + +void +flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry) +{ + struct rte_eth_dev *dev = list->ctx; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_dv_dest_array_resource *cache_resource = + container_of(entry, typeof(*cache_resource), entry); + uint32_t i = 0; + + MLX5_ASSERT(cache_resource->action); + if (cache_resource->action) + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->action)); + for (; i < cache_resource->num_of_dest; i++) flow_dv_sample_sub_actions_release(dev, - &cache_resource->sample_idx); - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE], - &priv->sh->sample_action_list, idx, - cache_resource, next); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx); - DRV_LOG(DEBUG, "sample resource %p: removed", - (void *)cache_resource); - return 0; - } - return 1; + &cache_resource->sample_idx[i]); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], + cache_resource->idx); + DRV_LOG(DEBUG, "destination array resource %p: removed", + (void *)cache_resource); } /** @@ -10752,38 +10816,18 @@ struct mlx5_hlist_entry * */ static int flow_dv_dest_array_resource_release(struct rte_eth_dev *dev, - struct mlx5_flow_handle *handle) + struct mlx5_flow_handle *handle) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_dv_dest_array_resource *cache_resource; - uint32_t idx = handle->dvh.rix_dest_array; - uint32_t i = 0; + struct mlx5_flow_dv_dest_array_resource *cache; - cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], - idx); - if (!cache_resource) - return 0; - MLX5_ASSERT(cache_resource->action); - DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--", - (void *)cache_resource, - __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); - if (__atomic_sub_fetch(&cache_resource->refcnt, 1, - __ATOMIC_RELAXED) == 0) { - if (cache_resource->action) - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->action)); - for (; i < cache_resource->num_of_dest; i++) - flow_dv_sample_sub_actions_release(dev, - &cache_resource->sample_idx[i]); - ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], - &priv->sh->dest_array_list, idx, - cache_resource, next); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx); - DRV_LOG(DEBUG, "destination array resource %p: removed", - (void *)cache_resource); + cache = 
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], + handle->dvh.rix_dest_array); + if (!cache) return 0; - } - return 1; + MLX5_ASSERT(cache->action); + return mlx5_cache_unregister(&priv->sh->dest_array_list, + &cache->entry); } /** From patchwork Tue Oct 27 12:27:25 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82319 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 953F5A04B5; Tue, 27 Oct 2020 13:39:58 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 6C5F9BE6B; Tue, 27 Oct 2020 13:29:36 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id DE751354D for ; Tue, 27 Oct 2020 13:28:36 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:32 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7S024637; Tue, 27 Oct 2020 14:28:30 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:25 +0800 Message-Id: <1603801650-442376-31-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 30/34] net/mlx5: make tunnel offloading table thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" To support multi-thread flow insertion, this patch updates the tunnel offloading hash table to use the thread-safe hash list.
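The change concentrates the table-ID bookkeeping in the create and remove callbacks that the thread-safe hash list invokes under its own lock, so a lookup miss and the allocation it triggers can no longer race each other. Below is a minimal C sketch of such a callback pair; the pool_alloc()/pool_free() helpers, the MAX_TABLES bound, and the tbl_entry type are illustrative stand-ins for the mlx5 indexed pool, MLX5_MAX_TABLES, and tunnel_tbl_entry, not the driver's actual API.

#include <stdint.h>
#include <stdlib.h>

#define MAX_TABLES 1024 /* illustrative bound, like MLX5_MAX_TABLES */

struct tbl_entry {
        uint64_t key;        /* packed tunnel id + group, as in tunnel_tbl_key */
        uint32_t flow_table; /* table ID owned by this entry */
};

/* Trivial stand-in ID pool; the driver uses an indexed pool instead. */
static uint32_t next_id = 1;
static int pool_alloc(uint32_t *id) { *id = next_id++; return 0; }
static void pool_free(uint32_t id) { (void)id; }

/* Create callback: runs on a lookup miss, under the hash list lock. */
static struct tbl_entry *
grp2tbl_create(uint64_t key)
{
        struct tbl_entry *e = calloc(1, sizeof(*e));

        if (e == NULL)
                return NULL;
        e->key = key;
        if (pool_alloc(&e->flow_table) != 0) { /* ID space exhausted */
                free(e);
                return NULL;
        }
        if (e->flow_table >= MAX_TABLES) { /* beyond the usable range */
                pool_free(e->flow_table);
                free(e);
                return NULL;
        }
        return e;
}

/* Remove callback: runs when the last reference drops; undoes create. */
static void
grp2tbl_remove(struct tbl_entry *e)
{
        pool_free(e->flow_table);
        free(e);
}

Because both callbacks run inside the list's critical section, a second thread asking for the same group either finds the finished entry or performs the creation serially, and the caller side collapses to the single mlx5_hlist_register() call visible in the hunk above.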
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow.c | 92 ++++++++++++++++++++++++++--------------- drivers/net/mlx5/mlx5_flow_dv.c | 11 +---- 2 files changed, 61 insertions(+), 42 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index f7f4faa..5483f75 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -7390,13 +7390,56 @@ struct mlx5_meter_domains_infos * container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; } +static void +mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, + struct mlx5_hlist_entry *entry) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); + + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tunnel_flow_tbl_to_id(tte->flow_table)); + mlx5_free(tte); +} + +static struct mlx5_hlist_entry * +mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, + uint64_t key __rte_unused, + void *ctx __rte_unused) +{ + struct mlx5_dev_ctx_shared *sh = list->ctx; + struct tunnel_tbl_entry *tte; + + tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, + sizeof(*tte), 0, + SOCKET_ID_ANY); + if (!tte) + goto err; + mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + &tte->flow_table); + if (tte->flow_table >= MLX5_MAX_TABLES) { + DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", + tte->flow_table); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + tte->flow_table); + goto err; + } else if (!tte->flow_table) { + goto err; + } + tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); + return &tte->hash; +err: + if (tte) + mlx5_free(tte); + return NULL; +} + static uint32_t tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, uint32_t group, uint32_t *table, struct rte_flow_error *error) { - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hlist_entry *he; struct tunnel_tbl_entry *tte; union tunnel_tbl_key key = { @@ -7407,40 +7450,17 @@ struct mlx5_meter_domains_infos * struct mlx5_hlist *group_hash; group_hash = tunnel ? 
tunnel->groups : thub->groups; - he = mlx5_hlist_lookup(group_hash, key.val, NULL); - if (!he) { - tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, - sizeof(*tte), 0, - SOCKET_ID_ANY); - if (!tte) - goto err; - tte->hash.key = key.val; - mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - &tte->flow_table); - if (tte->flow_table >= MLX5_MAX_TABLES) { - DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", - tte->flow_table); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - tte->flow_table); - goto err; - } else if (!tte->flow_table) { - goto err; - } - tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); - mlx5_hlist_insert(group_hash, &tte->hash); - } else { - tte = container_of(he, typeof(*tte), hash); - } + he = mlx5_hlist_register(group_hash, key.val, NULL); + if (!he) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "tunnel group index not supported"); + tte = container_of(he, typeof(*tte), hash); *table = tte->flow_table; DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x", dev->data->port_id, key.tunnel_id, group, *table); return 0; - -err: - if (tte) - mlx5_free(tte); - return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, "tunnel group index not supported"); } static int @@ -7963,13 +7983,16 @@ struct mlx5_meter_domains_infos * return NULL; } tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0, - NULL, NULL, NULL); + mlx5_flow_tunnel_grp2tbl_create_cb, + NULL, + mlx5_flow_tunnel_grp2tbl_remove_cb); if (!tunnel->groups) { mlx5_ipool_free(priv->sh->ipool [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); mlx5_free(tunnel); return NULL; } + tunnel->groups->ctx = priv->sh; /* initiate new PMD tunnel */ memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel)); tunnel->tunnel_id = id; @@ -8043,11 +8066,14 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) return -ENOMEM; LIST_INIT(&thub->tunnels); thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0, - 0, NULL, NULL, NULL); + 0, mlx5_flow_tunnel_grp2tbl_create_cb, + NULL, + mlx5_flow_tunnel_grp2tbl_remove_cb); if (!thub->groups) { err = -rte_errno; goto err; } + thub->groups->ctx = sh; sh->tunnel_hub = thub; return 0; diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 9f7ac2e..184a675 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8099,15 +8099,8 @@ struct mlx5_flow_tbl_resource * tbl_data->tunnel->groups : thub->groups; he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL); - if (he) { - struct tunnel_tbl_entry *tte; - tte = container_of(he, typeof(*tte), hash); - MLX5_ASSERT(tte->flow_table == table_id); - mlx5_hlist_remove(tunnel_grp_hash, he); - mlx5_free(tte); - } - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], - tunnel_flow_tbl_to_id(table_id)); + if (he) + mlx5_hlist_unregister(tunnel_grp_hash, he); DRV_LOG(DEBUG, "Table_id %#x tunnel %u group %u released.", table_id, From patchwork Tue Oct 27 12:27:26 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82317 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 49587A04B5; Tue, 27 Oct 2020 13:39:17 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E0839BE55; Tue, 
27 Oct 2020 13:29:33 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id CD4742BE2 for ; Tue, 27 Oct 2020 13:28:36 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:33 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7T024637; Tue, 27 Oct 2020 14:28:32 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:26 +0800 Message-Id: <1603801650-442376-32-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 31/34] net/mlx5: remove unused hash list operations X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" In previous commits the hash list objects have been converted to the new thread-safe hash list. The legacy hash list code can be removed now. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_utils.c | 38 ------------------------- drivers/net/mlx5/mlx5_utils.h | 66 ------------------------------------------- 2 files changed, 104 deletions(-) diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c index df15f63..13590dd 100644 --- a/drivers/net/mlx5/mlx5_utils.c +++ b/drivers/net/mlx5/mlx5_utils.c @@ -170,44 +170,6 @@ struct mlx5_hlist_entry* return entry; } -struct mlx5_hlist_entry * -mlx5_hlist_lookup_ex(struct mlx5_hlist *h, uint64_t key, - mlx5_hlist_match_callback_fn cb, void *ctx) -{ - uint32_t idx; - struct mlx5_hlist_head *first; - struct mlx5_hlist_entry *node; - - MLX5_ASSERT(h && cb && ctx); - idx = rte_hash_crc_8byte(key, 0) & h->mask; - first = &h->heads[idx]; - LIST_FOREACH(node, first, next) { - if (!cb(node, ctx)) - return node; - } - return NULL; -} - -int -mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry, - mlx5_hlist_match_callback_fn cb, void *ctx) -{ - uint32_t idx; - struct mlx5_hlist_head *first; - struct mlx5_hlist_entry *node; - - MLX5_ASSERT(h && entry && cb && ctx); - idx = rte_hash_crc_8byte(entry->key, 0) & h->mask; - first = &h->heads[idx]; - /* No need to reuse the lookup function. */ - LIST_FOREACH(node, first, next) { - if (!cb(node, ctx)) - return -EEXIST; - } - LIST_INSERT_HEAD(first, entry, next); - return 0; -} - int mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry) { diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h index b00789c..be6e5f6 100644 --- a/drivers/net/mlx5/mlx5_utils.h +++ b/drivers/net/mlx5/mlx5_utils.h @@ -21,12 +21,6 @@ #include "mlx5_defs.h" -#define mlx5_hlist_remove(h, e) \ - mlx5_hlist_unregister(h, e) - -#define mlx5_hlist_insert(h, e) \ - mlx5_hlist_register(h, 0, e) - /* Convert a bit number to the corresponding 64-bit mask */ #define MLX5_BITSHIFT(v) (UINT64_C(1) << (v)) @@ -287,23 +281,6 @@ struct mlx5_hlist_entry { /** Structure for hash head. */ LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry); -/** Type of function that is used to handle the data before freeing.
*/ -typedef void (*mlx5_hlist_destroy_callback_fn)(void *p, void *ctx); - -/** - * Type of function for user defined matching. - * - * @param entry - * The entry in the list. - * @param ctx - * The pointer to new entry context. - * - * @return - * 0 if matching, -1 otherwise. - */ -typedef int (*mlx5_hlist_match_callback_fn)(struct mlx5_hlist_entry *entry, - void *ctx); - /** * Type of callback function for entry removal. * @@ -429,49 +406,6 @@ struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx); /** - * Extended routine to search an entry matching the context with - * user defined match function. - * - * @param h - * Pointer to the hash list table. - * @param key - * Key for the searching entry. - * @param cb - * Callback function to match the node with context. - * @param ctx - * Common context parameter used by callback function. - * - * @return - * Pointer of the hlist entry if found, NULL otherwise. - */ -struct mlx5_hlist_entry *mlx5_hlist_lookup_ex(struct mlx5_hlist *h, - uint64_t key, - mlx5_hlist_match_callback_fn cb, - void *ctx); - -/** - * Extended routine to insert an entry to the list with key collisions. - * - * For lists that have key collisions, the extra user-defined match function - * allows nodes with the same key to be inserted. - * - * @param h - * Pointer to the hash list table. - * @param entry - * Entry to be inserted into the hash list table. - * @param cb - * Callback function to match the node with context. - * @param ctx - * Common context parameter used by callback function. - * - * @return - * - zero for success. - * - -EEXIST if the entry is already inserted. - */ -int mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry, - mlx5_hlist_match_callback_fn cb, void *ctx); - -/** * Insert an entry to the hash list table, the entry is only part of whole data * element and a 64B key is used for matching. User should construct the key or * give a calculated hash signature and guarantee there is no collision.
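For context, the removed pair split miss handling across two critical sections: a caller looked the key up, allocated on a miss, then attempted the insert and had to unwind when -EEXIST signalled that another thread had won the race. The register()-style API that replaces it folds find-or-create into one critical section. A self-contained toy showing both call shapes under a plain pthread mutex; the tbl_*/node names are hypothetical and deliberately simpler than the real mlx5 hash list:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct node {
        struct node *next;
        uint64_t key;
};

struct tbl {
        pthread_mutex_t lock;
        struct node *head;
};

static struct node *
tbl_find(struct tbl *t, uint64_t key) /* caller must hold t->lock */
{
        struct node *n;

        for (n = t->head; n != NULL; n = n->next)
                if (n->key == key)
                        return n;
        return NULL;
}

/* Legacy shape: lookup and insert are separate calls, so two threads can
 * both miss, both allocate, and the insert loser must free and retry. */
static int
tbl_insert_ex(struct tbl *t, struct node *n)
{
        int ret = 0;

        pthread_mutex_lock(&t->lock);
        if (tbl_find(t, n->key) != NULL) {
                ret = -EEXIST;
        } else {
                n->next = t->head;
                t->head = n;
        }
        pthread_mutex_unlock(&t->lock);
        return ret;
}

/* register() shape: the miss is handled inside the same critical section,
 * so concurrent callers always converge on a single entry. */
static struct node *
tbl_register(struct tbl *t, uint64_t key)
{
        struct node *n;

        pthread_mutex_lock(&t->lock);
        n = tbl_find(t, key);
        if (n == NULL) {
                n = calloc(1, sizeof(*n));
                if (n != NULL) {
                        n->key = key;
                        n->next = t->head;
                        t->head = n;
                }
        }
        pthread_mutex_unlock(&t->lock);
        return n;
}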
From patchwork Tue Oct 27 12:27:27 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82318 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 88271A04B5; Tue, 27 Oct 2020 13:39:38 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 2DF74BE5F; Tue, 27 Oct 2020 13:29:35 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id CE60E3253 for ; Tue, 27 Oct 2020 13:28:37 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:35 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7U024637; Tue, 27 Oct 2020 14:28:34 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:27 +0800 Message-Id: <1603801650-442376-33-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 32/34] net/mlx5: make tunnel hub list thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit uses a spinlock to protect the tunnel hub list against concurrent access from multiple threads.
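The pattern applied throughout the patch is to take the spinlock for the whole list walk, unlink the node while still holding the lock, and defer the refcount-driven teardown until after the unlock. A compressed sketch; the tunnel and hub types plus the free path are hypothetical stand-ins, while rte_spinlock_*() and the LIST_* macros are the same DPDK and BSD queue(3) APIs used in the diff:

#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <rte_spinlock.h>

struct tunnel {
        LIST_ENTRY(tunnel) chain;
        uint32_t refcnt;
        uint32_t id;
};

struct tunnel_hub {
        LIST_HEAD(, tunnel) tunnels;
        rte_spinlock_t sl; /* protects the tunnels list only */
};

/* Find-and-unlink in one critical section; free outside the lock. */
static void
tunnel_put(struct tunnel_hub *hub, uint32_t id)
{
        struct tunnel *tun;

        rte_spinlock_lock(&hub->sl);
        LIST_FOREACH(tun, &hub->tunnels, chain)
                if (tun->id == id) {
                        LIST_REMOVE(tun, chain); /* unlink while locked */
                        break;
                }
        rte_spinlock_unlock(&hub->sl);
        if (tun != NULL && __atomic_sub_fetch(&tun->refcnt, 1,
                                              __ATOMIC_RELAXED) == 0)
                free(tun); /* stand-in for the driver's tunnel teardown */
}

Unlinking inside the critical section is what closes the race: before the patch, the list walk was unlocked and LIST_REMOVE() ran later in the free path, so a concurrent walker could land on a node that was about to be freed.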
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow.c | 20 +++++++++++++++++--- drivers/net/mlx5/mlx5_flow.h | 1 + 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 5483f75..87446f7 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -669,10 +669,14 @@ enum mlx5_expansion { struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_flow_tunnel *tun; + rte_spinlock_lock(&thub->sl); LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->item == pmd_items) + if (&tun->item == pmd_items) { + LIST_REMOVE(tun, chain); break; + } } + rte_spinlock_unlock(&thub->sl); if (!tun || num_items != 1) return rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -690,10 +694,14 @@ enum mlx5_expansion { struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_flow_tunnel *tun; + rte_spinlock_lock(&thub->sl); LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->action == pmd_actions) + if (&tun->action == pmd_actions) { + LIST_REMOVE(tun, chain); break; + } } + rte_spinlock_unlock(&thub->sl); if (!tun || num_actions != 1) return rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -5871,8 +5879,12 @@ struct rte_flow * mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); if (flow->tunnel) { struct mlx5_flow_tunnel *tunnel; + + rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl); tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id); RTE_VERIFY(tunnel); + LIST_REMOVE(tunnel, chain); + rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl); if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) mlx5_flow_tunnel_free(dev, tunnel); } @@ -7931,7 +7943,6 @@ struct mlx5_meter_domains_infos * DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", dev->data->port_id, tunnel->tunnel_id); RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED)); - LIST_REMOVE(tunnel, chain); mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], tunnel->tunnel_id); mlx5_hlist_destroy(tunnel->groups); @@ -8020,6 +8031,7 @@ struct mlx5_meter_domains_infos * struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_flow_tunnel *tun; + rte_spinlock_lock(&thub->sl); LIST_FOREACH(tun, &thub->tunnels, chain) { if (!memcmp(app_tunnel, &tun->app_tunnel, sizeof(*app_tunnel))) { @@ -8037,6 +8049,7 @@ struct mlx5_meter_domains_infos * ret = -ENOMEM; } } + rte_spinlock_unlock(&thub->sl); if (tun) __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED); @@ -8065,6 +8078,7 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) if (!thub) return -ENOMEM; LIST_INIT(&thub->tunnels); + rte_spinlock_init(&thub->sl); thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0, 0, mlx5_flow_tunnel_grp2tbl_create_cb, NULL, diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 2de8988..c15f5e7 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -946,6 +946,7 @@ struct mlx5_flow_tunnel { /** PMD tunnel related context */ struct mlx5_flow_tunnel_hub { LIST_HEAD(, mlx5_flow_tunnel) tunnels; + rte_spinlock_t sl; /* Tunnel list spinlock. 
*/ struct mlx5_hlist *groups; /** non tunnel groups */ }; From patchwork Tue Oct 27 12:27:28 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82320 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D7A9BA04B5; Tue, 27 Oct 2020 13:40:20 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C8271BE81; Tue, 27 Oct 2020 13:29:37 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id E479B2BE2 for ; Tue, 27 Oct 2020 13:28:41 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:37 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7V024637; Tue, 27 Oct 2020 14:28:35 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:28 +0800 Message-Id: <1603801650-442376-34-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 33/34] net/mlx5: make shared action list thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit uses a spinlock to protect the shared action list against concurrent access from multiple threads. Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/linux/mlx5_os.c | 1 + drivers/net/mlx5/mlx5.h | 1 + drivers/net/mlx5/mlx5_flow_dv.c | 5 +++++ 3 files changed, 7 insertions(+) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 5856981..8612cab 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1534,6 +1534,7 @@ } priv->mreg_cp_tbl->ctx = eth_dev; } + rte_spinlock_init(&priv->shared_act_sl); mlx5_flow_counter_mode_config(eth_dev); return eth_dev; error: diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index a830945..fa49d7c 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -917,6 +917,7 @@ struct mlx5_priv { uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */ struct mlx5_mp_id mp_id; /* ID of a multi-process process */ LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */ + rte_spinlock_t shared_act_sl; /* Shared actions spinlock.
*/ LIST_HEAD(shared_action, rte_flow_shared_action) shared_actions; /* shared actions */ }; diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 184a675..2d4ef11 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -11187,7 +11187,9 @@ struct mlx5_cache_entry * if (shared_action) { __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED); + rte_spinlock_lock(&priv->shared_act_sl); LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next); + rte_spinlock_unlock(&priv->shared_act_sl); } return shared_action; } @@ -11214,6 +11216,7 @@ struct mlx5_cache_entry * struct rte_flow_shared_action *action, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; int ret; switch (action->type) { @@ -11228,7 +11231,9 @@ struct mlx5_cache_entry * } if (ret) return ret; + rte_spinlock_lock(&priv->shared_act_sl); LIST_REMOVE(action, next); + rte_spinlock_unlock(&priv->shared_act_sl); rte_free(action); return 0; } From patchwork Tue Oct 27 12:27:29 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 82321 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 26302A04B5; Tue, 27 Oct 2020 13:40:42 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 206D5BE95; Tue, 27 Oct 2020 13:29:39 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id E4A923253 for ; Tue, 27 Oct 2020 13:28:42 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:39 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7W024637; Tue, 27 Oct 2020 14:28:37 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org, Xueming Li Date: Tue, 27 Oct 2020 20:27:29 +0800 Message-Id: <1603801650-442376-35-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 34/34] net/mlx5: remove shared context lock X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li To support multi-thread flow insertion, this patch removes the shared data lock, since all resources now provide their own concurrent protection.
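What the patch deletes is a layer of wrappers shaped like the sketch below (compare the removed flow_dv_shared_lock()/flow_dv_shared_unlock() and the "Mutex-protected thunk" comments in the diff): a context-wide mutex, taken only when the DV context is shared by several ports, wrapped around otherwise lock-free cores. Once every table and list those cores touch carries its own lock, the outer mutex serializes nothing useful. The struct here is a hypothetical stand-in; the pthread calls are the real API the removed code used:

#include <pthread.h>
#include <stdint.h>

/* Illustrative stand-in for the shared DV context. */
struct shared_ctx {
        uint32_t refcnt; /* >1 when several ports share the context */
        pthread_mutex_t mutex;
};

/* The removed helpers locked only when the context was really shared. */
static void
shared_lock(struct shared_ctx *sh)
{
        if (sh->refcnt > 1)
                pthread_mutex_lock(&sh->mutex);
}

static void
shared_unlock(struct shared_ctx *sh)
{
        if (sh->refcnt > 1)
                pthread_mutex_unlock(&sh->mutex);
}

/* Pre-series shape of every flow entry point: a thunk around a core. */
static int
op_thunk(struct shared_ctx *sh, int (*core_op)(struct shared_ctx *))
{
        int ret;

        shared_lock(sh);
        ret = core_op(sh); /* e.g. the lock-free translate/apply body */
        shared_unlock(sh);
        return ret;
}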
Signed-off-by: Xueming Li Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- doc/guides/nics/mlx5.rst | 1 + doc/guides/rel_notes/release_20_11.rst | 1 + drivers/net/mlx5/linux/mlx5_os.c | 4 +- drivers/net/mlx5/mlx5.h | 1 - drivers/net/mlx5/mlx5_flow_dv.c | 209 ++++----------------------------- 5 files changed, 27 insertions(+), 189 deletions(-) diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst index e5e55fc..8c59cc6 100644 --- a/doc/guides/nics/mlx5.rst +++ b/doc/guides/nics/mlx5.rst @@ -96,6 +96,7 @@ Features - Per packet no-inline hint flag to disable packet data copying into Tx descriptors. - Hardware LRO. - Hairpin. +- Multiple-thread flow insertion. Limitations ----------- diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst index 1cd7f1f..d73ecd0 100644 --- a/doc/guides/rel_notes/release_20_11.rst +++ b/doc/guides/rel_notes/release_20_11.rst @@ -358,6 +358,7 @@ New Features * Added support for QinQ packets matching. * Added support for the new vlan fields ``has_vlan`` in the eth item and ``has_more_vlan`` in the vlan item. + * Added support for PMD level multiple-thread flow insertion. * **Updated vhost sample application.** diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 8612cab..d4f2194 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -321,7 +321,6 @@ err = errno; goto error; } - pthread_mutex_init(&sh->dv_mutex, NULL); sh->tx_domain = domain; #ifdef HAVE_MLX5DV_DR_ESWITCH if (priv->config.dv_esw_en) { @@ -435,7 +434,6 @@ mlx5_glue->destroy_flow_action(sh->pop_vlan_action); sh->pop_vlan_action = NULL; } - pthread_mutex_destroy(&sh->dv_mutex); #endif /* HAVE_MLX5DV_DR */ if (sh->default_miss_action) mlx5_glue->destroy_flow_action @@ -1536,6 +1534,8 @@ } rte_spinlock_init(&priv->shared_act_sl); mlx5_flow_counter_mode_config(eth_dev); + if (priv->config.dv_flow_en) + eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; return eth_dev; error: if (priv) { diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index fa49d7c..c434fea 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -642,7 +642,6 @@ struct mlx5_dev_ctx_shared { /* Packet pacing related structure. */ struct mlx5_dev_txpp txpp; /* Shared DV/DR flow data section. */ - pthread_mutex_t dv_mutex; /* DV context mutex. */ uint32_t dv_meta_mask; /* flow META metadata supported mask. */ uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */ uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */ diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 2d4ef11..e15d903 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -275,45 +275,6 @@ struct field_modify_info modify_tcp[] = { } } -/** - * Acquire the synchronizing object to protect multithreaded access - * to shared dv context. Lock occurs only if context is actually - * shared, i.e. we have multiport IB device and representors are - * created. - * - * @param[in] dev - * Pointer to the rte_eth_dev structure. 
- */ -static void -flow_dv_shared_lock(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - - if (sh->refcnt > 1) { - int ret; - - ret = pthread_mutex_lock(&sh->dv_mutex); - MLX5_ASSERT(!ret); - (void)ret; - } -} - -static void -flow_dv_shared_unlock(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - - if (sh->refcnt > 1) { - int ret; - - ret = pthread_mutex_unlock(&sh->dv_mutex); - MLX5_ASSERT(!ret); - (void)ret; - } -} - /* Update VLAN's VID/PCP based on input rte_flow_action. * * @param[in] action @@ -5074,7 +5035,7 @@ struct mlx5_hlist_entry * * Index to the counter handler. */ static void -flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) +flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool = NULL; @@ -8621,7 +8582,7 @@ struct mlx5_hlist_entry * act_res->rix_tag = 0; } if (act_res->cnt) { - flow_dv_counter_release(dev, act_res->cnt); + flow_dv_counter_free(dev, act_res->cnt); act_res->cnt = 0; } } @@ -9295,12 +9256,12 @@ struct mlx5_cache_entry * * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -__flow_dv_translate(struct rte_eth_dev *dev, - struct mlx5_flow *dev_flow, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +flow_dv_translate(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; @@ -10376,8 +10337,8 @@ struct mlx5_cache_entry * * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, - struct rte_flow_error *error) +flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) { struct mlx5_flow_dv_workspace *dv; struct mlx5_flow_handle *dh; @@ -10833,7 +10794,7 @@ struct mlx5_cache_entry * * Pointer to flow structure. */ static void -__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) { struct mlx5_flow_handle *dh; uint32_t handle_idx; @@ -10869,7 +10830,7 @@ struct mlx5_cache_entry * * Pointer to flow structure. */ static void -__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { struct rte_flow_shared_action *shared; struct mlx5_flow_handle *dev_handle; @@ -10877,12 +10838,12 @@ struct mlx5_cache_entry * if (!flow) return; - __flow_dv_remove(dev, flow); + flow_dv_remove(dev, flow); shared = mlx5_flow_get_shared_rss(flow); if (shared) __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED); if (flow->counter) { - flow_dv_counter_release(dev, flow->counter); + flow_dv_counter_free(dev, flow->counter); flow->counter = 0; } if (flow->meter) { @@ -11165,10 +11126,10 @@ struct mlx5_cache_entry * * rte_errno is set. 
*/ static struct rte_flow_shared_action * -__flow_dv_action_create(struct rte_eth_dev *dev, - const struct rte_flow_shared_action_conf *conf, - const struct rte_flow_action *action, - struct rte_flow_error *error) +flow_dv_action_create(struct rte_eth_dev *dev, + const struct rte_flow_shared_action_conf *conf, + const struct rte_flow_action *action, + struct rte_flow_error *error) { struct rte_flow_shared_action *shared_action = NULL; struct mlx5_priv *priv = dev->data->dev_private; @@ -11212,9 +11173,9 @@ struct mlx5_cache_entry * * 0 on success, otherwise negative errno value. */ static int -__flow_dv_action_destroy(struct rte_eth_dev *dev, - struct rte_flow_shared_action *action, - struct rte_flow_error *error) +flow_dv_action_destroy(struct rte_eth_dev *dev, + struct rte_flow_shared_action *action, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; int ret; @@ -11334,7 +11295,7 @@ struct mlx5_cache_entry * * 0 on success, otherwise negative errno value. */ static int -__flow_dv_action_update(struct rte_eth_dev *dev, +flow_dv_action_update(struct rte_eth_dev *dev, struct rte_flow_shared_action *action, const void *action_conf, struct rte_flow_error *error) @@ -12098,85 +12059,12 @@ struct mlx5_cache_entry * } /* - * Mutex-protected thunk to lock-free __flow_dv_translate(). - */ -static int -flow_dv_translate(struct rte_eth_dev *dev, - struct mlx5_flow *dev_flow, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - int ret; - - flow_dv_shared_lock(dev); - ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error); - flow_dv_shared_unlock(dev); - return ret; -} - -/* - * Mutex-protected thunk to lock-free __flow_dv_apply(). - */ -static int -flow_dv_apply(struct rte_eth_dev *dev, - struct rte_flow *flow, - struct rte_flow_error *error) -{ - int ret; - - flow_dv_shared_lock(dev); - ret = __flow_dv_apply(dev, flow, error); - flow_dv_shared_unlock(dev); - return ret; -} - -/* - * Mutex-protected thunk to lock-free __flow_dv_remove(). - */ -static void -flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) -{ - flow_dv_shared_lock(dev); - __flow_dv_remove(dev, flow); - flow_dv_shared_unlock(dev); -} - -/* - * Mutex-protected thunk to lock-free __flow_dv_destroy(). - */ -static void -flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) -{ - flow_dv_shared_lock(dev); - __flow_dv_destroy(dev, flow); - flow_dv_shared_unlock(dev); -} - -/* * Mutex-protected thunk to lock-free flow_dv_counter_alloc(). */ static uint32_t flow_dv_counter_allocate(struct rte_eth_dev *dev) { - uint32_t cnt; - - flow_dv_shared_lock(dev); - cnt = flow_dv_counter_alloc(dev, 0); - flow_dv_shared_unlock(dev); - return cnt; -} - -/* - * Mutex-protected thunk to lock-free flow_dv_counter_release(). - */ -static void -flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt) -{ - flow_dv_shared_lock(dev); - flow_dv_counter_release(dev, cnt); - flow_dv_shared_unlock(dev); + return flow_dv_counter_alloc(dev, 0); } /** @@ -12214,57 +12102,6 @@ struct mlx5_cache_entry * } } -/* - * Mutex-protected thunk to lock-free __flow_dv_action_create(). 
- */ -static struct rte_flow_shared_action * -flow_dv_action_create(struct rte_eth_dev *dev, - const struct rte_flow_shared_action_conf *conf, - const struct rte_flow_action *action, - struct rte_flow_error *error) -{ - struct rte_flow_shared_action *shared_action = NULL; - - flow_dv_shared_lock(dev); - shared_action = __flow_dv_action_create(dev, conf, action, error); - flow_dv_shared_unlock(dev); - return shared_action; -} - -/* - * Mutex-protected thunk to lock-free __flow_dv_action_destroy(). - */ -static int -flow_dv_action_destroy(struct rte_eth_dev *dev, - struct rte_flow_shared_action *action, - struct rte_flow_error *error) -{ - int ret; - - flow_dv_shared_lock(dev); - ret = __flow_dv_action_destroy(dev, action, error); - flow_dv_shared_unlock(dev); - return ret; -} - -/* - * Mutex-protected thunk to lock-free __flow_dv_action_update(). - */ -static int -flow_dv_action_update(struct rte_eth_dev *dev, - struct rte_flow_shared_action *action, - const void *action_conf, - struct rte_flow_error *error) -{ - int ret; - - flow_dv_shared_lock(dev); - ret = __flow_dv_action_update(dev, action, action_conf, - error); - flow_dv_shared_unlock(dev); - return ret; -} - const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { .validate = flow_dv_validate, .prepare = flow_dv_prepare,