From patchwork Tue Oct 6 11:48:45 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Suanming Mou X-Patchwork-Id: 79754 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5B42DA04BB; Tue, 6 Oct 2020 13:49:23 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 86F6C1C01; Tue, 6 Oct 2020 13:49:21 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id D31B51023 for ; Tue, 6 Oct 2020 13:49:19 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 6 Oct 2020 14:49:17 +0300 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 096BnC0M028553; Tue, 6 Oct 2020 14:49:16 +0300 From: Suanming Mou To: viacheslavo@nvidia.com, matan@nvidia.com Cc: rasland@nvidia.com, dev@dpdk.org, Xueming Li Date: Tue, 6 Oct 2020 19:48:45 +0800 Message-Id: <1601984948-313027-3-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH 02/25] net/mlx5: use thread specific flow context X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Xueming Li As part of multi-thread flow support, this patch moves flow intermediate data to thread specific, makes them a flow context. The context is allocated per thread, destroyed along with thread life-cycle. 
Signed-off-by: Xueming Li --- drivers/net/mlx5/linux/mlx5_os.c | 5 -- drivers/net/mlx5/mlx5.c | 2 - drivers/net/mlx5/mlx5.h | 6 -- drivers/net/mlx5/mlx5_flow.c | 134 +++++++++++++++++++++++++------------ drivers/net/mlx5/mlx5_flow.h | 15 ++++- drivers/net/mlx5/mlx5_flow_dv.c | 26 ++++--- drivers/net/mlx5/mlx5_flow_verbs.c | 24 ++++--- 7 files changed, 133 insertions(+), 79 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 188a6d4..4276964 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1330,11 +1330,6 @@ err = ENOTSUP; goto error; } - /* - * Allocate the buffer for flow creating, just once. - * The allocation must be done before any flow creating. - */ - mlx5_flow_alloc_intermediate(eth_dev); /* Query availability of metadata reg_c's. */ err = mlx5_flow_discover_mreg_c(eth_dev); if (err < 0) { diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index c9fc085..16719e6 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1375,8 +1375,6 @@ struct mlx5_dev_ctx_shared * */ mlx5_flow_list_flush(dev, &priv->flows, true); mlx5_flow_meter_flush(dev, NULL); - /* Free the intermediate buffers for flow creation. */ - mlx5_flow_free_intermediate(dev); /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index bd91e16..0080ac8 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -793,10 +793,6 @@ struct mlx5_priv { struct mlx5_drop drop_queue; /* Flow drop queues. */ uint32_t flows; /* RTE Flow rules. */ uint32_t ctrl_flows; /* Control flow rules. */ - void *inter_flows; /* Intermediate resources for flow creation. */ - void *rss_desc; /* Intermediate rss description resources. */ - int flow_idx; /* Intermediate device flow index. */ - int flow_nested_idx; /* Intermediate device flow index, nested. 
*/ struct mlx5_obj_ops obj_ops; /* HW objects operations. */ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */ LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */ @@ -1020,8 +1016,6 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list); int mlx5_flow_start_default(struct rte_eth_dev *dev); void mlx5_flow_stop_default(struct rte_eth_dev *dev); -void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev); -void mlx5_flow_free_intermediate(struct rte_eth_dev *dev); int mlx5_flow_verify(struct rte_eth_dev *dev); int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue); int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index ffa7646..eeee546 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -306,6 +306,13 @@ struct mlx5_flow_tunnel_info { }, }; +/* Key of thread specific flow workspace data. */ +static pthread_key_t key_workspace; + +/* Thread specific flow workspace data once initialization data. */ +static pthread_once_t key_workspace_init; + + /** * Translate tag ID to register. 
* @@ -4348,16 +4355,18 @@ struct mlx5_flow_tunnel_info { uint8_t buffer[2048]; } items_tx; struct rte_flow_expand_rss *buf = &expand_buffer.buf; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc)[!!priv->flow_idx]; + struct mlx5_flow_rss_desc *rss_desc; const struct rte_flow_action *p_actions_rx = actions; uint32_t i; uint32_t idx = 0; int hairpin_flow; uint32_t hairpin_id = 0; struct rte_flow_attr attr_tx = { .priority = 0 }; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); int ret; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_idx]; hairpin_flow = flow_check_hairpin_split(dev, attr, actions); ret = flow_drv_validate(dev, attr, items, p_actions_rx, external, hairpin_flow, error); @@ -4383,9 +4392,25 @@ struct mlx5_flow_tunnel_info { flow->hairpin_flow_id = hairpin_id; MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); - memset(rss_desc, 0, sizeof(*rss_desc)); + memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue)); rss = flow_get_rss_action(p_actions_rx); if (rss) { + /* Check if need more memory for the queue. */ + if (rss->queue_num > wks->rssq_num[!!wks->flow_idx]) { + /* Default memory is from workspace. No need to free. */ + void *queue = (wks->rssq_num[!!wks->flow_idx] == + MLX5_RSSQ_DEFAULT_NUM) ? + NULL : rss_desc->queue; + queue = mlx5_realloc(queue, + MLX5_MEM_ZERO, + sizeof(rss_desc->queue[0]) * rss->queue_num * 2, + 0, SOCKET_ID_ANY); + if (!queue) { + /* Previous queue buffer stays valid on failure. */ + rte_errno = ENOMEM; + return 0; + } + rss_desc->queue = queue; + wks->rssq_num[!!wks->flow_idx] = rss->queue_num * 2; + } /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. @@ -4414,9 +4439,9 @@ struct mlx5_flow_tunnel_info { * need to be translated before another calling. * No need to use ping-pong buffer to save memory here. 
*/ - if (priv->flow_idx) { - MLX5_ASSERT(!priv->flow_nested_idx); - priv->flow_nested_idx = priv->flow_idx; + if (wks->flow_idx) { + MLX5_ASSERT(!wks->flow_nested_idx); + wks->flow_nested_idx = wks->flow_idx; } for (i = 0; i < buf->entries; ++i) { /* @@ -4481,9 +4506,9 @@ struct mlx5_flow_tunnel_info { flow, next); flow_rxq_flags_set(dev, flow); /* Nested flow creation index recovery. */ - priv->flow_idx = priv->flow_nested_idx; - if (priv->flow_nested_idx) - priv->flow_nested_idx = 0; + wks->flow_idx = wks->flow_nested_idx; + if (wks->flow_nested_idx) + wks->flow_nested_idx = 0; return idx; error: MLX5_ASSERT(flow); @@ -4498,9 +4523,9 @@ struct mlx5_flow_tunnel_info { mlx5_flow_id_release(priv->sh->flow_id_pool, hairpin_id); rte_errno = ret; - priv->flow_idx = priv->flow_nested_idx; - if (priv->flow_nested_idx) - priv->flow_nested_idx = 0; + wks->flow_idx = wks->flow_nested_idx; + if (wks->flow_nested_idx) + wks->flow_nested_idx = 0; return 0; } @@ -4782,48 +4807,69 @@ struct rte_flow * } /** - * Allocate intermediate resources for flow creation. - * - * @param dev - * Pointer to Ethernet device. + * Release key of thread specific flow workspace data. */ -void -mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) +static void +flow_release_workspace(void *data) { - struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = data; - if (!priv->inter_flows) { - priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO, - MLX5_NUM_MAX_DEV_FLOWS * - sizeof(struct mlx5_flow) + - (sizeof(struct mlx5_flow_rss_desc) + - sizeof(uint16_t) * UINT16_MAX) * 2, 0, - SOCKET_ID_ANY); - if (!priv->inter_flows) { - DRV_LOG(ERR, "can't allocate intermediate memory."); - return; - } - } - priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows) - [MLX5_NUM_MAX_DEV_FLOWS]; - /* Reset the index. 
*/ - priv->flow_idx = 0; - priv->flow_nested_idx = 0; + if (!wks) + return; + if (wks->rssq_num[0] != MLX5_RSSQ_DEFAULT_NUM) + mlx5_free(wks->rss_desc[0].queue); + if (wks->rssq_num[1] != MLX5_RSSQ_DEFAULT_NUM) + mlx5_free(wks->rss_desc[1].queue); + mlx5_free(wks); + return; } /** - * Free intermediate resources for flows. + * Initialize key of thread specific flow workspace data. + */ +static void +flow_alloc_workspace(void) +{ + if (pthread_key_create(&key_workspace, flow_release_workspace)) + DRV_LOG(ERR, "can't create flow workspace data thread key."); +} + +/** + * Get thread specific flow workspace. * - * @param dev - * Pointer to Ethernet device. + * @return pointer to thread specific flow workspace data, NULL on error. */ -void -mlx5_flow_free_intermediate(struct rte_eth_dev *dev) +struct mlx5_flow_workspace* +mlx5_flow_get_thread_workspace(void) { - struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *data; + + if (pthread_once(&key_workspace_init, flow_alloc_workspace)) { + DRV_LOG(ERR, "failed to init flow workspace data thread key."); + return NULL; + } - mlx5_free(priv->inter_flows); - priv->inter_flows = NULL; + data = pthread_getspecific(key_workspace); + if (!data) { + data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*data) + + sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM * 2, + 0, SOCKET_ID_ANY); + if (!data) { + DRV_LOG(ERR, "failed to allocate flow workspace " + "memory."); + return NULL; + } + data->rss_desc[0].queue = (uint16_t *)(data + 1); + data->rss_desc[1].queue = data->rss_desc[0].queue + + MLX5_RSSQ_DEFAULT_NUM; + data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM; + data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM; + if (pthread_setspecific(key_workspace, data)) { + DRV_LOG(ERR, "failed to set flow workspace to thread."); + return NULL; + } + } + return data; } /** diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 279daf2..2685481 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ 
-73,6 +73,9 @@ enum mlx5_feature_name { MLX5_MTR_SFX, }; +/* Default queue number. */ +#define MLX5_RSSQ_DEFAULT_NUM 16 + /* Pattern outer Layer bits. */ #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) @@ -531,7 +534,7 @@ struct mlx5_flow_rss_desc { uint32_t queue_num; /**< Number of entries in @p queue. */ uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ - uint16_t queue[]; /**< Destination queues to redirect traffic to. */ + uint16_t *queue; /**< Destination queues. */ }; /* PMD flow priority for tunnel */ @@ -856,6 +859,15 @@ struct rte_flow { uint16_t meter; /**< Holds flow meter id. */ } __rte_packed; +/* Thread specific flow workspace intermediate data. */ +struct mlx5_flow_workspace { + struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS]; + struct mlx5_flow_rss_desc rss_desc[2]; + uint32_t rssq_num[2]; /* Allocated queue num in rss_desc. */ + int flow_idx; /* Intermediate device flow index. */ + int flow_nested_idx; /* Intermediate device flow index, nested. 
*/ +}; + typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], @@ -930,6 +942,7 @@ struct mlx5_flow_driver_ops { /* mlx5_flow.c */ +struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id); void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool); uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id); diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 79fdf34..ede7bf8 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -5939,9 +5939,11 @@ struct field_modify_info modify_tcp[] = { struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); /* In case of corrupting the memory. */ - if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not free temporary device flow"); @@ -5955,8 +5957,8 @@ struct field_modify_info modify_tcp[] = { "not enough memory to create flow handle"); return NULL; } - /* No multi-thread supporting. 
*/ - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; /* @@ -8181,9 +8183,8 @@ struct field_modify_info modify_tcp[] = { struct mlx5_dev_config *dev_conf = &priv->config; struct rte_flow *flow = dev_flow->flow; struct mlx5_flow_handle *handle = dev_flow->handle; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc) - [!!priv->flow_nested_idx]; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; uint64_t item_flags = 0; uint64_t last_item = 0; uint64_t action_flags = 0; @@ -8216,6 +8217,8 @@ struct field_modify_info modify_tcp[] = { uint32_t table; int ret = 0; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : MLX5DV_FLOW_TABLE_TYPE_NIC_RX; ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group, @@ -8955,9 +8958,11 @@ struct field_modify_info modify_tcp[] = { int n; int err; int idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); - for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; + MLX5_ASSERT(wks); + for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) { + dev_flow = &wks->flows[idx]; dv = &dev_flow->dv; dh = dev_flow->handle; dv_h = &dh->dvh; @@ -8988,9 +8993,8 @@ struct field_modify_info modify_tcp[] = { } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { struct mlx5_hrxq *hrxq; uint32_t hrxq_idx; - struct mlx5_flow_rss_desc *rss_desc = - &((struct mlx5_flow_rss_desc *)priv->rss_desc) - [!!priv->flow_nested_idx]; + struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc + [!!wks->flow_nested_idx]; MLX5_ASSERT(rss_desc->queue_num); hrxq_idx = mlx5_hrxq_get(dev, 
rss_desc->key, diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index 62c18b8..b649960 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -1632,7 +1632,9 @@ struct mlx5_flow *dev_flow; struct mlx5_flow_handle *dev_handle; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); size += flow_verbs_get_actions_size(actions); size += flow_verbs_get_items_size(items); if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) { @@ -1642,7 +1644,7 @@ return NULL; } /* In case of corrupting the memory. */ - if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { rte_flow_error_set(error, ENOSPC, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not free temporary device flow"); @@ -1656,8 +1658,8 @@ "not enough memory to create flow handle"); return NULL; } - /* No multi-thread supporting. */ - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; dev_flow->handle = dev_handle; dev_flow->handle_idx = handle_idx; /* Memcpy is used, only size needs to be cleared to 0. 
*/ @@ -1701,10 +1703,11 @@ uint64_t priority = attr->priority; uint32_t subpriority = 0; struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) - priv->rss_desc) - [!!priv->flow_nested_idx]; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc[!!wks->flow_nested_idx]; if (priority == MLX5_FLOW_PRIO_RSVD) priority = priv->config.flow_prio - 1; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -1960,9 +1963,11 @@ uint32_t dev_handles; int err; int idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); - for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { - dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; + MLX5_ASSERT(wks); + for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) { + dev_flow = &wks->flows[idx]; handle = dev_flow->handle; if (handle->fate_action == MLX5_FLOW_FATE_DROP) { hrxq = mlx5_drop_action_create(dev); @@ -1976,8 +1981,7 @@ } else { uint32_t hrxq_idx; struct mlx5_flow_rss_desc *rss_desc = - &((struct mlx5_flow_rss_desc *)priv->rss_desc) - [!!priv->flow_nested_idx]; + &wks->rss_desc[!!wks->flow_nested_idx]; MLX5_ASSERT(rss_desc->queue_num); hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,