From patchwork Tue Feb 4 11:33:17 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bing Zhao X-Patchwork-Id: 65528 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 71D3BA0532; Tue, 4 Feb 2020 12:33:42 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id F2E041C12F; Tue, 4 Feb 2020 12:33:38 +0100 (CET) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org (Postfix) with ESMTP id EE28E1C12B for ; Tue, 4 Feb 2020 12:33:36 +0100 (CET) From: Bing Zhao To: orika@mellanox.com, viacheslavo@mellanox.com, rasland@mellanox.com, matan@mellanox.com Cc: dev@dpdk.org Date: Tue, 4 Feb 2020 13:33:17 +0200 Message-Id: <1580816002-159035-2-git-send-email-bingz@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1580816002-159035-1-git-send-email-bingz@mellanox.com> References: <1580736735-19472-1-git-send-email-bingz@mellanox.com> <1580816002-159035-1-git-send-email-bingz@mellanox.com> Subject: [dpdk-dev] [PATCH v2 1/6] net/mlx5: introduce non-cached flows tailq list X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" A new tailq head is introduced in the mlx5 private structure for each device. Then all the flows created by user are moved into this tailq list. This is the first stage to separate the flows with DV mode from the flows with Verbs mode. 
Signed-off-by: Bing Zhao --- drivers/net/mlx5/mlx5.c | 3 ++- drivers/net/mlx5/mlx5.h | 3 ++- drivers/net/mlx5/mlx5_flow.c | 16 ++++++++-------- drivers/net/mlx5/mlx5_trigger.c | 6 +++--- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 7a79722..6a2d662 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -2686,7 +2686,8 @@ struct mlx5_flow_id_pool * mlx5_ifindex(eth_dev), eth_dev->data->mac_addrs, MLX5_MAX_MAC_ADDRESSES); - TAILQ_INIT(&priv->flows); + TAILQ_INIT(&priv->cached_flows); + TAILQ_INIT(&priv->noncached_flows); TAILQ_INIT(&priv->ctrl_flows); TAILQ_INIT(&priv->flow_meters); TAILQ_INIT(&priv->flow_meter_profiles); diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index d7c519b..65bdb3b 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -515,7 +515,8 @@ struct mlx5_priv { unsigned int (*reta_idx)[]; /* RETA index table. */ unsigned int reta_idx_n; /* RETA index size. */ struct mlx5_drop drop_queue; /* Flow drop queues. */ - struct mlx5_flows flows; /* RTE Flow rules. */ + struct mlx5_flows cached_flows; /* cached RTE Flow rules. */ + struct mlx5_flows noncached_flows; /* non-cached RTE Flow rules. */ struct mlx5_flows ctrl_flows; /* Control flow rules. */ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */ LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. 
*/ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 144e07c..d7fb094 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -4357,7 +4357,7 @@ struct rte_flow * { struct mlx5_priv *priv = dev->data->dev_private; - return flow_list_create(dev, &priv->flows, + return flow_list_create(dev, &priv->noncached_flows, attr, items, actions, true, error); } @@ -4490,7 +4490,7 @@ struct rte_flow * struct rte_flow *flow; int ret = 0; - TAILQ_FOREACH(flow, &priv->flows, next) { + TAILQ_FOREACH(flow, &priv->noncached_flows, next) { DRV_LOG(DEBUG, "port %u flow %p still referenced", dev->data->port_id, (void *)flow); ++ret; @@ -4674,7 +4674,7 @@ struct rte_flow * { struct mlx5_priv *priv = dev->data->dev_private; - flow_list_destroy(dev, &priv->flows, flow); + flow_list_destroy(dev, &priv->noncached_flows, flow); return 0; } @@ -4690,7 +4690,7 @@ struct rte_flow * { struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_flush(dev, &priv->flows); + mlx5_flow_list_flush(dev, &priv->noncached_flows); return 0; } @@ -5004,7 +5004,7 @@ struct rte_flow * struct rte_flow *flow = NULL; MLX5_ASSERT(fdir_flow); - TAILQ_FOREACH(flow, &priv->flows, next) { + TAILQ_FOREACH(flow, &priv->noncached_flows, next) { if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) { DRV_LOG(DEBUG, "port %u found FDIR flow %p", dev->data->port_id, (void *)flow); @@ -5047,7 +5047,7 @@ struct rte_flow * rte_errno = EEXIST; goto error; } - flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr, + flow = flow_list_create(dev, &priv->noncached_flows, &fdir_flow->attr, fdir_flow->items, fdir_flow->actions, true, NULL); if (!flow) @@ -5092,7 +5092,7 @@ struct rte_flow * rte_errno = ENOENT; return -rte_errno; } - flow_list_destroy(dev, &priv->flows, flow); + flow_list_destroy(dev, &priv->noncached_flows, flow); DRV_LOG(DEBUG, "port %u deleted FDIR flow %p", dev->data->port_id, (void *)flow); return 0; @@ -5132,7 +5132,7 @@ struct rte_flow * { 
struct mlx5_priv *priv = dev->data->dev_private; - mlx5_flow_list_flush(dev, &priv->flows); + mlx5_flow_list_flush(dev, &priv->noncached_flows); } /** diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index be47df5..0053847 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -320,7 +320,7 @@ dev->data->port_id); goto error; } - ret = mlx5_flow_start(dev, &priv->flows); + ret = mlx5_flow_start(dev, &priv->noncached_flows); if (ret) { DRV_LOG(DEBUG, "port %u failed to set flows", dev->data->port_id); @@ -337,7 +337,7 @@ ret = rte_errno; /* Save rte_errno before cleanup. */ /* Rollback. */ dev->data->dev_started = 0; - mlx5_flow_stop(dev, &priv->flows); + mlx5_flow_stop(dev, &priv->noncached_flows); mlx5_traffic_disable(dev); mlx5_txq_stop(dev); mlx5_rxq_stop(dev); @@ -367,7 +367,7 @@ mlx5_mp_req_stop_rxtx(dev); usleep(1000 * priv->rxqs_n); DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id); - mlx5_flow_stop(dev, &priv->flows); + mlx5_flow_stop(dev, &priv->noncached_flows); mlx5_traffic_disable(dev); mlx5_rx_intr_vec_disable(dev); mlx5_dev_interrupt_handler_uninstall(dev); From patchwork Tue Feb 4 11:33:18 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bing Zhao X-Patchwork-Id: 65529 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7A32AA0532; Tue, 4 Feb 2020 12:33:54 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E49FD1C197; Tue, 4 Feb 2020 12:33:40 +0100 (CET) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org (Postfix) with ESMTP id A959B1C12B for ; Tue, 4 Feb 2020 12:33:38 +0100 (CET) From: Bing Zhao To: orika@mellanox.com, viacheslavo@mellanox.com, 
rasland@mellanox.com, matan@mellanox.com Cc: dev@dpdk.org Date: Tue, 4 Feb 2020 13:33:18 +0200 Message-Id: <1580816002-159035-3-git-send-email-bingz@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1580816002-159035-1-git-send-email-bingz@mellanox.com> References: <1580736735-19472-1-git-send-email-bingz@mellanox.com> <1580816002-159035-1-git-send-email-bingz@mellanox.com> Subject: [dpdk-dev] [PATCH v2 2/6] net/mlx5: change operations of non-cached flows X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" When stopping a mlx5 device, the flows with non-cached mode will be flushed. So no operation will be done for these flows in the device closing stage. If the device restarts after stopped, no flow with non-cached mode will be reinserted. Operations of flows with cached mode remain the same. And when the flushing is called from user, all the flows will be flushed. Signed-off-by: Bing Zhao --- drivers/net/mlx5/mlx5.c | 1 + drivers/net/mlx5/mlx5.h | 2 ++ drivers/net/mlx5/mlx5_flow.c | 36 +++++++++++++++++++++++++++++++++--- drivers/net/mlx5/mlx5_trigger.c | 11 ++++++++--- 4 files changed, 44 insertions(+), 6 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 6a2d662..4c97df5 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1236,6 +1236,7 @@ struct mlx5_flow_id_pool * mlx5_dev_interrupt_handler_uninstall(dev); mlx5_dev_interrupt_handler_devx_uninstall(dev); mlx5_traffic_disable(dev); + /* Only cached flows will be flushed in this stage, if any. */ mlx5_flow_flush(dev, NULL); mlx5_flow_meter_flush(dev, NULL); /* Prevent crashes when queues are still in use. 
*/ diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 65bdb3b..d749b29 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -715,6 +715,8 @@ int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error); void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list); int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int mlx5_flow_flush_noncached(struct rte_eth_dev *dev, + struct rte_flow_error *error); int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, const struct rte_flow_action *action, void *data, struct rte_flow_error *error); diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index d7fb094..0560874 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -4453,11 +4453,14 @@ struct rte_flow * struct rte_flow_error error; int ret = 0; - /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ + /* + * Make sure default copy action (reg_c[0] -> reg_b) is created. + * This should always be executed no matter the driver type. + */ ret = flow_mreg_add_default_copy_action(dev, &error); if (ret < 0) return -rte_errno; - /* Apply Flows created by application. */ + /* Apply Flows created by application, only for cached flows. */ TAILQ_FOREACH(flow, list, next) { ret = flow_mreg_start_copy_action(dev, flow); if (ret < 0) @@ -4674,7 +4677,15 @@ struct rte_flow * { struct mlx5_priv *priv = dev->data->dev_private; - flow_list_destroy(dev, &priv->noncached_flows, flow); + /* + * Checking the flow type and then destroying the flows in both lists. + * Flow with DV type is non-cached (most cases) and flow with legacy + * verbs mode is still cached right now. 
+ */ + if (flow->drv_type == MLX5_FLOW_TYPE_DV) + flow_list_destroy(dev, &priv->noncached_flows, flow); + else + flow_list_destroy(dev, &priv->cached_flows, flow); return 0; } @@ -4690,6 +4701,24 @@ struct rte_flow * { struct mlx5_priv *priv = dev->data->dev_private; + /* In most cases, only one tailq list will contain the flows. */ + mlx5_flow_list_flush(dev, &priv->noncached_flows); + mlx5_flow_list_flush(dev, &priv->cached_flows); + return 0; +} + +/** + * Destroy all non-cached flows. + * + * @see rte_flow_flush() + * @see rte_flow_ops + */ +int +mlx5_flow_flush_noncached(struct rte_eth_dev *dev, + struct rte_flow_error *error __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + mlx5_flow_list_flush(dev, &priv->noncached_flows); return 0; } @@ -5133,6 +5162,7 @@ struct rte_flow * struct mlx5_priv *priv = dev->data->dev_private; mlx5_flow_list_flush(dev, &priv->noncached_flows); + mlx5_flow_list_flush(dev, &priv->cached_flows); } /** diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 0053847..26f4863 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -320,7 +320,7 @@ dev->data->port_id); goto error; } - ret = mlx5_flow_start(dev, &priv->noncached_flows); + ret = mlx5_flow_start(dev, &priv->cached_flows); if (ret) { DRV_LOG(DEBUG, "port %u failed to set flows", dev->data->port_id); @@ -337,7 +337,7 @@ ret = rte_errno; /* Save rte_errno before cleanup. */ /* Rollback. 
*/ dev->data->dev_started = 0; - mlx5_flow_stop(dev, &priv->noncached_flows); + mlx5_flow_stop(dev, &priv->cached_flows); mlx5_traffic_disable(dev); mlx5_txq_stop(dev); mlx5_rxq_stop(dev); @@ -367,7 +367,12 @@ mlx5_mp_req_stop_rxtx(dev); usleep(1000 * priv->rxqs_n); DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id); - mlx5_flow_stop(dev, &priv->noncached_flows); + mlx5_flow_stop(dev, &priv->cached_flows); + /* + * Flows flushing is still after deleting default copy action & clearing + * flags of all RX queues. + */ + mlx5_flow_flush_noncached(dev, NULL); mlx5_traffic_disable(dev); mlx5_rx_intr_vec_disable(dev); mlx5_dev_interrupt_handler_uninstall(dev); From patchwork Tue Feb 4 11:33:19 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bing Zhao X-Patchwork-Id: 65530 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 75B95A0532; Tue, 4 Feb 2020 12:34:04 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 58B931C1A8; Tue, 4 Feb 2020 12:33:42 +0100 (CET) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org (Postfix) with ESMTP id 7A6401C139 for ; Tue, 4 Feb 2020 12:33:40 +0100 (CET) From: Bing Zhao To: orika@mellanox.com, viacheslavo@mellanox.com, rasland@mellanox.com, matan@mellanox.com Cc: dev@dpdk.org Date: Tue, 4 Feb 2020 13:33:19 +0200 Message-Id: <1580816002-159035-4-git-send-email-bingz@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1580816002-159035-1-git-send-email-bingz@mellanox.com> References: <1580736735-19472-1-git-send-email-bingz@mellanox.com> <1580816002-159035-1-git-send-email-bingz@mellanox.com> Subject: [dpdk-dev] [PATCH v2 3/6] net/mlx5: flow type check before creating X-BeenThere: dev@dpdk.org 
X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" When creating a flow, the driver mode needs to be checked in order to call the corresponding functions. Now the driver mode checking part is moved out of the flow creating function, then the flow could be added into the correct tailq list. Signed-off-by: Bing Zhao --- drivers/net/mlx5/mlx5_flow.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 0560874..8fb973b 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -2874,8 +2874,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, const struct rte_flow_attr *attr, const struct rte_flow_item items[], - const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error); + const struct rte_flow_action actions[], bool external, + enum mlx5_flow_drv_type type, struct rte_flow_error *error); static void flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, @@ -3015,7 +3015,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, * by list traversing. */ mcp_res->flow = flow_list_create(dev, NULL, &attr, items, - actions, false, error); + actions, false, + flow_get_drv_type(dev, &attr), error); if (!mcp_res->flow) goto error; mcp_res->refcnt++; @@ -4119,6 +4120,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, * Associated actions (list terminated by the END action). * @param[in] external * This flow rule is created by request external to PMD. + * @param[in] type + * Flow rule type, DV or VERBS. * @param[out] error * Perform verbose error reporting if not NULL. 
* @@ -4129,8 +4132,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, const struct rte_flow_attr *attr, const struct rte_flow_item items[], - const struct rte_flow_action actions[], - bool external, struct rte_flow_error *error) + const struct rte_flow_action actions[], bool external, + enum mlx5_flow_drv_type type, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow *flow = NULL; @@ -4188,7 +4191,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, rte_errno = ENOMEM; goto error_before_flow; } - flow->drv_type = flow_get_drv_type(dev, attr); + flow->drv_type = type; if (hairpin_id != 0) flow->hairpin_flow_id = hairpin_id; MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && @@ -4339,7 +4342,8 @@ struct rte_flow * struct rte_flow_error error; return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern, - actions, false, &error); + actions, false, + flow_get_drv_type(dev, &attr), &error); } /** @@ -4356,9 +4360,13 @@ struct rte_flow * struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flows *flow_list; + enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); - return flow_list_create(dev, &priv->noncached_flows, - attr, items, actions, true, error); + flow_list = (type == MLX5_FLOW_TYPE_DV) ? 
&priv->noncached_flows : + &priv->cached_flows; + return flow_list_create(dev, flow_list, attr, + items, actions, true, type, error); } /** @@ -4548,8 +4556,8 @@ struct rte_flow * actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; actions[0].conf = &jump; actions[1].type = RTE_FLOW_ACTION_TYPE_END; - flow = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + flow = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, + false, flow_get_drv_type(dev, &attr), &error); if (!flow) { DRV_LOG(DEBUG, "Failed to create ctrl flow: rte_errno(%d)," @@ -4636,8 +4644,8 @@ struct rte_flow * } for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow = flow_list_create(dev, &priv->ctrl_flows, - &attr, items, actions, false, &error); + flow = flow_list_create(dev, &priv->ctrl_flows, &attr, items, actions, + false, flow_get_drv_type(dev, &attr), &error); if (!flow) return -rte_errno; return 0; @@ -5078,7 +5086,7 @@ struct rte_flow * } flow = flow_list_create(dev, &priv->noncached_flows, &fdir_flow->attr, fdir_flow->items, fdir_flow->actions, true, - NULL); + flow_get_drv_type(dev, &fdir_flow->attr), NULL); if (!flow) goto error; MLX5_ASSERT(!flow->fdir); @@ -5695,8 +5703,8 @@ struct mlx5_flow_counter * if (!config->dv_flow_en) break; /* Create internal flow, validation skips copy action. 
*/ - flow = flow_list_create(dev, NULL, &attr, items, - actions, false, &error); + flow = flow_list_create(dev, NULL, &attr, items, actions, false, + flow_get_drv_type(dev, &attr), &error); if (!flow) continue; if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL)) From patchwork Tue Feb 4 11:33:20 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bing Zhao X-Patchwork-Id: 65531 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id F1CBAA0532; Tue, 4 Feb 2020 12:34:13 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id B42BA1C1AE; Tue, 4 Feb 2020 12:33:43 +0100 (CET) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org (Postfix) with ESMTP id EA5991C1A3 for ; Tue, 4 Feb 2020 12:33:41 +0100 (CET) From: Bing Zhao To: orika@mellanox.com, viacheslavo@mellanox.com, rasland@mellanox.com, matan@mellanox.com Cc: dev@dpdk.org Date: Tue, 4 Feb 2020 13:33:20 +0200 Message-Id: <1580816002-159035-5-git-send-email-bingz@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1580816002-159035-1-git-send-email-bingz@mellanox.com> References: <1580736735-19472-1-git-send-email-bingz@mellanox.com> <1580816002-159035-1-git-send-email-bingz@mellanox.com> Subject: [dpdk-dev] [PATCH v2 4/6] net/mlx5: introduce handle structure for DV flows X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Introduce a new structure "mlx5_flow_dv_handle" based on device flow structures "mlx5_flow" and "mlx5_flow_dv", and in the meanwhile, the "mlx5_flow" is kept for Verbs flow. 
Only the matchers and actions objects will be saved in order to free such resource when destroying a flow. The other information will be stored by using some intermediate global variables that can be reused for all flows when being created. Inbox OFED driver should also be taken into consideration. Signed-off-by: Bing Zhao --- drivers/net/mlx5/mlx5_flow.c | 184 +++++++++++++++++++----- drivers/net/mlx5/mlx5_flow.h | 40 +++++- drivers/net/mlx5/mlx5_flow_dv.c | 312 +++++++++++++++++++++------------------- 3 files changed, 351 insertions(+), 185 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 8fb973b..1121904 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -709,19 +709,42 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] dev_flow - * Pointer to device flow structure. + * @param[in] type + * Driver type of the RTE flow. + * @param[in] sub_flow + * Pointer to device flow or flow handle structure. 
*/ static void -flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) +flow_drv_rxq_flags_set(struct rte_eth_dev *dev, + enum mlx5_flow_drv_type type __rte_unused, + void *sub_flow) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = dev_flow->flow; - const int mark = !!(dev_flow->actions & - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + struct rte_flow *flow; + int mark; + int tunnel; + uint64_t layers; unsigned int i; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (type == MLX5_FLOW_TYPE_DV) { + struct mlx5_flow_dv_handle *handle = sub_flow; + mark = !!(handle->action_flags & + (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); + layers = handle->layers; + tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL); + flow = handle->m_flow; + } else { +#endif + struct mlx5_flow *dev_flow = sub_flow; + mark = !!(dev_flow->actions & + (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); + layers = dev_flow->layers; + tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + flow = dev_flow->flow; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + } +#endif for (i = 0; i != flow->rss.queue_num; ++i) { int idx = (*flow->rss.queue)[i]; struct mlx5_rxq_ctrl *rxq_ctrl = @@ -747,8 +770,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, /* Increase the counter matching the flow. 
*/ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & - dev_flow->layers) == + if ((tunnels_info[j].tunnel & layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]++; break; @@ -771,9 +793,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { struct mlx5_flow *dev_flow; + enum mlx5_flow_drv_type type = flow->drv_type; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - flow_drv_rxq_flags_set(dev, dev_flow); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_flow_dv_handle *handle; + if (type == MLX5_FLOW_TYPE_DV) + SLIST_FOREACH(handle, &flow->handles, next) + flow_drv_rxq_flags_set(dev, type, (void *)handle); + else +#endif + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_set(dev, type, (void *)dev_flow); } /** @@ -782,20 +812,44 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, * * @param dev * Pointer to Ethernet device. - * @param[in] dev_flow - * Pointer to the device flow. + * @param[in] type + * Driver type of the RTE flow. + * @param[in] sub_flow + * Pointer to device flow or flow handle structure. 
+ */ static void -flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) +flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, + enum mlx5_flow_drv_type type __rte_unused, + void *sub_flow) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow *flow = dev_flow->flow; - const int mark = !!(dev_flow->actions & - (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); + struct rte_flow *flow; + int mark; + int tunnel; + uint64_t layers; unsigned int i; MLX5_ASSERT(dev->data->dev_started); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (type == MLX5_FLOW_TYPE_DV) { + struct mlx5_flow_dv_handle *handle = sub_flow; + mark = !!(handle->action_flags & + (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); + layers = handle->layers; + tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL); + flow = handle->m_flow; + } else { +#endif + struct mlx5_flow *dev_flow = sub_flow; + mark = !!(dev_flow->actions & + (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); + layers = dev_flow->layers; + tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL); + flow = dev_flow->flow; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + } +#endif for (i = 0; i != flow->rss.queue_num; ++i) { int idx = (*flow->rss.queue)[i]; struct mlx5_rxq_ctrl *rxq_ctrl = @@ -816,8 +870,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, /* Decrease the counter matching the flow. 
*/ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & - dev_flow->layers) == + if ((tunnels_info[j].tunnel & layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]--; break; @@ -841,9 +894,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { struct mlx5_flow *dev_flow; + enum mlx5_flow_drv_type type = flow->drv_type; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - flow_drv_rxq_flags_trim(dev, dev_flow); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_flow_dv_handle *handle; + if (type == MLX5_FLOW_TYPE_DV) + SLIST_FOREACH(handle, &flow->handles, next) + flow_drv_rxq_flags_trim(dev, type, (void *)handle); + else +#endif + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_trim(dev, type, (void *)dev_flow); } /** @@ -2341,10 +2402,22 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, struct rte_flow *flow) { struct mlx5_flow *dev_flow; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_flow_dv_handle *handle; + enum mlx5_flow_drv_type type = flow->drv_type; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) - if (dev_flow->qrss_id) - flow_qrss_free_id(dev, dev_flow->qrss_id); + if (type == MLX5_FLOW_TYPE_DV) { + SLIST_FOREACH(handle, &flow->handles, next) + if (handle->qrss_id) + flow_qrss_free_id(dev, handle->qrss_id); + } else { +#endif + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + if (dev_flow->qrss_id) + flow_qrss_free_id(dev, dev_flow->qrss_id); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + } +#endif } static int @@ -3434,10 +3507,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, dev_flow = flow_drv_prepare(flow, attr, items, actions, error); if (!dev_flow) return -rte_errno; - dev_flow->flow = flow; dev_flow->external = external; - /* Subflow object was created, we must include one in the list. 
*/ - LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); + dev_flow->flow = flow; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (flow->drv_type == MLX5_FLOW_TYPE_DV) { + SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next); + dev_flow->dv_handle->sidx = flow->sub_flows++; + dev_flow->dv_handle->m_flow = flow; + } else { +#endif + /* Subflow obj was created, we must include one in the list. */ + LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + } +#endif if (sub_flow) *sub_flow = dev_flow; return flow_drv_translate(dev, dev_flow, attr, items, actions, error); @@ -3900,6 +3983,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, * other flows in other threads). */ dev_flow->qrss_id = qrss_id; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (flow->drv_type == MLX5_FLOW_TYPE_DV) + dev_flow->dv_handle->qrss_id = qrss_id; +#endif qrss_id = 0; ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); @@ -4012,6 +4099,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, goto exit; } dev_flow->mtr_flow_id = mtr_tag_id; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (flow->drv_type == MLX5_FLOW_TYPE_DV) + dev_flow->dv_handle->mtr_flow_id = mtr_tag_id; +#endif /* Prepare the suffix flow match pattern. 
*/ sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); @@ -4164,6 +4255,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, uint32_t hairpin_id = 0; struct rte_flow_attr attr_tx = { .priority = 0 }; + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); hairpin_flow = flow_check_hairpin_split(dev, attr, actions); if (hairpin_flow > 0) { if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { @@ -4192,10 +4284,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, goto error_before_flow; } flow->drv_type = type; + flow->sub_flows = 0; if (hairpin_id != 0) flow->hairpin_flow_id = hairpin_id; - MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && - flow->drv_type < MLX5_FLOW_TYPE_MAX); flow->rss.queue = (void *)(flow + 1); if (rss) { /* @@ -4206,7 +4297,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ flow->rss.types = !rss->types ? 
ETH_RSS_IP : rss->types; } - LIST_INIT(&flow->dev_flows); + if (flow->drv_type == MLX5_FLOW_TYPE_DV) + SLIST_INIT(&flow->handles); + else + LIST_INIT(&flow->dev_flows); if (rss && rss->types) { unsigned int graph_root; @@ -4243,9 +4337,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, actions_hairpin_tx.actions, error); if (!dev_flow) goto error; - dev_flow->flow = flow; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (flow->drv_type == MLX5_FLOW_TYPE_DV) { + SLIST_INSERT_HEAD(&flow->handles, + dev_flow->dv_handle, next); + dev_flow->dv_handle->sidx = flow->sub_flows++; + dev_flow->dv_handle->m_flow = flow; + } else { +#endif + dev_flow->flow = flow; + LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + } +#endif dev_flow->external = 0; - LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); ret = flow_drv_translate(dev, dev_flow, &attr_tx, items_tx.items, actions_hairpin_tx.actions, error); @@ -4363,8 +4468,17 @@ struct rte_flow * struct mlx5_flows *flow_list; enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); - flow_list = (type == MLX5_FLOW_TYPE_DV) ? &priv->noncached_flows : - &priv->cached_flows; + if (type == MLX5_FLOW_TYPE_DV) { + if (unlikely(!dev->data->dev_started)) { + rte_errno = ENODEV; + DRV_LOG(DEBUG, "port %u is not started when " + "inserting a flow", dev->data->port_id); + return NULL; + } + flow_list = &priv->noncached_flows; + } else { + flow_list = &priv->cached_flows; + } return flow_list_create(dev, flow_list, attr, items, actions, true, type, error); } diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 7c31bfe..10ac9c3 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -468,6 +468,39 @@ struct mlx5_flow_tbl_data_entry { /**< jump resource, at most one for each table created. */ }; +struct mlx5_flow_dv_handle { + SLIST_ENTRY(mlx5_flow_dv_handle) next; + struct rte_flow *m_flow; /**< Pointer to the main flow. 
*/ + uint64_t layers; + /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */ + uint64_t action_flags; + /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */ + struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */ + struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */ + struct mlx5_flow_dv_match_params value; + /**< Holds the value that the packet is compared to. */ + struct mlx5_flow_dv_encap_decap_resource *encap_decap; + /**< Pointer to encap/decap resource in cache. */ + struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; + /**< Pointer to modify header resource in cache. */ + struct mlx5_flow_dv_jump_tbl_resource *jump; + /**< Pointer to the jump action resource. */ + struct mlx5_flow_dv_port_id_action_resource *port_id_action; + /**< Pointer to port ID action resource. */ + struct mlx5_vf_vlan vf_vlan; + /**< Structure for VF VLAN workaround. */ + struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res; + /**< Pointer to push VLAN action resource in cache. */ + struct mlx5_flow_dv_tag_resource *tag_resource; + /**< pointer to the tag action. */ + struct ibv_flow *flow; /**< Installed flow. */ + union { + uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */ + uint32_t mtr_flow_id; /**< Unique meter match flow id. */ + }; + uint8_t sidx; +}; + /* * Max number of actions per DV flow. * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED @@ -547,12 +580,12 @@ struct mlx5_flow { uint8_t transfer; /**< 1 if the flow is E-Switch flow. */ union { #ifdef HAVE_IBV_FLOW_DV_SUPPORT - struct mlx5_flow_dv dv; + struct mlx5_flow_dv_handle *dv_handle; #endif struct mlx5_flow_verbs verbs; }; union { - uint32_t qrss_id; /**< Uniqie Q/RSS suffix subflow tag. */ + uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */ uint32_t mtr_flow_id; /**< Unique meter match flow id. */ }; bool external; /**< true if the flow is created external to PMD. */ @@ -674,6 +707,9 @@ struct rte_flow { struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. 
*/ uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */ uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */ + SLIST_HEAD(, mlx5_flow_dv_handle) handles; + /**< The HEAD of DV handles. */ + uint8_t sub_flows; }; typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 2878393..33a3d70 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -75,6 +75,16 @@ uint32_t attr; }; +/* Global temporary device flow. */ +struct mlx5_flow sflow; +/* Global subsidiary device flows actions' list. */ +struct { + void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; + uint64_t hash_fields; + int actions_n; + uint8_t transfer; /**< 1 if the flow is E-Switch flow. */ +} sflow_act[8]; + /** * Initialize flow attributes structure according to flow items' types. * @@ -2348,7 +2358,7 @@ struct field_modify_info modify_tcp[] = { (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.encap_decap = cache_resource; + dev_flow->dv_handle->encap_decap = cache_resource; return 0; } } @@ -2374,7 +2384,7 @@ struct field_modify_info modify_tcp[] = { rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next); - dev_flow->dv.encap_decap = cache_resource; + dev_flow->dv_handle->encap_decap = cache_resource; DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -2425,7 +2435,7 @@ struct field_modify_info modify_tcp[] = { (void *)&tbl_data->jump, cnt); } rte_atomic32_inc(&tbl_data->jump.refcnt); - dev_flow->dv.jump = &tbl_data->jump; + dev_flow->dv_handle->jump = &tbl_data->jump; return 0; } @@ -2463,7 +2473,7 @@ struct field_modify_info modify_tcp[] = { (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); 
rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.port_id_action = cache_resource; + dev_flow->dv_handle->port_id_action = cache_resource; return 0; } } @@ -2491,7 +2501,7 @@ struct field_modify_info modify_tcp[] = { rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next); - dev_flow->dv.port_id_action = cache_resource; + dev_flow->dv_handle->port_id_action = cache_resource; DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -2534,7 +2544,7 @@ struct field_modify_info modify_tcp[] = { (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.push_vlan_res = cache_resource; + dev_flow->dv_handle->push_vlan_res = cache_resource; return 0; } } @@ -2563,7 +2573,7 @@ struct field_modify_info modify_tcp[] = { rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next); - dev_flow->dv.push_vlan_res = cache_resource; + dev_flow->dv_handle->push_vlan_res = cache_resource; DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -3652,7 +3662,7 @@ struct field_modify_info modify_tcp[] = { (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.modify_hdr = cache_resource; + dev_flow->dv_handle->modify_hdr = cache_resource; return 0; } } @@ -3679,7 +3689,7 @@ struct field_modify_info modify_tcp[] = { rte_atomic32_init(&cache_resource->refcnt); rte_atomic32_inc(&cache_resource->refcnt); LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); - dev_flow->dv.modify_hdr = cache_resource; + dev_flow->dv_handle->modify_hdr = cache_resource; DRV_LOG(DEBUG, "new modify-header resource %p: 
refcnt %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -5102,19 +5112,24 @@ struct field_modify_info modify_tcp[] = { const struct rte_flow_action actions[] __rte_unused, struct rte_flow_error *error) { - size_t size = sizeof(struct mlx5_flow); + size_t size = sizeof(struct mlx5_flow_dv_handle); struct mlx5_flow *dev_flow; + struct mlx5_flow_dv_handle *dv_handle; - dev_flow = rte_calloc(__func__, 1, size, 0); - if (!dev_flow) { + /* No need to clear to 0. */ + dev_flow = &sflow; + dv_handle = rte_zmalloc(__func__, size, 0); + if (!dv_handle) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "not enough memory to create flow"); + "not enough memory to create flow handle"); return NULL; } - dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); dev_flow->ingress = attr->ingress; dev_flow->transfer = attr->transfer; + dv_handle->value.size = MLX5_ST_SZ_BYTES(fte_match_param); + /* DV support already defined, compiler will happy for inbox driver. */ + dev_flow->dv_handle = dv_handle; return dev_flow; } @@ -5253,7 +5268,7 @@ struct field_modify_info modify_tcp[] = { * This is workaround, masks are not supported, * and pre-validated. */ - dev_flow->dv.vf_vlan.tag = + dev_flow->dv_handle->vf_vlan.tag = rte_be_to_cpu_16(vlan_v->tci) & 0x0fff; } tci_m = rte_be_to_cpu_16(vlan_m->tci); @@ -6712,7 +6727,7 @@ struct field_modify_info modify_tcp[] = { (void *)cache_matcher, rte_atomic32_read(&cache_matcher->refcnt)); rte_atomic32_inc(&cache_matcher->refcnt); - dev_flow->dv.matcher = cache_matcher; + dev_flow->dv_handle->matcher = cache_matcher; /* old matcher should not make the table ref++. */ flow_dv_tbl_resource_release(dev, tbl); return 0; @@ -6749,7 +6764,7 @@ struct field_modify_info modify_tcp[] = { /* only matcher ref++, table ref++ already done above in get API. 
*/ rte_atomic32_inc(&cache_matcher->refcnt); LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); - dev_flow->dv.matcher = cache_matcher; + dev_flow->dv_handle->matcher = cache_matcher; DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", key->domain ? "FDB" : "NIC", key->table_id, cache_matcher->priority, @@ -6791,7 +6806,7 @@ struct field_modify_info modify_tcp[] = { cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); rte_atomic32_inc(&cache_resource->refcnt); - dev_flow->dv.tag_resource = cache_resource; + dev_flow->dv_handle->tag_resource = cache_resource; DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -6820,7 +6835,7 @@ struct field_modify_info modify_tcp[] = { RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot insert tag"); } - dev_flow->dv.tag_resource = cache_resource; + dev_flow->dv_handle->tag_resource = cache_resource; DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); @@ -7022,6 +7037,9 @@ struct field_modify_info modify_tcp[] = { dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH; } } + /* No need to save the hash fileds after creation. 
*/ + sflow_act[dev_flow->dv_handle->sidx].hash_fields = + dev_flow->hash_fields; } /** @@ -7065,6 +7083,7 @@ struct field_modify_info modify_tcp[] = { }, }; int actions_n = 0; + uint8_t sidx = dev_flow->dv_handle->sidx; bool actions_end = false; union { struct mlx5_flow_dv_modify_hdr_resource res; @@ -7076,9 +7095,9 @@ struct field_modify_info modify_tcp[] = { union flow_dv_attr flow_attr = { .attr = 0 }; uint32_t tag_be; union mlx5_flow_tbl_key tbl_key; - uint32_t modify_action_position = UINT32_MAX; + uint32_t modify_action_pos = UINT32_MAX; void *match_mask = matcher.mask.buf; - void *match_value = dev_flow->dv.value.buf; + void *match_value = dev_flow->dv_handle->value.buf; uint8_t next_protocol = 0xff; struct rte_vlan_hdr vlan = { 0 }; uint32_t table; @@ -7122,8 +7141,8 @@ struct field_modify_info modify_tcp[] = { if (flow_dv_port_id_action_resource_register (dev, &port_id_resource, dev_flow, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.port_id_action->action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->port_id_action->action; action_flags |= MLX5_FLOW_ACTION_PORT_ID; break; case RTE_FLOW_ACTION_TYPE_FLAG: @@ -7132,7 +7151,6 @@ struct field_modify_info modify_tcp[] = { struct rte_flow_action_mark mark = { .id = MLX5_FLOW_MARK_DEFAULT, }; - if (flow_dv_convert_action_mark(dev, &mark, mhdr_res, error)) @@ -7141,12 +7159,12 @@ struct field_modify_info modify_tcp[] = { break; } tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); - if (!dev_flow->dv.tag_resource) + if (!dev_flow->dv_handle->tag_resource) if (flow_dv_tag_resource_register (dev, tag_be, dev_flow, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->tag_resource->action; break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; @@ -7168,12 +7186,12 @@ struct field_modify_info modify_tcp[] = { tag_be = 
mlx5_flow_mark_set (((const struct rte_flow_action_mark *) (actions->conf))->id); - if (!dev_flow->dv.tag_resource) + if (!dev_flow->dv_handle->tag_resource) if (flow_dv_tag_resource_register (dev, tag_be, dev_flow, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.tag_resource->action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->tag_resource->action; break; case RTE_FLOW_ACTION_TYPE_SET_META: if (flow_dv_convert_action_set_meta @@ -7228,7 +7246,7 @@ struct field_modify_info modify_tcp[] = { dev_flow->group); if (flow->counter == NULL) goto cnt_err; - dev_flow->dv.actions[actions_n++] = + sflow_act[sidx].actions[actions_n++] = flow->counter->action; action_flags |= MLX5_FLOW_ACTION_COUNT; break; @@ -7248,7 +7266,7 @@ struct field_modify_info modify_tcp[] = { " object."); break; case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: - dev_flow->dv.actions[actions_n++] = + sflow_act[sidx].actions[actions_n++] = priv->sh->pop_vlan_action; action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; break; @@ -7270,8 +7288,8 @@ struct field_modify_info modify_tcp[] = { if (flow_dv_create_action_push_vlan (dev, attr, &vlan, dev_flow, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.push_vlan_res->action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->push_vlan_res->action; action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; break; case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: @@ -7297,8 +7315,8 @@ struct field_modify_info modify_tcp[] = { attr->transfer, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->encap_decap->verbs_action; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ? 
MLX5_FLOW_ACTION_VXLAN_ENCAP : @@ -7310,8 +7328,8 @@ struct field_modify_info modify_tcp[] = { attr->transfer, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->encap_decap->verbs_action; action_flags |= actions->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ? MLX5_FLOW_ACTION_VXLAN_DECAP : @@ -7323,16 +7341,16 @@ struct field_modify_info modify_tcp[] = { if (flow_dv_create_action_raw_encap (dev, actions, dev_flow, attr, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->encap_decap->verbs_action; } else { /* Handle encap without preceding decap. */ if (flow_dv_create_action_l2_encap (dev, actions, dev_flow, attr->transfer, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->encap_decap->verbs_action; } action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP; break; @@ -7347,8 +7365,8 @@ struct field_modify_info modify_tcp[] = { if (flow_dv_create_action_l2_decap (dev, dev_flow, attr->transfer, error)) return -rte_errno; - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->encap_decap->verbs_action; } /* If decap is followed by encap, handle it at encap. 
*/ action_flags |= MLX5_FLOW_ACTION_RAW_DECAP; @@ -7379,8 +7397,8 @@ struct field_modify_info modify_tcp[] = { NULL, "cannot create jump action."); } - dev_flow->dv.actions[actions_n++] = - dev_flow->dv.jump->action; + sflow_act[sidx].actions[actions_n++] = + dev_flow->dv_handle->jump->action; action_flags |= MLX5_FLOW_ACTION_JUMP; break; case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: @@ -7485,7 +7503,7 @@ struct field_modify_info modify_tcp[] = { "or invalid parameters"); } /* Set the meter action. */ - dev_flow->dv.actions[actions_n++] = + sflow_act[sidx].actions[actions_n++] = flow->meter->mfts->meter_action; action_flags |= MLX5_FLOW_ACTION_METER; break; @@ -7508,19 +7526,19 @@ struct field_modify_info modify_tcp[] = { if (flow_dv_modify_hdr_resource_register (dev, mhdr_res, dev_flow, error)) return -rte_errno; - dev_flow->dv.actions[modify_action_position] = - dev_flow->dv.modify_hdr->verbs_action; + sflow_act[sidx].actions[modify_action_pos] = + dev_flow->dv_handle->modify_hdr->verbs_action; } break; default: break; } - if (mhdr_res->actions_num && - modify_action_position == UINT32_MAX) - modify_action_position = actions_n++; + if (mhdr_res->actions_num && modify_action_pos == UINT32_MAX) + modify_action_pos = actions_n++; } - dev_flow->dv.actions_n = actions_n; - dev_flow->actions = action_flags; + sflow_act[sidx].actions_n = actions_n; + sflow_act[sidx].transfer = dev_flow->transfer; + dev_flow->dv_handle->action_flags = action_flags; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int item_type = items->type; @@ -7705,9 +7723,9 @@ struct field_modify_info modify_tcp[] = { } #ifdef RTE_LIBRTE_MLX5_DEBUG MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, - dev_flow->dv.value.buf)); + dev_flow->dv_handle->value.buf)); #endif - dev_flow->layers = item_flags; + dev_flow->dv_handle->layers = item_flags; if (action_flags & MLX5_FLOW_ACTION_RSS) flow_dv_hashfields_set(dev_flow); /* Register matcher. 
*/ @@ -7742,21 +7760,23 @@ struct field_modify_info modify_tcp[] = { __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { - struct mlx5_flow_dv *dv; - struct mlx5_flow *dev_flow; + struct mlx5_flow_dv_handle *dv_handle; struct mlx5_priv *priv = dev->data->dev_private; + void *matcher_obj; int n; int err; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - dv = &dev_flow->dv; - n = dv->actions_n; - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) { - if (dev_flow->transfer) { - dv->actions[n++] = priv->sh->esw_drop_action; + SLIST_FOREACH(dv_handle, &flow->handles, next) { + uint8_t sidx = dv_handle->sidx; + n = sflow_act[sidx].actions_n; + + if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP) { + if (sflow_act[sidx].transfer) { + sflow_act[sidx].actions[n++] = + priv->sh->esw_drop_action; } else { - dv->hrxq = mlx5_hrxq_drop_new(dev); - if (!dv->hrxq) { + dv_handle->hrxq = mlx5_hrxq_drop_new(dev); + if (!dv_handle->hrxq) { rte_flow_error_set (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, @@ -7764,26 +7784,27 @@ struct field_modify_info modify_tcp[] = { "cannot get drop hash queue"); goto error; } - dv->actions[n++] = dv->hrxq->action; + sflow_act[sidx].actions[n++] = + dv_handle->hrxq->action; } - } else if (dev_flow->actions & + } else if (dv_handle->action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) { struct mlx5_hrxq *hrxq; MLX5_ASSERT(flow->rss.queue); hrxq = mlx5_hrxq_get(dev, flow->rss.key, MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, + sflow_act[sidx].hash_fields, (*flow->rss.queue), flow->rss.queue_num); if (!hrxq) { hrxq = mlx5_hrxq_new (dev, flow->rss.key, MLX5_RSS_HASH_KEY_LEN, - dev_flow->hash_fields, + sflow_act[sidx].hash_fields, (*flow->rss.queue), flow->rss.queue_num, - !!(dev_flow->layers & + !!(dv_handle->layers & MLX5_FLOW_LAYER_TUNNEL)); } if (!hrxq) { @@ -7793,47 +7814,45 @@ struct field_modify_info modify_tcp[] = { "cannot get hash queue"); goto error; } - dv->hrxq = hrxq; - 
dv->actions[n++] = dv->hrxq->action; + dv_handle->hrxq = hrxq; + sflow_act[sidx].actions[n++] = hrxq->action; } - dv->flow = - mlx5_glue->dv_create_flow(dv->matcher->matcher_object, - (void *)&dv->value, n, - dv->actions); - if (!dv->flow) { + matcher_obj = dv_handle->matcher->matcher_object; + dv_handle->flow = + mlx5_glue->dv_create_flow(matcher_obj, + (void *)&dv_handle->value, + n, sflow_act[sidx].actions); + if (!dv_handle->flow) { rte_flow_error_set(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "hardware refuses to create flow"); goto error; } - if (priv->vmwa_context && - dev_flow->dv.vf_vlan.tag && - !dev_flow->dv.vf_vlan.created) { + if (priv->vmwa_context && dv_handle->vf_vlan.tag && + !dv_handle->vf_vlan.created) { /* * The rule contains the VLAN pattern. * For VF we are going to create VLAN * interface to make hypervisor set correct * e-Switch vport context. */ - mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan); + mlx5_vlan_vmwa_acquire(dev, &dv_handle->vf_vlan); } } return 0; error: err = rte_errno; /* Save rte_errno before cleanup. */ - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - struct mlx5_flow_dv *dv = &dev_flow->dv; - if (dv->hrxq) { - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) + SLIST_FOREACH(dv_handle, &flow->handles, next) { + if (dv_handle->hrxq) { + if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else - mlx5_hrxq_release(dev, dv->hrxq); - dv->hrxq = NULL; + mlx5_hrxq_release(dev, dv_handle->hrxq); + dv_handle->hrxq = NULL; } - if (dev_flow->dv.vf_vlan.tag && - dev_flow->dv.vf_vlan.created) - mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan); + if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan); } rte_errno = err; /* Restore rte_errno. */ return -rte_errno; @@ -7844,17 +7863,17 @@ struct field_modify_info modify_tcp[] = { * * @param dev * Pointer to Ethernet device. - * @param flow - * Pointer to mlx5_flow. 
+ * @param handle + * Pointer to mlx5_flow_dv_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int flow_dv_matcher_release(struct rte_eth_dev *dev, - struct mlx5_flow *flow) + struct mlx5_flow_dv_handle *handle) { - struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher; + struct mlx5_flow_dv_matcher *matcher = handle->matcher; MLX5_ASSERT(matcher->matcher_object); DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", @@ -7877,17 +7896,17 @@ struct field_modify_info modify_tcp[] = { /** * Release an encap/decap resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_dv_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_encap_decap_resource_release(struct mlx5_flow *flow) +flow_dv_encap_decap_resource_release(struct mlx5_flow_dv_handle *handle) { struct mlx5_flow_dv_encap_decap_resource *cache_resource = - flow->dv.encap_decap; + handle->encap_decap; MLX5_ASSERT(cache_resource->verbs_action); DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", @@ -7910,17 +7929,17 @@ struct field_modify_info modify_tcp[] = { * * @param dev * Pointer to Ethernet device. - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_dv_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, - struct mlx5_flow *flow) + struct mlx5_flow_dv_handle *handle) { - struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump; + struct mlx5_flow_dv_jump_tbl_resource *cache_resource = handle->jump; struct mlx5_flow_tbl_data_entry *tbl_data = container_of(cache_resource, struct mlx5_flow_tbl_data_entry, jump); @@ -7944,17 +7963,17 @@ struct field_modify_info modify_tcp[] = { /** * Release a modify-header resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_dv_handle. 
* * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow) +flow_dv_modify_hdr_resource_release(struct mlx5_flow_dv_handle *handle) { struct mlx5_flow_dv_modify_hdr_resource *cache_resource = - flow->dv.modify_hdr; + handle->modify_hdr; MLX5_ASSERT(cache_resource->verbs_action); DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", @@ -7975,17 +7994,17 @@ struct field_modify_info modify_tcp[] = { /** * Release port ID action resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_dv_handle. * * @return * 1 while a reference on it exists, 0 when freed. */ static int -flow_dv_port_id_action_resource_release(struct mlx5_flow *flow) +flow_dv_port_id_action_resource_release(struct mlx5_flow_dv_handle *handle) { struct mlx5_flow_dv_port_id_action_resource *cache_resource = - flow->dv.port_id_action; + handle->port_id_action; MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", @@ -8006,17 +8025,17 @@ struct field_modify_info modify_tcp[] = { /** * Release push vlan action resource. * - * @param flow - * Pointer to mlx5_flow. + * @param handle + * Pointer to mlx5_flow_dv_handle. * * @return * 1 while a reference on it exists, 0 when freed. 
*/ static int -flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow) +flow_dv_push_vlan_action_resource_release(struct mlx5_flow_dv_handle *handle) { struct mlx5_flow_dv_push_vlan_action_resource *cache_resource = - flow->dv.push_vlan_res; + handle->push_vlan_res; MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--", @@ -8046,27 +8065,24 @@ struct field_modify_info modify_tcp[] = { static void __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_dv *dv; - struct mlx5_flow *dev_flow; + struct mlx5_flow_dv_handle *dv_handle; if (!flow) return; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - dv = &dev_flow->dv; - if (dv->flow) { - claim_zero(mlx5_glue->dv_destroy_flow(dv->flow)); - dv->flow = NULL; + SLIST_FOREACH(dv_handle, &flow->handles, next) { + if (dv_handle->flow) { + claim_zero(mlx5_glue->dv_destroy_flow(dv_handle->flow)); + dv_handle->flow = NULL; } - if (dv->hrxq) { - if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) + if (dv_handle->hrxq) { + if (dv_handle->action_flags & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else - mlx5_hrxq_release(dev, dv->hrxq); - dv->hrxq = NULL; + mlx5_hrxq_release(dev, dv_handle->hrxq); + dv_handle->hrxq = NULL; } - if (dev_flow->dv.vf_vlan.tag && - dev_flow->dv.vf_vlan.created) - mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan); + if (dv_handle->vf_vlan.tag && dv_handle->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dv_handle->vf_vlan); } } @@ -8082,7 +8098,7 @@ struct field_modify_info modify_tcp[] = { static void __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_flow_dv_handle *dv_handle; if (!flow) return; @@ -8095,24 +8111,24 @@ struct field_modify_info modify_tcp[] = { mlx5_flow_meter_detach(flow->meter); flow->meter = NULL; } - while (!LIST_EMPTY(&flow->dev_flows)) { - dev_flow = LIST_FIRST(&flow->dev_flows); - LIST_REMOVE(dev_flow, next); - if 
(dev_flow->dv.matcher) - flow_dv_matcher_release(dev, dev_flow); - if (dev_flow->dv.encap_decap) - flow_dv_encap_decap_resource_release(dev_flow); - if (dev_flow->dv.modify_hdr) - flow_dv_modify_hdr_resource_release(dev_flow); - if (dev_flow->dv.jump) - flow_dv_jump_tbl_resource_release(dev, dev_flow); - if (dev_flow->dv.port_id_action) - flow_dv_port_id_action_resource_release(dev_flow); - if (dev_flow->dv.push_vlan_res) - flow_dv_push_vlan_action_resource_release(dev_flow); - if (dev_flow->dv.tag_resource) - flow_dv_tag_release(dev, dev_flow->dv.tag_resource); - rte_free(dev_flow); + while (!SLIST_EMPTY(&flow->handles)) { + dv_handle = SLIST_FIRST(&flow->handles); + SLIST_REMOVE_HEAD(&flow->handles, next); + if (dv_handle->matcher) + flow_dv_matcher_release(dev, dv_handle); + if (dv_handle->encap_decap) + flow_dv_encap_decap_resource_release(dv_handle); + if (dv_handle->modify_hdr) + flow_dv_modify_hdr_resource_release(dv_handle); + if (dv_handle->jump) + flow_dv_jump_tbl_resource_release(dev, dv_handle); + if (dv_handle->port_id_action) + flow_dv_port_id_action_resource_release(dv_handle); + if (dv_handle->push_vlan_res) + flow_dv_push_vlan_action_resource_release(dv_handle); + if (dv_handle->tag_resource) + flow_dv_tag_release(dev, dv_handle->tag_resource); + rte_free(dv_handle); } } From patchwork Tue Feb 4 11:33:21 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bing Zhao X-Patchwork-Id: 65532 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 48F8CA0532; Tue, 4 Feb 2020 12:34:25 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 37DA51C1B6; Tue, 4 Feb 2020 12:33:45 +0100 (CET) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org 
(Postfix) with ESMTP id BECFE1C1AF for ; Tue, 4 Feb 2020 12:33:43 +0100 (CET) From: Bing Zhao To: orika@mellanox.com, viacheslavo@mellanox.com, rasland@mellanox.com, matan@mellanox.com Cc: dev@dpdk.org Date: Tue, 4 Feb 2020 13:33:21 +0200 Message-Id: <1580816002-159035-6-git-send-email-bingz@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1580816002-159035-1-git-send-email-bingz@mellanox.com> References: <1580736735-19472-1-git-send-email-bingz@mellanox.com> <1580816002-159035-1-git-send-email-bingz@mellanox.com> Subject: [dpdk-dev] [PATCH v2 5/6] net/mlx5: remove the DV support macro checking X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Some structures are defined in the mlx5_flow header file and only used for flows with DV driver type. When using inbox driver, the DV mode is not supported. But the code is unique and there is no need to use pre-processing macro since all code could be compiled. 
Signed-off-by: Bing Zhao --- drivers/net/mlx5/mlx5_flow.c | 42 ++++++++---------------------------------- drivers/net/mlx5/mlx5_flow.h | 2 -- 2 files changed, 8 insertions(+), 36 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 1121904..2b2ba20 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -716,7 +716,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, */ static void flow_drv_rxq_flags_set(struct rte_eth_dev *dev, - enum mlx5_flow_drv_type type __rte_unused, + enum mlx5_flow_drv_type type, void *sub_flow) { struct mlx5_priv *priv = dev->data->dev_private; @@ -726,7 +726,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, uint64_t layers; unsigned int i; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (type == MLX5_FLOW_TYPE_DV) { struct mlx5_flow_dv_handle *handle = sub_flow; mark = !!(handle->action_flags & @@ -735,16 +734,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL); flow = handle->m_flow; } else { -#endif struct mlx5_flow *dev_flow = sub_flow; mark = !!(dev_flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); layers = dev_flow->layers; tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); flow = dev_flow->flow; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT } -#endif for (i = 0; i != flow->rss.queue_num; ++i) { int idx = (*flow->rss.queue)[i]; struct mlx5_rxq_ctrl *rxq_ctrl = @@ -793,15 +789,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { struct mlx5_flow *dev_flow; + struct mlx5_flow_dv_handle *handle; enum mlx5_flow_drv_type type = flow->drv_type; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - struct mlx5_flow_dv_handle *handle; if (type == MLX5_FLOW_TYPE_DV) SLIST_FOREACH(handle, &flow->handles, next) flow_drv_rxq_flags_set(dev, type, (void *)handle); else 
-#endif LIST_FOREACH(dev_flow, &flow->dev_flows, next) flow_drv_rxq_flags_set(dev, type, (void *)dev_flow); } @@ -820,7 +814,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, */ static void flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, - enum mlx5_flow_drv_type type __rte_unused, + enum mlx5_flow_drv_type type, void *sub_flow) { struct mlx5_priv *priv = dev->data->dev_private; @@ -831,7 +825,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, unsigned int i; MLX5_ASSERT(dev->data->dev_started); -#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (type == MLX5_FLOW_TYPE_DV) { struct mlx5_flow_dv_handle *handle = sub_flow; mark = !!(handle->action_flags & @@ -840,16 +833,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL); flow = handle->m_flow; } else { -#endif struct mlx5_flow *dev_flow = sub_flow; mark = !!(dev_flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); layers = dev_flow->layers; tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL); flow = dev_flow->flow; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT } -#endif for (i = 0; i != flow->rss.queue_num; ++i) { int idx = (*flow->rss.queue)[i]; struct mlx5_rxq_ctrl *rxq_ctrl = @@ -894,15 +884,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { struct mlx5_flow *dev_flow; + struct mlx5_flow_dv_handle *handle; enum mlx5_flow_drv_type type = flow->drv_type; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - struct mlx5_flow_dv_handle *handle; if (type == MLX5_FLOW_TYPE_DV) SLIST_FOREACH(handle, &flow->handles, next) flow_drv_rxq_flags_trim(dev, type, (void *)handle); else -#endif LIST_FOREACH(dev_flow, &flow->dev_flows, next) flow_drv_rxq_flags_trim(dev, type, (void *)dev_flow); } @@ -2402,7 +2390,6 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, struct rte_flow *flow) { struct 
mlx5_flow *dev_flow; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT struct mlx5_flow_dv_handle *handle; enum mlx5_flow_drv_type type = flow->drv_type; @@ -2411,13 +2398,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, if (handle->qrss_id) flow_qrss_free_id(dev, handle->qrss_id); } else { -#endif LIST_FOREACH(dev_flow, &flow->dev_flows, next) if (dev_flow->qrss_id) flow_qrss_free_id(dev, dev_flow->qrss_id); -#ifdef HAVE_IBV_FLOW_DV_SUPPORT } -#endif } static int @@ -3509,18 +3493,14 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, return -rte_errno; dev_flow->external = external; dev_flow->flow = flow; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (flow->drv_type == MLX5_FLOW_TYPE_DV) { SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next); dev_flow->dv_handle->sidx = flow->sub_flows++; dev_flow->dv_handle->m_flow = flow; } else { -#endif /* Subflow obj was created, we must include one in the list. */ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); -#ifdef HAVE_IBV_FLOW_DV_SUPPORT } -#endif if (sub_flow) *sub_flow = dev_flow; return flow_drv_translate(dev, dev_flow, attr, items, actions, error); @@ -3982,11 +3962,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, * reallocation becomes possible (for example, for * other flows in other threads). 
*/ - dev_flow->qrss_id = qrss_id; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (flow->drv_type == MLX5_FLOW_TYPE_DV) dev_flow->dv_handle->qrss_id = qrss_id; -#endif + else + dev_flow->qrss_id = qrss_id; qrss_id = 0; ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); @@ -4098,11 +4077,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, ret = -rte_errno; goto exit; } - dev_flow->mtr_flow_id = mtr_tag_id; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (flow->drv_type == MLX5_FLOW_TYPE_DV) dev_flow->dv_handle->mtr_flow_id = mtr_tag_id; -#endif + else + dev_flow->mtr_flow_id = mtr_tag_id; /* Prepare the suffix flow match pattern. */ sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size); @@ -4337,19 +4315,15 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, actions_hairpin_tx.actions, error); if (!dev_flow) goto error; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (flow->drv_type == MLX5_FLOW_TYPE_DV) { SLIST_INSERT_HEAD(&flow->handles, dev_flow->dv_handle, next); dev_flow->dv_handle->sidx = flow->sub_flows++; dev_flow->dv_handle->m_flow = flow; } else { -#endif dev_flow->flow = flow; LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); -#ifdef HAVE_IBV_FLOW_DV_SUPPORT } -#endif dev_flow->external = 0; ret = flow_drv_translate(dev, dev_flow, &attr_tx, items_tx.items, diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 10ac9c3..5e517c3 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -579,9 +579,7 @@ struct mlx5_flow { uint32_t group; /**< The group index. */ uint8_t transfer; /**< 1 if the flow is E-Switch flow. 
*/ union { -#ifdef HAVE_IBV_FLOW_DV_SUPPORT struct mlx5_flow_dv_handle *dv_handle; -#endif struct mlx5_flow_verbs verbs; }; union { From patchwork Tue Feb 4 11:33:22 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bing Zhao X-Patchwork-Id: 65533 X-Patchwork-Delegate: rasland@nvidia.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 55B4EA0532; Tue, 4 Feb 2020 12:34:37 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id EB7811C193; Tue, 4 Feb 2020 12:33:48 +0100 (CET) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org (Postfix) with ESMTP id D4EE01C1C2 for ; Tue, 4 Feb 2020 12:33:47 +0100 (CET) From: Bing Zhao To: orika@mellanox.com, viacheslavo@mellanox.com, rasland@mellanox.com, matan@mellanox.com Cc: dev@dpdk.org Date: Tue, 4 Feb 2020 13:33:22 +0200 Message-Id: <1580816002-159035-7-git-send-email-bingz@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1580816002-159035-1-git-send-email-bingz@mellanox.com> References: <1580736735-19472-1-git-send-email-bingz@mellanox.com> <1580816002-159035-1-git-send-email-bingz@mellanox.com> Subject: [dpdk-dev] [PATCH v2 6/6] net/mlx5: do not save device flow matcher value X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The matcher value is a series of bits with a specified format that is defined by the hardware interface. The PMD driver needs to translate the packet header into the matcher format and then use it to create the flow with the lower layer driver.
And this matcher value is only used when creating a flow, and when destroying it, only the lower layer driver object related to the matcher needs to be released. So there is no need to save such a huge block of information for a device flow. Signed-off-by: Bing Zhao --- drivers/net/mlx5/mlx5_flow.h | 2 -- drivers/net/mlx5/mlx5_flow_dv.c | 28 ++++++++++++++++++++-------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 5e517c3..af30438 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -513,8 +513,6 @@ struct mlx5_flow_dv { struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */ /* Flow DV api: */ struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */ - struct mlx5_flow_dv_match_params value; - /**< Holds the value that the packet is compared to. */ struct mlx5_flow_dv_encap_decap_resource *encap_decap; /**< Pointer to encap/decap resource in cache. */ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 33a3d70..111b01d 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -56,7 +56,7 @@ #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \ sizeof(struct rte_flow_item_ipv4)) -/* VLAN header definitions */ +/* VLAN header definitions. */ #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff @@ -75,15 +75,23 @@ uint32_t attr; }; +/* Maximal number of global temporary device flow. */ +#define MLX5DV_FLOW_HANDLE_MAX_NUM 8 /* Global temporary device flow. */ struct mlx5_flow sflow; /* Global subsidiary device flows actions' list. */ struct { void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; + /**< Action list. */ uint64_t hash_fields; + /**< Verbs hash Rx queue hash fields.
*/ + struct mlx5_flow_dv_match_params value; + /**< Holds the value that the packet is compared to. */ int actions_n; - uint8_t transfer; /**< 1 if the flow is E-Switch flow. */ -} sflow_act[8]; + /**< number of actions. */ + uint8_t transfer; + /**< 1 if the flow is E-Switch flow. */ +} sflow_act[MLX5DV_FLOW_HANDLE_MAX_NUM]; /** * Initialize flow attributes structure according to flow items' types. @@ -5127,7 +5135,6 @@ struct field_modify_info modify_tcp[] = { } dev_flow->ingress = attr->ingress; dev_flow->transfer = attr->transfer; - dv_handle->value.size = MLX5_ST_SZ_BYTES(fte_match_param); /* DV support already defined, compiler will happy for inbox driver. */ dev_flow->dv_handle = dv_handle; return dev_flow; @@ -7097,7 +7104,7 @@ struct field_modify_info modify_tcp[] = { union mlx5_flow_tbl_key tbl_key; uint32_t modify_action_pos = UINT32_MAX; void *match_mask = matcher.mask.buf; - void *match_value = dev_flow->dv_handle->value.buf; + void *match_value = &sflow_act[sidx].value.buf; uint8_t next_protocol = 0xff; struct rte_vlan_hdr vlan = { 0 }; uint32_t table; @@ -7539,6 +7546,11 @@ struct field_modify_info modify_tcp[] = { sflow_act[sidx].actions_n = actions_n; sflow_act[sidx].transfer = dev_flow->transfer; dev_flow->dv_handle->action_flags = action_flags; + /* Matcher size is fixed right now. */ + sflow_act[sidx].value.size = MLX5_ST_SZ_BYTES(fte_match_param); + /* Clear buffer in case of dirty content. 
*/ + memset(&sflow_act[sidx].value.buf, 0, + MLX5_ST_SZ_BYTES(fte_match_param)); for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int item_type = items->type; @@ -7723,7 +7735,7 @@ struct field_modify_info modify_tcp[] = { } #ifdef RTE_LIBRTE_MLX5_DEBUG MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, - dev_flow->dv_handle->value.buf)); + sflow_act[sidx].value.buf)); #endif dev_flow->dv_handle->layers = item_flags; if (action_flags & MLX5_FLOW_ACTION_RSS) @@ -7820,8 +7832,8 @@ struct field_modify_info modify_tcp[] = { matcher_obj = dv_handle->matcher->matcher_object; dv_handle->flow = mlx5_glue->dv_create_flow(matcher_obj, - (void *)&dv_handle->value, - n, sflow_act[sidx].actions); + &sflow_act[sidx].value, n, + sflow_act[sidx].actions); if (!dv_handle->flow) { rte_flow_error_set(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,