From patchwork Fri Nov 13 14:52:26 2020
X-Patchwork-Submitter: Gregory Etelson
X-Patchwork-Id: 84163
X-Patchwork-Delegate: rasland@nvidia.com
From: Gregory Etelson
CC: Viacheslav Ovsiienko, Shahaf Shuler, Xueming Li
Date: Fri, 13 Nov 2020 16:52:26 +0200
Message-ID: <20201113145231.13154-2-getelson@nvidia.com>
In-Reply-To: <20201113145231.13154-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 1/5] net/mlx5: fix tunnel offload object allocation

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

In this patch, the indexed pool provides both an index and memory for
a new tunnel offload object. The tunnel offload ipool was also moved
to DV-enabled code only.
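For reference, the allocation pattern the fix switches to - a single pool
call that hands back both the object memory and its index - can be
sketched in a few lines of plain C. This is a minimal stand-in, not the
mlx5 ipool implementation; toy_ipool, toy_tunnel and POOL_CAP are
hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POOL_CAP 8

struct toy_tunnel {
	uint32_t tunnel_id;
	/* ... rest of the offload context ... */
};

struct toy_ipool {
	struct toy_tunnel objs[POOL_CAP];
	uint8_t used[POOL_CAP];
};

/* Return zeroed memory AND its 1-based index from one call, in the
 * spirit of mlx5_ipool_zmalloc(); index 0 stays reserved as "invalid". */
static struct toy_tunnel *
toy_ipool_zmalloc(struct toy_ipool *p, uint32_t *idx)
{
	for (uint32_t i = 0; i < POOL_CAP; i++) {
		if (!p->used[i]) {
			p->used[i] = 1;
			memset(&p->objs[i], 0, sizeof(p->objs[i]));
			*idx = i + 1;
			return &p->objs[i];
		}
	}
	return NULL;
}

static void
toy_ipool_free(struct toy_ipool *p, uint32_t idx)
{
	if (idx >= 1 && idx <= POOL_CAP)
		p->used[idx - 1] = 0;
}

int main(void)
{
	static struct toy_ipool pool;
	uint32_t id;
	struct toy_tunnel *tunnel = toy_ipool_zmalloc(&pool, &id);

	if (!tunnel)
		return 1;
	tunnel->tunnel_id = id;	/* object and index come from one source */
	printf("allocated tunnel id=%u\n", tunnel->tunnel_id);
	toy_ipool_free(&pool, id);
	return 0;
}

Because the object and its id come from the same call, there is no window
where an id is reserved in one pool while the object memory is taken from
another allocator - the mismatch that produced the invalid indexes.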
Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator") Signed-off-by: Gregory Etelson 2 Acked-by: Viacheslav Ovsiienko --- drivers/net/mlx5/mlx5.c | 50 ++++++++++++++++++------------------ drivers/net/mlx5/mlx5.h | 4 +-- drivers/net/mlx5/mlx5_flow.c | 41 ++++++++++------------------- 3 files changed, 40 insertions(+), 55 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 43344391df..e1faa819a3 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER; static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { #ifdef HAVE_IBV_FLOW_DV_SUPPORT - { + [MLX5_IPOOL_DECAP_ENCAP] = { .size = sizeof(struct mlx5_flow_dv_encap_decap_resource), .trunk_size = 64, .grow_trunk = 3, @@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_encap_decap_ipool", }, - { + [MLX5_IPOOL_PUSH_VLAN] = { .size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource), .trunk_size = 64, .grow_trunk = 3, @@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_push_vlan_ipool", }, - { + [MLX5_IPOOL_TAG] = { .size = sizeof(struct mlx5_flow_dv_tag_resource), .trunk_size = 64, .grow_trunk = 3, @@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_tag_ipool", }, - { + [MLX5_IPOOL_PORT_ID] = { .size = sizeof(struct mlx5_flow_dv_port_id_action_resource), .trunk_size = 64, .grow_trunk = 3, @@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_port_id_ipool", }, - { + [MLX5_IPOOL_JUMP] = { .size = sizeof(struct mlx5_flow_tbl_data_entry), .trunk_size = 64, .grow_trunk = 3, @@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_jump_ipool", }, - { + [MLX5_IPOOL_SAMPLE] = { .size = sizeof(struct mlx5_flow_dv_sample_resource), .trunk_size = 64, .grow_trunk = 3, @@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_sample_ipool", }, - { + [MLX5_IPOOL_DEST_ARRAY] = { .size = sizeof(struct mlx5_flow_dv_dest_array_resource), .trunk_size = 64, .grow_trunk = 3, @@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_dest_array_ipool", }, + [MLX5_IPOOL_TUNNEL_OFFLOAD] = { + .size = sizeof(struct mlx5_flow_tunnel), + .need_lock = 1, + .release_mem_en = 1, + .type = "mlx5_tunnel_offload", + }, + [MLX5_IPOOL_TUNNEL_FLOW_TBL_ID] = { + .size = 0, + .need_lock = 1, + .type = "mlx5_flow_tnl_tbl_ipool", + }, #endif - { + [MLX5_IPOOL_MTR] = { .size = sizeof(struct mlx5_flow_meter), .trunk_size = 64, .grow_trunk = 3, @@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_meter_ipool", }, - { + [MLX5_IPOOL_MCP] = { .size = sizeof(struct mlx5_flow_mreg_copy_resource), .trunk_size = 64, .grow_trunk = 3, @@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_mcp_ipool", }, - { + [MLX5_IPOOL_HRXQ] = { .size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN), .trunk_size = 64, .grow_trunk = 3, @@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_hrxq_ipool", }, - { + 
[MLX5_IPOOL_MLX5_FLOW] = { /* * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows. * It set in run time according to PCI function configuration. @@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_flow_handle_ipool", }, - { + [MLX5_IPOOL_RTE_FLOW] = { .size = sizeof(struct rte_flow), .trunk_size = 4096, .need_lock = 1, @@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "rte_flow_ipool", }, - { + [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = { .size = 0, .need_lock = 1, .type = "mlx5_flow_rss_id_ipool", }, - { - .size = 0, - .need_lock = 1, - .type = "mlx5_flow_tnl_flow_ipool", - }, - { - .size = 0, - .need_lock = 1, - .type = "mlx5_flow_tnl_tbl_ipool", - }, - { + [MLX5_IPOOL_RSS_SHARED_ACTIONS] = { .size = sizeof(struct mlx5_shared_action_rss), .trunk_size = 64, .grow_trunk = 3, @@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_shared_action_rss", }, - }; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 7ee63a7a14..af097d6a7e 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -44,6 +44,8 @@ enum mlx5_ipool_index { MLX5_IPOOL_JUMP, /* Pool for jump resource. */ MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */ MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */ + MLX5_IPOOL_TUNNEL_OFFLOAD, /* Pool for tunnel offload context */ + MLX5_IPOOL_TUNNEL_FLOW_TBL_ID, /* Pool for tunnel table ID. */ #endif MLX5_IPOOL_MTR, /* Pool for meter resource. */ MLX5_IPOOL_MCP, /* Pool for metadata resource. */ @@ -51,8 +53,6 @@ enum mlx5_ipool_index { MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */ MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */ MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */ - MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */ - MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */ MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. 
*/ MLX5_IPOOL_MAX, }; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 92adfcacca..31c9d82b4a 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -6934,7 +6934,7 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, struct mlx5_dev_ctx_shared *sh = list->ctx; struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID], tunnel_flow_tbl_to_id(tte->flow_table)); mlx5_free(tte); } @@ -6952,12 +6952,12 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, SOCKET_ID_ANY); if (!tte) goto err; - mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID], &tte->flow_table); if (tte->flow_table >= MLX5_MAX_TABLES) { DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", tte->flow_table); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID], + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID], tte->flow_table); goto err; } else if (!tte->flow_table) { @@ -7465,14 +7465,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool; DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", dev->data->port_id, tunnel->tunnel_id); - RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED)); - mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], - tunnel->tunnel_id); mlx5_hlist_destroy(tunnel->groups); - mlx5_free(tunnel); + ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD]; + mlx5_ipool_free(ipool, tunnel->tunnel_id); } static struct mlx5_flow_tunnel * @@ -7494,39 +7493,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, const struct rte_flow_tunnel *app_tunnel) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_indexed_pool *ipool; struct mlx5_flow_tunnel *tunnel; uint32_t id; - mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], - &id); + ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD]; + tunnel = mlx5_ipool_zmalloc(ipool, &id); + if (!tunnel) + return NULL; if (id >= MLX5_MAX_TUNNELS) { - mlx5_ipool_free(priv->sh->ipool - [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id); + mlx5_ipool_free(ipool, id); DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id); return NULL; - } else if (!id) { - return NULL; - } - /** - * mlx5 flow tunnel is an auxlilary data structure - * It's not part of IO. 
No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;

From patchwork Fri Nov 13 14:52:27 2020
X-Patchwork-Submitter: Gregory Etelson
X-Patchwork-Id: 84164
X-Patchwork-Delegate: rasland@nvidia.com
From: Gregory Etelson
CC: Viacheslav Ovsiienko, Shahaf Shuler, Suanming Mou
Date: Fri, 13 Nov 2020 16:52:27 +0200
Message-ID: <20201113145231.13154-3-getelson@nvidia.com>
In-Reply-To: <20201113145231.13154-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 2/5] net/mlx5: fix tunnel offload hub multi-thread protection

The original patch removed active tunnel offload objects from the
tunnels DB list without checking their reference counters. That action
led to a PMD crash.

This patch isolates the tunnels DB list behind a separate API that
manages multi-thread protection of the tunnel offload DB.
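The shape of the new DB accessor can be sketched with an ordinary linked
list and a pthread lock. This is a simplified stand-in for
mlx5_access_tunnel_offload_db() using hypothetical toy types; the real
code uses an rte_spinlock and the mlx5 tunnel structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct tunnel {
	unsigned int id;
	struct tunnel *next;
};

static struct tunnel *db_head;
static pthread_mutex_t db_lock = PTHREAD_MUTEX_INITIALIZER;

/* The only entry point to the DB: walk the list under the lock and
 * dispatch the hit/miss callbacks. With lock_op set, the callbacks also
 * run under the lock; otherwise the caller must tolerate the entry
 * changing after the unlock. */
static bool
access_tunnel_db(bool (*match)(struct tunnel *, const void *),
		 void (*hit)(struct tunnel *, void *),
		 void (*miss)(void *),
		 void *ctx, bool lock_op)
{
	bool found = false;
	struct tunnel *t;

	pthread_mutex_lock(&db_lock);
	for (t = db_head; t; t = t->next) {
		found = match(t, ctx);
		if (found)
			break;
	}
	if (!lock_op)
		pthread_mutex_unlock(&db_lock);
	if (found && hit)
		hit(t, ctx);
	if (!found && miss)
		miss(ctx);
	if (lock_op)
		pthread_mutex_unlock(&db_lock);
	return found;
}

static bool match_id(struct tunnel *t, const void *x)
{
	return t->id == *(const unsigned int *)x;
}

static void hit_print(struct tunnel *t, void *x)
{
	(void)x;
	printf("found tunnel %u\n", t->id);
}

static void miss_print(void *x)
{
	printf("tunnel %u not found\n", *(unsigned int *)x);
}

int main(void)
{
	struct tunnel t1 = { .id = 7, .next = NULL };
	unsigned int key = 7;

	db_head = &t1;
	access_tunnel_db(match_id, hit_print, miss_print, &key, true);
	return 0;
}

Since callers never touch the list or the lock directly, the decision to
unlink an entry can be made in exactly one place, where the reference
counter is known to be respected.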
Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe") Signed-off-by: Gregory Etelson Acked-by: Viacheslav Ovsiienko --- drivers/net/mlx5/mlx5_flow.c | 256 +++++++++++++++++++++++++---------- drivers/net/mlx5/mlx5_flow.h | 6 +- 2 files changed, 192 insertions(+), 70 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 31c9d82b4a..2f01e34033 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -33,6 +33,14 @@ #include "mlx5_common_os.h" #include "rte_pmd_mlx5.h" +static bool +mlx5_access_tunnel_offload_db + (struct rte_eth_dev *dev, + bool (*match)(struct rte_eth_dev *, + struct mlx5_flow_tunnel *, const void *), + void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *), + void (*miss)(struct rte_eth_dev *, void *), + void *ctx, bool lock_op); static struct mlx5_flow_tunnel * mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id); static void @@ -661,29 +669,68 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev, return 0; } +struct tunnel_db_element_release_ctx { + struct rte_flow_item *items; + struct rte_flow_action *actions; + uint32_t num_elements; + struct rte_flow_error *error; + int ret; +}; + +static bool +tunnel_element_release_match(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, const void *x) +{ + const struct tunnel_db_element_release_ctx *ctx = x; + + RTE_SET_USED(dev); + if (ctx->num_elements != 1) + return false; + else if (ctx->items) + return ctx->items == &tunnel->item; + else if (ctx->actions) + return ctx->actions == &tunnel->action; + + return false; +} + +static void +tunnel_element_release_hit(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, void *x) +{ + struct tunnel_db_element_release_ctx *ctx = x; + ctx->ret = 0; + if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) + mlx5_flow_tunnel_free(dev, tunnel); +} + +static void +tunnel_element_release_miss(struct rte_eth_dev *dev, void *x) +{ + struct tunnel_db_element_release_ctx *ctx = x; + RTE_SET_USED(dev); + ctx->ret = rte_flow_error_set(ctx->error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "invalid argument"); +} + static int mlx5_flow_item_release(struct rte_eth_dev *dev, struct rte_flow_item *pmd_items, uint32_t num_items, struct rte_flow_error *err) { - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; + struct tunnel_db_element_release_ctx ctx = { + .items = pmd_items, + .actions = NULL, + .num_elements = num_items, + .error = err, + }; - rte_spinlock_lock(&thub->sl); - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->item == pmd_items) { - LIST_REMOVE(tun, chain); - break; - } - } - rte_spinlock_unlock(&thub->sl); - if (!tun || num_items != 1) - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "invalid argument"); - if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED)) - mlx5_flow_tunnel_free(dev, tun); - return 0; + mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, + tunnel_element_release_hit, + tunnel_element_release_miss, &ctx, false); + + return ctx.ret; } static int @@ -691,25 +738,18 @@ mlx5_flow_action_release(struct rte_eth_dev *dev, struct rte_flow_action *pmd_actions, uint32_t num_actions, struct rte_flow_error *err) { - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; + struct tunnel_db_element_release_ctx ctx = { + .items = NULL, + .actions = pmd_actions, + .num_elements = num_actions, + .error = err, + }; - rte_spinlock_lock(&thub->sl); - 
LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->action == pmd_actions) { - LIST_REMOVE(tun, chain); - break; - } - } - rte_spinlock_unlock(&thub->sl); - if (!tun || num_actions != 1) - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "invalid argument"); - if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED)) - mlx5_flow_tunnel_free(dev, tun); + mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, + tunnel_element_release_hit, + tunnel_element_release_miss, &ctx, false); - return 0; + return ctx.ret; } static int @@ -5889,11 +5929,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, if (flow->tunnel) { struct mlx5_flow_tunnel *tunnel; - rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl); tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id); RTE_VERIFY(tunnel); - LIST_REMOVE(tunnel, chain); - rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl); if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) mlx5_flow_tunnel_free(dev, tunnel); } @@ -7464,28 +7501,87 @@ static void mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel) { + /* no tunnel hub spinlock protection */ struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_indexed_pool *ipool; DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", dev->data->port_id, tunnel->tunnel_id); + rte_spinlock_lock(&thub->sl); + LIST_REMOVE(tunnel, chain); + rte_spinlock_unlock(&thub->sl); mlx5_hlist_destroy(tunnel->groups); ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD]; mlx5_ipool_free(ipool, tunnel->tunnel_id); } -static struct mlx5_flow_tunnel * -mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id) +static bool +mlx5_access_tunnel_offload_db + (struct rte_eth_dev *dev, + bool (*match)(struct rte_eth_dev *, + struct mlx5_flow_tunnel *, const void *), + void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *), + void (*miss)(struct rte_eth_dev *, void *), + void *ctx, bool lock_op) { + bool verdict = false; struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; + struct mlx5_flow_tunnel *tunnel; - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (tun->tunnel_id == id) + rte_spinlock_lock(&thub->sl); + LIST_FOREACH(tunnel, &thub->tunnels, chain) { + verdict = match(dev, tunnel, (const void *)ctx); + if (verdict) break; } + if (!lock_op) + rte_spinlock_unlock(&thub->sl); + if (verdict && hit) + hit(dev, tunnel, ctx); + if (!verdict && miss) + miss(dev, ctx); + if (lock_op) + rte_spinlock_unlock(&thub->sl); - return tun; + return verdict; +} + +struct tunnel_db_find_tunnel_id_ctx { + uint32_t tunnel_id; + struct mlx5_flow_tunnel *tunnel; +}; + +static bool +find_tunnel_id_match(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, const void *x) +{ + const struct tunnel_db_find_tunnel_id_ctx *ctx = x; + + RTE_SET_USED(dev); + return tunnel->tunnel_id == ctx->tunnel_id; +} + +static void +find_tunnel_id_hit(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, void *x) +{ + struct tunnel_db_find_tunnel_id_ctx *ctx = x; + RTE_SET_USED(dev); + ctx->tunnel = tunnel; +} + +static struct mlx5_flow_tunnel * +mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id) +{ + struct tunnel_db_find_tunnel_id_ctx ctx = { + .tunnel_id = id, + }; + + mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match, + find_tunnel_id_hit, NULL, &ctx, true); + + return ctx.tunnel; } static struct mlx5_flow_tunnel * @@ -7533,38 +7629,60 @@ 
mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev, return tunnel; } +struct tunnel_db_get_tunnel_ctx { + const struct rte_flow_tunnel *app_tunnel; + struct mlx5_flow_tunnel *tunnel; +}; + +static bool get_tunnel_match(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, const void *x) +{ + const struct tunnel_db_get_tunnel_ctx *ctx = x; + + RTE_SET_USED(dev); + return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel, + sizeof(*ctx->app_tunnel)); +} + +static void get_tunnel_hit(struct rte_eth_dev *dev, + struct mlx5_flow_tunnel *tunnel, void *x) +{ + /* called under tunnel spinlock protection */ + struct tunnel_db_get_tunnel_ctx *ctx = x; + + RTE_SET_USED(dev); + tunnel->refctn++; + ctx->tunnel = tunnel; +} + +static void get_tunnel_miss(struct rte_eth_dev *dev, void *x) +{ + /* called under tunnel spinlock protection */ + struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); + struct tunnel_db_get_tunnel_ctx *ctx = x; + + rte_spinlock_unlock(&thub->sl); + ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel); + ctx->tunnel->refctn = 1; + rte_spinlock_lock(&thub->sl); + if (ctx->tunnel) + LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain); +} + + static int mlx5_get_flow_tunnel(struct rte_eth_dev *dev, const struct rte_flow_tunnel *app_tunnel, struct mlx5_flow_tunnel **tunnel) { - int ret; - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_flow_tunnel *tun; - - rte_spinlock_lock(&thub->sl); - LIST_FOREACH(tun, &thub->tunnels, chain) { - if (!memcmp(app_tunnel, &tun->app_tunnel, - sizeof(*app_tunnel))) { - *tunnel = tun; - ret = 0; - break; - } - } - if (!tun) { - tun = mlx5_flow_tunnel_allocate(dev, app_tunnel); - if (tun) { - LIST_INSERT_HEAD(&thub->tunnels, tun, chain); - *tunnel = tun; - } else { - ret = -ENOMEM; - } - } - rte_spinlock_unlock(&thub->sl); - if (tun) - __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED); + struct tunnel_db_get_tunnel_ctx ctx = { + .app_tunnel = app_tunnel, + }; - return ret; + mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit, + get_tunnel_miss, &ctx, true); + *tunnel = ctx.tunnel; + return ctx.tunnel ? 0 : -ENOMEM; } void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id) diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index e3a5030785..bdf2c50090 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -950,8 +950,12 @@ struct mlx5_flow_tunnel { /** PMD tunnel related context */ struct mlx5_flow_tunnel_hub { + /* Tunnels list + * Access to the list MUST be MT protected + */ LIST_HEAD(, mlx5_flow_tunnel) tunnels; - rte_spinlock_t sl; /* Tunnel list spinlock. 
*/
+	/* protect access to the tunnels list */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };

From patchwork Fri Nov 13 14:52:28 2020
X-Patchwork-Submitter: Gregory Etelson
X-Patchwork-Id: 84165
X-Patchwork-Delegate: rasland@nvidia.com
From: Gregory Etelson
CC: Viacheslav Ovsiienko, Shahaf Shuler, Xueming Li
Date: Fri, 13 Nov 2020 16:52:28 +0200
Message-ID: <20201113145231.13154-4-getelson@nvidia.com>
In-Reply-To: <20201113145231.13154-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 3/5] net/mlx5: fix double table referencing

The new flow table resource management API triggered a PMD crash in
tunnel offload mode when a tunnel match flow rule was inserted before
the tunnel set rule. The reason for the crash was double flow table
registration: the table was registered once by the tunnel offload code
and once more by the PMD code, as part of general table processing.
Because the table reference counter was decremented only once during
rule destruction, the double registration caused a resource leak that
triggered the crash.

The patch updates the PMD registration with the tunnel offload
parameters and removes the table registration from the tunnel related
code.
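The essence of this fix is a look-up-before-register rule: take a new
reference only when the entry is not registered yet, so that one release
always balances one acquire. A minimal self-contained sketch of the
pattern follows; the toy registry and its names (entry, reg_register)
are hypothetical, not the mlx5 hash-list API:

#include <stdio.h>
#include <string.h>

#define REG_CAP 4

struct entry {
	unsigned long key;
	int refcnt;	/* 0 means the slot is free */
};

static struct entry registry[REG_CAP];

static struct entry *reg_lookup(unsigned long key)
{
	for (int i = 0; i < REG_CAP; i++)
		if (registry[i].refcnt && registry[i].key == key)
			return &registry[i];
	return NULL;
}

/* Registration takes a reference and creates the entry on first use. */
static struct entry *reg_register(unsigned long key)
{
	struct entry *e = reg_lookup(key);

	if (!e) {
		for (int i = 0; i < REG_CAP; i++) {
			if (!registry[i].refcnt) {
				e = &registry[i];
				e->key = key;
				break;
			}
		}
		if (!e)
			return NULL;
	}
	e->refcnt++;
	return e;
}

static void reg_unregister(struct entry *e)
{
	if (--e->refcnt == 0)
		memset(e, 0, sizeof(*e));	/* destroy on last release */
}

int main(void)
{
	/* The buggy shape was two registrations against one release,
	 * leaving refcnt at 1 forever. The fix: look up first and
	 * register only on a miss. */
	struct entry *e = reg_lookup(42);

	if (!e)
		e = reg_register(42);
	printf("refcnt=%d\n", e->refcnt);	/* 1: one acquire */
	reg_unregister(e);			/* 0: entry destroyed */
	return 0;
}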
Fixes: 663ad57dabb2 ("net/mlx5: make flow table cache thread safe") Signed-off-by: Gregory Etelson Acked-by: Viacheslav Ovsiienko --- drivers/net/mlx5/mlx5_flow.c | 16 ++++++++++---- drivers/net/mlx5/mlx5_flow_dv.c | 39 +++++++++++++++++---------------- 2 files changed, 32 insertions(+), 23 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 2f01e34033..185b4ba51a 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -7024,7 +7024,15 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, struct mlx5_hlist *group_hash; group_hash = tunnel ? tunnel->groups : thub->groups; - he = mlx5_hlist_register(group_hash, key.val, NULL); + he = mlx5_hlist_lookup(group_hash, key.val, NULL); + if (!he) { + DRV_LOG(DEBUG, "port %u tunnel %u group=%u - generate table id", + dev->data->port_id, key.tunnel_id, group); + he = mlx5_hlist_register(group_hash, key.val, NULL); + } else { + DRV_LOG(DEBUG, "port %u tunnel %u group=%u - skip table id", + dev->data->port_id, key.tunnel_id, group); + } if (!he) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -7032,8 +7040,8 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, "tunnel group index not supported"); tte = container_of(he, typeof(*tte), hash); *table = tte->flow_table; - DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x", - dev->data->port_id, key.tunnel_id, group, *table); + DRV_LOG(DEBUG, "port %u tunnel %u group=%u table=%u", + dev->data->port_id, key.tunnel_id, group, *table); return 0; } @@ -7114,7 +7122,7 @@ mlx5_flow_group_to_table(struct rte_eth_dev *dev, standard_translation = true; } DRV_LOG(DEBUG, - "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s", + "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s", dev->data->port_id, group, grp_info.transfer, grp_info.external, grp_info.fdb_def_rule, standard_translation ? "STANDARD" : "TUNNEL"); diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 78c710fef9..95165980f4 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8042,6 +8042,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, "cannot get table"); return NULL; } + DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.", + table_id, tunnel ? tunnel->tunnel_id : 0, group_id); tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry); return &tbl_data->tbl; } @@ -8080,7 +8082,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list, if (he) mlx5_hlist_unregister(tunnel_grp_hash, he); DRV_LOG(DEBUG, - "Table_id %#x tunnel %u group %u released.", + "Table_id %u tunnel %u group %u released.", table_id, tbl_data->tunnel ? tbl_data->tunnel->tunnel_id : 0, @@ -8192,6 +8194,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, struct mlx5_flow_dv_matcher *ref, union mlx5_flow_tbl_key *key, struct mlx5_flow *dev_flow, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group_id, struct rte_flow_error *error) { struct mlx5_cache_entry *entry; @@ -8203,8 +8207,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, .data = ref, }; - tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, - key->domain, false, NULL, 0, 0, error); + /** + * tunnel offload API requires this registration for cases when + * tunnel match rule was inserted before tunnel set rule. 
+	 */
+	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+				       key->direction, key->domain,
+				       dev_flow->external, tunnel,
+				       group_id, 0, error);
 	if (!tbl)
 		return -rte_errno;	/* No need to refill the error info */
 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
@@ -9605,10 +9615,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		/*
 		 * do not add decap action if match rule drops packet
 		 * HW rejects rules with decap & drop
+		 *
+		 * if tunnel match rule was inserted before matching tunnel set
+		 * rule flow table used in the match rule must be registered.
+		 * current implementation handles that in the
+		 * flow_dv_match_register() at the function end.
 		 */
 		bool add_decap = true;
 		const struct rte_flow_action *ptr = actions;
-		struct mlx5_flow_tbl_resource *tbl;
 
 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9625,20 +9639,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 		}
-		/*
-		 * bind table_id with for tunnel match rule.
-		 * Tunnel set rule establishes that bind in JUMP action handler.
-		 * Required for scenario when application creates tunnel match
-		 * rule before tunnel set rule.
-		 */
-		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer,
-					       !!dev_flow->external, tunnel,
-					       attr->group, 0, error);
-		if (!tbl)
-			return rte_flow_error_set
-				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-				actions, "cannot register tunnel group");
 	}
 	for (; !actions_end ; actions++) {
 		const struct rte_flow_action_queue *queue;
@@ -10468,7 +10468,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
 	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
 		return -rte_errno;
 	return 0;
 }
From patchwork Fri Nov 13 14:52:29 2020
X-Patchwork-Submitter: Gregory Etelson
X-Patchwork-Id: 84166
X-Patchwork-Delegate: rasland@nvidia.com
From: Gregory Etelson
CC: Viacheslav Ovsiienko, Shahaf Shuler
Date: Fri, 13 Nov 2020 16:52:29 +0200
Message-ID: <20201113145231.13154-5-getelson@nvidia.com>
In-Reply-To: <20201113145231.13154-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 4/5] net/mlx5: fix tunnel offload callback names

Rename the mlx5_flow_item_release and mlx5_flow_action_release
callbacks to mlx5_flow_tunnel_item_release and
mlx5_flow_tunnel_action_release, so that their names match the tunnel
offload naming pattern.

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")
Signed-off-by: Gregory Etelson
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5_flow.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 185b4ba51a..358a5f4e72 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -715,9 +715,9 @@ tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
 }
 
 static int
-mlx5_flow_item_release(struct rte_eth_dev *dev,
-		       struct rte_flow_item *pmd_items,
-		       uint32_t num_items, struct rte_flow_error *err)
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
 {
 	struct tunnel_db_element_release_ctx ctx = {
 		.items = pmd_items,
@@ -734,9 +734,10 @@ mlx5_flow_item_release(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_action_release(struct rte_eth_dev *dev,
-			 struct rte_flow_action *pmd_actions,
-			 uint32_t num_actions, struct rte_flow_error *err)
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
 {
 	struct tunnel_db_element_release_ctx ctx = {
 		.items = NULL,
@@ -800,8 +801,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.shared_action_query = mlx5_shared_action_query,
 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
 	.tunnel_match = mlx5_flow_tunnel_match,
-	.tunnel_action_decap_release = mlx5_flow_action_release,
-	.tunnel_item_release = mlx5_flow_item_release,
+	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
+	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
 };
From patchwork Fri Nov 13 14:52:30 2020
X-Patchwork-Submitter: Gregory Etelson
X-Patchwork-Id: 84167
X-Patchwork-Delegate: rasland@nvidia.com
From: Gregory Etelson
CC: Viacheslav Ovsiienko, Shahaf Shuler
Date: Fri, 13 Nov 2020 16:52:30 +0200
Message-ID: <20201113145231.13154-6-getelson@nvidia.com>
In-Reply-To: <20201113145231.13154-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 5/5] net/mlx5: fix non-dv compilation errors

The ipools used for tunnel offload have been restricted to DV-enabled
code only since 1d1248d452ff, while the rest of the tunnel offload API
had no such compilation limitation. As a result, PMD compilation
failed on non-DV setups.

This patch groups the tunnel offload code in the DV-enabled code area
and provides stubs for the calls that must remain visible in a non-DV
environment. These stubs notify the caller that tunnel offload
functionality is not supported on that setup.
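The guard-plus-stub approach can be sketched as below. HAVE_FEATURE_X
and tunnel_decap_set() are hypothetical stand-ins for the
HAVE_IBV_FLOW_DV_SUPPORT macro and the tunnel offload callbacks; the
patch's real stubs report the failure through the rte_flow error
mechanism, as the commit message describes:

#include <errno.h>
#include <stdio.h>

#ifdef HAVE_FEATURE_X
/* Full implementation, compiled only when the feature is available. */
static int tunnel_decap_set(int port)
{
	(void)port;
	/* ... the real tunnel offload work ... */
	return 0;
}
#else
/* Stub with the same signature, visible on builds without the
 * feature, so callers still link and get a clean "not supported"
 * error instead of a compilation failure. */
static int tunnel_decap_set(int port)
{
	(void)port;
	fprintf(stderr, "tunnel offload is not supported on this setup\n");
	return -ENOTSUP;
}
#endif

int main(void)
{
	return tunnel_decap_set(0) == 0 ? 0 : 1;
}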
Fixes: 1d1248d452ff ("net/mlx5: fix offloaded tunnel allocation") Acked-by: Viacheslav Ovsiienko Signed-off-by: Gregory Etelson --- drivers/net/mlx5/linux/mlx5_os.c | 16 +- drivers/net/mlx5/mlx5_flow.c | 1104 +++++++++++++++++------------- drivers/net/mlx5/mlx5_flow.h | 17 +- 3 files changed, 638 insertions(+), 499 deletions(-) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index c78d56fae3..1aee481bd7 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -301,6 +301,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) goto error; } sh->encaps_decaps->ctx = sh; + if (!sh->tunnel_hub) + err = mlx5_alloc_tunnel_hub(sh); + if (err) { + DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err); + goto error; + } #endif #ifdef HAVE_MLX5DV_DR void *domain; @@ -335,12 +341,6 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop(); } #endif - if (!sh->tunnel_hub) - err = mlx5_alloc_tunnel_hub(sh); - if (err) { - DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err); - goto error; - } if (priv->config.reclaim_mode == MLX5_RCM_AGGR) { mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1); mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1); @@ -389,10 +389,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) mlx5_hlist_destroy(sh->tag_table); sh->tag_table = NULL; } +#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (sh->tunnel_hub) { mlx5_release_tunnel_hub(sh, priv->dev_port); sh->tunnel_hub = NULL; } +#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ mlx5_free_table_hash_list(priv); return err; } @@ -451,10 +453,12 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv) mlx5_hlist_destroy(sh->tag_table); sh->tag_table = NULL; } +#ifdef HAVE_IBV_FLOW_DV_SUPPORT if (sh->tunnel_hub) { mlx5_release_tunnel_hub(sh, priv->dev_port); sh->tunnel_hub = NULL; } +#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ mlx5_cache_list_destroy(&sh->port_id_action_list); mlx5_cache_list_destroy(&sh->push_vlan_action_list); mlx5_free_table_hash_list(priv); diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 358a5f4e72..11bc8e9dde 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -33,25 +33,34 @@ #include "mlx5_common_os.h" #include "rte_pmd_mlx5.h" -static bool -mlx5_access_tunnel_offload_db - (struct rte_eth_dev *dev, - bool (*match)(struct rte_eth_dev *, - struct mlx5_flow_tunnel *, const void *), - void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *), - void (*miss)(struct rte_eth_dev *, void *), - void *ctx, bool lock_op); +struct tunnel_default_miss_ctx { + uint16_t *queue; + __extension__ + union { + struct rte_flow_action_rss action_rss; + struct rte_flow_action_queue miss_queue; + struct rte_flow_action_jump miss_jump; + uint8_t raw[0]; + }; +}; + +static int +flow_tunnel_add_default_miss(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_action *app_actions, + uint32_t flow_idx, + struct tunnel_default_miss_ctx *ctx, + struct rte_flow_error *error); static struct mlx5_flow_tunnel * mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id); static void mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel); -static const struct mlx5_flow_tbl_data_entry * -tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark); -static int -mlx5_get_flow_tunnel(struct rte_eth_dev *dev, - const struct rte_flow_tunnel *app_tunnel, - struct mlx5_flow_tunnel **tunnel); - +static uint32_t 
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, + const struct mlx5_flow_tunnel *tunnel, + uint32_t group, uint32_t *table, + struct rte_flow_error *error); /** Device flow drivers. */ extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; @@ -588,203 +597,32 @@ static int mlx5_shared_action_query const struct rte_flow_shared_action *action, void *data, struct rte_flow_error *error); -static inline bool -mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, - struct rte_flow_tunnel *tunnel, - const char *err_msg) -{ - err_msg = NULL; - if (!is_tunnel_offload_active(dev)) { - err_msg = "tunnel offload was not activated"; - goto out; - } else if (!tunnel) { - err_msg = "no application tunnel"; - goto out; - } - - switch (tunnel->type) { - default: - err_msg = "unsupported tunnel type"; - goto out; - case RTE_FLOW_ITEM_TYPE_VXLAN: - break; - } - -out: - return !err_msg; -} - - static int mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev, - struct rte_flow_tunnel *app_tunnel, - struct rte_flow_action **actions, - uint32_t *num_of_actions, - struct rte_flow_error *error) -{ - int ret; - struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); - - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - err_msg); - ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); - if (ret < 0) { - return rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, - "failed to initialize pmd tunnel"); - } - *actions = &tunnel->action; - *num_of_actions = 1; - return 0; -} - + struct rte_flow_tunnel *app_tunnel, + struct rte_flow_action **actions, + uint32_t *num_of_actions, + struct rte_flow_error *error); static int mlx5_flow_tunnel_match(struct rte_eth_dev *dev, struct rte_flow_tunnel *app_tunnel, struct rte_flow_item **items, uint32_t *num_of_items, - struct rte_flow_error *error) -{ - int ret; - struct mlx5_flow_tunnel *tunnel; - const char *err_msg = NULL; - bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg); - - if (!verdict) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - err_msg); - ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel); - if (ret < 0) { - return rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "failed to initialize pmd tunnel"); - } - *items = &tunnel->item; - *num_of_items = 1; - return 0; -} - -struct tunnel_db_element_release_ctx { - struct rte_flow_item *items; - struct rte_flow_action *actions; - uint32_t num_elements; - struct rte_flow_error *error; - int ret; -}; - -static bool -tunnel_element_release_match(struct rte_eth_dev *dev, - struct mlx5_flow_tunnel *tunnel, const void *x) -{ - const struct tunnel_db_element_release_ctx *ctx = x; - - RTE_SET_USED(dev); - if (ctx->num_elements != 1) - return false; - else if (ctx->items) - return ctx->items == &tunnel->item; - else if (ctx->actions) - return ctx->actions == &tunnel->action; - - return false; -} - -static void -tunnel_element_release_hit(struct rte_eth_dev *dev, - struct mlx5_flow_tunnel *tunnel, void *x) -{ - struct tunnel_db_element_release_ctx *ctx = x; - ctx->ret = 0; - if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) - mlx5_flow_tunnel_free(dev, tunnel); -} - -static void -tunnel_element_release_miss(struct rte_eth_dev *dev, void *x) -{ - struct tunnel_db_element_release_ctx *ctx = x; - RTE_SET_USED(dev); - ctx->ret = rte_flow_error_set(ctx->error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, 
NULL, - "invalid argument"); -} - + struct rte_flow_error *error); static int mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev, struct rte_flow_item *pmd_items, - uint32_t num_items, struct rte_flow_error *err) -{ - struct tunnel_db_element_release_ctx ctx = { - .items = pmd_items, - .actions = NULL, - .num_elements = num_items, - .error = err, - }; - - mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, - tunnel_element_release_hit, - tunnel_element_release_miss, &ctx, false); - - return ctx.ret; -} - + uint32_t num_items, struct rte_flow_error *err); static int mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev, struct rte_flow_action *pmd_actions, uint32_t num_actions, - struct rte_flow_error *err) -{ - struct tunnel_db_element_release_ctx ctx = { - .items = NULL, - .actions = pmd_actions, - .num_elements = num_actions, - .error = err, - }; - - mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match, - tunnel_element_release_hit, - tunnel_element_release_miss, &ctx, false); - - return ctx.ret; -} - + struct rte_flow_error *err); static int mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m, struct rte_flow_restore_info *info, - struct rte_flow_error *err) -{ - uint64_t ol_flags = m->ol_flags; - const struct mlx5_flow_tbl_data_entry *tble; - const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID; - - if ((ol_flags & mask) != mask) - goto err; - tble = tunnel_mark_decode(dev, m->hash.fdir.hi); - if (!tble) { - DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x", - dev->data->port_id, m->hash.fdir.hi); - goto err; - } - MLX5_ASSERT(tble->tunnel); - memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel)); - info->group_id = tble->group_id; - info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL | - RTE_FLOW_RESTORE_INFO_GROUP_ID | - RTE_FLOW_RESTORE_INFO_ENCAPSULATED; - - return 0; - -err: - return rte_flow_error_set(err, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "failed to get restore info"); -} + struct rte_flow_error *err); static const struct rte_flow_ops mlx5_flow_ops = { .validate = mlx5_flow_validate, @@ -4206,142 +4044,6 @@ flow_hairpin_split(struct rte_eth_dev *dev, return 0; } -__extension__ -union tunnel_offload_mark { - uint32_t val; - struct { - uint32_t app_reserve:8; - uint32_t table_id:15; - uint32_t transfer:1; - uint32_t _unused_:8; - }; -}; - -struct tunnel_default_miss_ctx { - uint16_t *queue; - __extension__ - union { - struct rte_flow_action_rss action_rss; - struct rte_flow_action_queue miss_queue; - struct rte_flow_action_jump miss_jump; - uint8_t raw[0]; - }; -}; - -static int -flow_tunnel_add_default_miss(struct rte_eth_dev *dev, - struct rte_flow *flow, - const struct rte_flow_attr *attr, - const struct rte_flow_action *app_actions, - uint32_t flow_idx, - struct tunnel_default_miss_ctx *ctx, - struct rte_flow_error *error) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow *dev_flow; - struct rte_flow_attr miss_attr = *attr; - const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf; - const struct rte_flow_item miss_items[2] = { - { - .type = RTE_FLOW_ITEM_TYPE_ETH, - .spec = NULL, - .last = NULL, - .mask = NULL - }, - { - .type = RTE_FLOW_ITEM_TYPE_END, - .spec = NULL, - .last = NULL, - .mask = NULL - } - }; - union tunnel_offload_mark mark_id; - struct rte_flow_action_mark miss_mark; - struct rte_flow_action miss_actions[3] = { - [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark }, - [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL } - }; - const 
struct rte_flow_action_jump *jump_data; - uint32_t i, flow_table = 0; /* prevent compilation warning */ - struct flow_grp_info grp_info = { - .external = 1, - .transfer = attr->transfer, - .fdb_def_rule = !!priv->fdb_def_rule, - .std_tbl_fix = 0, - }; - int ret; - - if (!attr->transfer) { - uint32_t q_size; - - miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS; - q_size = priv->reta_idx_n * sizeof(ctx->queue[0]); - ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size, - 0, SOCKET_ID_ANY); - if (!ctx->queue) - return rte_flow_error_set - (error, ENOMEM, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid default miss RSS"); - ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT, - ctx->action_rss.level = 0, - ctx->action_rss.types = priv->rss_conf.rss_hf, - ctx->action_rss.key_len = priv->rss_conf.rss_key_len, - ctx->action_rss.queue_num = priv->reta_idx_n, - ctx->action_rss.key = priv->rss_conf.rss_key, - ctx->action_rss.queue = ctx->queue; - if (!priv->reta_idx_n || !priv->rxqs_n) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid port configuration"); - if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) - ctx->action_rss.types = 0; - for (i = 0; i != priv->reta_idx_n; ++i) - ctx->queue[i] = (*priv->reta_idx)[i]; - } else { - miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP; - ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP; - } - miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw; - for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++); - jump_data = app_actions->conf; - miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY; - miss_attr.group = jump_data->group; - ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group, - &flow_table, grp_info, error); - if (ret) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "invalid tunnel id"); - mark_id.app_reserve = 0; - mark_id.table_id = tunnel_flow_tbl_to_id(flow_table); - mark_id.transfer = !!attr->transfer; - mark_id._unused_ = 0; - miss_mark.id = mark_id.val; - dev_flow = flow_drv_prepare(dev, flow, &miss_attr, - miss_items, miss_actions, flow_idx, error); - if (!dev_flow) - return -rte_errno; - dev_flow->flow = flow; - dev_flow->external = true; - dev_flow->tunnel = tunnel; - /* Subflow object was created, we must include one in the list. */ - SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, - dev_flow->handle, next); - DRV_LOG(DEBUG, - "port %u tunnel type=%d id=%u miss rule priority=%u group=%u", - dev->data->port_id, tunnel->app_tunnel.type, - tunnel->tunnel_id, miss_attr.priority, miss_attr.group); - ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items, - miss_actions, error); - if (!ret) - ret = flow_mreg_update_copy_table(dev, flow, miss_actions, - error); - - return ret; -} - /** * The last stage of splitting chain, just creates the subflow * without any modification. 
@@ -5672,7 +5374,8 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, error); if (ret < 0) goto error; - if (is_flow_tunnel_steer_rule(dev, attr, + if (is_tunnel_offload_active(dev) && + is_flow_tunnel_steer_rule(dev, attr, buf->entry[i].pattern, p_actions_rx)) { ret = flow_tunnel_add_default_miss(dev, flow, attr, @@ -5743,12 +5446,15 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list, wks->flow_idx = wks->flow_nested_idx; if (wks->flow_nested_idx) wks->flow_nested_idx = 0; - tunnel = flow_tunnel_from_rule(dev, attr, items, actions); - if (tunnel) { - flow->tunnel = 1; - flow->tunnel_id = tunnel->tunnel_id; - __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED); - mlx5_free(default_miss_ctx.queue); + if (is_tunnel_offload_active(dev)) { + tunnel = flow_tunnel_from_rule(dev, attr, items, actions); + if (tunnel) { + flow->tunnel = 1; + flow->tunnel_id = tunnel->tunnel_id; + __atomic_add_fetch(&tunnel->refctn, 1, + __ATOMIC_RELAXED); + mlx5_free(default_miss_ctx.queue); + } } return idx; error: @@ -6945,107 +6651,6 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh, sh->cmng.pending_queries--; } -static const struct mlx5_flow_tbl_data_entry * -tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_hlist_entry *he; - union tunnel_offload_mark mbits = { .val = mark }; - union mlx5_flow_tbl_key table_key = { - { - .table_id = tunnel_id_to_flow_tbl(mbits.table_id), - .dummy = 0, - .domain = !!mbits.transfer, - .direction = 0, - } - }; - he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL); - return he ? - container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL; -} - -static void -mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list, - struct mlx5_hlist_entry *entry) -{ - struct mlx5_dev_ctx_shared *sh = list->ctx; - struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash); - - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID], - tunnel_flow_tbl_to_id(tte->flow_table)); - mlx5_free(tte); -} - -static struct mlx5_hlist_entry * -mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, - uint64_t key __rte_unused, - void *ctx __rte_unused) -{ - struct mlx5_dev_ctx_shared *sh = list->ctx; - struct tunnel_tbl_entry *tte; - - tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, - sizeof(*tte), 0, - SOCKET_ID_ANY); - if (!tte) - goto err; - mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID], - &tte->flow_table); - if (tte->flow_table >= MLX5_MAX_TABLES) { - DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.", - tte->flow_table); - mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID], - tte->flow_table); - goto err; - } else if (!tte->flow_table) { - goto err; - } - tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table); - return &tte->hash; -err: - if (tte) - mlx5_free(tte); - return NULL; -} - -static uint32_t -tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, - const struct mlx5_flow_tunnel *tunnel, - uint32_t group, uint32_t *table, - struct rte_flow_error *error) -{ - struct mlx5_hlist_entry *he; - struct tunnel_tbl_entry *tte; - union tunnel_tbl_key key = { - .tunnel_id = tunnel ? tunnel->tunnel_id : 0, - .group = group - }; - struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); - struct mlx5_hlist *group_hash; - - group_hash = tunnel ? 
-	he = mlx5_hlist_lookup(group_hash, key.val, NULL);
-	if (!he) {
-		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - generate table id",
-			dev->data->port_id, key.tunnel_id, group);
-		he = mlx5_hlist_register(group_hash, key.val, NULL);
-	} else {
-		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - skip table id",
-			dev->data->port_id, key.tunnel_id, group);
-	}
-	if (!he)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-					  NULL,
-					  "tunnel group index not supported");
-	tte = container_of(he, typeof(*tte), hash);
-	*table = tte->flow_table;
-	DRV_LOG(DEBUG, "port %u tunnel %u group=%u table=%u",
-		dev->data->port_id, key.tunnel_id, group, *table);
-	return 0;
-}
-
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
 		    struct flow_grp_info grp_info, struct rte_flow_error *error)
@@ -7506,23 +7111,38 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)
 	return ret;
 }
 
-static void
-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
-		      struct mlx5_flow_tunnel *tunnel)
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
+
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 {
-	/* no tunnel hub spinlock protection */
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_indexed_pool *ipool;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct mlx5_flow_driver_ops *fops;
+	int ret;
+	struct rte_flow_attr attr = { .transfer = 0 };
 
-	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
-		dev->data->port_id, tunnel->tunnel_id);
-	rte_spinlock_lock(&thub->sl);
-	LIST_REMOVE(tunnel, chain);
-	rte_spinlock_unlock(&thub->sl);
-	mlx5_hlist_destroy(tunnel->groups);
-	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
-	mlx5_ipool_free(ipool, tunnel->tunnel_id);
+	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+	if (ret > 0)
+		ret = -ret;
+	return ret;
+}
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+static inline uint32_t
+tunnel_id_to_flow_tbl(uint32_t id)
+{
+	return id | (1u << 16);
+}
+
+static inline uint32_t
+tunnel_flow_tbl_to_id(uint32_t flow_tbl)
+{
+	return flow_tbl & ~(1u << 16);
 }
 
 static bool
@@ -7532,19 +7152,235 @@ mlx5_access_tunnel_offload_db
 		       struct mlx5_flow_tunnel *, const void *),
 	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
 	 void (*miss)(struct rte_eth_dev *, void *),
-	 void *ctx, bool lock_op)
-{
-	bool verdict = false;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tunnel;
+	 void *ctx, bool lock_op);
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+		     const struct rte_flow_tunnel *app_tunnel,
+		     struct mlx5_flow_tunnel **tunnel);
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
-		verdict = match(dev, tunnel, (const void *)ctx);
-		if (verdict)
-			break;
-	}
-	if (!lock_op)
+__extension__
+union tunnel_offload_mark {
+	uint32_t val;
+	struct {
+		uint32_t app_reserve:8;
+		uint32_t table_id:15;
+		uint32_t transfer:1;
+		uint32_t _unused_:8;
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow *dev_flow;
+	struct rte_flow_attr miss_attr = *attr;
+	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+	const struct rte_flow_item miss_items[2] = {
+		{
+			.type = RTE_FLOW_ITEM_TYPE_ETH,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		},
+		{
+			.type = RTE_FLOW_ITEM_TYPE_END,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		}
+	};
+	union tunnel_offload_mark mark_id;
+	struct rte_flow_action_mark miss_mark;
+	struct rte_flow_action miss_actions[3] = {
+		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+		[2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
+	};
+	const struct rte_flow_action_jump *jump_data;
+	uint32_t i, flow_table = 0; /* prevent compilation warning */
+	struct flow_grp_info grp_info = {
+		.external = 1,
+		.transfer = attr->transfer,
+		.fdb_def_rule = !!priv->fdb_def_rule,
+		.std_tbl_fix = 0,
+	};
+	int ret;
+
+	if (!attr->transfer) {
+		uint32_t q_size;
+
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+					 0, SOCKET_ID_ANY);
+		if (!ctx->queue)
+			return rte_flow_error_set
+				(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid default miss RSS");
+		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+		ctx->action_rss.level = 0,
+		ctx->action_rss.types = priv->rss_conf.rss_hf,
+		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
+		ctx->action_rss.queue_num = priv->reta_idx_n,
+		ctx->action_rss.key = priv->rss_conf.rss_key,
+		ctx->action_rss.queue = ctx->queue;
+		if (!priv->reta_idx_n || !priv->rxqs_n)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid port configuration");
+		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+			ctx->action_rss.types = 0;
+		for (i = 0; i != priv->reta_idx_n; ++i)
+			ctx->queue[i] = (*priv->reta_idx)[i];
+	} else {
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+	}
+	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+	jump_data = app_actions->conf;
+	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+	miss_attr.group = jump_data->group;
+	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+				       &flow_table, grp_info, error);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "invalid tunnel id");
+	mark_id.app_reserve = 0;
+	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+	mark_id.transfer = !!attr->transfer;
+	mark_id._unused_ = 0;
+	miss_mark.id = mark_id.val;
+	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+				    miss_items, miss_actions, flow_idx, error);
+	if (!dev_flow)
+		return -rte_errno;
+	dev_flow->flow = flow;
+	dev_flow->external = true;
+	dev_flow->tunnel = tunnel;
+	/* Subflow object was created, we must include one in the list. */
+	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+		      dev_flow->handle, next);
+	DRV_LOG(DEBUG,
+		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+		dev->data->port_id, tunnel->app_tunnel.type,
+		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+				 miss_actions, error);
+	if (!ret)
+		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+						  error);
+
+	return ret;
+}
+
+static const struct mlx5_flow_tbl_data_entry *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_hlist_entry *he;
+	union tunnel_offload_mark mbits = { .val = mark };
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+			.dummy = 0,
+			.domain = !!mbits.transfer,
+			.direction = 0,
+		}
+	};
+	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+	return he ?
+	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error)
+{
+	struct mlx5_hlist_entry *he;
+	struct tunnel_tbl_entry *tte;
+	union tunnel_tbl_key key = {
+		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+		.group = group
+	};
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_hlist *group_hash;
+
+	group_hash = tunnel ? tunnel->groups : thub->groups;
+	he = mlx5_hlist_lookup(group_hash, key.val, NULL);
+	if (!he) {
+		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - generate table id",
+			dev->data->port_id, key.tunnel_id, group);
+		he = mlx5_hlist_register(group_hash, key.val, NULL);
+	} else {
+		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - skip table id",
+			dev->data->port_id, key.tunnel_id, group);
+	}
+	if (!he)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  NULL,
+					  "tunnel group index not supported");
+	tte = container_of(he, typeof(*tte), hash);
+	*table = tte->flow_table;
+	DRV_LOG(DEBUG, "port %u tunnel %u group=%u table=%u",
+		dev->data->port_id, key.tunnel_id, group, *table);
+	return 0;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+		      struct mlx5_flow_tunnel *tunnel)
+{
+	/* no tunnel hub spinlock protection */
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_indexed_pool *ipool;
+
+	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+		dev->data->port_id, tunnel->tunnel_id);
+	rte_spinlock_lock(&thub->sl);
+	LIST_REMOVE(tunnel, chain);
+	rte_spinlock_unlock(&thub->sl);
+	mlx5_hlist_destroy(tunnel->groups);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
+}
+
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
+{
+	bool verdict = false;
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tunnel;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
+			break;
+	}
+	if (!lock_op)
 		rte_spinlock_unlock(&thub->sl);
 	if (verdict && hit)
 		hit(dev, tunnel, ctx);
@@ -7593,6 +7429,50 @@ mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
 	return ctx.tunnel;
 }
 
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+				   struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
+			tunnel_flow_tbl_to_id(tte->flow_table));
+	mlx5_free(tte);
+}
+
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
+				   uint64_t key __rte_unused,
+				   void *ctx __rte_unused)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte;
+
+	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+			  sizeof(*tte), 0,
+			  SOCKET_ID_ANY);
+	if (!tte)
+		goto err;
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
+			  &tte->flow_table);
+	if (tte->flow_table >= MLX5_MAX_TABLES) {
+		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+			tte->flow_table);
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
+				tte->flow_table);
+		goto err;
+	} else if (!tte->flow_table) {
+		goto err;
+	}
+	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+	return &tte->hash;
+err:
+	if (tte)
+		mlx5_free(tte);
+	return NULL;
+}
+
 static struct mlx5_flow_tunnel *
 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
@@ -7678,7 +7558,6 @@ static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
 
 	LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
 }
-
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
@@ -7738,23 +7617,286 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	return err;
 }
 
-#ifndef HAVE_MLX5DV_DR
-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
-#else
-#define MLX5_DOMAIN_SYNC_FLOW \
-	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
-#endif
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+			  struct rte_flow_tunnel *tunnel,
+			  const char *err_msg)
+{
+	err_msg = NULL;
+	if (!is_tunnel_offload_active(dev)) {
+		err_msg = "tunnel offload was not activated";
+		goto out;
+	} else if (!tunnel) {
+		err_msg = "no application tunnel";
+		goto out;
+	}
 
-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+	switch (tunnel->type) {
+	default:
+		err_msg = "unsupported tunnel type";
+		goto out;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		break;
+	}
+
+out:
+	return !err_msg;
+}
+
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+			   struct rte_flow_tunnel *app_tunnel,
+			   struct rte_flow_action **actions,
+			   uint32_t *num_of_actions,
+			   struct rte_flow_error *error)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	const struct mlx5_flow_driver_ops *fops;
 	int ret;
-	struct rte_flow_attr attr = { .transfer = 0 };
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
 
-	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
-	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
-	if (ret > 0)
-		ret = -ret;
-	return ret;
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*actions = &tunnel->action;
+	*num_of_actions = 1;
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+		       struct rte_flow_tunnel *app_tunnel,
+		       struct rte_flow_item **items,
+		       uint32_t *num_of_items,
+		       struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
+
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*items = &tunnel->item;
+	*num_of_items = 1;
+	return 0;
+}
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err)
+{
+	uint64_t ol_flags = m->ol_flags;
+	const struct mlx5_flow_tbl_data_entry *tble;
+	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+	if ((ol_flags & mask) != mask)
+		goto err;
+	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+	if (!tble) {
+		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+			dev->data->port_id, m->hash.fdir.hi);
+		goto err;
+	}
+	MLX5_ASSERT(tble->tunnel);
+	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+	info->group_id = tble->group_id;
+	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
+		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+	return 0;
+
+err:
+	return rte_flow_error_set(err, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "failed to get restore info");
+}
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+			   __rte_unused struct rte_flow_tunnel *app_tunnel,
+			   __rte_unused struct rte_flow_action **actions,
+			   __rte_unused uint32_t *num_of_actions,
+			   __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+		       __rte_unused struct rte_flow_tunnel *app_tunnel,
+		       __rte_unused struct rte_flow_item **items,
+		       __rte_unused uint32_t *num_of_items,
+		       __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+			      __rte_unused struct rte_flow_item *pmd_items,
+			      __rte_unused uint32_t num_items,
+			      __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused struct rte_flow_action *pmd_action,
+				__rte_unused uint32_t num_actions,
+				__rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+				  __rte_unused struct rte_mbuf *m,
+				  __rte_unused struct rte_flow_restore_info *i,
+				  __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+			     __rte_unused struct rte_flow *flow,
+			     __rte_unused const struct rte_flow_attr *attr,
+			     __rte_unused const struct rte_flow_action *actions,
+			     __rte_unused uint32_t flow_idx,
+			     __rte_unused struct tunnel_default_miss_ctx *ctx,
+			     __rte_unused struct rte_flow_error *error)
{
+	return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+		    __rte_unused uint32_t id)
+{
+	return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+		      __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused const struct mlx5_flow_tunnel *t,
+				__rte_unused uint32_t group,
+				__rte_unused uint32_t *table,
+				struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload requires DV support");
+}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index bdf2c50090..672c27ecb9 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -965,18 +965,6 @@ struct tunnel_tbl_entry {
 	uint32_t flow_table;
 };
 
-static inline uint32_t
-tunnel_id_to_flow_tbl(uint32_t id)
-{
-	return id | (1u << 16);
-}
-
-static inline uint32_t
-tunnel_flow_tbl_to_id(uint32_t flow_tbl)
-{
-	return flow_tbl & ~(1u << 16);
-}
-
 union tunnel_tbl_key {
 	uint64_t val;
 	struct {
@@ -995,8 +983,13 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)
 static inline bool
 is_tunnel_offload_active(struct rte_eth_dev *dev)
 {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_priv *priv = dev->data->dev_private;
 	return !!priv->config.dv_miss_info;
+#else
+	RTE_SET_USED(dev);
+	return false;
+#endif
 }
 
 static inline bool
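
---
Note on the mark encoding used by the restore-info path above: the PMD
recovers the tunnel context from the 32-bit flow mark alone, so both the
tunnel flow-table id and the transfer bit must fit inside it; everything
else is looked up again through the table hash list. The standalone
program below is a minimal sketch, not part of the patch: it copies the
union layout and the two inline helpers verbatim from the diff, and it
assumes (as the driver's __extension__ bit-field does) a compiler whose
bit-field ordering matches the device's mark layout.

#include <stdint.h>
#include <stdio.h>

/* Mark layout copied from union tunnel_offload_mark in the patch. */
__extension__
union tunnel_offload_mark {
	uint32_t val;
	struct {
		uint32_t app_reserve:8;
		uint32_t table_id:15;
		uint32_t transfer:1;
		uint32_t _unused_:8;
	};
};

/* Bit 16 keeps tunnel flow-table ids outside the regular table-id space. */
static inline uint32_t tunnel_id_to_flow_tbl(uint32_t id)
{
	return id | (1u << 16);
}

static inline uint32_t tunnel_flow_tbl_to_id(uint32_t flow_tbl)
{
	return flow_tbl & ~(1u << 16);
}

int main(void)
{
	/* Encode: the group-to-table translation result goes into the mark,
	 * as flow_tunnel_add_default_miss() does for the miss rule.
	 */
	uint32_t flow_table = tunnel_id_to_flow_tbl(5);
	union tunnel_offload_mark mark = { .val = 0 };

	mark.app_reserve = 0;
	mark.table_id = tunnel_flow_tbl_to_id(flow_table);
	mark.transfer = 0;
	mark._unused_ = 0;
	/* Decode: tunnel_mark_decode() rebuilds the table key the same way
	 * from the mark carried in the mbuf.
	 */
	printf("mark=%#x -> table=%#x\n", mark.val,
	       tunnel_id_to_flow_tbl(mark.table_id));
	return 0;
}

Because only these bits survive a missed packet's trip to software, the
mark is the single piece of state shared between the hardware miss rule
and mlx5_flow_tunnel_get_restore_info() on the Rx path.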