From patchwork Mon Nov 16 09:13:25 2020
X-Patchwork-Submitter: Gregory Etelson
X-Patchwork-Id: 84217
X-Patchwork-Delegate: rasland@nvidia.com
From: Gregory Etelson
CC: Viacheslav Ovsiienko, Shahaf Shuler, Suanming Mou
Date: Mon, 16 Nov 2020 11:13:25 +0200
Message-ID: <20201116091326.10511-6-getelson@nvidia.com>
In-Reply-To: <20201116091326.10511-1-getelson@nvidia.com>
References: <20201111071417.21177-1-getelson@nvidia.com>
 <20201116091326.10511-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 5/6] net/mlx5: fix tunnel offload hub
 multi-thread protection

The original patch removed active tunnel offload objects from the
tunnels db list without checking their reference counters, which led
to a PMD crash. The current patch isolates the tunnels db list behind
a separate API that manages MT protection of the tunnel offload db.
Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")

Signed-off-by: Gregory Etelson
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5_flow.c | 266 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 2 files changed, 195 insertions(+), 77 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a9ece25e65..6efe799a2d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5639,11 +5639,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;

-		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		LIST_REMOVE(tunnel, chain);
-		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7264,6 +7261,15 @@ union tunnel_offload_mark {
 	};
 };

+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op);
+
 static int
 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			     struct rte_flow *flow,
@@ -7441,18 +7447,72 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }

-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
 {
+	bool verdict = false;
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct mlx5_flow_tunnel *tunnel;

-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
 			break;
 	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);

-	return tun;
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
 }

 static struct mlx5_flow_tunnel *
@@ -7500,38 +7560,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	return tunnel;
 }

+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	ctx->tunnel->refctn = 1;
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel)
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+}
+
+
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel)
 {
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};

-	return ret;
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
 }

 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
@@ -7631,56 +7713,88 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 	*num_of_items = 1;
 	return 0;
 }
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
-			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};

-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
 }

 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
-				struct rte_flow_action *pmd_actions,
-				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};

-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);

-	return 0;
+	return ctx.ret;
 }

 static int
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c33c0fee7c..f64384217f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {

 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
+	/* Tunnels list
+	 * Access to the list MUST be MT protected
+	 */
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	rte_spinlock_t sl;			/* Tunnel list spinlock. */
+	/* protect access to the tunnels list */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };
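
The new mlx5_access_tunnel_offload_db() helper concentrates all list
walking and spinlock handling in one place: callers supply a match()
predicate plus optional hit()/miss() callbacks and a lock_op flag that
decides whether the callbacks run while the list lock is still held
(the lookup and get paths) or after it has been dropped (the item and
action release paths). The stand-alone sketch below shows the same
control flow in isolation; every name in it (access_db, struct entry,
db_lock, find_and_ref) is an invented stand-in that uses a pthread
mutex instead of an rte_spinlock, not mlx5 or DPDK code.

/*
 * Minimal sketch of the match/hit/miss accessor pattern.
 * All types and names here are illustrative stand-ins, NOT mlx5 code.
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	unsigned int id;
	unsigned int refcnt;
	struct entry *next;
};

static struct entry *db_head;	/* guarded by db_lock */
static pthread_mutex_t db_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Walk the list under the lock and run the callbacks:
 * - match() selects an entry;
 * - hit() runs when a match was found, miss() otherwise;
 * - lock_op=true keeps the lock held around hit()/miss(),
 *   lock_op=false releases it first, so they run unlocked.
 */
static bool
access_db(bool (*match)(struct entry *, const void *),
	  void (*hit)(struct entry *, void *),
	  void (*miss)(void *),
	  void *ctx, bool lock_op)
{
	struct entry *e;
	bool verdict = false;

	pthread_mutex_lock(&db_lock);
	for (e = db_head; e != NULL; e = e->next) {
		verdict = match(e, ctx);
		if (verdict)
			break;
	}
	if (!lock_op)
		pthread_mutex_unlock(&db_lock);
	if (verdict && hit)
		hit(e, ctx);
	if (!verdict && miss)
		miss(ctx);
	if (lock_op)
		pthread_mutex_unlock(&db_lock);
	return verdict;
}

/* Example caller: look up an entry by id and take a reference under the lock. */
struct find_ctx {
	unsigned int id;
	struct entry *found;
};

static bool
find_match(struct entry *e, const void *x)
{
	const struct find_ctx *ctx = x;

	return e->id == ctx->id;
}

static void
find_hit(struct entry *e, void *x)
{
	struct find_ctx *ctx = x;

	e->refcnt++;	/* safe: runs while db_lock is still held */
	ctx->found = e;
}

static struct entry *
find_and_ref(unsigned int id)
{
	struct find_ctx ctx = { .id = id, .found = NULL };

	access_db(find_match, find_hit, NULL, &ctx, true);
	return ctx.found;
}

int
main(void)
{
	struct entry e1 = { .id = 42, .refcnt = 1, .next = NULL };

	db_head = &e1;
	printf("found id 42: %s\n", find_and_ref(42) ? "yes" : "no");
	printf("refcnt now: %u\n", e1.refcnt);
	return 0;
}

Passing true for lock_op mirrors mlx5_find_tunnel_id() and
mlx5_get_flow_tunnel(), where the reference counter must be updated
before the spinlock is released; the item/action release paths pass
false so that mlx5_flow_tunnel_free() runs outside the spinlock.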