get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
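
As a quick illustration, the sketch below drives these endpoints from Python with the requests library. The patch id and field names match the example response that follows; the API token is a placeholder, and updating a patch is assumed to require an authenticated account with maintainer rights on the project (check the server's OPTIONS output for the exact writable fields).

import requests

BASE_URL = "https://patches.dpdk.org/api"
PATCH_ID = 84226  # the patch shown in the example response below

# GET: read access needs no authentication.
resp = requests.get(f"{BASE_URL}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# PATCH: partial update. Write access requires an API token for an account
# with sufficient rights on the project; the token below is a placeholder.
headers = {"Authorization": "Token <your-api-token>"}
update = {"state": "superseded", "archived": True}
resp = requests.patch(f"{BASE_URL}/patches/{PATCH_ID}/", json=update, headers=headers)
resp.raise_for_status()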

GET /api/patches/84226/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 84226,
    "url": "https://patches.dpdk.org/api/patches/84226/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20201116094905.12873-6-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201116094905.12873-6-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201116094905.12873-6-getelson@nvidia.com",
    "date": "2020-11-16T09:49:04",
    "name": "[v4,5/6] net/mlx5: fix tunnel offload hub multi-thread protection",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a1fb64e958c27e59856bd76c07fe5a9df787709a",
    "submitter": {
        "id": 1882,
        "url": "https://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20201116094905.12873-6-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 13902,
            "url": "https://patches.dpdk.org/api/series/13902/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13902",
            "date": "2020-11-16T09:48:59",
            "name": "restore tunnel offload functionality in mlx5",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/13902/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/84226/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/84226/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C56FAA04DB;\n\tMon, 16 Nov 2020 10:51:01 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1D794C92A;\n\tMon, 16 Nov 2020 10:49:39 +0100 (CET)",
            "from hqnvemgate26.nvidia.com (hqnvemgate26.nvidia.com\n [216.228.121.65]) by dpdk.org (Postfix) with ESMTP id D5E59C91C\n for <dev@dpdk.org>; Mon, 16 Nov 2020 10:49:34 +0100 (CET)",
            "from hqmail.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate26.nvidia.com (using TLS: TLSv1.2, AES256-SHA)\n id <B5fb24b310000>; Mon, 16 Nov 2020 01:49:37 -0800",
            "from nvidia.com (10.124.1.5) by HQMAIL107.nvidia.com (172.20.187.13)\n with Microsoft SMTP Server (TLS) id 15.0.1473.3;\n Mon, 16 Nov 2020 09:49:30 +0000"
        ],
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Shahaf Shuler\n <shahafs@nvidia.com>, Suanming Mou <suanmingm@nvidia.com>",
        "Date": "Mon, 16 Nov 2020 11:49:04 +0200",
        "Message-ID": "<20201116094905.12873-6-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20201116094905.12873-1-getelson@nvidia.com>",
        "References": "<20201111071417.21177-1-getelson@nvidia.com>\n <20201116094905.12873-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "quoted-printable",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.124.1.5]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1605520177; bh=NnLiLYWnADjKAk0/MI75BmCafdMDueW+/uffl4orz5E=;\n h=From:To:CC:Subject:Date:Message-ID:X-Mailer:In-Reply-To:\n References:MIME-Version:Content-Transfer-Encoding:Content-Type:\n X-Originating-IP:X-ClientProxiedBy;\n b=GEyBuRI4Kr3Bg15G1+QxDhUs2khWxmuaJ4PiP5KvcL5gvTJ0+Vy/eTFO6LgU7eD4q\n ebPqlpZEcHG02hU4se2Fez3DUfASY6M8FAch46N38KmddzLUm6uTGQsnGG4nOz0ovz\n CRS4vEpojuD8PQlrS/nPyzpIu8Lkwg1CuawExVN4mUwOVfZu7bynyrHb9q2jCgVXaa\n VBBwKyUyEWIX5mPEhFstH43aSOLuWpA1lC+WrQp19hS8AVUNVbjnBQtxMgG0dNTnrU\n NNtqRd3D6j8qyRJpakU8hLZRqTT2g2Nl4qfWKPGzdrw0HsyPbBsHU8wg/lUFVmbEUy\n xShSiJWVTC3Gg==",
        "Subject": "[dpdk-dev] [PATCH v4 5/6] net/mlx5: fix tunnel offload hub\n\tmulti-thread protection",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The original patch was removing active tunnel offload objects from a\ntunnels db list without checking its reference counter value.\nThat action was leading to a PMD crash.\n\nCurrent patch isolates tunnels db list into a separate API. That API\nmanages MT protection of the tunnel offload db.\n\nFixes: e4f5880 (\"net/mlx5: make tunnel hub list thread safe\")\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 266 +++++++++++++++++++++++++----------\n drivers/net/mlx5/mlx5_flow.h |   6 +-\n 2 files changed, 195 insertions(+), 77 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex cd94a73e53..eea185ba82 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -5639,11 +5639,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,\n \tif (flow->tunnel) {\n \t\tstruct mlx5_flow_tunnel *tunnel;\n \n-\t\trte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);\n \t\ttunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);\n \t\tRTE_VERIFY(tunnel);\n-\t\tLIST_REMOVE(tunnel, chain);\n-\t\trte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);\n \t\tif (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))\n \t\t\tmlx5_flow_tunnel_free(dev, tunnel);\n \t}\n@@ -7220,6 +7217,15 @@ union tunnel_offload_mark {\n \t};\n };\n \n+static bool\n+mlx5_access_tunnel_offload_db\n+\t(struct rte_eth_dev *dev,\n+\t bool (*match)(struct rte_eth_dev *,\n+\t\t       struct mlx5_flow_tunnel *, const void *),\n+\t void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),\n+\t void (*miss)(struct rte_eth_dev *, void *),\n+\t void *ctx, bool lock_op);\n+\n static int\n flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \t\t\t     struct rte_flow *flow,\n@@ -7441,18 +7447,72 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,\n \tmlx5_ipool_free(ipool, tunnel->tunnel_id);\n }\n \n-static struct mlx5_flow_tunnel *\n-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)\n+static bool\n+mlx5_access_tunnel_offload_db\n+\t(struct rte_eth_dev *dev,\n+\t bool (*match)(struct rte_eth_dev *,\n+\t\t       struct mlx5_flow_tunnel *, const void *),\n+\t void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),\n+\t void (*miss)(struct rte_eth_dev *, void *),\n+\t void *ctx, bool lock_op)\n {\n+\tbool verdict = false;\n \tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n+\tstruct mlx5_flow_tunnel *tunnel;\n \n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (tun->tunnel_id == id)\n+\trte_spinlock_lock(&thub->sl);\n+\tLIST_FOREACH(tunnel, &thub->tunnels, chain) {\n+\t\tverdict = match(dev, tunnel, (const void *)ctx);\n+\t\tif (verdict)\n \t\t\tbreak;\n \t}\n+\tif (!lock_op)\n+\t\trte_spinlock_unlock(&thub->sl);\n+\tif (verdict && hit)\n+\t\thit(dev, tunnel, ctx);\n+\tif (!verdict && miss)\n+\t\tmiss(dev, ctx);\n+\tif (lock_op)\n+\t\trte_spinlock_unlock(&thub->sl);\n \n-\treturn tun;\n+\treturn verdict;\n+}\n+\n+struct tunnel_db_find_tunnel_id_ctx {\n+\tuint32_t tunnel_id;\n+\tstruct mlx5_flow_tunnel *tunnel;\n+};\n+\n+static bool\n+find_tunnel_id_match(struct rte_eth_dev *dev,\n+\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n+{\n+\tconst struct tunnel_db_find_tunnel_id_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\treturn tunnel->tunnel_id == ctx->tunnel_id;\n+}\n+\n+static void\n+find_tunnel_id_hit(struct rte_eth_dev *dev,\n+\t\t   struct mlx5_flow_tunnel *tunnel, void *x)\n+{\n+\tstruct tunnel_db_find_tunnel_id_ctx *ctx = x;\n+\tRTE_SET_USED(dev);\n+\tctx->tunnel = tunnel;\n+}\n+\n+static struct mlx5_flow_tunnel *\n+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)\n+{\n+\tstruct tunnel_db_find_tunnel_id_ctx ctx = {\n+\t\t.tunnel_id = id,\n+\t};\n+\n+\tmlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,\n+\t\t\t\t      find_tunnel_id_hit, NULL, &ctx, true);\n+\n+\treturn ctx.tunnel;\n }\n \n static struct mlx5_flow_tunnel *\n@@ -7500,38 +7560,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,\n \treturn tunnel;\n }\n \n+struct tunnel_db_get_tunnel_ctx {\n+\tconst struct rte_flow_tunnel 
*app_tunnel;\n+\tstruct mlx5_flow_tunnel *tunnel;\n+};\n+\n+static bool get_tunnel_match(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n+{\n+\tconst struct tunnel_db_get_tunnel_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\treturn !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,\n+\t\t       sizeof(*ctx->app_tunnel));\n+}\n+\n+static void get_tunnel_hit(struct rte_eth_dev *dev,\n+\t\t\t   struct mlx5_flow_tunnel *tunnel, void *x)\n+{\n+\t/* called under tunnel spinlock protection */\n+\tstruct tunnel_db_get_tunnel_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\ttunnel->refctn++;\n+\tctx->tunnel = tunnel;\n+}\n+\n+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)\n+{\n+\t/* called under tunnel spinlock protection */\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct tunnel_db_get_tunnel_ctx *ctx = x;\n+\n+\trte_spinlock_unlock(&thub->sl);\n+\tctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);\n+\tctx->tunnel->refctn = 1;\n+\trte_spinlock_lock(&thub->sl);\n+\tif (ctx->tunnel)\n+\t\tLIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);\n+}\n+\n+\n static int\n mlx5_get_flow_tunnel(struct rte_eth_dev *dev,\n \t\t     const struct rte_flow_tunnel *app_tunnel,\n \t\t     struct mlx5_flow_tunnel **tunnel)\n {\n-\tint ret;\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n-\n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (!memcmp(app_tunnel, &tun->app_tunnel,\n-\t\t\t    sizeof(*app_tunnel))) {\n-\t\t\t*tunnel = tun;\n-\t\t\tret = 0;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\tif (!tun) {\n-\t\ttun = mlx5_flow_tunnel_allocate(dev, app_tunnel);\n-\t\tif (tun) {\n-\t\t\tLIST_INSERT_HEAD(&thub->tunnels, tun, chain);\n-\t\t\t*tunnel = tun;\n-\t\t} else {\n-\t\t\tret = -ENOMEM;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (tun)\n-\t\t__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);\n+\tstruct tunnel_db_get_tunnel_ctx ctx = {\n+\t\t.app_tunnel = app_tunnel,\n+\t};\n \n-\treturn ret;\n+\tmlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,\n+\t\t\t\t      get_tunnel_miss, &ctx, true);\n+\t*tunnel = ctx.tunnel;\n+\treturn ctx.tunnel ? 
0 : -ENOMEM;\n }\n \n void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)\n@@ -7631,56 +7713,88 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,\n \t*num_of_items = 1;\n \treturn 0;\n }\n+\n+struct tunnel_db_element_release_ctx {\n+\tstruct rte_flow_item *items;\n+\tstruct rte_flow_action *actions;\n+\tuint32_t num_elements;\n+\tstruct rte_flow_error *error;\n+\tint ret;\n+};\n+\n+static bool\n+tunnel_element_release_match(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n+{\n+\tconst struct tunnel_db_element_release_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\tif (ctx->num_elements != 1)\n+\t\treturn false;\n+\telse if (ctx->items)\n+\t\treturn ctx->items == &tunnel->item;\n+\telse if (ctx->actions)\n+\t\treturn ctx->actions == &tunnel->action;\n+\n+\treturn false;\n+}\n+\n+static void\n+tunnel_element_release_hit(struct rte_eth_dev *dev,\n+\t\t\t   struct mlx5_flow_tunnel *tunnel, void *x)\n+{\n+\tstruct tunnel_db_element_release_ctx *ctx = x;\n+\tctx->ret = 0;\n+\tif (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))\n+\t\tmlx5_flow_tunnel_free(dev, tunnel);\n+}\n+\n+static void\n+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)\n+{\n+\tstruct tunnel_db_element_release_ctx *ctx = x;\n+\tRTE_SET_USED(dev);\n+\tctx->ret = rte_flow_error_set(ctx->error, EINVAL,\n+\t\t\t\t      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t      \"invalid argument\");\n+}\n+\n static int\n mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,\n-\t\t\t      struct rte_flow_item *pmd_items,\n-\t\t\t      uint32_t num_items, struct rte_flow_error *err)\n-{\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n+\t\t       struct rte_flow_item *pmd_items,\n+\t\t       uint32_t num_items, struct rte_flow_error *err)\n+{\n+\tstruct tunnel_db_element_release_ctx ctx = {\n+\t\t.items = pmd_items,\n+\t\t.actions = NULL,\n+\t\t.num_elements = num_items,\n+\t\t.error = err,\n+\t};\n \n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (&tun->item == pmd_items) {\n-\t\t\tLIST_REMOVE(tun, chain);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (!tun || num_items != 1)\n-\t\treturn rte_flow_error_set(err, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"invalid argument\");\n-\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n-\t\tmlx5_flow_tunnel_free(dev, tun);\n-\treturn 0;\n+\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n+\t\t\t\t      tunnel_element_release_hit,\n+\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n+\n+\treturn ctx.ret;\n }\n \n static int\n mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,\n-\t\t\t\tstruct rte_flow_action *pmd_actions,\n-\t\t\t\tuint32_t num_actions,\n-\t\t\t\tstruct rte_flow_error *err)\n-{\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n+\t\t\t struct rte_flow_action *pmd_actions,\n+\t\t\t uint32_t num_actions, struct rte_flow_error *err)\n+{\n+\tstruct tunnel_db_element_release_ctx ctx = {\n+\t\t.items = NULL,\n+\t\t.actions = pmd_actions,\n+\t\t.num_elements = num_actions,\n+\t\t.error = err,\n+\t};\n \n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (&tun->action == pmd_actions) {\n-\t\t\tLIST_REMOVE(tun, chain);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (!tun || num_actions != 1)\n-\t\treturn 
rte_flow_error_set(err, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"invalid argument\");\n-\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n-\t\tmlx5_flow_tunnel_free(dev, tun);\n+\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n+\t\t\t\t      tunnel_element_release_hit,\n+\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n \n-\treturn 0;\n+\treturn ctx.ret;\n }\n \n static int\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex c33c0fee7c..f64384217f 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {\n \n /** PMD tunnel related context */\n struct mlx5_flow_tunnel_hub {\n+\t/* Tunnels list\n+\t * Access to the list MUST be MT protected\n+\t */\n \tLIST_HEAD(, mlx5_flow_tunnel) tunnels;\n-\trte_spinlock_t sl;\t\t\t/* Tunnel list spinlock. */\n+\t /* protect access to the tunnels list */\n+\trte_spinlock_t sl;\n \tstruct mlx5_hlist *groups;\t\t/** non tunnel groups */\n };\n \n",
    "prefixes": [
        "v4",
        "5/6"
    ]
}
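
The mbox, comments and checks fields in the response are themselves plain URLs that can be fetched directly. A minimal sketch, assuming a local DPDK checkout at an illustrative path, of downloading the mbox above and applying it with git am:

import subprocess
import requests

# The "mbox" URL from the response above; the local paths are illustrative only.
MBOX_URL = ("https://patches.dpdk.org/project/dpdk/patch/"
            "20201116094905.12873-6-getelson@nvidia.com/mbox/")

resp = requests.get(MBOX_URL)
resp.raise_for_status()
with open("/tmp/84226.mbox", "wb") as f:
    f.write(resp.content)

# Apply the patch to a local DPDK checkout (path is a placeholder).
subprocess.run(["git", "-C", "/path/to/dpdk", "am", "/tmp/84226.mbox"], check=True)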