get:
Show a patch.

patch:
Partially update a patch (only the fields provided).

put:
Fully update a patch.

GET /api/patches/82597/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 82597,
    "url": "http://patches.dpdk.org/api/patches/82597/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1603877633-293405-25-git-send-email-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1603877633-293405-25-git-send-email-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1603877633-293405-25-git-send-email-suanmingm@nvidia.com",
    "date": "2020-10-28T09:33:43",
    "name": "[v5,24/34] net/mlx5: make matcher list thread safe",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "4b14ac9b32d5eb8410d09b8f2747b67a952b7b37",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1603877633-293405-25-git-send-email-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 13413,
            "url": "http://patches.dpdk.org/api/series/13413/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13413",
            "date": "2020-10-28T09:33:20",
            "name": "[v5,01/34] net/mlx5: use thread safe index pool for flow objects",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/13413/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/82597/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/82597/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 29CDDA04DD;\n\tWed, 28 Oct 2020 10:41:46 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id CD50CC8B0;\n\tWed, 28 Oct 2020 10:37:23 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id 04D32C90E\n for <dev@dpdk.org>; Wed, 28 Oct 2020 10:34:53 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n suanmingm@nvidia.com) with SMTP; 28 Oct 2020 11:34:51 +0200",
            "from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09S9Y0S6014036;\n Wed, 28 Oct 2020 11:34:49 +0200"
        ],
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Shahaf Shuler <shahafs@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Cc": "dev@dpdk.org, rasland@nvidia.com, Xueming Li <xuemingl@nvidia.com>",
        "Date": "Wed, 28 Oct 2020 17:33:43 +0800",
        "Message-Id": "<1603877633-293405-25-git-send-email-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1603877633-293405-1-git-send-email-suanmingm@nvidia.com>",
        "References": "<1601984948-313027-1-git-send-email-suanmingm@nvidia.com>\n <1603877633-293405-1-git-send-email-suanmingm@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH v5 24/34] net/mlx5: make matcher list thread safe",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Xueming Li <xuemingl@nvidia.com>\n\nTo support multi-thread flow insertion, this path converts matcher list\nto use thread safe cache list API.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h         |   3 +\n drivers/net/mlx5/mlx5_flow.h    |  15 ++-\n drivers/net/mlx5/mlx5_flow_dv.c | 215 +++++++++++++++++++++-------------------\n 3 files changed, 129 insertions(+), 104 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 7804d3a..eadb797 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -32,6 +32,9 @@\n #include \"mlx5_os.h\"\n #include \"mlx5_autoconf.h\"\n \n+\n+#define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)\n+\n enum mlx5_ipool_index {\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \tMLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 84988c6..6c0815a 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -394,11 +394,9 @@ struct mlx5_flow_dv_match_params {\n \n /* Matcher structure. */\n struct mlx5_flow_dv_matcher {\n-\tLIST_ENTRY(mlx5_flow_dv_matcher) next;\n-\t/**< Pointer to the next element. */\n+\tstruct mlx5_cache_entry entry; /**< Pointer to the next element. */\n \tstruct mlx5_flow_tbl_resource *tbl;\n \t/**< Pointer to the table(group) the matcher associated with. */\n-\tuint32_t refcnt; /**< Reference counter. */\n \tvoid *matcher_object; /**< Pointer to DV matcher */\n \tuint16_t crc; /**< CRC of key. */\n \tuint16_t priority; /**< Priority of matcher. */\n@@ -532,7 +530,7 @@ struct mlx5_flow_tbl_data_entry {\n \t/**< hash list entry, 64-bits key inside. */\n \tstruct mlx5_flow_tbl_resource tbl;\n \t/**< flow table resource. */\n-\tLIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;\n+\tstruct mlx5_cache_list matchers;\n \t/**< matchers' header associated with the flow table. */\n \tstruct mlx5_flow_dv_jump_tbl_resource jump;\n \t/**< jump resource, at most one for each table created. */\n@@ -542,6 +540,7 @@ struct mlx5_flow_tbl_data_entry {\n \tuint32_t group_id;\n \tbool external;\n \tbool tunnel_offload; /* Tunnel offlod table or not. */\n+\tbool is_egress; /**< Egress table. */\n };\n \n /* Sub rdma-core actions list. 
*/\n@@ -1430,4 +1429,12 @@ struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,\n \t\t\t\tuint64_t key, void *cb_ctx);\n void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,\n \t\t\t\t   struct mlx5_hlist_entry *entry);\n+\n+int flow_dv_matcher_match_cb(struct mlx5_cache_list *list,\n+\t\t\t     struct mlx5_cache_entry *entry, void *ctx);\n+struct mlx5_cache_entry *flow_dv_matcher_create_cb(struct mlx5_cache_list *list,\n+\t\tstruct mlx5_cache_entry *entry, void *ctx);\n+void flow_dv_matcher_remove_cb(struct mlx5_cache_list *list,\n+\t\t\t       struct mlx5_cache_entry *entry);\n+\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 78984f9..1f82b96 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -71,7 +71,7 @@\n };\n \n static int\n-flow_dv_tbl_resource_release(struct rte_eth_dev *dev,\n+flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,\n \t\t\t     struct mlx5_flow_tbl_resource *tbl);\n \n static int\n@@ -7944,6 +7944,7 @@ struct mlx5_hlist_entry *\n \ttbl_data->group_id = tt_prm->group_id;\n \ttbl_data->external = tt_prm->external;\n \ttbl_data->tunnel_offload = is_tunnel_offload_active(dev);\n+\ttbl_data->is_egress = !!key.direction;\n \ttbl = &tbl_data->tbl;\n \tif (key.dummy)\n \t\treturn &tbl_data->entry;\n@@ -7974,6 +7975,13 @@ struct mlx5_hlist_entry *\n \t\t\treturn NULL;\n \t\t}\n \t}\n+\tMKSTR(matcher_name, \"%s_%s_%u_matcher_cache\",\n+\t      key.domain ? \"FDB\" : \"NIC\", key.direction ? 
\"egress\" : \"ingress\",\n+\t      key.table_id);\n+\tmlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,\n+\t\t\t     flow_dv_matcher_create_cb,\n+\t\t\t     flow_dv_matcher_match_cb,\n+\t\t\t     flow_dv_matcher_remove_cb);\n \treturn &tbl_data->entry;\n }\n \n@@ -8085,14 +8093,15 @@ struct mlx5_flow_tbl_resource *\n \t\t\ttbl_data->tunnel->tunnel_id : 0,\n \t\t\ttbl_data->group_id);\n \t}\n+\tmlx5_cache_list_destroy(&tbl_data->matchers);\n \tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);\n }\n \n /**\n  * Release a flow table.\n  *\n- * @param[in] dev\n- *   Pointer to rte_eth_dev structure.\n+ * @param[in] sh\n+ *   Pointer to device shared structure.\n  * @param[in] tbl\n  *   Table resource to be released.\n  *\n@@ -8100,11 +8109,9 @@ struct mlx5_flow_tbl_resource *\n  *   Returns 0 if table was released, else return 1;\n  */\n static int\n-flow_dv_tbl_resource_release(struct rte_eth_dev *dev,\n+flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,\n \t\t\t     struct mlx5_flow_tbl_resource *tbl)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n \tstruct mlx5_flow_tbl_data_entry *tbl_data =\n \t\tcontainer_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);\n \n@@ -8113,6 +8120,63 @@ struct mlx5_flow_tbl_resource *\n \treturn mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);\n }\n \n+int\n+flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,\n+\t\t\t struct mlx5_cache_entry *entry, void *cb_ctx)\n+{\n+\tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n+\tstruct mlx5_flow_dv_matcher *ref = ctx->data;\n+\tstruct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),\n+\t\t\t\t\t\t\tentry);\n+\n+\treturn cur->crc != ref->crc ||\n+\t       cur->priority != ref->priority ||\n+\t       memcmp((const void *)cur->mask.buf,\n+\t\t      (const void *)ref->mask.buf, ref->mask.size);\n+}\n+\n+struct mlx5_cache_entry *\n+flow_dv_matcher_create_cb(struct mlx5_cache_list 
*list,\n+\t\t\t  struct mlx5_cache_entry *entry __rte_unused,\n+\t\t\t  void *cb_ctx)\n+{\n+\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n+\tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n+\tstruct mlx5_flow_dv_matcher *ref = ctx->data;\n+\tstruct mlx5_flow_dv_matcher *cache;\n+\tstruct mlx5dv_flow_matcher_attr dv_attr = {\n+\t\t.type = IBV_FLOW_ATTR_NORMAL,\n+\t\t.match_mask = (void *)&ref->mask,\n+\t};\n+\tstruct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,\n+\t\t\t\t\t\t\t    typeof(*tbl), tbl);\n+\tint ret;\n+\n+\tcache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);\n+\tif (!cache) {\n+\t\trte_flow_error_set(ctx->error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"cannot create matcher\");\n+\t\treturn NULL;\n+\t}\n+\t*cache = *ref;\n+\tdv_attr.match_criteria_enable =\n+\t\tflow_dv_matcher_enable(cache->mask.buf);\n+\tdv_attr.priority = ref->priority;\n+\tif (tbl->is_egress)\n+\t\tdv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;\n+\tret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,\n+\t\t\t\t\t       &cache->matcher_object);\n+\tif (ret) {\n+\t\tmlx5_free(cache);\n+\t\trte_flow_error_set(ctx->error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"cannot create matcher\");\n+\t\treturn NULL;\n+\t}\n+\treturn &cache->entry;\n+}\n+\n /**\n  * Register the flow matcher.\n  *\n@@ -8132,88 +8196,35 @@ struct mlx5_flow_tbl_resource *\n  */\n static int\n flow_dv_matcher_register(struct rte_eth_dev *dev,\n-\t\t\t struct mlx5_flow_dv_matcher *matcher,\n+\t\t\t struct mlx5_flow_dv_matcher *ref,\n \t\t\t union mlx5_flow_tbl_key *key,\n \t\t\t struct mlx5_flow *dev_flow,\n \t\t\t struct rte_flow_error *error)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n-\tstruct mlx5_flow_dv_matcher *cache_matcher;\n-\tstruct mlx5dv_flow_matcher_attr dv_attr = {\n-\t\t.type = IBV_FLOW_ATTR_NORMAL,\n-\t\t.match_mask = (void 
*)&matcher->mask,\n-\t};\n+\tstruct mlx5_cache_entry *entry;\n+\tstruct mlx5_flow_dv_matcher *cache;\n \tstruct mlx5_flow_tbl_resource *tbl;\n \tstruct mlx5_flow_tbl_data_entry *tbl_data;\n-\tint ret;\n+\tstruct mlx5_flow_cb_ctx ctx = {\n+\t\t.error = error,\n+\t\t.data = ref,\n+\t};\n \n \ttbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,\n \t\t\t\t       key->domain, false, NULL, 0, 0, error);\n \tif (!tbl)\n \t\treturn -rte_errno;\t/* No need to refill the error info */\n \ttbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);\n-\t/* Lookup from cache. */\n-\tLIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {\n-\t\tif (matcher->crc == cache_matcher->crc &&\n-\t\t    matcher->priority == cache_matcher->priority &&\n-\t\t    !memcmp((const void *)matcher->mask.buf,\n-\t\t\t    (const void *)cache_matcher->mask.buf,\n-\t\t\t    cache_matcher->mask.size)) {\n-\t\t\tDRV_LOG(DEBUG,\n-\t\t\t\t\"%s group %u priority %hd use %s \"\n-\t\t\t\t\"matcher %p: refcnt %d++\",\n-\t\t\t\tkey->domain ? \"FDB\" : \"NIC\", key->table_id,\n-\t\t\t\tcache_matcher->priority,\n-\t\t\t\tkey->direction ? \"tx\" : \"rx\",\n-\t\t\t\t(void *)cache_matcher,\n-\t\t\t\t__atomic_load_n(&cache_matcher->refcnt,\n-\t\t\t\t\t\t__ATOMIC_RELAXED));\n-\t\t\t__atomic_fetch_add(&cache_matcher->refcnt, 1,\n-\t\t\t\t\t   __ATOMIC_RELAXED);\n-\t\t\tdev_flow->handle->dvh.matcher = cache_matcher;\n-\t\t\t/* old matcher should not make the table ref++. */\n-\t\t\tflow_dv_tbl_resource_release(dev, tbl);\n-\t\t\treturn 0;\n-\t\t}\n-\t}\n-\t/* Register new matcher. 
*/\n-\tcache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,\n-\t\t\t\t    SOCKET_ID_ANY);\n-\tif (!cache_matcher) {\n-\t\tflow_dv_tbl_resource_release(dev, tbl);\n+\tref->tbl = tbl;\n+\tentry = mlx5_cache_register(&tbl_data->matchers, &ctx);\n+\tif (!entry) {\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), tbl);\n \t\treturn rte_flow_error_set(error, ENOMEM,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t\t  \"cannot allocate matcher memory\");\n+\t\t\t\t\t  \"cannot allocate ref memory\");\n \t}\n-\t*cache_matcher = *matcher;\n-\tdv_attr.match_criteria_enable =\n-\t\tflow_dv_matcher_enable(cache_matcher->mask.buf);\n-\tdv_attr.priority = matcher->priority;\n-\tif (key->direction)\n-\t\tdv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;\n-\tret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,\n-\t\t\t\t\t       &cache_matcher->matcher_object);\n-\tif (ret) {\n-\t\tmlx5_free(cache_matcher);\n-#ifdef HAVE_MLX5DV_DR\n-\t\tflow_dv_tbl_resource_release(dev, tbl);\n-#endif\n-\t\treturn rte_flow_error_set(error, ENOMEM,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t  NULL, \"cannot create matcher\");\n-\t}\n-\t/* Save the table information */\n-\tcache_matcher->tbl = tbl;\n-\t/* only matcher ref++, table ref++ already done above in get API. */\n-\t__atomic_store_n(&cache_matcher->refcnt, 1, __ATOMIC_RELAXED);\n-\tLIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);\n-\tdev_flow->handle->dvh.matcher = cache_matcher;\n-\tDRV_LOG(DEBUG, \"%s group %u priority %hd new %s matcher %p: refcnt %d\",\n-\t\tkey->domain ? \"FDB\" : \"NIC\", key->table_id,\n-\t\tcache_matcher->priority,\n-\t\tkey->direction ? 
\"tx\" : \"rx\", (void *)cache_matcher,\n-\t\t__atomic_load_n(&cache_matcher->refcnt, __ATOMIC_RELAXED));\n+\tcache = container_of(entry, typeof(*cache), entry);\n+\tdev_flow->handle->dvh.matcher = cache;\n \treturn 0;\n }\n \n@@ -8702,7 +8713,7 @@ struct mlx5_hlist_entry *\n \t\t}\n \t}\n \tif (cache_resource->normal_path_tbl)\n-\t\tflow_dv_tbl_resource_release(dev,\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev),\n \t\t\t\tcache_resource->normal_path_tbl);\n \tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],\n \t\t\t\tdev_flow->handle->dvh.rix_sample);\n@@ -9605,7 +9616,7 @@ struct mlx5_hlist_entry *\n \t\t\t\t\t\t \"cannot create jump action.\");\n \t\t\tif (flow_dv_jump_tbl_resource_register\n \t\t\t    (dev, tbl, dev_flow, error)) {\n-\t\t\t\tflow_dv_tbl_resource_release(dev, tbl);\n+\t\t\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), tbl);\n \t\t\t\treturn rte_flow_error_set\n \t\t\t\t\t\t(error, errno,\n \t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION,\n@@ -10367,6 +10378,17 @@ struct mlx5_hlist_entry *\n \treturn -rte_errno;\n }\n \n+void\n+flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,\n+\t\t\t  struct mlx5_cache_entry *entry)\n+{\n+\tstruct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),\n+\t\t\t\t\t\t\t  entry);\n+\n+\tclaim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));\n+\tmlx5_free(cache);\n+}\n+\n /**\n  * Release the flow matcher.\n  *\n@@ -10383,23 +10405,14 @@ struct mlx5_hlist_entry *\n \t\t\tstruct mlx5_flow_handle *handle)\n {\n \tstruct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;\n+\tstruct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,\n+\t\t\t\t\t\t\t    typeof(*tbl), tbl);\n+\tint ret;\n \n \tMLX5_ASSERT(matcher->matcher_object);\n-\tDRV_LOG(DEBUG, \"port %u matcher %p: refcnt %d--\",\n-\t\tdev->data->port_id, (void *)matcher,\n-\t\t__atomic_load_n(&matcher->refcnt, __ATOMIC_RELAXED));\n-\tif (__atomic_sub_fetch(&matcher->refcnt, 1, __ATOMIC_RELAXED) == 0) 
{\n-\t\tclaim_zero(mlx5_flow_os_destroy_flow_matcher\n-\t\t\t   (matcher->matcher_object));\n-\t\tLIST_REMOVE(matcher, next);\n-\t\t/* table ref-- in release interface. */\n-\t\tflow_dv_tbl_resource_release(dev, matcher->tbl);\n-\t\tmlx5_free(matcher);\n-\t\tDRV_LOG(DEBUG, \"port %u matcher %p: removed\",\n-\t\t\tdev->data->port_id, (void *)matcher);\n-\t\treturn 0;\n-\t}\n-\treturn 1;\n+\tret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);\n+\tflow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);\n+\treturn ret;\n }\n \n /**\n@@ -10471,7 +10484,7 @@ struct mlx5_hlist_entry *\n \t\t\t     handle->rix_jump);\n \tif (!tbl_data)\n \t\treturn 0;\n-\treturn flow_dv_tbl_resource_release(dev, &tbl_data->tbl);\n+\treturn flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);\n }\n \n void\n@@ -10661,7 +10674,7 @@ struct mlx5_hlist_entry *\n \t\t\t\t  (cache_resource->default_miss));\n \t\t}\n \t\tif (cache_resource->normal_path_tbl)\n-\t\t\tflow_dv_tbl_resource_release(dev,\n+\t\t\tflow_dv_tbl_resource_release(MLX5_SH(dev),\n \t\t\t\tcache_resource->normal_path_tbl);\n \t}\n \tif (cache_resource->sample_idx.rix_hrxq &&\n@@ -11454,9 +11467,9 @@ struct mlx5_hlist_entry *\n \t\tclaim_zero(mlx5_flow_os_destroy_flow_matcher\n \t\t\t   (mtd->egress.any_matcher));\n \tif (mtd->egress.tbl)\n-\t\tflow_dv_tbl_resource_release(dev, mtd->egress.tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);\n \tif (mtd->egress.sfx_tbl)\n-\t\tflow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);\n \tif (mtd->ingress.color_matcher)\n \t\tclaim_zero(mlx5_flow_os_destroy_flow_matcher\n \t\t\t   (mtd->ingress.color_matcher));\n@@ -11464,9 +11477,10 @@ struct mlx5_hlist_entry *\n \t\tclaim_zero(mlx5_flow_os_destroy_flow_matcher\n \t\t\t   (mtd->ingress.any_matcher));\n \tif (mtd->ingress.tbl)\n-\t\tflow_dv_tbl_resource_release(dev, 
mtd->ingress.tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);\n \tif (mtd->ingress.sfx_tbl)\n-\t\tflow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev),\n+\t\t\t\t\t     mtd->ingress.sfx_tbl);\n \tif (mtd->transfer.color_matcher)\n \t\tclaim_zero(mlx5_flow_os_destroy_flow_matcher\n \t\t\t   (mtd->transfer.color_matcher));\n@@ -11474,9 +11488,10 @@ struct mlx5_hlist_entry *\n \t\tclaim_zero(mlx5_flow_os_destroy_flow_matcher\n \t\t\t   (mtd->transfer.any_matcher));\n \tif (mtd->transfer.tbl)\n-\t\tflow_dv_tbl_resource_release(dev, mtd->transfer.tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);\n \tif (mtd->transfer.sfx_tbl)\n-\t\tflow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev),\n+\t\t\t\t\t     mtd->transfer.sfx_tbl);\n \tif (mtd->drop_actn)\n \t\tclaim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));\n \tmlx5_free(mtd);\n@@ -11920,9 +11935,9 @@ struct mlx5_hlist_entry *\n \tif (matcher)\n \t\tclaim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));\n \tif (tbl)\n-\t\tflow_dv_tbl_resource_release(dev, tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), tbl);\n \tif (dest_tbl)\n-\t\tflow_dv_tbl_resource_release(dev, dest_tbl);\n+\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);\n \tif (dcs)\n \t\tclaim_zero(mlx5_devx_cmd_destroy(dcs));\n \treturn ret;\n",
    "prefixes": [
        "v5",
        "24/34"
    ]
}