get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
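
For example, a client might retrieve this resource programmatically with a sketch like the one below (an illustration only, assuming the third-party python-requests package; the endpoint URL is the one shown in this page). Read access needs no authentication.

import requests  # assumption: third-party python-requests package is installed

# Fetch the patch resource shown on this page.
resp = requests.get("http://patches.dpdk.org/api/patches/83984/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])  # fields appear in the response body below

The equivalent raw exchange, as rendered by the browsable API, follows. An update sketch for the patch/put methods is given after the response body.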

GET /api/patches/83984/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83984,
    "url": "http://patches.dpdk.org/api/patches/83984/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201111071417.21177-4-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201111071417.21177-4-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201111071417.21177-4-getelson@nvidia.com",
    "date": "2020-11-11T07:14:16",
    "name": "[3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "48473659fd6c710c2c19991b0cdaa0c53b2866be",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201111071417.21177-4-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 13799,
            "url": "http://patches.dpdk.org/api/series/13799/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13799",
            "date": "2020-11-11T07:14:13",
            "name": "restore tunnel offload functionality in mlx5",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/13799/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/83984/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/83984/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id EDBAFA09D2;\n\tWed, 11 Nov 2020 08:15:34 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 76D245A0F;\n\tWed, 11 Nov 2020 08:14:46 +0100 (CET)",
            "from hqnvemgate25.nvidia.com (hqnvemgate25.nvidia.com\n [216.228.121.64]) by dpdk.org (Postfix) with ESMTP id B27DE5913\n for <dev@dpdk.org>; Wed, 11 Nov 2020 08:14:43 +0100 (CET)",
            "from hqmail.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate25.nvidia.com (using TLS: TLSv1.2, AES256-SHA)\n id <B5fab8f5c0002>; Tue, 10 Nov 2020 23:14:36 -0800",
            "from nvidia.com (172.20.13.39) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1473.3; Wed, 11 Nov\n 2020 07:14:39 +0000"
        ],
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>, \"Shahaf\n Shuler\" <shahafs@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Xueming Li <xuemingl@nvidia.com>",
        "Date": "Wed, 11 Nov 2020 09:14:16 +0200",
        "Message-ID": "<20201111071417.21177-4-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20201111071417.21177-1-getelson@nvidia.com>",
        "References": "<20201111071417.21177-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "quoted-printable",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.13.39]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1605078876; bh=wHVzReFfp3YAVwmcbMfstuO4bo7Y0gnsZFRg0GDwHBU=;\n h=From:To:CC:Subject:Date:Message-ID:X-Mailer:In-Reply-To:\n References:MIME-Version:Content-Transfer-Encoding:Content-Type:\n X-Originating-IP:X-ClientProxiedBy;\n b=sPYhadi7XE6wTx0avnUZR/7MXZBNLx+/kBFIDKXuQBkOddDPDmSJUAYazDHMOrCQ/\n R+Ni8eY4IsyLXp+6UVDpRAocluLyeadqV6F9N11RqR/zzficZWg1IqwBD8y4gRSg0S\n 9guJsgYZIhHOJPax00CCTsWqC1t/23d49yHDBjAmxMipQSZRddvekD8TfqPiA5PYtQ\n 5afnf1Q9D7pnsA6dYaWDgQIVc3y2bRVkkPRy82d5oG28ncY+se0m6UXvVCEzoqlEEs\n gdUQ5wIkqLMswpgE0YbGftjKsiadEc2fZUnH/AXq57BW+Zd91fj9UnBtS6uLuNa4up\n CxpZEBshySyXQ==",
        "Subject": "[dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload\n\tmatch rule destruction",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The new flow table resource management API triggered a PMD crash in\ntunnel offload mode, when tunnel match flow rule was inserted before\ntunnel set rule.\n\nReason for the crash was double flow table registration. The table was\nregistered by the tunnel offload code for the first time and once\nmore by PMD code, as part of general table processing. The table\ncounter was decremented only once during the rule destruction and\ncaused a resource leak that triggered the crash.\n\nThe patch updates PMD registration with tunnel offload parameters and\nremoves table registration in tunnel related code.\n\nFixes: 663ad57dabb2 (\"net/mlx5: make flow table cache thread safe\")\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c    | 16 ++++++++++----\n drivers/net/mlx5/mlx5_flow_dv.c | 39 +++++++++++++++++----------------\n 2 files changed, 32 insertions(+), 23 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 2f01e34033..185b4ba51a 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -7024,7 +7024,15 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n \tstruct mlx5_hlist *group_hash;\n \n \tgroup_hash = tunnel ? tunnel->groups : thub->groups;\n-\the = mlx5_hlist_register(group_hash, key.val, NULL);\n+\the = mlx5_hlist_lookup(group_hash, key.val, NULL);\n+\tif (!he) {\n+\t\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u - generate table id\",\n+\t\tdev->data->port_id, key.tunnel_id, group);\n+\t\the = mlx5_hlist_register(group_hash, key.val, NULL);\n+\t} else {\n+\t\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u - skip table id\",\n+\t\tdev->data->port_id, key.tunnel_id, group);\n+\t}\n \tif (!he)\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n@@ -7032,8 +7040,8 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n \t\t\t\t\t  \"tunnel group index not supported\");\n \ttte = container_of(he, typeof(*tte), hash);\n \t*table = tte->flow_table;\n-\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%#x table=%#x\",\n-\t\tdev->data->port_id, key.tunnel_id, group, *table);\n+\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u table=%u\",\n+\tdev->data->port_id, key.tunnel_id, group, *table);\n \treturn 0;\n }\n \n@@ -7114,7 +7122,7 @@ mlx5_flow_group_to_table(struct rte_eth_dev *dev,\n \t\tstandard_translation = true;\n \t}\n \tDRV_LOG(DEBUG,\n-\t\t\"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s\",\n+\t\t\"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s\",\n \t\tdev->data->port_id, group, grp_info.transfer,\n \t\tgrp_info.external, grp_info.fdb_def_rule,\n \t\tstandard_translation ? \"STANDARD\" : \"TUNNEL\");\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 78c710fef9..95165980f4 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -8042,6 +8042,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,\n \t\t\t\t   \"cannot get table\");\n \t\treturn NULL;\n \t}\n+\tDRV_LOG(DEBUG, \"Table_id %u tunnel %u group %u registered.\",\n+\t\ttable_id, tunnel ? 
tunnel->tunnel_id : 0, group_id);\n \ttbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);\n \treturn &tbl_data->tbl;\n }\n@@ -8080,7 +8082,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,\n \t\tif (he)\n \t\t\tmlx5_hlist_unregister(tunnel_grp_hash, he);\n \t\tDRV_LOG(DEBUG,\n-\t\t\t\"Table_id %#x tunnel %u group %u released.\",\n+\t\t\t\"Table_id %u tunnel %u group %u released.\",\n \t\t\ttable_id,\n \t\t\ttbl_data->tunnel ?\n \t\t\ttbl_data->tunnel->tunnel_id : 0,\n@@ -8192,6 +8194,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,\n \t\t\t struct mlx5_flow_dv_matcher *ref,\n \t\t\t union mlx5_flow_tbl_key *key,\n \t\t\t struct mlx5_flow *dev_flow,\n+\t\t\t const struct mlx5_flow_tunnel *tunnel,\n+\t\t\t uint32_t group_id,\n \t\t\t struct rte_flow_error *error)\n {\n \tstruct mlx5_cache_entry *entry;\n@@ -8203,8 +8207,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,\n \t\t.data = ref,\n \t};\n \n-\ttbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,\n-\t\t\t\t       key->domain, false, NULL, 0, 0, error);\n+\t/**\n+\t * tunnel offload API requires this registration for cases when\n+\t * tunnel match rule was inserted before tunnel set rule.\n+\t */\n+\ttbl = flow_dv_tbl_resource_get(dev, key->table_id,\n+\t\t\t\t       key->direction, key->domain,\n+\t\t\t\t       dev_flow->external, tunnel,\n+\t\t\t\t       group_id, 0, error);\n \tif (!tbl)\n \t\treturn -rte_errno;\t/* No need to refill the error info */\n \ttbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);\n@@ -9605,10 +9615,14 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t/*\n \t\t * do not add decap action if match rule drops packet\n \t\t * HW rejects rules with decap & drop\n+\t\t *\n+\t\t * if tunnel match rule was inserted before matching tunnel set\n+\t\t * rule flow table used in the match rule must be registered.\n+\t\t * current implementation handles that in the\n+\t\t * flow_dv_match_register() at the function end.\n \t\t */\n \t\tbool add_decap = true;\n \t\tconst struct rte_flow_action *ptr = actions;\n-\t\tstruct mlx5_flow_tbl_resource *tbl;\n \n \t\tfor (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {\n \t\t\tif (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {\n@@ -9625,20 +9639,6 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t\t\t\tdev_flow->dv.encap_decap->action;\n \t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n \t\t}\n-\t\t/*\n-\t\t * bind table_id with <group, table> for tunnel match rule.\n-\t\t * Tunnel set rule establishes that bind in JUMP action handler.\n-\t\t * Required for scenario when application creates tunnel match\n-\t\t * rule before tunnel set rule.\n-\t\t */\n-\t\ttbl = flow_dv_tbl_resource_get(dev, table, attr->egress,\n-\t\t\t\t\t       attr->transfer,\n-\t\t\t\t\t       !!dev_flow->external, tunnel,\n-\t\t\t\t\t       attr->group, 0, error);\n-\t\tif (!tbl)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t       actions, \"cannot register tunnel group\");\n \t}\n \tfor (; !actions_end ; actions++) {\n \t\tconst struct rte_flow_action_queue *queue;\n@@ -10468,7 +10468,8 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \ttbl_key.domain = attr->transfer;\n \ttbl_key.direction = attr->egress;\n \ttbl_key.table_id = dev_flow->dv.group;\n-\tif (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))\n+\tif (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,\n+\t\t\t\t     tunnel, attr->group, error))\n \t\treturn -rte_errno;\n \treturn 0;\n }\n",
    "prefixes": [
        "3/4"
    ]
}
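
The patch and put methods listed at the top update this same resource. A minimal sketch of such an update, assuming the third-party python-requests package and a Patchwork API token belonging to a maintainer of the project (the token is a placeholder; the field values simply repeat the state and archived flag already shown in the response above):

import requests  # assumption: third-party python-requests package is installed

# Hypothetical API token; write access requires maintainer rights on the project.
headers = {"Authorization": "Token <your-api-token>"}

# PATCH sends only the fields being changed, in contrast to PUT.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/83984/",
    json={"state": "superseded", "archived": True},
    headers=headers,
)
resp.raise_for_status()
print(resp.json()["state"])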