get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Fully update a patch (replaces all writable fields with the request body).

GET /api/patches/83982/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83982,
    "url": "http://patches.dpdk.org/api/patches/83982/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201111071417.21177-2-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201111071417.21177-2-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201111071417.21177-2-getelson@nvidia.com",
    "date": "2020-11-11T07:14:14",
    "name": "[1/4] net/mlx5: fix offloaded tunnel allocation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3f58a5506a7eeb60d88c616cf6a5ac9c983b0832",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201111071417.21177-2-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 13799,
            "url": "http://patches.dpdk.org/api/series/13799/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13799",
            "date": "2020-11-11T07:14:13",
            "name": "restore tunnel offload functionality in mlx5",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/13799/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/83982/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/83982/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BF93DA09D2;\n\tWed, 11 Nov 2020 08:14:56 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 3B0B14CA6;\n\tWed, 11 Nov 2020 08:14:41 +0100 (CET)",
            "from hqnvemgate26.nvidia.com (hqnvemgate26.nvidia.com\n [216.228.121.65]) by dpdk.org (Postfix) with ESMTP id 5C7EF4C90\n for <dev@dpdk.org>; Wed, 11 Nov 2020 08:14:39 +0100 (CET)",
            "from hqmail.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate26.nvidia.com (using TLS: TLSv1.2, AES256-SHA)\n id <B5fab8f610000>; Tue, 10 Nov 2020 23:14:41 -0800",
            "from nvidia.com (172.20.13.39) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1473.3; Wed, 11 Nov\n 2020 07:14:34 +0000"
        ],
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>, \"Shahaf\n Shuler\" <shahafs@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Xueming Li <xuemingl@nvidia.com>",
        "Date": "Wed, 11 Nov 2020 09:14:14 +0200",
        "Message-ID": "<20201111071417.21177-2-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20201111071417.21177-1-getelson@nvidia.com>",
        "References": "<20201111071417.21177-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "quoted-printable",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.13.39]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1605078881; bh=lEspkHkzolrVtxTzn9WavyPBoVHP5vPm745LiD0RuBU=;\n h=From:To:CC:Subject:Date:Message-ID:X-Mailer:In-Reply-To:\n References:MIME-Version:Content-Transfer-Encoding:Content-Type:\n X-Originating-IP:X-ClientProxiedBy;\n b=JS+7PeZ9mkaYplFLzun/6xDQVIyQkpNEAanU+rfnQhI6exWIc+I4SbuRYqM2hR+Ly\n zusuI3visKquhDWe/iTIELLpWl9h93KYymbJihSA2k/Zi2LGcQ/OW2vgN1nVlP5QqX\n YaluMDHeN5/qdPj72gwKo6aloJfYSPJDbQmG7fTzm6TJTZvxC4m6i1aKN7Vc9Fa8/T\n ecsh7kY+u4bUGdXc6pVCieKMdTUGTJEZHlZRlbutGNfXY55v1iOAtKaEzLBOt8qTMR\n oPFxefHfIS1CuophtiyeeqKwOZcA+NK3z3Z/37pTTBw2NbMCb7iHnpwkbq5f8MxjrS\n 5eygvA0ZPNzaw==",
        "Subject": "[dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The original patch allocated tunnel offload objects with invalid\nindexes. As the result, PMD tunnel object allocation failed.\n\nIn this patch indexed pool provides both an index and memory for a new\ntunnel offload object.\nAlso tunnel offload ipool moved to dv enabled code only.\n\nFixes: f2e8093 (\"net/mlx5: use indexed pool as id generator\")\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------\n drivers/net/mlx5/mlx5.h      |  4 +--\n drivers/net/mlx5/mlx5_flow.c | 41 ++++++++++-------------------\n 3 files changed, 40 insertions(+), 55 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 43344391df..e1faa819a3 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;\n \n static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\t{\n+\t[MLX5_IPOOL_DECAP_ENCAP] = {\n \t\t.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_encap_decap_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_PUSH_VLAN] = {\n \t\t.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_push_vlan_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_TAG] = {\n \t\t.size = sizeof(struct mlx5_flow_dv_tag_resource),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_tag_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_PORT_ID] = {\n \t\t.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_port_id_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_JUMP] = {\n \t\t.size = sizeof(struct mlx5_flow_tbl_data_entry),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_jump_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_SAMPLE] = {\n \t\t.size = sizeof(struct mlx5_flow_dv_sample_resource),\n \t\t.trunk_size = 64,\n 
\t\t.grow_trunk = 3,\n@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_sample_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_DEST_ARRAY] = {\n \t\t.size = sizeof(struct mlx5_flow_dv_dest_array_resource),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_dest_array_ipool\",\n \t},\n+\t[MLX5_IPOOL_TUNNEL_OFFLOAD] = {\n+\t\t.size = sizeof(struct mlx5_flow_tunnel),\n+\t\t.need_lock = 1,\n+\t\t.release_mem_en = 1,\n+\t\t.type = \"mlx5_tunnel_offload\",\n+\t},\n+\t[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID] = {\n+\t\t.size = 0,\n+\t\t.need_lock = 1,\n+\t\t.type = \"mlx5_flow_tnl_tbl_ipool\",\n+\t},\n #endif\n-\t{\n+\t[MLX5_IPOOL_MTR] = {\n \t\t.size = sizeof(struct mlx5_flow_meter),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_meter_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_MCP] = {\n \t\t.size = sizeof(struct mlx5_flow_mreg_copy_resource),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_mcp_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_HRXQ] = {\n \t\t.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_hrxq_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_MLX5_FLOW] = {\n \t\t/*\n \t\t * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.\n \t\t * It set in run time according to PCI function configuration.\n@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = 
\"mlx5_flow_handle_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_RTE_FLOW] = {\n \t\t.size = sizeof(struct rte_flow),\n \t\t.trunk_size = 4096,\n \t\t.need_lock = 1,\n@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"rte_flow_ipool\",\n \t},\n-\t{\n+\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {\n \t\t.size = 0,\n \t\t.need_lock = 1,\n \t\t.type = \"mlx5_flow_rss_id_ipool\",\n \t},\n-\t{\n-\t\t.size = 0,\n-\t\t.need_lock = 1,\n-\t\t.type = \"mlx5_flow_tnl_flow_ipool\",\n-\t},\n-\t{\n-\t\t.size = 0,\n-\t\t.need_lock = 1,\n-\t\t.type = \"mlx5_flow_tnl_tbl_ipool\",\n-\t},\n-\t{\n+\t[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {\n \t\t.size = sizeof(struct mlx5_shared_action_rss),\n \t\t.trunk_size = 64,\n \t\t.grow_trunk = 3,\n@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_shared_action_rss\",\n \t},\n-\n };\n \n \ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 7ee63a7a14..af097d6a7e 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -44,6 +44,8 @@ enum mlx5_ipool_index {\n \tMLX5_IPOOL_JUMP, /* Pool for jump resource. */\n \tMLX5_IPOOL_SAMPLE, /* Pool for sample resource. */\n \tMLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */\n+\tMLX5_IPOOL_TUNNEL_OFFLOAD, /* Pool for tunnel offload context */\n+\tMLX5_IPOOL_TUNNEL_FLOW_TBL_ID, /* Pool for tunnel table ID. */\n #endif\n \tMLX5_IPOOL_MTR, /* Pool for meter resource. */\n \tMLX5_IPOOL_MCP, /* Pool for metadata resource. */\n@@ -51,8 +53,6 @@ enum mlx5_ipool_index {\n \tMLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */\n \tMLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */\n \tMLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */\n-\tMLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */\n-\tMLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. 
*/\n \tMLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */\n \tMLX5_IPOOL_MAX,\n };\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 92adfcacca..31c9d82b4a 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -6934,7 +6934,7 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,\n \tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n \tstruct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);\n \n-\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],\n+\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n \t\t\ttunnel_flow_tbl_to_id(tte->flow_table));\n \tmlx5_free(tte);\n }\n@@ -6952,12 +6952,12 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,\n \t\t\t  SOCKET_ID_ANY);\n \tif (!tte)\n \t\tgoto err;\n-\tmlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],\n+\tmlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n \t\t\t  &tte->flow_table);\n \tif (tte->flow_table >= MLX5_MAX_TABLES) {\n \t\tDRV_LOG(ERR, \"Tunnel TBL ID %d exceed max limit.\",\n \t\t\ttte->flow_table);\n-\t\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],\n+\t\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n \t\t\t\ttte->flow_table);\n \t\tgoto err;\n \t} else if (!tte->flow_table) {\n@@ -7465,14 +7465,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,\n \t\t      struct mlx5_flow_tunnel *tunnel)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_indexed_pool *ipool;\n \n \tDRV_LOG(DEBUG, \"port %u release pmd tunnel id=0x%x\",\n \t\tdev->data->port_id, tunnel->tunnel_id);\n-\tRTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));\n-\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],\n-\t\t\ttunnel->tunnel_id);\n \tmlx5_hlist_destroy(tunnel->groups);\n-\tmlx5_free(tunnel);\n+\tipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];\n+\tmlx5_ipool_free(ipool, tunnel->tunnel_id);\n }\n \n static struct mlx5_flow_tunnel *\n@@ -7494,39 
+7493,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,\n \t\t\t  const struct rte_flow_tunnel *app_tunnel)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_indexed_pool *ipool;\n \tstruct mlx5_flow_tunnel *tunnel;\n \tuint32_t id;\n \n-\tmlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],\n-\t\t\t  &id);\n+\tipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];\n+\ttunnel = mlx5_ipool_zmalloc(ipool, &id);\n+\tif (!tunnel)\n+\t\treturn NULL;\n \tif (id >= MLX5_MAX_TUNNELS) {\n-\t\tmlx5_ipool_free(priv->sh->ipool\n-\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);\n+\t\tmlx5_ipool_free(ipool, id);\n \t\tDRV_LOG(ERR, \"Tunnel ID %d exceed max limit.\", id);\n \t\treturn NULL;\n-\t} else if (!id) {\n-\t\treturn NULL;\n-\t}\n-\t/**\n-\t * mlx5 flow tunnel is an auxlilary data structure\n-\t * It's not part of IO. No need to allocate it from\n-\t * huge pages pools dedicated for IO\n-\t */\n-\ttunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),\n-\t\t\t     0, SOCKET_ID_ANY);\n-\tif (!tunnel) {\n-\t\tmlx5_ipool_free(priv->sh->ipool\n-\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);\n-\t\treturn NULL;\n \t}\n \ttunnel->groups = mlx5_hlist_create(\"tunnel groups\", 1024, 0, 0,\n \t\t\t\t\t   mlx5_flow_tunnel_grp2tbl_create_cb,\n \t\t\t\t\t   NULL,\n \t\t\t\t\t   mlx5_flow_tunnel_grp2tbl_remove_cb);\n \tif (!tunnel->groups) {\n-\t\tmlx5_ipool_free(priv->sh->ipool\n-\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);\n-\t\tmlx5_free(tunnel);\n+\t\tmlx5_ipool_free(ipool, id);\n \t\treturn NULL;\n \t}\n \ttunnel->groups->ctx = priv->sh;\n",
    "prefixes": [
        "1/4"
    ]
}