get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
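The request/response pair below was captured from the browsable API (?format=api). As a minimal sketch, not part of the Patchwork output itself, the same resource could be fetched programmatically; this assumes Python with the third-party requests package and relies on the Accept header for JSON content negotiation (the response advertises Vary: Accept):

import requests  # assumed third-party HTTP client

# Fetch the patch detail resource shown below and decode its JSON body.
resp = requests.get(
    "http://patches.dpdk.org/api/patches/42811/",
    headers={"Accept": "application/json"},
)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])  # fields shown in the response below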

GET /api/patches/42811/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42811,
    "url": "http://patches.dpdk.org/api/patches/42811/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/f8a4f87fd244b405c8879a871a28f101af4a3eaf.1531293415.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<f8a4f87fd244b405c8879a871a28f101af4a3eaf.1531293415.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/f8a4f87fd244b405c8879a871a28f101af4a3eaf.1531293415.git.nelio.laranjeiro@6wind.com",
    "date": "2018-07-11T07:22:51",
    "name": "[v3,18/21] net/mlx5: add flow VXLAN-GPE item",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0639684cea94f122cdf9606117b3341e3536dfcf",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/f8a4f87fd244b405c8879a871a28f101af4a3eaf.1531293415.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [
        {
            "id": 512,
            "url": "http://patches.dpdk.org/api/series/512/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=512",
            "date": "2018-07-11T07:22:33",
            "name": "net/mlx5: flow rework",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/512/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42811/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/42811/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 97F4C1B581;\n\tWed, 11 Jul 2018 09:24:05 +0200 (CEST)",
            "from mail-wm0-f66.google.com (mail-wm0-f66.google.com\n\t[74.125.82.66]) by dpdk.org (Postfix) with ESMTP id C0AC61B476\n\tfor <dev@dpdk.org>; Wed, 11 Jul 2018 09:23:33 +0200 (CEST)",
            "by mail-wm0-f66.google.com with SMTP id i139-v6so1339956wmf.4\n\tfor <dev@dpdk.org>; Wed, 11 Jul 2018 00:23:33 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\tt10-v6sm31314212wre.95.2018.07.11.00.23.32\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tWed, 11 Jul 2018 00:23:32 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=c0XnzNbmXJpgPiO4Wc/NZfu9/9xz31c4k+MQH2o/yp8=;\n\tb=zxiKQ3bkLbpTiLiRGiHYAJkITln4ij8jceTejB80nxKGzB6XCXWjHPrXkdAn2U45Ak\n\tRxDDTOyY0jD5nCraW65n7MqAUWb8j3TJdEXJcGhq6AZc/T7vL70KJg0mDlOny3q82BU3\n\tJ+6tw6i/tPAYb+7YEcmBIxrSVMfh13aQ4OIfnPaMKmuetdX3P/wwIvzCNEuETlif+dgt\n\t6fZS//ye6E9KKR9AuJdyrW2z35Gj9qke+DrGGMZlDCDo0KqkZtB59f/RFEQ2DKbQV/g8\n\t9RqsK3oaYu8ROjJ/RE69ZjyIdqFAQgHeIyV7FBe9Pan9YLpF4A/3z7ILJ91AM4GEm4yC\n\taaKg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=c0XnzNbmXJpgPiO4Wc/NZfu9/9xz31c4k+MQH2o/yp8=;\n\tb=Q7M0K+Rd3V/tJoHut1od7Mn6JapXG7iurP7m7CS6dt8IHfJyV+xk+SF5uZA0hDCFqk\n\tDlUFKjC3/oVJZZy0ivuDbAIlLPcWoK9v7CQIt7tnirSueZ8J2p+0XgDj9BYB1TBWprHa\n\tIyCvu1k4RkHnjuOCL9YmLYGElUFfzRAEifGC9Rle6jH4Zpm+621ugbeq3pj7Tm3/3w/C\n\to6tnhkgAZkiHZs+78MZTED4gketYdBBaO/ki0mkyxSI1YHg4BzlodZlylljj9VkTVOd4\n\tYwB/nitkYs/WXLFfN4YoabUNvEZSgu1VImW5ucRuzLuJ9jNS+YqHWaM0JogLNaFMcnLt\n\t2FbA==",
        "X-Gm-Message-State": "AOUpUlEyJVHoyriegRmBlCP++l27p/I7nKPDe8lqwHAunDH7mAlqojG8\n\tUpt4K08g5ETHlT6MsGHy8yLvnhPhSA==",
        "X-Google-Smtp-Source": "AAOMgpf53MhvG8nK/yojHT0lD9vu33TME8ILeNJ/3uNjpA52csW8Kp1BWYzmiWTtqt3DsinQVLiXMg==",
        "X-Received": "by 2002:a1c:6c14:: with SMTP id\n\th20-v6mr2960790wmc.138.1531293813149; \n\tWed, 11 Jul 2018 00:23:33 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org,\n\tYongseok Koh <yskoh@mellanox.com>",
        "Cc": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "Date": "Wed, 11 Jul 2018 09:22:51 +0200",
        "Message-Id": "<f8a4f87fd244b405c8879a871a28f101af4a3eaf.1531293415.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1531293415.git.nelio.laranjeiro@6wind.com>",
        "References": "<cover.1530111623.git.nelio.laranjeiro@6wind.com>\n\t<cover.1531293415.git.nelio.laranjeiro@6wind.com>",
        "Subject": "[dpdk-dev] [PATCH v3 18/21] net/mlx5: add flow VXLAN-GPE item",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 219 ++++++++++++++++++++++++++++++++---\n drivers/net/mlx5/mlx5_rxtx.h |   5 +-\n 2 files changed, 209 insertions(+), 15 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 95507059e..e06df0eb5 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -53,6 +53,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \n /* Pattern tunnel Layer bits. */\n #define MLX5_FLOW_LAYER_VXLAN (1u << 12)\n+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)\n \n /* Outer Masks. */\n #define MLX5_FLOW_LAYER_OUTER_L3 \\\n@@ -64,7 +65,8 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \t MLX5_FLOW_LAYER_OUTER_L4)\n \n /* Tunnel Masks. */\n-#define MLX5_FLOW_LAYER_TUNNEL MLX5_FLOW_LAYER_VXLAN\n+#define MLX5_FLOW_LAYER_TUNNEL \\\n+\t(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)\n \n /* Inner Masks. */\n #define MLX5_FLOW_LAYER_INNER_L3 \\\n@@ -102,6 +104,7 @@ enum mlx5_expansion {\n \tMLX5_EXPANSION_OUTER_IPV6_UDP,\n \tMLX5_EXPANSION_OUTER_IPV6_TCP,\n \tMLX5_EXPANSION_VXLAN,\n+\tMLX5_EXPANSION_VXLAN_GPE,\n \tMLX5_EXPANSION_ETH,\n \tMLX5_EXPANSION_IPV4,\n \tMLX5_EXPANSION_IPV4_UDP,\n@@ -140,7 +143,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n \t\t\tETH_RSS_NONFRAG_IPV4_OTHER,\n \t},\n \t[MLX5_EXPANSION_OUTER_IPV4_UDP] = {\n-\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN),\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,\n+\t\t\t\t\t\t MLX5_EXPANSION_VXLAN_GPE),\n \t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n \t\t.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,\n \t},\n@@ -157,7 +161,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n \t\t\tETH_RSS_NONFRAG_IPV6_OTHER,\n \t},\n \t[MLX5_EXPANSION_OUTER_IPV6_UDP] = {\n-\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN),\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,\n+\t\t\t\t\t\t MLX5_EXPANSION_VXLAN_GPE),\n \t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n \t\t.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,\n \t},\n@@ -169,6 +174,12 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n \t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),\n \t\t.type = RTE_FLOW_ITEM_TYPE_VXLAN,\n \t},\n+\t[MLX5_EXPANSION_VXLAN_GPE] = {\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,\n+\t\t\t\t\t\t MLX5_EXPANSION_IPV4,\n+\t\t\t\t\t\t MLX5_EXPANSION_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,\n+\t},\n \t[MLX5_EXPANSION_ETH] = {\n \t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,\n \t\t\t\t\t\t MLX5_EXPANSION_IPV6),\n@@ -236,8 +247,6 @@ struct rte_flow {\n \tstruct mlx5_flow_verbs *cur_verbs;\n \t/**< Current Verbs flow structure being filled. */\n \tstruct rte_flow_action_rss rss;/**< RSS context. */\n-\tuint32_t tunnel_ptype;\n-\t/**< Store tunnel packet type data to store in Rx queue. */\n \tuint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */\n \tuint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */\n };\n@@ -304,6 +313,23 @@ static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {\n \t{ 9, 10, 11 }, { 12, 13, 14 },\n };\n \n+/* Tunnel information. */\n+struct mlx5_flow_tunnel_info {\n+\tuint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */\n+\tuint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
*/\n+};\n+\n+static struct mlx5_flow_tunnel_info tunnels_info[] = {\n+\t{\n+\t\t.tunnel = MLX5_FLOW_LAYER_VXLAN,\n+\t\t.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,\n+\t},\n+\t{\n+\t\t.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,\n+\t\t.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,\n+\t},\n+};\n+\n /**\n  * Discover the maximum number of priority available.\n  *\n@@ -1263,7 +1289,119 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;\n \t}\n \tflow->layers |= MLX5_FLOW_LAYER_VXLAN;\n-\tflow->tunnel_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;\n+\treturn size;\n+}\n+\n+/**\n+ * Convert the @p item into a Verbs specification after ensuring the NIC\n+ * will understand and process it correctly.\n+ * If the necessary size for the conversion is greater than the @p flow_size,\n+ * nothing is written in @p flow, the validation is still performed.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[in] flow_size\n+ *   Size in bytes of the available space in @p flow, if too small, nothing is\n+ *   written.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   On success the number of bytes consumed/necessary, if the returned value\n+ *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n+ *   otherwise another call with this returned memory size should be done.\n+ *   On error, a negative errno value is returned and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_item *item,\n+\t\t\t struct rte_flow *flow, const size_t flow_size,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_vxlan_gpe *spec = item->spec;\n+\tconst struct rte_flow_item_vxlan_gpe *mask = item->mask;\n+\tunsigned int size = sizeof(struct ibv_flow_spec_tunnel);\n+\tstruct ibv_flow_spec_tunnel vxlan_gpe = {\n+\t\t.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,\n+\t\t.size = size,\n+\t};\n+\tint ret;\n+\tunion vni {\n+\t\tuint32_t vlan_id;\n+\t\tuint8_t vni[4];\n+\t} id = { .vlan_id = 0, };\n+\n+\tif (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"L3 VXLAN is not enabled by device\"\n+\t\t\t\t\t  \" parameter and/or not configured in\"\n+\t\t\t\t\t  \" firmware\");\n+\tif (flow->layers & MLX5_FLOW_LAYER_TUNNEL)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"a tunnel is already present\");\n+\t/*\n+\t * Verify only UDPv4 is present as defined in\n+\t * https://tools.ietf.org/html/rfc7348\n+\t */\n+\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"no outer UDP layer found\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_vxlan_gpe_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,\n+\t\t sizeof(struct rte_flow_item_vxlan_gpe), error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tif (spec) {\n+\t\tmemcpy(&id.vni[1], spec->vni, 3);\n+\t\tvxlan_gpe.val.tunnel_id = id.vlan_id;\n+\t\tmemcpy(&id.vni[1], mask->vni, 3);\n+\t\tvxlan_gpe.mask.tunnel_id = id.vlan_id;\n+\t\tif 
(spec->protocol)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL,\n+\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t item,\n+\t\t\t\t \"VxLAN-GPE protocol not supported\");\n+\t\t/* Remove unwanted bits from values. */\n+\t\tvxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;\n+\t}\n+\t/*\n+\t * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this\n+\t * layer is defined in the Verbs specification it is interpreted as\n+\t * wildcard and all packets will match this rule, if it follows a full\n+\t * stack layer (ex: eth / ipv4 / udp), all packets matching the layers\n+\t * before will also match this rule.  To avoid such situation, VNI 0\n+\t * is currently refused.\n+\t */\n+\tif (!vxlan_gpe.val.tunnel_id)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"VXLAN-GPE vni cannot be 0\");\n+\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"VXLAN-GPE tunnel must be fully\"\n+\t\t\t\t\t  \" defined\");\n+\tif (size <= flow_size) {\n+\t\tmlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);\n+\t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;\n+\t}\n+\tflow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;\n \treturn size;\n }\n \n@@ -1293,7 +1431,8 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n  *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n-mlx5_flow_items(const struct rte_flow_item pattern[],\n+mlx5_flow_items(struct rte_eth_dev *dev,\n+\t\tconst struct rte_flow_item pattern[],\n \t\tstruct rte_flow *flow, const size_t flow_size,\n \t\tstruct rte_flow_error *error)\n {\n@@ -1328,6 +1467,10 @@ mlx5_flow_items(const struct rte_flow_item pattern[],\n \t\t\tret = mlx5_flow_item_vxlan(pattern, flow, remain,\n \t\t\t\t\t\t   error);\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n+\t\t\tret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,\n+\t\t\t\t\t\t       remain, error);\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n@@ -1908,7 +2051,8 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\t}\n \t\t}\n \t\tret = mlx5_flow_items\n-\t\t\t((const struct rte_flow_item *)\n+\t\t\t(dev,\n+\t\t\t (const struct rte_flow_item *)\n \t\t\t &buf->entry[i].pattern[pattern_start_idx],\n \t\t\t flow,\n \t\t\t (size < flow_size) ? flow_size - size : 0, error);\n@@ -1934,6 +2078,34 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \treturn size;\n }\n \n+/**\n+ * Lookup and set the ptype in the data Rx part.  A single Ptype can be used,\n+ * if several tunnel rules are used on this queue, the tunnel ptype will be\n+ * cleared.\n+ *\n+ * @param rxq_ctrl\n+ *   Rx queue to update.\n+ */\n+static void\n+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)\n+{\n+\tunsigned int i;\n+\tuint32_t tunnel_ptype = 0;\n+\n+\t/* Look up for the ptype to use. 
*/\n+\tfor (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {\n+\t\tif (!rxq_ctrl->flow_tunnels_n[i])\n+\t\t\tcontinue;\n+\t\tif (!tunnel_ptype) {\n+\t\t\ttunnel_ptype = tunnels_info[i].ptype;\n+\t\t} else {\n+\t\t\ttunnel_ptype = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\trxq_ctrl->rxq.tunnel = tunnel_ptype;\n+}\n+\n /**\n  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.\n  *\n@@ -1962,8 +2134,17 @@ mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)\n \t\t\trxq_ctrl->flow_mark_n++;\n \t\t}\n \t\tif (tunnel) {\n-\t\t\trxq_ctrl->rxq.tunnel = flow->tunnel_ptype;\n-\t\t\trxq_ctrl->flow_vxlan_n++;\n+\t\t\tunsigned int j;\n+\n+\t\t\t/* Increase the counter matching the flow. */\n+\t\t\tfor (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {\n+\t\t\t\tif ((tunnels_info[j].tunnel & flow->layers) ==\n+\t\t\t\t    tunnels_info[j].tunnel) {\n+\t\t\t\t\trxq_ctrl->flow_tunnels_n[j]++;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tmlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);\n \t\t}\n \t}\n }\n@@ -1998,9 +2179,17 @@ mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)\n \t\t\trxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;\n \t\t}\n \t\tif (tunnel) {\n-\t\t\trxq_ctrl->flow_vxlan_n++;\n-\t\t\tif (!rxq_ctrl->flow_vxlan_n)\n-\t\t\t\trxq_ctrl->rxq.tunnel = 0;\n+\t\t\tunsigned int j;\n+\n+\t\t\t/* Decrease the counter matching the flow. */\n+\t\t\tfor (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {\n+\t\t\t\tif ((tunnels_info[j].tunnel & flow->layers) ==\n+\t\t\t\t    tunnels_info[j].tunnel) {\n+\t\t\t\t\trxq_ctrl->flow_tunnels_n[j]--;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tmlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);\n \t\t}\n \t}\n }\n@@ -2020,6 +2209,7 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)\n \n \tfor (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {\n \t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\t\tunsigned int j;\n \n \t\tif (!(*priv->rxqs)[idx])\n \t\t\tcontinue;\n@@ -2027,7 +2217,8 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)\n \t\t\t\t\tstruct mlx5_rxq_ctrl, rxq);\n \t\trxq_ctrl->flow_mark_n = 0;\n \t\trxq_ctrl->rxq.mark = 0;\n-\t\trxq_ctrl->flow_vxlan_n = 0;\n+\t\tfor (j = 0; j != MLX5_FLOW_TUNNEL; ++j)\n+\t\t\trxq_ctrl->flow_tunnels_n[j] = 0;\n \t\trxq_ctrl->rxq.tunnel = 0;\n \t\t++idx;\n \t}\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex ae9b564dd..e97f5766c 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -34,6 +34,9 @@\n #include \"mlx5_defs.h\"\n #include \"mlx5_prm.h\"\n \n+/* Support tunnel matching. */\n+#define MLX5_FLOW_TUNNEL 2\n+\n struct mlx5_rxq_stats {\n \tunsigned int idx; /**< Mapping index. */\n #ifdef MLX5_PMD_SOFT_COUNTERS\n@@ -139,7 +142,7 @@ struct mlx5_rxq_ctrl {\n \tunsigned int irq:1; /* Whether IRQ is enabled. */\n \tuint16_t idx; /* Queue index. */\n \tuint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */\n-\tuint32_t flow_vxlan_n; /* Number of VXLAN flows using this queue. */\n+\tuint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */\n };\n \n /* Indirection table. */\n",
    "prefixes": [
        "v3",
        "18/21"
    ]
}
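Continuing the hedged sketch from above, the decoded response exposes the fields shown here directly; for example, the per-patch "mbox" URL can be downloaded for local review (the output file name is arbitrary and only illustrative):

# Reuses `patch` and `requests` from the earlier snippet.
mbox_url = patch["mbox"]                  # raw patch email as an mbox
series_name = patch["series"][0]["name"]  # parent series ("net/mlx5: flow rework")
check_state = patch["check"]              # aggregate CI result ("fail" here)

with open("patch-42811.mbox", "wb") as f:
    f.write(requests.get(mbox_url).content)

print(series_name + ": check = " + check_state)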