get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update; all writable fields are replaced).
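
For example, a patch can be retrieved with any HTTP client; the sketch below uses Python's requests library against the same patch shown in the sample response further down (read access on patches.dpdk.org needs no credentials):

import requests

# Fetch a single patch as JSON; the fields match the sample response below.
resp = requests.get("http://patches.dpdk.org/api/patches/42965/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])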

GET /api/patches/42965/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42965,
    "url": "http://patches.dpdk.org/api/patches/42965/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/4eb31dbe1f5bdf15f622a17a7c9cf0b14c364151.1531387413.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<4eb31dbe1f5bdf15f622a17a7c9cf0b14c364151.1531387413.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/4eb31dbe1f5bdf15f622a17a7c9cf0b14c364151.1531387413.git.nelio.laranjeiro@6wind.com",
    "date": "2018-07-12T09:31:04",
    "name": "[v4,18/21] net/mlx5: add flow VXLAN-GPE item",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "5361adc916f6699f10f12d10fd131f8e58ac3473",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/4eb31dbe1f5bdf15f622a17a7c9cf0b14c364151.1531387413.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [
        {
            "id": 544,
            "url": "http://patches.dpdk.org/api/series/544/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=544",
            "date": "2018-07-12T09:30:46",
            "name": "net/mlx5: flow rework",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/544/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42965/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/42965/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 67FCA1B8B8;\n\tThu, 12 Jul 2018 11:32:05 +0200 (CEST)",
            "from mail-wr1-f68.google.com (mail-wr1-f68.google.com\n\t[209.85.221.68]) by dpdk.org (Postfix) with ESMTP id 3C8331B5C5\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 11:31:41 +0200 (CEST)",
            "by mail-wr1-f68.google.com with SMTP id h10-v6so20954370wre.6\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 02:31:41 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\ts2-v6sm18717603wrn.75.2018.07.12.02.31.39\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tThu, 12 Jul 2018 02:31:40 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=IjP0n5cg9PlRTIV61/k9ADK+AxEtnsrgl1+BqQGHC3A=;\n\tb=nxa2aYtUHCNegju6bHygsN4f7yD0KW1UhXIcEVvRKi5W9XOA6IDhZKwBAkqsA8q00v\n\tLJRstkVmflZv78Psivqfp/bOe6ATxV9SqkPCiz5ta4Axqmt78FrUCnC4ULMIUDqtVuQo\n\t4yxWOy2Ct7W/AbwR1grOyG7k8Ev/1zy+oVwm86N1rdUdhZh7dcKpGT/z7fWTzyedOr+F\n\trzfPAXbH163wsr47+arYLMC/HR+F0qiteuRyQ02ypnGRPiL623roekq7rkW+tqDy3N11\n\txYBnYsb2NLedHcjU3WjMQJupx/QrCVkRdo9sbCqSDkvPatyecysDAnfuJ20EoF0pq45P\n\tNNqg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=IjP0n5cg9PlRTIV61/k9ADK+AxEtnsrgl1+BqQGHC3A=;\n\tb=MPFb4APt26ojl8bl0wJQjjFqx8hvqQPGNA6UTkpApOr79amtKw8dH7M1TXWbZPmoQs\n\t1g/aY/HubkA4o8dvdh46EZNWu1gGCUB0f8oXGYQc87DixYk1Ro10mFakHJZ3G0whOy+r\n\titq41kvcde5hXcX0ZZB3yXr4AU+6AV1z1R+SgHWzlklN3tSB/L9uHqhwhkP5im/TwPKs\n\tsWqNrPvxxzfuSqYSNG30HaqxCj8vGR9ZdhARL6BxOROmjoSwi7M1q1r0i1qWjofzqFTu\n\tvBNiKucpaPAVt910xw5u1UOSYWnRNp52oWW/WD/Apx58tbZQENbGnmYOXeYWPq/J4x99\n\tJQ9g==",
        "X-Gm-Message-State": "AOUpUlEmOmincI1e9dyvyQKWEvOC2mAc+4Z3cPfMyc5PwsxxAdEcvrA1\n\tpn97+qoQVPKEni0YIXTifjcmo7rBmA==",
        "X-Google-Smtp-Source": "AAOMgpebTl6mQcINQCTHxPRxid0yceaN+4yIJIJTBecOuHtFhWQnqi8bQn0WmEz4FvX+uUE7SZQaVg==",
        "X-Received": "by 2002:a5d:4c4c:: with SMTP id\n\tn12-v6mr1082299wrt.71.1531387900737; \n\tThu, 12 Jul 2018 02:31:40 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org,\n\tYongseok Koh <yskoh@mellanox.com>",
        "Cc": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "Date": "Thu, 12 Jul 2018 11:31:04 +0200",
        "Message-Id": "<4eb31dbe1f5bdf15f622a17a7c9cf0b14c364151.1531387413.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "References": "<cover.1531293415.git.nelio.laranjeiro@6wind.com>\n\t<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "Subject": "[dpdk-dev] [PATCH v4 18/21] net/mlx5: add flow VXLAN-GPE item",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\nAcked-by: Yongseok Koh <yskoh@mellanox.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 219 ++++++++++++++++++++++++++++++++---\n drivers/net/mlx5/mlx5_rxtx.h |   5 +-\n 2 files changed, 209 insertions(+), 15 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 7eb5d7da3..5d0ad4a04 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -53,6 +53,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \n /* Pattern tunnel Layer bits. */\n #define MLX5_FLOW_LAYER_VXLAN (1u << 12)\n+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)\n \n /* Outer Masks. */\n #define MLX5_FLOW_LAYER_OUTER_L3 \\\n@@ -64,7 +65,8 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \t MLX5_FLOW_LAYER_OUTER_L4)\n \n /* Tunnel Masks. */\n-#define MLX5_FLOW_LAYER_TUNNEL MLX5_FLOW_LAYER_VXLAN\n+#define MLX5_FLOW_LAYER_TUNNEL \\\n+\t(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)\n \n /* Inner Masks. */\n #define MLX5_FLOW_LAYER_INNER_L3 \\\n@@ -102,6 +104,7 @@ enum mlx5_expansion {\n \tMLX5_EXPANSION_OUTER_IPV6_UDP,\n \tMLX5_EXPANSION_OUTER_IPV6_TCP,\n \tMLX5_EXPANSION_VXLAN,\n+\tMLX5_EXPANSION_VXLAN_GPE,\n \tMLX5_EXPANSION_ETH,\n \tMLX5_EXPANSION_IPV4,\n \tMLX5_EXPANSION_IPV4_UDP,\n@@ -140,7 +143,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n \t\t\tETH_RSS_NONFRAG_IPV4_OTHER,\n \t},\n \t[MLX5_EXPANSION_OUTER_IPV4_UDP] = {\n-\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN),\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,\n+\t\t\t\t\t\t MLX5_EXPANSION_VXLAN_GPE),\n \t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n \t\t.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,\n \t},\n@@ -157,7 +161,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n \t\t\tETH_RSS_NONFRAG_IPV6_OTHER,\n \t},\n \t[MLX5_EXPANSION_OUTER_IPV6_UDP] = {\n-\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN),\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,\n+\t\t\t\t\t\t MLX5_EXPANSION_VXLAN_GPE),\n \t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n \t\t.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,\n \t},\n@@ -169,6 +174,12 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n \t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),\n \t\t.type = RTE_FLOW_ITEM_TYPE_VXLAN,\n \t},\n+\t[MLX5_EXPANSION_VXLAN_GPE] = {\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,\n+\t\t\t\t\t\t MLX5_EXPANSION_IPV4,\n+\t\t\t\t\t\t MLX5_EXPANSION_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,\n+\t},\n \t[MLX5_EXPANSION_ETH] = {\n \t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,\n \t\t\t\t\t\t MLX5_EXPANSION_IPV6),\n@@ -236,8 +247,6 @@ struct rte_flow {\n \tstruct mlx5_flow_verbs *cur_verbs;\n \t/**< Current Verbs flow structure being filled. */\n \tstruct rte_flow_action_rss rss;/**< RSS context. */\n-\tuint32_t tunnel_ptype;\n-\t/**< Store tunnel packet type data to store in Rx queue. */\n \tuint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */\n \tuint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */\n };\n@@ -304,6 +313,23 @@ static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {\n \t{ 9, 10, 11 }, { 12, 13, 14 },\n };\n \n+/* Tunnel information. */\n+struct mlx5_flow_tunnel_info {\n+\tuint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */\n+\tuint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). 
*/\n+};\n+\n+static struct mlx5_flow_tunnel_info tunnels_info[] = {\n+\t{\n+\t\t.tunnel = MLX5_FLOW_LAYER_VXLAN,\n+\t\t.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,\n+\t},\n+\t{\n+\t\t.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,\n+\t\t.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,\n+\t},\n+};\n+\n /**\n  * Discover the maximum number of priority available.\n  *\n@@ -1265,7 +1291,119 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;\n \t}\n \tflow->layers |= MLX5_FLOW_LAYER_VXLAN;\n-\tflow->tunnel_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;\n+\treturn size;\n+}\n+\n+/**\n+ * Convert the @p item into a Verbs specification after ensuring the NIC\n+ * will understand and process it correctly.\n+ * If the necessary size for the conversion is greater than the @p flow_size,\n+ * nothing is written in @p flow, the validation is still performed.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[in] flow_size\n+ *   Size in bytes of the available space in @p flow, if too small, nothing is\n+ *   written.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   On success the number of bytes consumed/necessary, if the returned value\n+ *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n+ *   otherwise another call with this returned memory size should be done.\n+ *   On error, a negative errno value is returned and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_item *item,\n+\t\t\t struct rte_flow *flow, const size_t flow_size,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_vxlan_gpe *spec = item->spec;\n+\tconst struct rte_flow_item_vxlan_gpe *mask = item->mask;\n+\tunsigned int size = sizeof(struct ibv_flow_spec_tunnel);\n+\tstruct ibv_flow_spec_tunnel vxlan_gpe = {\n+\t\t.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,\n+\t\t.size = size,\n+\t};\n+\tint ret;\n+\tunion vni {\n+\t\tuint32_t vlan_id;\n+\t\tuint8_t vni[4];\n+\t} id = { .vlan_id = 0, };\n+\n+\tif (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"L3 VXLAN is not enabled by device\"\n+\t\t\t\t\t  \" parameter and/or not configured in\"\n+\t\t\t\t\t  \" firmware\");\n+\tif (flow->layers & MLX5_FLOW_LAYER_TUNNEL)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"a tunnel is already present\");\n+\t/*\n+\t * Verify only UDPv4 is present as defined in\n+\t * https://tools.ietf.org/html/rfc7348\n+\t */\n+\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"no outer UDP layer found\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_vxlan_gpe_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,\n+\t\t sizeof(struct rte_flow_item_vxlan_gpe), error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tif (spec) {\n+\t\tmemcpy(&id.vni[1], spec->vni, 3);\n+\t\tvxlan_gpe.val.tunnel_id = id.vlan_id;\n+\t\tmemcpy(&id.vni[1], mask->vni, 3);\n+\t\tvxlan_gpe.mask.tunnel_id = id.vlan_id;\n+\t\tif 
(spec->protocol)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL,\n+\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t item,\n+\t\t\t\t \"VxLAN-GPE protocol not supported\");\n+\t\t/* Remove unwanted bits from values. */\n+\t\tvxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;\n+\t}\n+\t/*\n+\t * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this\n+\t * layer is defined in the Verbs specification it is interpreted as\n+\t * wildcard and all packets will match this rule, if it follows a full\n+\t * stack layer (ex: eth / ipv4 / udp), all packets matching the layers\n+\t * before will also match this rule.  To avoid such situation, VNI 0\n+\t * is currently refused.\n+\t */\n+\tif (!vxlan_gpe.val.tunnel_id)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"VXLAN-GPE vni cannot be 0\");\n+\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"VXLAN-GPE tunnel must be fully\"\n+\t\t\t\t\t  \" defined\");\n+\tif (size <= flow_size) {\n+\t\tmlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);\n+\t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;\n+\t}\n+\tflow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;\n \treturn size;\n }\n \n@@ -1295,7 +1433,8 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n  *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n-mlx5_flow_items(const struct rte_flow_item pattern[],\n+mlx5_flow_items(struct rte_eth_dev *dev,\n+\t\tconst struct rte_flow_item pattern[],\n \t\tstruct rte_flow *flow, const size_t flow_size,\n \t\tstruct rte_flow_error *error)\n {\n@@ -1330,6 +1469,10 @@ mlx5_flow_items(const struct rte_flow_item pattern[],\n \t\t\tret = mlx5_flow_item_vxlan(pattern, flow, remain,\n \t\t\t\t\t\t   error);\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n+\t\t\tret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,\n+\t\t\t\t\t\t       remain, error);\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n@@ -1904,7 +2047,8 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\t}\n \t\t}\n \t\tret = mlx5_flow_items\n-\t\t\t((const struct rte_flow_item *)\n+\t\t\t(dev,\n+\t\t\t (const struct rte_flow_item *)\n \t\t\t &buf->entry[i].pattern[expanded_pattern_idx],\n \t\t\t flow,\n \t\t\t (size < flow_size) ? flow_size - size : 0, error);\n@@ -1944,6 +2088,34 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \treturn size;\n }\n \n+/**\n+ * Lookup and set the ptype in the data Rx part.  A single Ptype can be used,\n+ * if several tunnel rules are used on this queue, the tunnel ptype will be\n+ * cleared.\n+ *\n+ * @param rxq_ctrl\n+ *   Rx queue to update.\n+ */\n+static void\n+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)\n+{\n+\tunsigned int i;\n+\tuint32_t tunnel_ptype = 0;\n+\n+\t/* Look up for the ptype to use. 
*/\n+\tfor (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {\n+\t\tif (!rxq_ctrl->flow_tunnels_n[i])\n+\t\t\tcontinue;\n+\t\tif (!tunnel_ptype) {\n+\t\t\ttunnel_ptype = tunnels_info[i].ptype;\n+\t\t} else {\n+\t\t\ttunnel_ptype = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\trxq_ctrl->rxq.tunnel = tunnel_ptype;\n+}\n+\n /**\n  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.\n  *\n@@ -1972,8 +2144,17 @@ mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)\n \t\t\trxq_ctrl->flow_mark_n++;\n \t\t}\n \t\tif (tunnel) {\n-\t\t\trxq_ctrl->rxq.tunnel = flow->tunnel_ptype;\n-\t\t\trxq_ctrl->flow_vxlan_n++;\n+\t\t\tunsigned int j;\n+\n+\t\t\t/* Increase the counter matching the flow. */\n+\t\t\tfor (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {\n+\t\t\t\tif ((tunnels_info[j].tunnel & flow->layers) ==\n+\t\t\t\t    tunnels_info[j].tunnel) {\n+\t\t\t\t\trxq_ctrl->flow_tunnels_n[j]++;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tmlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);\n \t\t}\n \t}\n }\n@@ -2008,9 +2189,17 @@ mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)\n \t\t\trxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;\n \t\t}\n \t\tif (tunnel) {\n-\t\t\trxq_ctrl->flow_vxlan_n++;\n-\t\t\tif (!rxq_ctrl->flow_vxlan_n)\n-\t\t\t\trxq_ctrl->rxq.tunnel = 0;\n+\t\t\tunsigned int j;\n+\n+\t\t\t/* Decrease the counter matching the flow. */\n+\t\t\tfor (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {\n+\t\t\t\tif ((tunnels_info[j].tunnel & flow->layers) ==\n+\t\t\t\t    tunnels_info[j].tunnel) {\n+\t\t\t\t\trxq_ctrl->flow_tunnels_n[j]--;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tmlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);\n \t\t}\n \t}\n }\n@@ -2030,6 +2219,7 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)\n \n \tfor (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {\n \t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\t\tunsigned int j;\n \n \t\tif (!(*priv->rxqs)[idx])\n \t\t\tcontinue;\n@@ -2037,7 +2227,8 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)\n \t\t\t\t\tstruct mlx5_rxq_ctrl, rxq);\n \t\trxq_ctrl->flow_mark_n = 0;\n \t\trxq_ctrl->rxq.mark = 0;\n-\t\trxq_ctrl->flow_vxlan_n = 0;\n+\t\tfor (j = 0; j != MLX5_FLOW_TUNNEL; ++j)\n+\t\t\trxq_ctrl->flow_tunnels_n[j] = 0;\n \t\trxq_ctrl->rxq.tunnel = 0;\n \t\t++idx;\n \t}\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex ae9b564dd..e97f5766c 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -34,6 +34,9 @@\n #include \"mlx5_defs.h\"\n #include \"mlx5_prm.h\"\n \n+/* Support tunnel matching. */\n+#define MLX5_FLOW_TUNNEL 2\n+\n struct mlx5_rxq_stats {\n \tunsigned int idx; /**< Mapping index. */\n #ifdef MLX5_PMD_SOFT_COUNTERS\n@@ -139,7 +142,7 @@ struct mlx5_rxq_ctrl {\n \tunsigned int irq:1; /* Whether IRQ is enabled. */\n \tuint16_t idx; /* Queue index. */\n \tuint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */\n-\tuint32_t flow_vxlan_n; /* Number of VXLAN flows using this queue. */\n+\tuint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */\n };\n \n /* Indirection table. */\n",
    "prefixes": [
        "v4",
        "18/21"
    ]
}
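
Updating a patch (PUT/PATCH) requires an authenticated account with maintainer rights on the project. A minimal sketch, assuming token authentication and using placeholder values for the token and the new state (the exact set of valid state names depends on the Patchwork instance's configuration):

import requests

# Placeholder API token, normally created in the Patchwork user settings.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

# PATCH changes only the supplied fields; PUT would replace all writable fields.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/42965/",
    headers=headers,
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])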