get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (the complete representation replaces the existing one).

GET /api/patches/41690/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 41690,
    "url": "http://patches.dpdk.org/api/patches/41690/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/723a4eb5db839302778ca3fb0894dbe24f5acd20.1530111623.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<723a4eb5db839302778ca3fb0894dbe24f5acd20.1530111623.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/723a4eb5db839302778ca3fb0894dbe24f5acd20.1530111623.git.nelio.laranjeiro@6wind.com",
    "date": "2018-06-27T15:07:48",
    "name": "[v2,16/20] net/mlx5: add flow VXLAN item",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "135e9617950031b9fa2446eadb8d76f4c71841c2",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/723a4eb5db839302778ca3fb0894dbe24f5acd20.1530111623.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [
        {
            "id": 268,
            "url": "http://patches.dpdk.org/api/series/268/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=268",
            "date": "2018-06-27T15:07:32",
            "name": "net/mlx5: flow rework",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/268/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/41690/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/41690/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 204D71BFFC;\n\tWed, 27 Jun 2018 17:08:10 +0200 (CEST)",
            "from mail-wr0-f196.google.com (mail-wr0-f196.google.com\n\t[209.85.128.196]) by dpdk.org (Postfix) with ESMTP id 30C701BFB5\n\tfor <dev@dpdk.org>; Wed, 27 Jun 2018 17:07:49 +0200 (CEST)",
            "by mail-wr0-f196.google.com with SMTP id u7-v6so2396265wrn.12\n\tfor <dev@dpdk.org>; Wed, 27 Jun 2018 08:07:49 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\tk17-v6sm4872513wrp.19.2018.06.27.08.07.47\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tWed, 27 Jun 2018 08:07:47 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:subject:date:message-id:in-reply-to:references;\n\tbh=W4hG2Adl28FBtFPZh1X+CWOcxoVqTL3DmEOsmwObpv8=;\n\tb=gLYldRR4A9RkD0qdOi9HulDMejlfwONvntzOmkmkcBd8xBsnmhv5amZMsGLLs6IuXy\n\tnq+BPW3aXhAgGSMRObKvXk/rwCn/WvBMiUeZJgMGRK7HMtpLyRs1xLGiGs+94dE5sST6\n\tzPN71LMDCqYvr7NOP9PAdR+stA/zdXwcqHfNeJ8nKgJ/yCga1zHzo63TadAoWZRH0lhH\n\tGJxGAyjyKH22F1zuOMN1/tSoG9ZWetm0UksBVDvkuOIfcktkvNHJ1V08algVuY3EKXZr\n\taUVnPklMawDKAfVOo0W1aEBKgqAxm9WNCJY5/8embgPi1X8ykBPN9yyEbGaUgsEUL7bm\n\tNInQ==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=W4hG2Adl28FBtFPZh1X+CWOcxoVqTL3DmEOsmwObpv8=;\n\tb=tdU+jfQzziE37EpUNbDizB+BbwqpsPPHhabizYAE9PL29ShesH0lXy3oO+vnMoxgIL\n\t/kTq99itNwc5MHwBEGmHks6A/AV8MC9RK34/dZs/O8tL8Q6glZRnP3qpZxTCuPZnqHz9\n\tBGdZ+b61QphFAjq4H4GYlGuvQ47mcmyrXOsYzBY/d+kI56OfZAs/GiBktNXxAHSGqnWa\n\tTC3Z5QXv2ppqZiiGfc4R5ydHonpUsp7WppNpKypMAznieli8UaupSiQBEZUzc4iFgD3Q\n\txr+cLLDtfgbmdACJQ4UZWdFjfZyrAfmnhs4YqXyS5f9gCBp+C6quPO9tE2/JXhWcIeEV\n\tZbKQ==",
        "X-Gm-Message-State": "APt69E0crUgsXeLOAMNyovjRjoyBCWGmq2yxiZQop2OW/J4aLA2ffNDi\n\t88LzhUBiPA7McfWeNSQyPN6vJfBeaA==",
        "X-Google-Smtp-Source": "AAOMgpcnRh8awJ+TqEwwlP39p2IQhO/6Sn40YMMcelURnA1ohj4EvtmdVKPLATOKkotXdtl976soSQ==",
        "X-Received": "by 2002:adf:fc8c:: with SMTP id\n\tg12-v6mr5509411wrr.216.1530112068368; \n\tWed, 27 Jun 2018 08:07:48 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org, Adrien Mazarguil <adrien.mazarguil@6wind.com>,\n\tYongseok Koh <yskoh@mellanox.com>",
        "Date": "Wed, 27 Jun 2018 17:07:48 +0200",
        "Message-Id": "<723a4eb5db839302778ca3fb0894dbe24f5acd20.1530111623.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1530111623.git.nelio.laranjeiro@6wind.com>",
        "References": "<cover.1527506071.git.nelio.laranjeiro@6wind.com>\n\t<cover.1530111623.git.nelio.laranjeiro@6wind.com>",
        "Subject": "[dpdk-dev] [PATCH v2 16/20] net/mlx5: add flow VXLAN item",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 242 +++++++++++++++++++++++++++++------\n 1 file changed, 202 insertions(+), 40 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex eedf0c461..daf5b9b5a 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -35,18 +35,45 @@\n extern const struct eth_dev_ops mlx5_dev_ops;\n extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \n-/* Pattern Layer bits. */\n+/* Pattern outer Layer bits. */\n #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)\n #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)\n #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)\n #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)\n #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)\n #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)\n-/* Masks. */\n+\n+/* Pattern inner Layer bits. */\n+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)\n+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)\n+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)\n+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)\n+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)\n+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)\n+\n+/* Pattern tunnel Layer bits. */\n+#define MLX5_FLOW_LAYER_VXLAN (1u << 12)\n+\n+/* Outer Masks. */\n #define MLX5_FLOW_LAYER_OUTER_L3 \\\n \t(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)\n #define MLX5_FLOW_LAYER_OUTER_L4 \\\n \t(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)\n+#define MLX5_FLOW_LAYER_OUTER \\\n+\t(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \\\n+\t MLX5_FLOW_LAYER_OUTER_L4)\n+\n+/* Tunnel masks. */\n+#define MLX5_FLOW_LAYER_TUNNEL MLX5_FLOW_LAYER_VXLAN\n+\n+/* Inner Masks. */\n+#define MLX5_FLOW_LAYER_INNER_L3 \\\n+\t(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)\n+#define MLX5_FLOW_LAYER_INNER_L4 \\\n+\t(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)\n+#define MLX5_FLOW_LAYER_INNER \\\n+\t(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \\\n+\t MLX5_FLOW_LAYER_INNER_L4)\n \n /* Action fate on the packet. 
*/\n #define MLX5_FLOW_FATE_DROP (1u << 0)\n@@ -406,10 +433,14 @@ mlx5_flow_print(struct rte_flow *flow __rte_unused)\n \tLIST_FOREACH(verbs, &flow->verbs, next) {\n \t\tuint32_t layers = flow->layers | verbs->layers;\n \n-\t\tfprintf(stdout, \" layers: %s/%s/%s\\n\",\n+\t\tfprintf(stdout, \" layers: %s/%s/%s/%s/%s/%s/%s\\n\",\n \t\t\tlayers & MLX5_FLOW_LAYER_OUTER_L2 ? \"l2\" : \"-\",\n \t\t\tlayers & MLX5_FLOW_LAYER_OUTER_L3 ? \"l3\" : \"-\",\n-\t\t\tlayers & MLX5_FLOW_LAYER_OUTER_L4 ? \"l4\" : \"-\");\n+\t\t\tlayers & MLX5_FLOW_LAYER_OUTER_L4 ? \"l4\" : \"-\",\n+\t\t\tlayers & MLX5_FLOW_LAYER_TUNNEL ? \"T\" : \"-\",\n+\t\t\tlayers & MLX5_FLOW_LAYER_INNER_L2 ? \"l2\" : \"-\",\n+\t\t\tlayers & MLX5_FLOW_LAYER_INNER_L3 ? \"l3\" : \"-\",\n+\t\t\tlayers & MLX5_FLOW_LAYER_INNER_L4 ? \"l4\" : \"-\");\n \t\tif (verbs->attr) {\n \t\t\tstruct ibv_spec_header *hdr =\n \t\t\t\t(struct ibv_spec_header *)verbs->specs;\n@@ -634,16 +665,18 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n \t\t.type = RTE_BE16(0xffff),\n \t};\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tconst int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);\n \tconst unsigned int size = sizeof(struct ibv_flow_spec_eth);\n \tstruct ibv_flow_spec_eth eth = {\n-\t\t.type = IBV_FLOW_SPEC_ETH,\n+\t\t.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n-\tconst uint32_t layers = mlx5_flow_layers(flow);\n \tint ret;\n \n \tif (!flow->expand) {\n-\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L2)\n+\t\tif (layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L2 :\n+\t\t\t      MLX5_FLOW_LAYER_OUTER_L2))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n@@ -658,7 +691,8 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tif (ret)\n \t\t\treturn ret;\n \t}\n-\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L2);\n+\tmlx5_flow_layers_update(flow, tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_L2);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -694,7 +728,7 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,\n \t\t\t   struct ibv_flow_spec_eth *eth)\n {\n \tunsigned int i;\n-\tenum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;\n+\tconst enum ibv_flow_spec_type search = eth->type;\n \tstruct ibv_spec_header *hdr = (struct ibv_spec_header *)\n \t\t((uint8_t *)attr + sizeof(struct ibv_flow_attr));\n \n@@ -737,17 +771,20 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.tci = RTE_BE16(0x0fff),\n \t};\n \tunsigned int size = sizeof(struct ibv_flow_spec_eth);\n-\tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tconst int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);\n \tstruct ibv_flow_spec_eth eth = {\n-\t\t.type = IBV_FLOW_SPEC_ETH,\n+\t\t.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n-\tconst uint32_t lm = MLX5_FLOW_LAYER_OUTER_L3 |\n-\t\t\tMLX5_FLOW_LAYER_OUTER_L4;\n-\tconst uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;\n-\tconst uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;\n-\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tconst uint32_t lm = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |\n+\t\t\t\t      MLX5_FLOW_LAYER_INNER_L4) :\n+\t\t(MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);\n+\tconst uint32_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN :\n+\t\tMLX5_FLOW_LAYER_OUTER_VLAN;\n+\tconst uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\tMLX5_FLOW_LAYER_OUTER_L2;\n \n \tif (!flow->expand) {\n \t\tif (layers & vlanm)\n@@ -801,12 +838,17 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tif (size <= flow_size)\n \t\t\tmlx5_flow_spec_verbs_add(flow, &eth, size);\n \t} else {\n-\t\tif (verbs->attr)\n-\t\t\tmlx5_flow_item_vlan_update(verbs->attr, &eth);\n+\t\tif (flow->cur_verbs)\n+\t\t\tmlx5_flow_item_vlan_update(flow->cur_verbs->attr,\n+\t\t\t\t\t\t   &eth);\n \t\tsize = 0; /**< Only an update is done in eth specification. */\n \t}\n-\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L2 |\n-\t\t\t\tMLX5_FLOW_LAYER_OUTER_VLAN);\n+\tmlx5_flow_layers_update(flow,\n+\t\t\t\ttunnel ?\n+\t\t\t\t(MLX5_FLOW_LAYER_INNER_L2 |\n+\t\t\t\t MLX5_FLOW_LAYER_INNER_VLAN) :\n+\t\t\t\t(MLX5_FLOW_LAYER_OUTER_L2 |\n+\t\t\t\t MLX5_FLOW_LAYER_OUTER_VLAN));\n \treturn size;\n }\n \n@@ -840,22 +882,26 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t.next_proto_id = 0xff,\n \t\t},\n \t};\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tconst int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);\n \tstruct ibv_flow_spec_ipv4_ext ipv4 = {\n-\t\t.type = IBV_FLOW_SPEC_IPV4_EXT,\n+\t\t.type = IBV_FLOW_SPEC_IPV4_EXT |\n+\t\t\t(tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n-\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n \tif (!flow->expand) {\n-\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L3)\n+\t\tif (layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t      MLX5_FLOW_LAYER_OUTER_L3))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n \t\t\t\t\t\t  \"multiple L3 layers not\"\n \t\t\t\t\t\t  \" supported\");\n-\t\telse if (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\telse if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t   MLX5_FLOW_LAYER_OUTER_L4))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n@@ -870,7 +916,9 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t}\n-\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L3_IPV4);\n+\tmlx5_flow_layers_update(flow,\n+\t\t\t\ttunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_L3_IPV4);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -931,22 +979,25 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t.hop_limits = 0xff,\n \t\t},\n \t};\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tconst int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_ipv6);\n \tstruct ibv_flow_spec_ipv6 ipv6 = {\n-\t\t.type = IBV_FLOW_SPEC_IPV6,\n+\t\t.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n-\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n \tif (!flow->expand) {\n-\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L3)\n+\t\tif (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t      MLX5_FLOW_LAYER_OUTER_L3))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n \t\t\t\t\t\t  \"multiple L3 layers not\"\n \t\t\t\t\t\t  \" supported\");\n-\t\telse if (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\telse if (layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t   MLX5_FLOW_LAYER_OUTER_L4))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n@@ -961,7 +1012,9 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t}\n-\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L3_IPV6);\n+\tmlx5_flow_layers_update(flow,\n+\t\t\t\ttunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_L3_IPV6);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -1029,22 +1082,25 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n {\n \tconst struct rte_flow_item_udp *spec = item->spec;\n \tconst struct rte_flow_item_udp *mask = item->mask;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tconst int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);\n \tstruct ibv_flow_spec_tcp_udp udp = {\n-\t\t.type = IBV_FLOW_SPEC_UDP,\n+\t\t.type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n-\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n \tif (!flow->expand) {\n-\t\tif (!(layers & MLX5_FLOW_LAYER_OUTER_L3))\n+\t\tif (!(layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_L3)))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n \t\t\t\t\t\t  \"L3 is mandatory to filter\"\n \t\t\t\t\t\t  \" on L4\");\n-\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\tif (layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t      MLX5_FLOW_LAYER_OUTER_L4))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n@@ -1059,7 +1115,9 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t}\n-\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L4_UDP);\n+\tmlx5_flow_layers_update(flow,\n+\t\t\t\ttunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_L4_UDP);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -1097,22 +1155,25 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n {\n \tconst struct rte_flow_item_tcp *spec = item->spec;\n \tconst struct rte_flow_item_tcp *mask = item->mask;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tconst int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);\n \tstruct ibv_flow_spec_tcp_udp tcp = {\n-\t\t.type = IBV_FLOW_SPEC_TCP,\n+\t\t.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n-\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n \tif (!flow->expand) {\n-\t\tif (!(layers & MLX5_FLOW_LAYER_OUTER_L3))\n+\t\tif (!(layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_L3)))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n \t\t\t\t\t\t  \"L3 is mandatory to filter\"\n \t\t\t\t\t\t  \" on L4\");\n-\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\tif (layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t      MLX5_FLOW_LAYER_OUTER_L4))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n@@ -1127,7 +1188,9 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t}\n-\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L4_TCP);\n+\tmlx5_flow_layers_update(flow,\n+\t\t\t\ttunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_L4_TCP);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -1143,6 +1206,102 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n \treturn size;\n }\n \n+/**\n+ * Validate VXLAN layer and possibly create the Verbs specification.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param flow[in, out]\n+ *   Pointer to flow structure.\n+ * @param flow_size[in]\n+ *   Size in bytes of the available space for to store the flow information.\n+ * @param error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   size in bytes necessary for the conversion, a negative errno value\n+ *   otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n+\t\t     const size_t flow_size, struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_vxlan *spec = item->spec;\n+\tconst struct rte_flow_item_vxlan *mask = item->mask;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tunsigned int size = sizeof(struct ibv_flow_spec_tunnel);\n+\tstruct ibv_flow_spec_tunnel vxlan = {\n+\t\t.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,\n+\t\t.size = size,\n+\t};\n+\tint ret;\n+\tunion vni {\n+\t\tuint32_t vlan_id;\n+\t\tuint8_t vni[4];\n+\t} id = { .vlan_id = 0, };\n+\n+\tif (!flow->expand) {\n+\t\tif (layers & MLX5_FLOW_LAYER_TUNNEL)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"a 
tunnel is already\"\n+\t\t\t\t\t\t  \" present\");\n+\t\t/*\n+\t\t * Verify only UDPv4 is present as defined in\n+\t\t * https://tools.ietf.org/html/rfc7348\n+\t\t */\n+\t\tif (!(layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"no outer UDP layer found\");\n+\t\tif (!mask)\n+\t\t\tmask = &rte_flow_item_vxlan_mask;\n+\t\tret = mlx5_flow_item_validate\n+\t\t\t(item, (const uint8_t *)mask,\n+\t\t\t (const uint8_t *)&rte_flow_item_vxlan_mask,\n+\t\t\t sizeof(struct rte_flow_item_vxlan), error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\tif (spec) {\n+\t\tmemcpy(&id.vni[1], spec->vni, 3);\n+\t\tvxlan.val.tunnel_id = id.vlan_id;\n+\t\tmemcpy(&id.vni[1], mask->vni, 3);\n+\t\tvxlan.mask.tunnel_id = id.vlan_id;\n+\t\t/* Remove unwanted bits from values. */\n+\t\tvxlan.val.tunnel_id &= vxlan.mask.tunnel_id;\n+\t}\n+\tif (!flow->expand) {\n+\t\t/*\n+\t\t * Tunnel id 0 is equivalent as not adding a VXLAN layer, if\n+\t\t * only this layer is defined in the Verbs specification it is\n+\t\t * interpreted as wildcard and all packets will match this\n+\t\t * rule, if it follows a full stack layer (ex: eth / ipv4 /\n+\t\t * udp), all packets matching the layers before will also\n+\t\t * match this rule.  
To avoid such situation, VNI 0 is\n+\t\t * currently refused.\n+\t\t */\n+\t\tif (!vxlan.val.tunnel_id)\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"VXLAN vni cannot be 0\");\n+\t\tif (!(layers & MLX5_FLOW_LAYER_OUTER))\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"VXLAN tunnel must be fully\"\n+\t\t\t\t\t\t  \" defined\");\n+\t}\n+\tif (size <= flow_size)\n+\t\tmlx5_flow_spec_verbs_add(flow, &vxlan, size);\n+\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_VXLAN);\n+\tflow->ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;\n+\treturn size;\n+}\n+\n /**\n  * Validate items provided by the user.\n  *\n@@ -1191,6 +1350,9 @@ mlx5_flow_items(const struct rte_flow_item items[],\n \t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n \t\t\tret = mlx5_flow_item_tcp(items, flow, remain, error);\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tret = mlx5_flow_item_vxlan(items, flow, remain, error);\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n",
    "prefixes": [
        "v2",
        "16/20"
    ]
}