Show the details of a single patch.

GET /api/patches/42963/
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42963,
    "url": "http://patches.dpdk.org/api/patches/42963/",
    "web_url": "http://patches.dpdk.org/patch/42963/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk"
    },
    "msgid": "<a3590f4c1a43bcfc46c1a1512e9e60b4264a66af.1531387413.git.nelio.laranjeiro@6wind.com>",
    "date": "2018-07-12T09:31:02",
    "name": "[v4,16/21] net/mlx5: support inner RSS computation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "e440e90a65462ced6d58eb5843971edf1a940f84",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/patch/42963/mbox/",
    "series": [
        {
            "id": 544,
            "url": "http://patches.dpdk.org/api/series/544/",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=544",
            "date": "2018-07-12T09:30:46",
            "name": "net/mlx5: flow rework",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/544/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42963/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/42963/checks/",
    "tags": {},
    "headers": {
        "X-Mailman-Version": "2.1.15",
        "In-Reply-To": "<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "Errors-To": "dev-bounces@dpdk.org",
        "X-Mailer": "git-send-email 2.18.0",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 176091B76F;\n\tThu, 12 Jul 2018 11:32:03 +0200 (CEST)",
            "from mail-wr1-f66.google.com (mail-wr1-f66.google.com\n\t[209.85.221.66]) by dpdk.org (Postfix) with ESMTP id CEBC41B51C\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 11:31:39 +0200 (CEST)",
            "by mail-wr1-f66.google.com with SMTP id r16-v6so20964341wrt.11\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 02:31:39 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\ts2-v6sm18717603wrn.75.2018.07.12.02.31.38\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tThu, 12 Jul 2018 02:31:38 -0700 (PDT)"
        ],
        "References": "<cover.1531293415.git.nelio.laranjeiro@6wind.com>\n\t<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "X-Google-Smtp-Source": "AAOMgpcwo/ZALz7XkRL5DnaTbjTLMTPiJIxZTC7bWf7YSd0TwkAek5Rww6LvXl2xT8xNG3yedI1SxQ==",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=fK9qcrlLtftXIiFMee4QfMUAhGHzzA7ce4Jq7ToTdPk=;\n\tb=xMA46pz23RXqtGnGPzS8Q9Go7Y9T3c7LtKhqIfX9vQcf3JEnkCaynOaFc6mx3pCmsb\n\t3Izgnj1Y0SEpKmaHDtgwuwt6NXGl8I4wQTYj82n48sGS2xOdoh3Hu3TdsZeeJ63Emp74\n\tzW5UrGhIlN4YOOrt8B51ZuIE4QQcR77Hap3JWhTH2++0L/X+m8zBIg4fD3RN0+oKxsTh\n\tKzp3AirOPpDOk1qnOIO4i5F11Cc7pjSdOBy2awR4uML3j27K3hudPKMGzQ/2BIvoJHnt\n\t+FPabNnpq3hrv5srsdGW6pQIY6fZQNiTEb3E2ZXoC0dZvjuJthdpIgnR9fMXOYsh7zCH\n\t96EQ==",
        "X-BeenThere": "dev@dpdk.org",
        "X-Received": "by 2002:adf:fdcd:: with SMTP id\n\ti13-v6mr1078715wrs.276.1531387899006; \n\tThu, 12 Jul 2018 02:31:39 -0700 (PDT)",
        "Message-Id": "<a3590f4c1a43bcfc46c1a1512e9e60b4264a66af.1531387413.git.nelio.laranjeiro@6wind.com>",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=fK9qcrlLtftXIiFMee4QfMUAhGHzzA7ce4Jq7ToTdPk=;\n\tb=l3+K82ecRnApusanhc+LQZHztjfJNLVyj+rTLwG6OnSpY94wNU1kXZs6zsSsHxLe+O\n\t8MlfNa5TcqC76NikMhQf/QOQYNMtNy8wxxuObET5HRc2Zo4gKOM5q9GCl/ELbMedq/yl\n\tZNeOsJLQAId195nBdFTsyj9xPmLw8gS48N1L+3kisS0Gi7C2ghUE8sUN8Ny2V9/zDz6i\n\tjwBFCSyfh34JxjTMfu/YJ+wavdVt691/gBtC21NwBtobTbHnCc6mj3bIuMxcz2EMzUzv\n\tm9FYATC7WNJLX9NTb8zAM2AgANJ7ZnVKs9655GE4lRLKs4ej+WoKEpe4WNac8sMvkQJo\n\tV6Kg==",
        "Delivered-To": "patchwork@dpdk.org",
        "Precedence": "list",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "X-Original-To": "patchwork@dpdk.org",
        "List-Post": "<mailto:dev@dpdk.org>",
        "Return-Path": "<dev-bounces@dpdk.org>",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "To": "dev@dpdk.org,\n\tYongseok Koh <yskoh@mellanox.com>",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "Date": "Thu, 12 Jul 2018 11:31:02 +0200",
        "X-Gm-Message-State": "AOUpUlFs5vcq5qbEVnFp3IZFvzn3Fsw8WS4JTrlH3PV/aBtKD/RgyVFQ\n\t6PySItqm5GZlzj0wdF/LJKUL0/gAdg==",
        "Cc": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "Subject": "[dpdk-dev] [PATCH v4 16/21] net/mlx5: support inner RSS computation"
    },
    "content": "Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\nAcked-by: Yongseok Koh <yskoh@mellanox.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 245 ++++++++++++++++++++++++++---------\n 1 file changed, 185 insertions(+), 60 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 730360b22..84bd99b3e 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -35,18 +35,42 @@\n extern const struct eth_dev_ops mlx5_dev_ops;\n extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \n-/* Pattern Layer bits. */\n+/* Pattern outer Layer bits. */\n #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)\n #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)\n #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)\n #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)\n #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)\n #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)\n-/* Masks. */\n+\n+/* Pattern inner Layer bits. */\n+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)\n+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)\n+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)\n+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)\n+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)\n+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)\n+\n+/* Outer Masks. */\n #define MLX5_FLOW_LAYER_OUTER_L3 \\\n \t(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)\n #define MLX5_FLOW_LAYER_OUTER_L4 \\\n \t(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)\n+#define MLX5_FLOW_LAYER_OUTER \\\n+\t(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \\\n+\t MLX5_FLOW_LAYER_OUTER_L4)\n+\n+/* Tunnel Masks. */\n+#define MLX5_FLOW_LAYER_TUNNEL 0\n+\n+/* Inner Masks. */\n+#define MLX5_FLOW_LAYER_INNER_L3 \\\n+\t(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)\n+#define MLX5_FLOW_LAYER_INNER_L4 \\\n+\t(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)\n+#define MLX5_FLOW_LAYER_INNER \\\n+\t(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \\\n+\t MLX5_FLOW_LAYER_INNER_L4)\n \n /* Actions that modify the fate of matching traffic. 
*/\n #define MLX5_FLOW_FATE_DROP (1u << 0)\n@@ -66,6 +90,14 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \n enum mlx5_expansion {\n \tMLX5_EXPANSION_ROOT,\n+\tMLX5_EXPANSION_ROOT_OUTER,\n+\tMLX5_EXPANSION_OUTER_ETH,\n+\tMLX5_EXPANSION_OUTER_IPV4,\n+\tMLX5_EXPANSION_OUTER_IPV4_UDP,\n+\tMLX5_EXPANSION_OUTER_IPV4_TCP,\n+\tMLX5_EXPANSION_OUTER_IPV6,\n+\tMLX5_EXPANSION_OUTER_IPV6_UDP,\n+\tMLX5_EXPANSION_OUTER_IPV6_TCP,\n \tMLX5_EXPANSION_ETH,\n \tMLX5_EXPANSION_IPV4,\n \tMLX5_EXPANSION_IPV4_UDP,\n@@ -83,6 +115,50 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n \t\t\t\t\t\t MLX5_EXPANSION_IPV6),\n \t\t.type = RTE_FLOW_ITEM_TYPE_END,\n \t},\n+\t[MLX5_EXPANSION_ROOT_OUTER] = {\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,\n+\t\t\t\t\t\t MLX5_EXPANSION_OUTER_IPV4,\n+\t\t\t\t\t\t MLX5_EXPANSION_OUTER_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_ETH] = {\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,\n+\t\t\t\t\t\t MLX5_EXPANSION_OUTER_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t.rss_types = 0,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV4] = {\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT\n+\t\t\t(MLX5_EXPANSION_OUTER_IPV4_UDP,\n+\t\t\t MLX5_EXPANSION_OUTER_IPV4_TCP),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_IPV4,\n+\t\t.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |\n+\t\t\tETH_RSS_NONFRAG_IPV4_OTHER,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV4_UDP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV4_TCP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_TCP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV6] = {\n+\t\t.next = RTE_FLOW_EXPAND_RSS_NEXT\n+\t\t\t(MLX5_EXPANSION_OUTER_IPV6_UDP,\n+\t\t\t MLX5_EXPANSION_OUTER_IPV6_TCP),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_IPV6,\n+\t\t.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 
|\n+\t\t\tETH_RSS_NONFRAG_IPV6_OTHER,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV6_UDP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV6_TCP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_TCP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,\n+\t},\n \t[MLX5_EXPANSION_ETH] = {\n \t\t.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,\n \t\t\t\t\t\t MLX5_EXPANSION_IPV6),\n@@ -453,6 +529,34 @@ mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)\n \tverbs->size += size;\n }\n \n+/**\n+ * Adjust verbs hash fields according to the @p flow information.\n+ *\n+ * @param[in, out] flow.\n+ *   Pointer to flow structure.\n+ * @param[in] tunnel\n+ *   1 when the hash field is for a tunnel item.\n+ * @param[in] layer_types\n+ *   ETH_RSS_* types.\n+ * @param[in] hash_fields\n+ *   Item hash fields.\n+ */\n+static void\n+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, int tunnel __rte_unused,\n+\t\t\t\t  uint32_t layer_types, uint64_t hash_fields)\n+{\n+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n+\thash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);\n+\tif (flow->rss.level == 2 && !tunnel)\n+\t\thash_fields = 0;\n+\telse if (flow->rss.level < 2 && tunnel)\n+\t\thash_fields = 0;\n+#endif\n+\tif (!(flow->rss.types & layer_types))\n+\t\thash_fields = 0;\n+\tflow->cur_verbs->hash_fields |= hash_fields;\n+}\n+\n /**\n  * Convert the @p item into a Verbs specification after ensuring the NIC\n  * will understand and process it correctly.\n@@ -486,14 +590,16 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n \t\t.type = RTE_BE16(0xffff),\n \t};\n+\tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tconst unsigned int size = sizeof(struct ibv_flow_spec_eth);\n \tstruct ibv_flow_spec_eth eth = {\n-\t\t.type = IBV_FLOW_SPEC_ETH,\n+\t\t.type = IBV_FLOW_SPEC_ETH | (tunnel ? 
IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n \n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)\n+\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\t\t    MLX5_FLOW_LAYER_OUTER_L2))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n@@ -506,7 +612,8 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t\t\terror);\n \tif (ret)\n \t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L2;\n+\tflow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\tMLX5_FLOW_LAYER_OUTER_L2;\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -543,7 +650,7 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,\n \t\t\t   struct ibv_flow_spec_eth *eth)\n {\n \tunsigned int i;\n-\tenum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;\n+\tconst enum ibv_flow_spec_type search = eth->type;\n \tstruct ibv_spec_header *hdr = (struct ibv_spec_header *)\n \t\t((uint8_t *)attr + sizeof(struct ibv_flow_attr));\n \n@@ -596,16 +703,19 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.inner_type = RTE_BE16(0xffff),\n \t};\n \tunsigned int size = sizeof(struct ibv_flow_spec_eth);\n-\tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n+\tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tstruct ibv_flow_spec_eth eth = {\n-\t\t.type = IBV_FLOW_SPEC_ETH,\n+\t\t.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n-\tconst uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |\n-\t\t\tMLX5_FLOW_LAYER_OUTER_L4;\n-\tconst uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;\n-\tconst uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;\n+\tconst uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |\n+\t\t\t\t\tMLX5_FLOW_LAYER_INNER_L4) :\n+\t\t(MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);\n+\tconst uint32_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN :\n+\t\tMLX5_FLOW_LAYER_OUTER_VLAN;\n+\tconst uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\tMLX5_FLOW_LAYER_OUTER_L2;\n \n \tif (flow->layers & vlanm)\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n@@ -648,11 +758,14 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\tmlx5_flow_spec_verbs_add(flow, &eth, size);\n \t\t}\n \t} else {\n-\t\tif (verbs->attr)\n-\t\t\tmlx5_flow_item_vlan_update(verbs->attr, &eth);\n+\t\tif (flow->cur_verbs)\n+\t\t\tmlx5_flow_item_vlan_update(flow->cur_verbs->attr,\n+\t\t\t\t\t\t   &eth);\n \t\tsize = 0; /* Only an update is done in eth specification. */\n \t}\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN;\n+\tflow->layers |= tunnel ?\n+\t\t(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :\n+\t\t(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);\n \treturn size;\n }\n \n@@ -692,19 +805,23 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t.next_proto_id = 0xff,\n \t\t},\n \t};\n+\tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);\n \tstruct ibv_flow_spec_ipv4_ext ipv4 = {\n-\t\t.type = IBV_FLOW_SPEC_IPV4_EXT,\n+\t\t.type = IBV_FLOW_SPEC_IPV4_EXT |\n+\t\t\t(tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n \n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)\n+\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t    MLX5_FLOW_LAYER_OUTER_L3))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"multiple L3 layers not supported\");\n-\telse if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\telse if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t MLX5_FLOW_LAYER_OUTER_L4))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n@@ -717,7 +834,8 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t sizeof(struct rte_flow_item_ipv4), error);\n \tif (ret < 0)\n \t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n+\tflow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n+\t\tMLX5_FLOW_LAYER_OUTER_L3_IPV4;\n \tif (spec) {\n \t\tipv4.val = (struct ibv_flow_ipv4_ext_filter){\n \t\t\t.src_ip = spec->hdr.src_addr,\n@@ -740,14 +858,11 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \tflow->l3_protocol_en = !!ipv4.mask.proto;\n \tflow->l3_protocol = ipv4.val.proto;\n \tif (size <= flow_size) {\n-\t\tuint64_t hash_fields = IBV_RX_HASH_SRC_IPV4 |\n-\t\t\tIBV_RX_HASH_DST_IPV4;\n-\n-\t\tif (!(flow->rss.types &\n-\t\t      (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |\n-\t\t       ETH_RSS_NONFRAG_IPV4_OTHER)))\n-\t\t\thash_fields = 0;\n-\t\tflow->cur_verbs->hash_fields |= hash_fields;\n+\t\tmlx5_flow_verbs_hashfields_adjust\n+\t\t\t(flow, tunnel,\n+\t\t\t (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |\n+\t\t\t  ETH_RSS_NONFRAG_IPV4_OTHER),\n+\t\t\t (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;\n \t\tmlx5_flow_spec_verbs_add(flow, &ipv4, size);\n \t}\n@@ -795,19 +910,22 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t.hop_limits = 0xff,\n \t\t},\n \t};\n+\tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_ipv6);\n \tstruct ibv_flow_spec_ipv6 ipv6 = {\n-\t\t.type = IBV_FLOW_SPEC_IPV6,\n+\t\t.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n \n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)\n+\tif (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t    MLX5_FLOW_LAYER_OUTER_L3))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"multiple L3 layers not supported\");\n-\telse if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\telse if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t MLX5_FLOW_LAYER_OUTER_L4))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n@@ -820,7 +938,8 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t sizeof(struct rte_flow_item_ipv6), error);\n \tif (ret < 0)\n \t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n+\tflow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n+\t\tMLX5_FLOW_LAYER_OUTER_L3_IPV6;\n \tif (spec) {\n \t\tunsigned int i;\n \t\tuint32_t vtc_flow_val;\n@@ -863,13 +982,10 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n \tflow->l3_protocol_en = !!ipv6.mask.next_hdr;\n \tflow->l3_protocol = ipv6.val.next_hdr;\n \tif (size <= flow_size) {\n-\t\tuint64_t hash_fields = IBV_RX_HASH_SRC_IPV6 |\n-\t\t\tIBV_RX_HASH_DST_IPV6;\n-\n-\t\tif (!(flow->rss.types &\n-\t\t      (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER)))\n-\t\t\thash_fields = 0;\n-\t\tflow->cur_verbs->hash_fields |= hash_fields;\n+\t\tmlx5_flow_verbs_hashfields_adjust\n+\t\t\t(flow, tunnel,\n+\t\t\t (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),\n+\t\t\t (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;\n \t\tmlx5_flow_spec_verbs_add(flow, &ipv6, size);\n \t}\n@@ -904,9 +1020,10 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n {\n \tconst struct rte_flow_item_udp *spec = item->spec;\n \tconst struct rte_flow_item_udp *mask = item->mask;\n+\tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);\n 
\tstruct ibv_flow_spec_tcp_udp udp = {\n-\t\t.type = IBV_FLOW_SPEC_UDP,\n+\t\t.type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n@@ -917,13 +1034,15 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"protocol filtering not compatible\"\n \t\t\t\t\t  \" with UDP layer\");\n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))\n+\tif (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t      MLX5_FLOW_LAYER_OUTER_L3)))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"L3 is mandatory to filter\"\n \t\t\t\t\t  \" on L4\");\n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t    MLX5_FLOW_LAYER_OUTER_L4))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n@@ -937,7 +1056,8 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t sizeof(struct rte_flow_item_udp), error);\n \tif (ret < 0)\n \t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;\n+\tflow->layers |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP :\n+\t\tMLX5_FLOW_LAYER_OUTER_L4_UDP;\n \tif (spec) {\n \t\tudp.val.dst_port = spec->hdr.dst_port;\n \t\tudp.val.src_port = spec->hdr.src_port;\n@@ -948,12 +1068,9 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tudp.val.dst_port &= udp.mask.dst_port;\n \t}\n \tif (size <= flow_size) {\n-\t\tuint64_t hash_fields = IBV_RX_HASH_SRC_PORT_UDP |\n-\t\t\tIBV_RX_HASH_DST_PORT_UDP;\n-\n-\t\tif (!(flow->rss.types & ETH_RSS_UDP))\n-\t\t\thash_fields = 0;\n-\t\tflow->cur_verbs->hash_fields |= hash_fields;\n+\t\tmlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,\n+\t\t\t\t\t\t  (IBV_RX_HASH_SRC_PORT_UDP |\n+\t\t\t\t\t\t   IBV_RX_HASH_DST_PORT_UDP));\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;\n \t\tmlx5_flow_spec_verbs_add(flow, &udp, size);\n \t}\n@@ -988,9 +1105,10 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n {\n \tconst struct rte_flow_item_tcp *spec = item->spec;\n \tconst struct rte_flow_item_tcp *mask = item->mask;\n+\tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);\n \tstruct ibv_flow_spec_tcp_udp tcp = {\n-\t\t.type = IBV_FLOW_SPEC_TCP,\n+\t\t.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n \tint ret;\n@@ -1001,12 +1119,14 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"protocol filtering not compatible\"\n \t\t\t\t\t  \" with TCP layer\");\n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))\n+\tif (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t      MLX5_FLOW_LAYER_OUTER_L3)))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"L3 is mandatory to filter on L4\");\n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\tif (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t    MLX5_FLOW_LAYER_OUTER_L4))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n@@ -1019,7 +1139,8 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t sizeof(struct rte_flow_item_tcp), error);\n \tif (ret < 0)\n \t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;\n+\tflow->layers |=  tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :\n+\t\tMLX5_FLOW_LAYER_OUTER_L4_TCP;\n \tif (spec) {\n \t\ttcp.val.dst_port = spec->hdr.dst_port;\n \t\ttcp.val.src_port = spec->hdr.src_port;\n@@ -1030,12 +1151,9 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\ttcp.val.dst_port &= tcp.mask.dst_port;\n \t}\n \tif (size <= flow_size) {\n-\t\tuint64_t hash_fields = IBV_RX_HASH_SRC_PORT_TCP |\n-\t\t\tIBV_RX_HASH_DST_PORT_TCP;\n-\n-\t\tif (!(flow->rss.types & ETH_RSS_TCP))\n-\t\t\thash_fields = 0;\n-\t\tflow->cur_verbs->hash_fields |= hash_fields;\n+\t\tmlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,\n+\t\t\t\t\t\t  (IBV_RX_HASH_SRC_PORT_TCP |\n+\t\t\t\t\t\t   IBV_RX_HASH_DST_PORT_TCP));\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;\n \t\tmlx5_flow_spec_verbs_add(flow, &tcp, size);\n \t}\n@@ -1261,7 +1379,11 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n \t\t\t\t\t  &rss->func,\n \t\t\t\t\t  \"RSS hash function not supported\");\n+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n+\tif (rss->level > 2)\n+#else\n \tif (rss->level > 1)\n+#endif\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n \t\t\t\t\t  &rss->level,\n@@ -1301,6 +1423,7 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,\n \tflow->rss.queue_num = rss->queue_num;\n \tmemcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);\n \tflow->rss.types = rss->types;\n+\tflow->rss.level = rss->level;\n \tflow->fate |= MLX5_FLOW_FATE_RSS;\n \treturn 0;\n }\n@@ 
-1608,7 +1731,9 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\tret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),\n \t\t\t\t\t  pattern, local_flow.rss.types,\n \t\t\t\t\t  mlx5_support_expansion,\n-\t\t\t\t\t  MLX5_EXPANSION_ROOT);\n+\t\t\t\t\t  local_flow.rss.level < 2 ?\n+\t\t\t\t\t  MLX5_EXPANSION_ROOT :\n+\t\t\t\t\t  MLX5_EXPANSION_ROOT_OUTER);\n \t\tassert(ret > 0 &&\n \t\t       (unsigned int)ret < sizeof(expand_buffer.buffer));\n \t} else {\n@@ -1979,8 +2104,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,\n \t\t\treturn NULL;\n \t\t}\n \t}\n-\tmlx5_flow_rxq_mark_set(dev, flow);\n \tTAILQ_INSERT_TAIL(list, flow, next);\n+\tmlx5_flow_rxq_mark_set(dev, flow);\n \treturn flow;\n }\n \n",
    "prefixes": [
        "v4",
        "16/21"
    ]
}