get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/41687/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 41687,
    "url": "http://patches.dpdk.org/api/patches/41687/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/ae5d5fc2b1a1501ca622e31c9d1cc6a348b2bd15.1530111623.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<ae5d5fc2b1a1501ca622e31c9d1cc6a348b2bd15.1530111623.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/ae5d5fc2b1a1501ca622e31c9d1cc6a348b2bd15.1530111623.git.nelio.laranjeiro@6wind.com",
    "date": "2018-06-27T15:07:45",
    "name": "[v2,13/20] net/mlx5: add RSS flow action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5404fafa06d57f17a85300650952ba080c2ff63c",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/ae5d5fc2b1a1501ca622e31c9d1cc6a348b2bd15.1530111623.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [
        {
            "id": 268,
            "url": "http://patches.dpdk.org/api/series/268/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=268",
            "date": "2018-06-27T15:07:32",
            "name": "net/mlx5: flow rework",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/268/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/41687/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/41687/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4E8AF1BFF6;\n\tWed, 27 Jun 2018 17:08:03 +0200 (CEST)",
            "from mail-wr0-f196.google.com (mail-wr0-f196.google.com\n\t[209.85.128.196]) by dpdk.org (Postfix) with ESMTP id AF6551BFC7\n\tfor <dev@dpdk.org>; Wed, 27 Jun 2018 17:07:47 +0200 (CEST)",
            "by mail-wr0-f196.google.com with SMTP id l2-v6so1823334wro.7\n\tfor <dev@dpdk.org>; Wed, 27 Jun 2018 08:07:47 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\tk17-v6sm4872513wrp.19.2018.06.27.08.07.45\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tWed, 27 Jun 2018 08:07:45 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:subject:date:message-id:in-reply-to:references;\n\tbh=2qr9b5Z3XYB3gsS8Slff33S6PWoHfnkYKrv8VzPcw/U=;\n\tb=bjqFXDTG8ga8sjo6bQWBTUS+bjp+EqICOViLfo+cXT11HETPloOCNuGezirpQM+O8z\n\ts/QyrSf7VZdhvPjLn+Igl0Q42pIXDMeqVvCmqT9GNYm9CkIiCJpFcvlhfXLUlZ8A0SoZ\n\tO229OaUt5fskABGkwptagpcS7QfgZi4xksIf2o81bQ987JjbyQl3bWnG8r8EW1tYmnP7\n\tDWUheiHKnhOZfMKoX+nEH1SfrrE9pFCBjRu7KoBMF/ZBzQN12DrQ4V7rTHlkFesjHfPT\n\t8X4dvBBZaKmv3hW3I1hj5HbYpgWQ/BEJbgV+1zuEocdtg1PgyYCgI5NUzU9oqO1wbzB8\n\tzc4A==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=2qr9b5Z3XYB3gsS8Slff33S6PWoHfnkYKrv8VzPcw/U=;\n\tb=dhxkUtGClvQn39vZl3/zJ+gvcUevqlLGiIpmihv1j536mrnlfmibfJkD67BvVscnbE\n\tnf1NY9tqxbxpOf0HL7DV5Z2xRiVm4aslZPfXTFzJAGREYDfl8tm2PcwdGgtmCNkuN9/d\n\tGWY33G+QsNacMUom4Z1L1ZuLAdEFMkwexkcN+KLtUhDGkWAPewiTTohwWbu/KeuJOswR\n\tq+WDZOPsmptnXZJ35EfIq4dCwnbJvHb3zHwFJuOIXMrEomHoaKCHDH98phFZmXTpdaXI\n\tjuZYYoIcT8xo11QRaNTHKK3aavCIuPxfqLTYf2n5qdC1qaB5HlU6mlAyOoeYztsyWPOc\n\t1TmQ==",
        "X-Gm-Message-State": "APt69E2z+piOF82Xswc8+jsFT0OA5uASSMDTrHv8FIz39owR/gFw0jz9\n\tLhchoYYbKkUhKJhLb7mMrzvU8Ooo0Q==",
        "X-Google-Smtp-Source": "AAOMgpe5AvsVP9Ek/+nb2tH5P082PtoXD4IoiD3YDIGcXjMhY0VQE6ia5KI8Xzsp9qtZdRexsXqbTw==",
        "X-Received": "by 2002:adf:ff0b:: with SMTP id\n\tk11-v6mr5917544wrr.15.1530112066036; \n\tWed, 27 Jun 2018 08:07:46 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org, Adrien Mazarguil <adrien.mazarguil@6wind.com>,\n\tYongseok Koh <yskoh@mellanox.com>",
        "Date": "Wed, 27 Jun 2018 17:07:45 +0200",
        "Message-Id": "<ae5d5fc2b1a1501ca622e31c9d1cc6a348b2bd15.1530111623.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1530111623.git.nelio.laranjeiro@6wind.com>",
        "References": "<cover.1527506071.git.nelio.laranjeiro@6wind.com>\n\t<cover.1530111623.git.nelio.laranjeiro@6wind.com>",
        "Subject": "[dpdk-dev] [PATCH v2 13/20] net/mlx5: add RSS flow action",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 1211 +++++++++++++++++++++++++---------\n 1 file changed, 899 insertions(+), 312 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex a39157533..08e0a6556 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -51,13 +51,148 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n /* Action fate on the packet. */\n #define MLX5_FLOW_FATE_DROP (1u << 0)\n #define MLX5_FLOW_FATE_QUEUE (1u << 1)\n+#define MLX5_FLOW_FATE_RSS (1u << 2)\n \n /* Modify a packet. */\n #define MLX5_FLOW_MOD_FLAG (1u << 0)\n #define MLX5_FLOW_MOD_MARK (1u << 1)\n \n+/* Priority reserved for default flows. */\n+#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)\n+\n+enum mlx5_expansion {\n+\tMLX5_EXPANSION_ROOT,\n+\tMLX5_EXPANSION_ROOT2,\n+\tMLX5_EXPANSION_OUTER_ETH,\n+\tMLX5_EXPANSION_OUTER_IPV4,\n+\tMLX5_EXPANSION_OUTER_IPV4_UDP,\n+\tMLX5_EXPANSION_OUTER_IPV4_TCP,\n+\tMLX5_EXPANSION_OUTER_IPV6,\n+\tMLX5_EXPANSION_OUTER_IPV6_UDP,\n+\tMLX5_EXPANSION_OUTER_IPV6_TCP,\n+\tMLX5_EXPANSION_VXLAN,\n+\tMLX5_EXPANSION_VXLAN_GPE,\n+\tMLX5_EXPANSION_GRE,\n+\tMLX5_EXPANSION_MPLS,\n+\tMLX5_EXPANSION_ETH,\n+\tMLX5_EXPANSION_IPV4,\n+\tMLX5_EXPANSION_IPV4_UDP,\n+\tMLX5_EXPANSION_IPV4_TCP,\n+\tMLX5_EXPANSION_IPV6,\n+\tMLX5_EXPANSION_IPV6_UDP,\n+\tMLX5_EXPANSION_IPV6_TCP,\n+};\n+\n+/** Supported expansion of items. 
*/\n+static const struct rte_flow_expand_node mlx5_support_expansion[] = {\n+\t[MLX5_EXPANSION_ROOT] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_ETH,\n+\t\t\t\t\t      MLX5_EXPANSION_IPV4,\n+\t\t\t\t\t      MLX5_EXPANSION_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t},\n+\t[MLX5_EXPANSION_ROOT2] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_OUTER_ETH,\n+\t\t\t\t\t      MLX5_EXPANSION_OUTER_IPV4,\n+\t\t\t\t\t      MLX5_EXPANSION_OUTER_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_ETH] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_OUTER_IPV4,\n+\t\t\t\t\t      MLX5_EXPANSION_OUTER_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t.rss_types = 0,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV4] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_OUTER_IPV4_UDP,\n+\t\t\t\t\t      MLX5_EXPANSION_OUTER_IPV4_TCP),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_IPV4,\n+\t\t.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |\n+\t\t\tETH_RSS_NONFRAG_IPV4_OTHER,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV4_UDP] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_VXLAN),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV4_TCP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_TCP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV6] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_OUTER_IPV6_UDP,\n+\t\t\t\t\t      MLX5_EXPANSION_OUTER_IPV6_TCP),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_IPV6,\n+\t\t.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |\n+\t\t\tETH_RSS_NONFRAG_IPV6_OTHER,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV6_UDP] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_VXLAN),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,\n+\t},\n+\t[MLX5_EXPANSION_OUTER_IPV6_TCP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_TCP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,\n+\t},\n+\t[MLX5_EXPANSION_VXLAN] = {\n+\t\t.next = 
RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_ETH),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_VXLAN,\n+\t},\n+\t[MLX5_EXPANSION_VXLAN_GPE] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_ETH,\n+\t\t\t\t\t      MLX5_EXPANSION_IPV4,\n+\t\t\t\t\t      MLX5_EXPANSION_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,\n+\t},\n+\t[MLX5_EXPANSION_GRE] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_IPV4),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_GRE,\n+\t},\n+\t[MLX5_EXPANSION_ETH] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_IPV4,\n+\t\t\t\t\t      MLX5_EXPANSION_IPV6),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t},\n+\t[MLX5_EXPANSION_IPV4] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_IPV4_UDP,\n+\t\t\t\t\t      MLX5_EXPANSION_IPV4_TCP),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_IPV4,\n+\t\t.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |\n+\t\t\tETH_RSS_NONFRAG_IPV4_OTHER,\n+\t},\n+\t[MLX5_EXPANSION_IPV4_UDP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,\n+\t},\n+\t[MLX5_EXPANSION_IPV4_TCP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_TCP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,\n+\t},\n+\t[MLX5_EXPANSION_IPV6] = {\n+\t\t.next = RTE_FLOW_EXPAND_ITEMS(MLX5_EXPANSION_IPV6_UDP,\n+\t\t\t\t\t      MLX5_EXPANSION_IPV6_TCP),\n+\t\t.type = RTE_FLOW_ITEM_TYPE_IPV6,\n+\t\t.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |\n+\t\t\tETH_RSS_NONFRAG_IPV6_OTHER,\n+\t},\n+\t[MLX5_EXPANSION_IPV6_UDP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_UDP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,\n+\t},\n+\t[MLX5_EXPANSION_IPV6_TCP] = {\n+\t\t.type = RTE_FLOW_ITEM_TYPE_TCP,\n+\t\t.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,\n+\t},\n+};\n+\n /** Handles information leading to a drop fate. */\n struct mlx5_flow_verbs {\n+\tLIST_ENTRY(mlx5_flow_verbs) next;\n+\tuint32_t layers;\n+\t/**< Bit-fields of expanded layers see MLX5_FLOW_ITEMS_*. */\n+\tuint32_t modifier;\n+\t/**< Bit-fields of expanded modifier see MLX5_FLOW_MOD_*. 
*/\n \tunsigned int size; /**< Size of the attribute. */\n \tstruct {\n \t\tstruct ibv_flow_attr *attr;\n@@ -66,20 +201,26 @@ struct mlx5_flow_verbs {\n \t};\n \tstruct ibv_flow *flow; /**< Verbs flow pointer. */\n \tstruct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */\n+\tuint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */\n };\n \n /* Flow structure. */\n struct rte_flow {\n \tTAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */\n \tstruct rte_flow_attr attributes; /**< User flow attribute. */\n+\tuint32_t expand:1; /**< Flow is expanded due to RSS configuration. */\n \tuint32_t layers;\n \t/**< Bit-fields of present layers see MLX5_FLOW_ITEMS_*. */\n \tuint32_t modifier;\n \t/**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */\n \tuint32_t fate;\n \t/**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */\n-\tstruct mlx5_flow_verbs verbs; /* Verbs flow. */\n-\tuint16_t queue; /**< Destination queue to redirect traffic to. */\n+\tLIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */\n+\tstruct mlx5_flow_verbs *cur_verbs;\n+\t/**< Current Verbs flow structure being filled. */\n+\tstruct rte_flow_action_rss rss;/**< RSS context. */\n+\tuint8_t key[40]; /**< RSS hash key. */\n+\tuint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */\n };\n \n static const struct rte_flow_ops mlx5_flow_ops = {\n@@ -122,16 +263,27 @@ struct ibv_spec_header {\n \tuint16_t size;\n };\n \n- /**\n-  * Get the maximum number of priority available.\n-  *\n-  * @param dev\n-  *   Pointer to Ethernet device.\n-  *\n-  * @return\n-  *   number of supported flow priority on success, a negative errno value\n-  *   otherwise and rte_errno is set.\n-  */\n+/* Map of Verbs to Flow priority with 8 Verbs priorities. */\n+static const uint32_t priority_map_3[][3] = {\n+\t{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },\n+};\n+\n+/* Map of Verbs to Flow priority with 16 Verbs priorities. 
*/\n+static const uint32_t priority_map_5[][3] = {\n+\t{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },\n+\t{ 9, 10, 11 }, { 12, 13, 14 },\n+};\n+\n+/**\n+ * Get the maximum number of priority available.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   number of supported flow priority on success, a negative errno\n+ *   value otherwise and rte_errno is set.\n+ */\n int\n mlx5_flow_priorities(struct rte_eth_dev *dev)\n {\n@@ -156,6 +308,7 @@ mlx5_flow_priorities(struct rte_eth_dev *dev)\n \tstruct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);\n \tuint16_t vprio[] = { 8, 16 };\n \tint i;\n+\tint priority = 0;\n \n \tif (!drop) {\n \t\trte_errno = ENOTSUP;\n@@ -167,11 +320,54 @@ mlx5_flow_priorities(struct rte_eth_dev *dev)\n \t\tif (!flow)\n \t\t\tbreak;\n \t\tclaim_zero(mlx5_glue->destroy_flow(flow));\n+\t\tpriority = vprio[i];\n+\t}\n+\tswitch (priority) {\n+\tcase 8:\n+\t\tpriority = 3;\n+\t\tbreak;\n+\tcase 16:\n+\t\tpriority = 5;\n+\t\tbreak;\n+\tdefault:\n+\t\trte_errno = ENOTSUP;\n+\t\tDRV_LOG(ERR,\n+\t\t\t\"port %u verbs maximum priority: %d expected 8/16\",\n+\t\t\tdev->data->port_id, vprio[i]);\n+\t\treturn -rte_errno;\n \t}\n \tmlx5_hrxq_drop_release(dev, drop);\n \tDRV_LOG(INFO, \"port %u flow maximum priority: %d\",\n-\t\tdev->data->port_id, vprio[i]);\n-\treturn vprio[i];\n+\t\tdev->data->port_id, priority);\n+\treturn priority;\n+}\n+\n+/**\n+ * Adjust flow priority.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param flow\n+ *   Pointer to an rte flow.\n+ *\n+ * @return\n+ *   The priority adjusted.\n+ */\n+static int\n+mlx5_flow_priority(struct rte_eth_dev *dev, uint32_t priority,\n+\t\t   uint32_t subpriority)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\n+\tswitch (priv->config.flow_prio) {\n+\tcase 3:\n+\t\tpriority = priority_map_3[priority][subpriority];\n+\t\tbreak;\n+\tcase 5:\n+\t\tpriority = priority_map_5[priority][subpriority];\n+\t\tbreak;\n+\t}\n+\treturn priority;\n }\n \n /**\n@@ -185,6 
+381,8 @@ void\n mlx5_flow_print(struct rte_flow *flow __rte_unused)\n {\n #ifndef NDEBUG\n+\tstruct mlx5_flow_verbs *verbs = LIST_FIRST(&flow->verbs);\n+\n \tfprintf(stdout, \"---------8<------------\\n\");\n \tfprintf(stdout, \"%s: flow information\\n\", MLX5_DRIVER_NAME);\n \tfprintf(stdout, \" attributes: group %u priority %u ingress %d egress %d\"\n@@ -193,26 +391,36 @@ mlx5_flow_print(struct rte_flow *flow __rte_unused)\n \t\tflow->attributes.ingress,\n \t\tflow->attributes.egress,\n \t\tflow->attributes.transfer);\n-\tfprintf(stdout, \" layers: %s/%s/%s\\n\",\n-\t\tflow->layers & MLX5_FLOW_LAYER_OUTER_L2 ? \"l2\" : \"-\",\n-\t\tflow->layers & MLX5_FLOW_LAYER_OUTER_L3 ? \"l3\" : \"-\",\n-\t\tflow->layers & MLX5_FLOW_LAYER_OUTER_L4 ? \"l4\" : \"-\");\n-\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n+\tif (flow->fate & MLX5_FLOW_FATE_DROP) {\n \t\tfprintf(stdout, \" fate: drop queue\\n\");\n-\telse if (flow->fate & MLX5_FLOW_FATE_QUEUE)\n-\t\tfprintf(stdout, \" fate: target queue %u\\n\", flow->queue);\n-\tif (flow->verbs.attr) {\n-\t\tstruct ibv_spec_header *hdr =\n-\t\t\t(struct ibv_spec_header *)flow->verbs.specs;\n-\t\tconst int n = flow->verbs.attr->num_of_specs;\n-\t\tint i;\n-\n-\t\tfprintf(stdout, \" Verbs attributes: specs_n %u\\n\",\n-\t\t\tflow->verbs.attr->num_of_specs);\n-\t\tfor (i = 0; i != n; ++i) {\n-\t\t\trte_hexdump(stdout, \" \", hdr, hdr->size);\n-\t\t\thdr = (struct ibv_spec_header *)\n-\t\t\t\t((uint8_t *)hdr + hdr->size);\n+\t} else {\n+\t\tuint16_t i;\n+\n+\t\tfprintf(stdout, \" fate: target queues\");\n+\t\tfor (i = 0; i != flow->rss.queue_num; ++i)\n+\t\t\tfprintf(stdout, \" %u\", (*flow->queue)[i]);\n+\t\tfprintf(stdout, \"\\n\");\n+\t}\n+\tLIST_FOREACH(verbs, &flow->verbs, next) {\n+\t\tuint32_t layers = flow->layers | verbs->layers;\n+\n+\t\tfprintf(stdout, \" layers: %s/%s/%s\\n\",\n+\t\t\tlayers & MLX5_FLOW_LAYER_OUTER_L2 ? \"l2\" : \"-\",\n+\t\t\tlayers & MLX5_FLOW_LAYER_OUTER_L3 ? 
\"l3\" : \"-\",\n+\t\t\tlayers & MLX5_FLOW_LAYER_OUTER_L4 ? \"l4\" : \"-\");\n+\t\tif (verbs->attr) {\n+\t\t\tstruct ibv_spec_header *hdr =\n+\t\t\t\t(struct ibv_spec_header *)verbs->specs;\n+\t\t\tconst int n = verbs->attr->num_of_specs;\n+\t\t\tint i;\n+\n+\t\t\tfprintf(stdout, \" Verbs attributes: specs_n %u\\n\",\n+\t\t\t\tverbs->attr->num_of_specs);\n+\t\t\tfor (i = 0; i != n; ++i) {\n+\t\t\t\trte_hexdump(stdout, \" \", hdr, hdr->size);\n+\t\t\t\thdr = (struct ibv_spec_header *)\n+\t\t\t\t\t((uint8_t *)hdr + hdr->size);\n+\t\t\t}\n \t\t}\n \t}\n \tfprintf(stdout, \"--------->8------------\\n\");\n@@ -239,18 +447,20 @@ mlx5_flow_attributes(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t     struct rte_flow *flow, struct rte_flow_error *error)\n {\n \tuint32_t priority_max =\n-\t\t((struct priv *)dev->data->dev_private)->config.flow_prio;\n+\t\t((struct priv *)dev->data->dev_private)->config.flow_prio - 1;\n \n \tif (attr->group)\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n \t\t\t\t\t  NULL,\n \t\t\t\t\t  \"groups are not supported\");\n-\tif (attr->priority >= priority_max)\n+\tif (attr->priority != MLX5_FLOW_PRIO_RSVD &&\n+\t    attr->priority >= priority_max)\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n \t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"priority value is not supported\");\n+\t\t\t\t\t  \"requested priority value is not\"\n+\t\t\t\t\t  \" supported\");\n \tif (attr->egress)\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n@@ -267,6 +477,8 @@ mlx5_flow_attributes(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t\t\t\t  NULL,\n \t\t\t\t\t  \"only ingress is supported\");\n \tflow->attributes = *attr;\n+\tif (attr->priority == MLX5_FLOW_PRIO_RSVD)\n+\t\tflow->attributes.priority = priority_max;\n \treturn 0;\n }\n \n@@ -346,14 +558,51 @@ mlx5_flow_item_validate(const struct 
rte_flow_item *item,\n static void\n mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)\n {\n-\tif (flow->verbs.specs) {\n+\tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n+\n+\tif (verbs->specs) {\n \t\tvoid *dst;\n \n-\t\tdst = (void *)(flow->verbs.specs + flow->verbs.size);\n+\t\tdst = (void *)(verbs->specs + verbs->size);\n \t\tmemcpy(dst, src, size);\n-\t\t++flow->verbs.attr->num_of_specs;\n+\t\t++verbs->attr->num_of_specs;\n \t}\n-\tflow->verbs.size += size;\n+\tverbs->size += size;\n+}\n+\n+/**\n+ * Update layer bit-field.\n+ *\n+ * @param flow[in, out]\n+ *   Pointer to flow structure.\n+ * @param layers\n+ *   Bit-fields of layers to add see MLX5_FLOW_ITEMS_*.\n+ */\n+static void\n+mlx5_flow_layers_update(struct rte_flow *flow, uint32_t layers)\n+{\n+\tif (flow->expand) {\n+\t\tif (flow->cur_verbs)\n+\t\t\tflow->cur_verbs->layers |= layers;\n+\t} else {\n+\t\tflow->layers |= layers;\n+\t}\n+}\n+\n+/**\n+ * Get layers bit-field.\n+ *\n+ * @param flow[in, out]\n+ *   Pointer to flow structure.\n+ */\n+static uint32_t\n+mlx5_flow_layers(struct rte_flow *flow)\n+{\n+\tuint32_t layers = flow->layers;\n+\n+\tif (flow->expand && flow->cur_verbs)\n+\t\tlayers |= flow->cur_verbs->layers;\n+\treturn layers;\n }\n \n /**\n@@ -388,22 +637,26 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.type = IBV_FLOW_SPEC_ETH,\n \t\t.size = size,\n \t};\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n \tint ret;\n \n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L2 layers already configured\");\n-\tif (!mask)\n-\t\tmask = &rte_flow_item_eth_mask;\n-\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n-\t\t\t\t      (const uint8_t *)&nic_mask,\n-\t\t\t\t      sizeof(struct rte_flow_item_eth),\n-\t\t\t\t      error);\n-\tif (ret)\n-\t\treturn ret;\n-\tflow->layers |= 
MLX5_FLOW_LAYER_OUTER_L2;\n+\tif (!flow->expand) {\n+\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L2)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L2 layers already\"\n+\t\t\t\t\t\t  \" configured\");\n+\t\tif (!mask)\n+\t\t\tmask = &rte_flow_item_eth_mask;\n+\t\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n+\t\t\t\t\t      (const uint8_t *)&nic_mask,\n+\t\t\t\t\t      sizeof(struct rte_flow_item_eth),\n+\t\t\t\t\t      error);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L2);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -482,6 +735,7 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.tci = RTE_BE16(0x0fff),\n \t};\n \tunsigned int size = sizeof(struct ibv_flow_spec_eth);\n+\tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n \tstruct ibv_flow_spec_eth eth = {\n \t\t.type = IBV_FLOW_SPEC_ETH,\n \t\t.size = size,\n@@ -491,24 +745,30 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\tMLX5_FLOW_LAYER_OUTER_L4;\n \tconst uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;\n \tconst uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n-\tif (flow->layers & vlanm)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L2 layers already configured\");\n-\telse if ((flow->layers & lm) != 0)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L2 layer cannot follow L3/L4 layer\");\n-\tif (!mask)\n-\t\tmask = &rte_flow_item_vlan_mask;\n-\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n-\t\t\t\t      (const uint8_t *)&nic_mask,\n-\t\t\t\t      sizeof(struct rte_flow_item_vlan), error);\n-\tif (ret)\n-\t\treturn ret;\n+\tif (!flow->expand) {\n+\t\tif 
(layers & vlanm)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L2 layers already\"\n+\t\t\t\t\t\t  \" configured\");\n+\t\telse if ((layers & lm) != 0)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L2 layer cannot follow\"\n+\t\t\t\t\t\t  \" L3/L4 layer\");\n+\t\tif (!mask)\n+\t\t\tmask = &rte_flow_item_vlan_mask;\n+\t\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n+\t\t\t\t\t      (const uint8_t *)&nic_mask,\n+\t\t\t\t\t      sizeof(struct rte_flow_item_vlan),\n+\t\t\t\t\t      error);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n \tif (spec) {\n \t\teth.val.vlan_tag = spec->tci;\n \t\teth.mask.vlan_tag = mask->tci;\n@@ -517,32 +777,34 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\teth.mask.ether_type = mask->inner_type;\n \t\teth.val.ether_type &= eth.mask.ether_type;\n \t}\n-\t/*\n-\t * From verbs perspective an empty VLAN is equivalent\n-\t * to a packet without VLAN layer.\n-\t */\n-\tif (!eth.mask.vlan_tag)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\n-\t\t\t\t\t  item->spec,\n-\t\t\t\t\t  \"VLAN cannot be empty\");\n-\t/* Outer TPID cannot be matched. */\n-\tif (eth.mask.ether_type)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\n-\t\t\t\t\t  item->spec,\n-\t\t\t\t\t  \"VLAN TPID matching is not\"\n-\t\t\t\t\t  \" supported\");\n-\tif (!(flow->layers & l2m)) {\n+\tif (!flow->expand) {\n+\t\t/*\n+\t\t * From verbs perspective an empty VLAN is equivalent\n+\t\t * to a packet without VLAN layer.\n+\t\t */\n+\t\tif (!eth.mask.vlan_tag)\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\n+\t\t\t\t\t\t  item->spec,\n+\t\t\t\t\t\t  \"VLAN cannot be empty\");\n+\t\t/* Outer TPID cannot be matched. 
*/\n+\t\tif (eth.mask.ether_type)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\n+\t\t\t\t\t\t  item->spec,\n+\t\t\t\t\t\t  \"VLAN TPID matching is not\"\n+\t\t\t\t\t\t  \" supported\");\n+\t}\n+\tif (!(layers & l2m)) {\n \t\tif (size <= flow_size)\n \t\t\tmlx5_flow_spec_verbs_add(flow, &eth, size);\n \t} else {\n-\t\tif (flow->verbs.attr)\n-\t\t\tmlx5_flow_item_vlan_update(flow->verbs.attr, &eth);\n+\t\tif (verbs->attr)\n+\t\t\tmlx5_flow_item_vlan_update(verbs->attr, &eth);\n \t\tsize = 0; /**< Only an update is done in eth specification. */\n \t}\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L2 |\n-\t\tMLX5_FLOW_LAYER_OUTER_VLAN;\n+\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L2 |\n+\t\t\t\tMLX5_FLOW_LAYER_OUTER_VLAN);\n \treturn size;\n }\n \n@@ -582,25 +844,31 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.size = size,\n \t};\n \tint ret;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"multiple L3 layers not supported\");\n-\telse if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 cannot follow an L4 layer.\");\n-\tif (!mask)\n-\t\tmask = &rte_flow_item_ipv4_mask;\n-\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n-\t\t\t\t      (const uint8_t *)&nic_mask,\n-\t\t\t\t      sizeof(struct rte_flow_item_ipv4), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n+\tif (!flow->expand) {\n+\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L3)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"multiple L3 layers not\"\n+\t\t\t\t\t\t  \" 
supported\");\n+\t\telse if (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L3 cannot follow an L4\"\n+\t\t\t\t\t\t  \" layer\");\n+\t\tif (!mask)\n+\t\t\tmask = &rte_flow_item_ipv4_mask;\n+\t\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n+\t\t\t\t\t      (const uint8_t *)&nic_mask,\n+\t\t\t\t\t      sizeof(struct rte_flow_item_ipv4),\n+\t\t\t\t\t      error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L3_IPV4);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -667,25 +935,31 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.size = size,\n \t};\n \tint ret;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"multiple L3 layers not supported\");\n-\telse if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 cannot follow an L4 layer.\");\n-\tif (!mask)\n-\t\tmask = &rte_flow_item_ipv6_mask;\n-\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n-\t\t\t\t      (const uint8_t *)&nic_mask,\n-\t\t\t\t      sizeof(struct rte_flow_item_ipv6), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n+\tif (!flow->expand) {\n+\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L3)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"multiple L3 layers not\"\n+\t\t\t\t\t\t  \" supported\");\n+\t\telse if (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  
RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L3 cannot follow an L4\"\n+\t\t\t\t\t\t  \" layer\");\n+\t\tif (!mask)\n+\t\t\tmask = &rte_flow_item_ipv6_mask;\n+\t\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n+\t\t\t\t\t      (const uint8_t *)&nic_mask,\n+\t\t\t\t\t      sizeof(struct rte_flow_item_ipv6),\n+\t\t\t\t\t      error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L3_IPV6);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -759,25 +1033,31 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.size = size,\n \t};\n \tint ret;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 is mandatory to filter on L4\");\n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L4 layer is already present\");\n-\tif (!mask)\n-\t\tmask = &rte_flow_item_udp_mask;\n-\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n-\t\t\t\t      (const uint8_t *)&rte_flow_item_udp_mask,\n-\t\t\t\t      sizeof(struct rte_flow_item_udp), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;\n+\tif (!flow->expand) {\n+\t\tif (!(layers & MLX5_FLOW_LAYER_OUTER_L3))\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L3 is mandatory to filter\"\n+\t\t\t\t\t\t  \" on L4\");\n+\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L4 layer is already\"\n+\t\t\t\t\t\t  \" present\");\n+\t\tif (!mask)\n+\t\t\tmask = 
&rte_flow_item_udp_mask;\n+\t\tret = mlx5_flow_item_validate\n+\t\t\t(item, (const uint8_t *)mask,\n+\t\t\t (const uint8_t *)&rte_flow_item_udp_mask,\n+\t\t\t sizeof(struct rte_flow_item_udp), error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L4_UDP);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -821,25 +1101,31 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.size = size,\n \t};\n \tint ret;\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n \n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 is mandatory to filter on L4\");\n-\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L4 layer is already present\");\n-\tif (!mask)\n-\t\tmask = &rte_flow_item_tcp_mask;\n-\tret = mlx5_flow_item_validate(item, (const uint8_t *)mask,\n-\t\t\t\t      (const uint8_t *)&rte_flow_item_tcp_mask,\n-\t\t\t\t      sizeof(struct rte_flow_item_tcp), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;\n+\tif (!flow->expand) {\n+\t\tif (!(layers & MLX5_FLOW_LAYER_OUTER_L3))\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L3 is mandatory to filter\"\n+\t\t\t\t\t\t  \" on L4\");\n+\t\tif (layers & MLX5_FLOW_LAYER_OUTER_L4)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"L4 layer is already\"\n+\t\t\t\t\t\t  \" present\");\n+\t\tif (!mask)\n+\t\t\tmask = &rte_flow_item_tcp_mask;\n+\t\tret = mlx5_flow_item_validate\n+\t\t\t(item, (const uint8_t *)mask,\n+\t\t\t (const uint8_t *)&rte_flow_item_tcp_mask,\n+\t\t\t sizeof(struct 
rte_flow_item_tcp), error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t}\n+\tmlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L4_TCP);\n \tif (size > flow_size)\n \t\treturn size;\n \tif (spec) {\n@@ -954,18 +1240,20 @@ mlx5_flow_action_drop(const struct rte_flow_action *actions,\n \t\t\t.size = size,\n \t};\n \n-\tif (flow->fate)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  actions,\n-\t\t\t\t\t  \"multiple fate actions are not\"\n-\t\t\t\t\t  \" supported\");\n-\tif (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  actions,\n-\t\t\t\t\t  \"drop is not compatible with\"\n-\t\t\t\t\t  \" flag/mark action\");\n+\tif (!flow->expand) {\n+\t\tif (flow->fate)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"multiple fate actions are\"\n+\t\t\t\t\t\t  \" not supported\");\n+\t\tif (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"drop is not compatible with\"\n+\t\t\t\t\t\t  \" flag/mark action\");\n+\t}\n \tif (size < flow_size)\n \t\tmlx5_flow_spec_verbs_add(flow, &drop, size);\n \tflow->fate |= MLX5_FLOW_FATE_DROP;\n@@ -998,6 +1286,8 @@ mlx5_flow_action_queue(struct rte_eth_dev *dev,\n \tstruct priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_action_queue *queue = actions->conf;\n \n+\tif (flow->expand)\n+\t\treturn 0;\n \tif (flow->fate)\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n@@ -1014,11 +1304,162 @@ mlx5_flow_action_queue(struct rte_eth_dev *dev,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n \t\t\t\t\t  &queue->index,\n \t\t\t\t\t  \"queue is not configured\");\n-\tflow->queue = 
queue->index;\n+\tif (flow->queue)\n+\t\t(*flow->queue)[0] = queue->index;\n+\tflow->rss.queue_num = 1;\n \tflow->fate |= MLX5_FLOW_FATE_QUEUE;\n \treturn 0;\n }\n \n+/**\n+ * Store the Verbs hash fields and priority according to the layer and types.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param flow\n+ *   Pointer to flow structure.\n+ * @param types\n+ *   RSS types for this flow (see ETH_RSS_*).\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_action_rss_verbs_attr(struct rte_eth_dev *dev, struct rte_flow *flow,\n+\t\t\t\tuint32_t types)\n+{\n+\tconst uint32_t layers = mlx5_flow_layers(flow);\n+\tuint64_t hash_fields;\n+\tuint32_t priority;\n+\n+\tif ((types & ETH_RSS_NONFRAG_IPV4_TCP) &&\n+\t    (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)) {\n+\t\thash_fields = IBV_RX_HASH_SRC_IPV4 |\n+\t\t\tIBV_RX_HASH_DST_IPV4 |\n+\t\t\tIBV_RX_HASH_SRC_PORT_TCP |\n+\t\t\tIBV_RX_HASH_DST_PORT_TCP;\n+\t\tpriority = 0;\n+\t} else if ((types & ETH_RSS_NONFRAG_IPV4_UDP) &&\n+\t\t (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)) {\n+\t\thash_fields = IBV_RX_HASH_SRC_IPV4 |\n+\t\t\tIBV_RX_HASH_DST_IPV4 |\n+\t\t\tIBV_RX_HASH_SRC_PORT_UDP |\n+\t\t\tIBV_RX_HASH_DST_PORT_UDP;\n+\t\tpriority = 0;\n+\t} else if ((types & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) &&\n+\t\t (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) {\n+\t\thash_fields = IBV_RX_HASH_SRC_IPV4 |\n+\t\t\tIBV_RX_HASH_DST_IPV4;\n+\t\tpriority = 1;\n+\t} else if ((types & ETH_RSS_NONFRAG_IPV6_TCP) &&\n+\t\t (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)) {\n+\t\thash_fields = IBV_RX_HASH_SRC_IPV6 |\n+\t\t\tIBV_RX_HASH_DST_IPV6 |\n+\t\t\tIBV_RX_HASH_SRC_PORT_TCP |\n+\t\t\tIBV_RX_HASH_DST_PORT_TCP;\n+\t\tpriority = 0;\n+\t} else if ((types & ETH_RSS_NONFRAG_IPV6_UDP) &&\n+\t\t (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) {\n+\t\thash_fields = IBV_RX_HASH_SRC_IPV6 |\n+\t\t\tIBV_RX_HASH_DST_IPV6 |\n+\t\t\tIBV_RX_HASH_SRC_PORT_UDP 
|\n+\t\t\tIBV_RX_HASH_DST_PORT_UDP;\n+\t\tpriority = 0;\n+\t} else if ((types & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) &&\n+\t\t (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) {\n+\t\thash_fields = IBV_RX_HASH_SRC_IPV6 |\n+\t\t\tIBV_RX_HASH_DST_IPV6;\n+\t\tpriority = 1;\n+\t} else {\n+\t\thash_fields = 0;\n+\t\tpriority = 2;\n+\t}\n+\tflow->cur_verbs->hash_fields = hash_fields;\n+\tflow->cur_verbs->attr->priority =\n+\t\tmlx5_flow_priority(dev, flow->attributes.priority, priority);\n+\treturn 0;\n+}\n+\n+/**\n+ * Validate action queue provided by the user.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param actions\n+ *   Pointer to flow actions array.\n+ * @param flow\n+ *   Pointer to the rte_flow structure.\n+ * @param error\n+ *   Pointer to error structure.\n+ */\n+static int\n+mlx5_flow_action_rss(struct rte_eth_dev *dev,\n+\t\t     const struct rte_flow_action *actions,\n+\t\t     struct rte_flow *flow,\n+\t\t     struct rte_flow_error *error)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_rss *rss = actions->conf;\n+\tunsigned int i;\n+\n+\tif (flow->expand)\n+\t\treturn 0;\n+\tif (flow->fate)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t  actions,\n+\t\t\t\t\t  \"multiple fate actions are not\"\n+\t\t\t\t\t  \" supported\");\n+\tif (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&\n+\t    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->func,\n+\t\t\t\t\t  \"RSS hash function not supported\");\n+\tif (rss->level > 1)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->level,\n+\t\t\t\t\t  \"tunnel RSS is not supported\");\n+\tif (rss->key_len < rss_hash_default_key_len)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  
&rss->key_len,\n+\t\t\t\t\t  \"RSS hash key too small\");\n+\tif (rss->key_len > rss_hash_default_key_len)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->key_len,\n+\t\t\t\t\t  \"RSS hash key too large\");\n+\tif (rss->queue_num > priv->config.ind_table_max_size)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->queue_num,\n+\t\t\t\t\t  \"number of queues too large\");\n+\tif (rss->types & MLX5_RSS_HF_MASK)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->types,\n+\t\t\t\t\t  \"some RSS protocols are not\"\n+\t\t\t\t\t  \" supported\");\n+\tfor (i = 0; i != rss->queue_num; ++i) {\n+\t\tif (!(*priv->rxqs)[rss->queue[i]])\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL,\n+\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t &rss->queue[i],\n+\t\t\t\t \"queue is not configured\");\n+\t}\n+\tif (flow->queue)\n+\t\tmemcpy((*flow->queue), rss->queue,\n+\t\t       rss->queue_num * sizeof(uint16_t));\n+\tflow->rss.queue_num = rss->queue_num;\n+\tmemcpy(flow->key, rss->key, rss_hash_default_key_len);\n+\tflow->rss.types = rss->types;\n+\tflow->fate |= MLX5_FLOW_FATE_RSS;\n+\treturn 0;\n+}\n+\n /**\n  * Validate action flag provided by the user.\n  *\n@@ -1046,43 +1487,59 @@ mlx5_flow_action_flag(const struct rte_flow_action *actions,\n \t\t.size = size,\n \t\t.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),\n \t};\n+\tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n \n-\tif (flow->modifier & MLX5_FLOW_MOD_FLAG)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  actions,\n-\t\t\t\t\t  \"flag action already present\");\n-\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  actions,\n-\t\t\t\t\t  \"flag is not compatible 
with drop\"\n-\t\t\t\t\t  \" action\");\n-\tif (flow->modifier & MLX5_FLOW_MOD_MARK)\n-\t\treturn 0;\n+\tif (!flow->expand) {\n+\t\tif (flow->modifier & MLX5_FLOW_MOD_FLAG)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"flag action already present\");\n+\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"flag is not compatible with\"\n+\t\t\t\t\t\t  \" drop action\");\n+\t}\n+\t/*\n+\t * The two only possible cases, a mark has already been added in the\n+\t * specification, in such case, the flag is already present in\n+\t * addition of the mark.\n+\t * Second case, has it is not possible to have two flags, it just\n+\t * needs to add it.\n+\t */\n+\tif (verbs) {\n+\t\tverbs->modifier |= MLX5_FLOW_MOD_FLAG;\n+\t\tif (verbs->modifier & MLX5_FLOW_MOD_MARK)\n+\t\t\tsize = 0;\n+\t\telse if (size <= flow_size)\n+\t\t\tmlx5_flow_spec_verbs_add(flow, &tag, size);\n+\t} else {\n+\t\tif (flow->modifier & MLX5_FLOW_MOD_MARK)\n+\t\t\tsize = 0;\n+\t}\n \tflow->modifier |= MLX5_FLOW_MOD_FLAG;\n-\tif (size <= flow_size)\n-\t\tmlx5_flow_spec_verbs_add(flow, &tag, size);\n \treturn size;\n }\n \n /**\n  * Update verbs specification to modify the flag to mark.\n  *\n- * @param flow\n- *   Pointer to the rte_flow structure.\n+ * @param verbs\n+ *   Pointer to the mlx5_flow_verbs structure.\n  * @param mark_id\n  *   Mark identifier to replace the flag.\n  */\n static void\n-mlx5_flow_verbs_mark_update(struct rte_flow *flow, uint32_t mark_id)\n+mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)\n {\n \tstruct ibv_spec_header *hdr;\n \tint i;\n \n \t/* Update Verbs specification. 
*/\n-\thdr = (struct ibv_spec_header *)flow->verbs.specs;\n-\tfor (i = 0; i != flow->verbs.attr->num_of_specs; ++i) {\n+\thdr = (struct ibv_spec_header *)verbs->specs;\n+\tfor (i = 0; i != verbs->attr->num_of_specs; ++i) {\n \t\tif (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {\n \t\t\tstruct ibv_flow_spec_action_tag *t =\n \t\t\t\t(struct ibv_flow_spec_action_tag *)hdr;\n@@ -1120,38 +1577,52 @@ mlx5_flow_action_mark(const struct rte_flow_action *actions,\n \t\t.type = IBV_FLOW_SPEC_ACTION_TAG,\n \t\t.size = size,\n \t};\n+\tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n \n-\tif (!mark)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  actions,\n-\t\t\t\t\t  \"configuration cannot be null\");\n-\tif (mark->id >= MLX5_FLOW_MARK_MAX)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &mark->id,\n-\t\t\t\t\t  \"mark must be between 0 and\"\n-\t\t\t\t\t  \" 16777199\");\n-\tif (flow->modifier & MLX5_FLOW_MOD_MARK)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  actions,\n-\t\t\t\t\t  \"mark action already present\");\n-\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  actions,\n-\t\t\t\t\t  \"mark is not compatible with drop\"\n-\t\t\t\t\t  \" action\");\n-\tif (flow->modifier & MLX5_FLOW_MOD_FLAG) {\n-\t\tmlx5_flow_verbs_mark_update(flow, mark->id);\n-\t\tsize = 0; /**< Only an update is done in the specification. 
*/\n-\t} else {\n-\t\ttag.tag_id = mlx5_flow_mark_set(mark->id);\n-\t\tif (size <= flow_size) {\n+\tif (!flow->expand) {\n+\t\tif (!mark)\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"configuration cannot be\"\n+\t\t\t\t\t\t  \" null\");\n+\t\tif (mark->id >= MLX5_FLOW_MARK_MAX)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL,\n+\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t &mark->id,\n+\t\t\t\t \"mark must be between 0 and 16777199\");\n+\t\tif (flow->modifier & MLX5_FLOW_MOD_MARK)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"mark action already\"\n+\t\t\t\t\t\t  \" present\");\n+\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"mark is not compatible with\"\n+\t\t\t\t\t\t  \" drop action\");\n+\t}\n+\t/*\n+\t * The two only possible cases, a flag has already been added in the\n+\t * specification, in such case, it needs to be update to add the id.\n+\t * Second case, has it is not possible to have two mark, it just\n+\t * needs to add it.\n+\t */\n+\tif (verbs) {\n+\t\tverbs->modifier |= MLX5_FLOW_MOD_MARK;\n+\t\tif (verbs->modifier & MLX5_FLOW_MOD_FLAG) {\n+\t\t\tmlx5_flow_verbs_mark_update(verbs, mark->id);\n+\t\t\tsize = 0;\n+\t\t} else if (size <= flow_size) {\n \t\t\ttag.tag_id = mlx5_flow_mark_set(mark->id);\n \t\t\tmlx5_flow_spec_verbs_add(flow, &tag, size);\n \t\t}\n+\t} else {\n+\t\tif (flow->modifier & MLX5_FLOW_MOD_FLAG)\n+\t\t\tsize = 0;\n \t}\n \tflow->modifier |= MLX5_FLOW_MOD_MARK;\n \treturn size;\n@@ -1185,6 +1656,15 @@ mlx5_flow_actions(struct rte_eth_dev *dev,\n \tint remain = flow_size;\n \tint ret = 0;\n \n+\t/*\n+\t * FLAG/MARK are the only actions having a specification in Verbs and\n+\t * not making part of the 
packet fate.  Due to this specificity and to\n+\t * avoid extra variable, their bit in the flow->modifier bit-field are\n+\t * disabled here to compute the exact necessary memory those action\n+\t * needs.\n+\t */\n+\tflow->modifier &= ~(MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK);\n+\t/* Process the actions. */\n \tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\n \t\tswitch (actions->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n@@ -1204,6 +1684,9 @@ mlx5_flow_actions(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n \t\t\tret = mlx5_flow_action_queue(dev, actions, flow, error);\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t\tret = mlx5_flow_action_rss(dev, actions, flow, error);\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n@@ -1257,27 +1740,92 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\tstruct rte_flow_error *error)\n {\n \tstruct rte_flow local_flow = { .layers = 0, };\n-\tsize_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);\n+\tsize_t size = sizeof(*flow);\n \tint remain = (flow_size > size) ? flow_size - size : 0;\n+\tstruct rte_flow_expand_rss *buf;\n \tint ret;\n+\tuint32_t i;\n \n \tif (!remain)\n \t\tflow = &local_flow;\n \tret = mlx5_flow_attributes(dev, attr, flow, error);\n \tif (ret < 0)\n \t\treturn ret;\n-\tret = mlx5_flow_items(items, flow, remain, error);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\tsize += ret;\n-\tremain = (flow_size > size) ? 
flow_size - size : 0;\n-\tret = mlx5_flow_actions(dev, actions, flow, remain, error);\n+\tret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);\n \tif (ret < 0)\n \t\treturn ret;\n-\tsize += ret;\n+\tret = rte_flow_expand_rss(NULL, 0, items, local_flow.rss.types,\n+\t\t\t\t  mlx5_support_expansion,\n+\t\t\t\t  local_flow.rss.level < 2 ?\n+\t\t\t\t  MLX5_EXPANSION_ROOT : MLX5_EXPANSION_ROOT2);\n+\tassert(ret > 0);\n+\tbuf = rte_calloc(__func__, 1, ret, 0);\n+\tif (!buf) {\n+\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t   NULL,\n+\t\t\t\t   \"not enough memory to expand the RSS flow\");\n+\t\tgoto error;\n+\t}\n+\tret = rte_flow_expand_rss(buf, ret, items, local_flow.rss.types,\n+\t\t\t\t  mlx5_support_expansion,\n+\t\t\t\t  local_flow.rss.level < 2 ?\n+\t\t\t\t  MLX5_EXPANSION_ROOT : MLX5_EXPANSION_ROOT2);\n+\tassert(ret > 0);\n+\tsize += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),\n+\t\t\t       sizeof(void *));\n \tif (size <= flow_size)\n-\t\tflow->verbs.attr->priority = flow->attributes.priority;\n+\t\tflow->queue = (void *)(flow + 1);\n+\tLIST_INIT(&flow->verbs);\n+\tflow->layers = 0;\n+\tflow->modifier = 0;\n+\tflow->fate = 0;\n+\tfor (i = 0; i != buf->entries; ++i) {\n+\t\tsize_t off = size;\n+\n+\t\tsize += sizeof(struct ibv_flow_attr) +\n+\t\t\tsizeof(struct mlx5_flow_verbs);\n+\t\tremain = (flow_size > size) ? 
flow_size - size : 0;\n+\t\tif (remain) {\n+\t\t\tflow->cur_verbs = (void *)((uintptr_t)flow + off);\n+\t\t\tflow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);\n+\t\t\tflow->cur_verbs->specs =\n+\t\t\t\t(void *)(flow->cur_verbs->attr + 1);\n+\t\t}\n+\t\tret = mlx5_flow_items\n+\t\t\t((const struct rte_flow_item *)buf->patterns[i],\n+\t\t\t flow, remain, error);\n+\t\tif (ret < 0)\n+\t\t\tgoto error;\n+\t\tsize += ret;\n+\t\tif (remain > ret)\n+\t\t\tremain -= ret;\n+\t\telse\n+\t\t\tremain = 0;\n+\t\tret = mlx5_flow_actions(dev, actions, flow, remain, error);\n+\t\tif (ret < 0)\n+\t\t\tgoto error;\n+\t\tsize += ret;\n+\t\tif (remain > ret)\n+\t\t\tremain -= ret;\n+\t\telse\n+\t\t\tremain = 0;\n+\t\tif (size <= flow_size) {\n+\t\t\tflow->cur_verbs->attr->priority =\n+\t\t\t\tflow->attributes.priority;\n+\t\t\tret = mlx5_flow_action_rss_verbs_attr(dev, flow,\n+\t\t\t\t\t\t\t      flow->rss.types);\n+\t\t\tif (ret < 0)\n+\t\t\t\tgoto error;\n+\t\t\tLIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);\n+\t\t}\n+\t\tflow->expand = !!(buf->entries > 1);\n+\t}\n+\trte_free(buf);\n \treturn size;\n+error:\n+\trte_free(buf);\n+\treturn ret;\n }\n \n /**\n@@ -1292,9 +1840,13 @@ static void\n mlx5_flow_rxq_mark(struct rte_eth_dev *dev, struct rte_flow *flow)\n {\n \tstruct priv *priv = dev->data->dev_private;\n+\tconst uint32_t mask = MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK;\n+\tuint32_t i;\n \n-\t(*priv->rxqs)[flow->queue]->mark |=\n-\t\tflow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK);\n+\tif (!(flow->modifier & mask))\n+\t\treturn;\n+\tfor (i = 0; i != flow->rss.queue_num; ++i)\n+\t\t(*priv->rxqs)[(*flow->queue)[i]]->mark = 1;\n }\n \n /**\n@@ -1328,18 +1880,20 @@ mlx5_flow_validate(struct rte_eth_dev *dev,\n static void\n mlx5_flow_fate_remove(struct rte_eth_dev *dev, struct rte_flow *flow)\n {\n-\tif (flow->fate & MLX5_FLOW_FATE_DROP) {\n-\t\tif (flow->verbs.flow) {\n-\t\t\tclaim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));\n-\t\t\tflow->verbs.flow = 
NULL;\n+\tstruct mlx5_flow_verbs *verbs;\n+\n+\tLIST_FOREACH(verbs, &flow->verbs, next) {\n+\t\tif (verbs->flow) {\n+\t\t\tclaim_zero(mlx5_glue->destroy_flow(verbs->flow));\n+\t\t\tverbs->flow = NULL;\n+\t\t}\n+\t\tif (verbs->hrxq) {\n+\t\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n+\t\t\t\tmlx5_hrxq_drop_release(dev, verbs->hrxq);\n+\t\t\telse\n+\t\t\t\tmlx5_hrxq_release(dev, verbs->hrxq);\n+\t\t\tverbs->hrxq = NULL;\n \t\t}\n-\t}\n-\tif (flow->verbs.hrxq) {\n-\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n-\t\t\tmlx5_hrxq_drop_release(dev, flow->verbs.hrxq);\n-\t\telse if (flow->fate & MLX5_FLOW_FATE_QUEUE)\n-\t\t\tmlx5_hrxq_release(dev, flow->verbs.hrxq);\n-\t\tflow->verbs.hrxq = NULL;\n \t}\n }\n \n@@ -1360,46 +1914,68 @@ static int\n mlx5_flow_fate_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t     struct rte_flow_error *error)\n {\n-\tif (flow->fate & MLX5_FLOW_FATE_DROP) {\n-\t\tflow->verbs.hrxq = mlx5_hrxq_drop_new(dev);\n-\t\tif (!flow->verbs.hrxq)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, errno,\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t NULL,\n-\t\t\t\t \"cannot allocate Drop queue\");\n-\t} else if (flow->fate & MLX5_FLOW_FATE_QUEUE) {\n-\t\tstruct mlx5_hrxq *hrxq;\n-\n-\t\thrxq = mlx5_hrxq_get(dev, rss_hash_default_key,\n-\t\t\t\t     rss_hash_default_key_len, 0,\n-\t\t\t\t     &flow->queue, 1, 0, 0);\n-\t\tif (!hrxq)\n-\t\t\thrxq = mlx5_hrxq_new(dev, rss_hash_default_key,\n-\t\t\t\t\t     rss_hash_default_key_len, 0,\n-\t\t\t\t\t     &flow->queue, 1, 0, 0);\n-\t\tif (!hrxq)\n-\t\t\treturn rte_flow_error_set(error, rte_errno,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\tNULL,\n-\t\t\t\t\t\"cannot create flow\");\n-\t\tflow->verbs.hrxq = hrxq;\n-\t}\n-\tflow->verbs.flow =\n-\t\tmlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);\n-\tif (!flow->verbs.flow) {\n-\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n-\t\t\tmlx5_hrxq_drop_release(dev, flow->verbs.hrxq);\n-\t\telse\n-\t\t\tmlx5_hrxq_release(dev, 
flow->verbs.hrxq);\n-\t\tflow->verbs.hrxq = NULL;\n-\t\treturn rte_flow_error_set(error, errno,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"kernel module refuses to create\"\n-\t\t\t\t\t  \" flow\");\n+\tstruct mlx5_flow_verbs *verbs;\n+\tint err;\n+\n+\tLIST_FOREACH(verbs, &flow->verbs, next) {\n+\t\tif (flow->fate & MLX5_FLOW_FATE_DROP) {\n+\t\t\tverbs->hrxq = mlx5_hrxq_drop_new(dev);\n+\t\t\tif (!verbs->hrxq) {\n+\t\t\t\trte_flow_error_set\n+\t\t\t\t\t(error, errno,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t NULL,\n+\t\t\t\t\t \"cannot get drop hash queue\");\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tstruct mlx5_hrxq *hrxq;\n+\n+\t\t\thrxq = mlx5_hrxq_get(dev, flow->key,\n+\t\t\t\t\t     rss_hash_default_key_len,\n+\t\t\t\t\t     verbs->hash_fields,\n+\t\t\t\t\t     (*flow->queue),\n+\t\t\t\t\t     flow->rss.queue_num, 0, 0);\n+\t\t\tif (!hrxq)\n+\t\t\t\thrxq = mlx5_hrxq_new(dev, flow->key,\n+\t\t\t\t\t\t     rss_hash_default_key_len,\n+\t\t\t\t\t\t     verbs->hash_fields,\n+\t\t\t\t\t\t     (*flow->queue),\n+\t\t\t\t\t\t     flow->rss.queue_num, 0, 0);\n+\t\t\tif (!hrxq) {\n+\t\t\t\trte_flow_error_set\n+\t\t\t\t\t(error, rte_errno,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t NULL,\n+\t\t\t\t\t \"cannot get hash queue\");\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t\tverbs->hrxq = hrxq;\n+\t\t}\n+\t\tverbs->flow =\n+\t\t\tmlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);\n+\t\tif (!verbs->flow) {\n+\t\t\trte_flow_error_set(error, errno,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"hardware refuses to create flow\");\n+\t\t\tgoto error;\n+\t\t}\n \t}\n \treturn 0;\n+error:\n+\terr = rte_errno; /* Save rte_errno before cleanup. 
*/\n+\tLIST_FOREACH(verbs, &flow->verbs, next) {\n+\t\tif (verbs->hrxq) {\n+\t\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n+\t\t\t\tmlx5_hrxq_drop_release(dev, verbs->hrxq);\n+\t\t\telse\n+\t\t\t\tmlx5_hrxq_release(dev, verbs->hrxq);\n+\t\t\tverbs->hrxq = NULL;\n+\t\t}\n+\t}\n+\trte_errno = err; /* Restore rte_errno. */\n+\treturn -rte_errno;\n }\n \n /**\n@@ -1429,42 +2005,43 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,\n \t\t      const struct rte_flow_action actions[],\n \t\t      struct rte_flow_error *error)\n {\n-\tstruct rte_flow *flow;\n-\tsize_t size;\n+\tstruct rte_flow *flow = NULL;\n+\tsize_t size = 0;\n \tint ret;\n \n-\tret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);\n+\tret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);\n \tif (ret < 0)\n \t\treturn NULL;\n \tsize = ret;\n-\tflow = rte_zmalloc(__func__, size, 0);\n+\tflow = rte_calloc(__func__, 1, size, 0);\n \tif (!flow) {\n \t\trte_flow_error_set(error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t   NULL,\n-\t\t\t\t   \"cannot allocate memory\");\n+\t\t\t\t   \"not enough memory to create flow\");\n \t\treturn NULL;\n \t}\n-\tflow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);\n-\tflow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);\n \tret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);\n-\tif (ret < 0)\n-\t\tgoto error;\n+\tif (ret < 0) {\n+\t\trte_free(flow);\n+\t\treturn NULL;\n+\t}\n \tassert((size_t)ret == size);\n \tif (dev->data->dev_started) {\n \t\tret = mlx5_flow_fate_apply(dev, flow, error);\n-\t\tif (ret < 0)\n-\t\t\tgoto error;\n+\t\tif (ret < 0) {\n+\t\t\tret = rte_errno; /* Save rte_errno before cleanup. */\n+\t\t\tif (flow) {\n+\t\t\t\tmlx5_flow_fate_remove(dev, flow);\n+\t\t\t\trte_free(flow);\n+\t\t\t}\n+\t\t\trte_errno = ret; /* Restore rte_errno. 
*/\n+\t\t\treturn NULL;\n+\t\t}\n \t}\n \tmlx5_flow_rxq_mark(dev, flow);\n \tTAILQ_INSERT_TAIL(list, flow, next);\n \treturn flow;\n-error:\n-\tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tmlx5_flow_fate_remove(dev, flow);\n-\trte_free(flow);\n-\trte_errno = ret; /* Restore rte_errno. */\n-\treturn NULL;\n }\n \n /**\n@@ -1502,7 +2079,7 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,\n \tstruct priv *priv = dev->data->dev_private;\n \tstruct rte_flow *rflow;\n \tconst uint32_t mask = MLX5_FLOW_MOD_FLAG & MLX5_FLOW_MOD_MARK;\n-\tint mark = 0;\n+\tunsigned int i;\n \n \tmlx5_flow_fate_remove(dev, flow);\n \tTAILQ_REMOVE(list, flow, next);\n@@ -1512,18 +2089,28 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,\n \t}\n \t/*\n \t * When a flow is removed and this flow has a flag/mark modifier, all\n-\t * flows needs to be parse to verify if the Rx queue use by the flow\n+\t * flows needs to be parse to verify if the Rx queues use by the flow\n \t * still need to track the flag/mark request.\n \t */\n-\tTAILQ_FOREACH(rflow, &priv->flows, next) {\n-\t\tif (!(rflow->modifier & mask))\n-\t\t\tcontinue;\n-\t\tif (flow->queue == rflow->queue) {\n-\t\t\tmark = 1;\n-\t\t\tbreak;\n+\tfor (i = 0; i != flow->rss.queue_num; ++i) {\n+\t\tint mark = 0;\n+\n+\t\tTAILQ_FOREACH(rflow, &priv->flows, next) {\n+\t\t\tunsigned int j;\n+\n+\t\t\tif (!(rflow->modifier & mask))\n+\t\t\t\tcontinue;\n+\t\t\tfor (j = 0; j != rflow->rss.queue_num; ++j) {\n+\t\t\t\tif ((*flow->queue)[i] == (*rflow->queue)[j]) {\n+\t\t\t\t\tmark = 1;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tif (mark)\n+\t\t\t\tbreak;\n \t\t}\n+\t\t(*priv->rxqs)[i]->mark = !!mark;\n \t}\n-\t(*priv->rxqs)[flow->queue]->mark = !!mark;\n \trte_free(flow);\n }\n \n@@ -1654,7 +2241,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,\n \tstruct priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_attr attr = {\n \t\t.ingress = 1,\n-\t\t.priority = 
priv->config.flow_prio - 1,\n+\t\t.priority = MLX5_FLOW_PRIO_RSVD,\n \t};\n \tstruct rte_flow_item items[] = {\n \t\t{\n",
    "prefixes": [
        "v2",
        "13/20"
    ]
}