get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
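
Both operations can be exercised programmatically. Below is a minimal sketch in Python; the requests library, the token-based Authorization scheme, and the token value are assumptions (the token is a placeholder, and write access is restricted to authenticated maintainers). The GET call fetches the same patch record shown in the example response below.

    import requests

    BASE = "http://patches.dpdk.org/api"

    # Read access is anonymous; format=json returns plain JSON instead
    # of the browsable "api" renderer shown below.
    resp = requests.get(f"{BASE}/patches/45249/", params={"format": "json"})
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH requires authentication (assumption: Patchwork's API token
    # scheme; "0000..." is a placeholder, and only maintainers may
    # change a patch's state or delegate).
    resp = requests.patch(
        f"{BASE}/patches/45249/",
        headers={"Authorization": "Token 0000000000000000"},
        json={"state": "accepted"},
    )
    print(resp.status_code)
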

GET /api/patches/45249/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 45249,
    "url": "http://patches.dpdk.org/api/patches/45249/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20180924231721.15799-2-yskoh@mellanox.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180924231721.15799-2-yskoh@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180924231721.15799-2-yskoh@mellanox.com",
    "date": "2018-09-24T23:17:35",
    "name": "[v3,01/11] net/mlx5: split flow validation to dedicated function",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8f9bca05d44d7339bb46c912e84979b95aad6ed6",
    "submitter": {
        "id": 636,
        "url": "http://patches.dpdk.org/api/people/636/?format=api",
        "name": "Yongseok Koh",
        "email": "yskoh@mellanox.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20180924231721.15799-2-yskoh@mellanox.com/mbox/",
    "series": [
        {
            "id": 1476,
            "url": "http://patches.dpdk.org/api/series/1476/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1476",
            "date": "2018-09-24T23:17:33",
            "name": "net/mlx5: add Direct Verbs flow driver support",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/1476/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/45249/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/45249/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A78941B105;\n\tTue, 25 Sep 2018 01:17:40 +0200 (CEST)",
            "from EUR02-HE1-obe.outbound.protection.outlook.com\n\t(mail-eopbgr10045.outbound.protection.outlook.com [40.107.1.45])\n\tby dpdk.org (Postfix) with ESMTP id 3A4141B0FE\n\tfor <dev@dpdk.org>; Tue, 25 Sep 2018 01:17:38 +0200 (CEST)",
            "from DB3PR0502MB3980.eurprd05.prod.outlook.com (52.134.72.27) by\n\tDB3PR0502MB3996.eurprd05.prod.outlook.com (52.134.65.142) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n\t15.20.1164.25; Mon, 24 Sep 2018 23:17:35 +0000",
            "from DB3PR0502MB3980.eurprd05.prod.outlook.com\n\t([fe80::1cb0:661b:ecab:6045]) by\n\tDB3PR0502MB3980.eurprd05.prod.outlook.com\n\t([fe80::1cb0:661b:ecab:6045%2]) with mapi id 15.20.1164.017;\n\tMon, 24 Sep 2018 23:17:35 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Mellanox.com;\n\ts=selector1;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n\tbh=6xou6evMeLU/ENphQLCvsrTv0I1d6vR/zpjx7SdcK5A=;\n\tb=hbIDlZ4vYztgi69s9tR8QoFDJSQsV5pLCSGvmIhca3Y5ulCqvAkOjAW+AIzB8mwFDmYLpF5tuAjyUdWHD0ZuaXvaBNWTW7HgaljttAKS7rNYqYP00anrL6xwL/y+LYuP042ls/u8g1jbBdLVR3HHUFwVzuN/uO6QkU5XZ92La3I=",
        "From": "Yongseok Koh <yskoh@mellanox.com>",
        "To": "Thomas Monjalon <thomas@monjalon.net>,\n\tShahaf Shuler <shahafs@mellanox.com>",
        "CC": "\"dev@dpdk.org\" <dev@dpdk.org>, Ori Kam <orika@mellanox.com>",
        "Thread-Topic": "[PATCH v3 01/11] net/mlx5: split flow validation to dedicated\n\tfunction",
        "Thread-Index": "AQHUVFzHzYwpZiia70yoBE+XeoM90Q==",
        "Date": "Mon, 24 Sep 2018 23:17:35 +0000",
        "Message-ID": "<20180924231721.15799-2-yskoh@mellanox.com>",
        "References": "<20180919072143.23211-1-yskoh@mellanox.com>\n\t<20180924231721.15799-1-yskoh@mellanox.com>",
        "In-Reply-To": "<20180924231721.15799-1-yskoh@mellanox.com>",
        "Accept-Language": "en-US",
        "Content-Language": "en-US",
        "X-MS-Has-Attach": "",
        "X-MS-TNEF-Correlator": "",
        "x-clientproxiedby": "BN6PR03CA0021.namprd03.prod.outlook.com\n\t(2603:10b6:404:23::31) To DB3PR0502MB3980.eurprd05.prod.outlook.com\n\t(2603:10a6:8:10::27)",
        "authentication-results": "spf=none (sender IP is )\n\tsmtp.mailfrom=yskoh@mellanox.com; ",
        "x-ms-exchange-messagesentrepresentingtype": "1",
        "x-originating-ip": "[209.116.155.178]",
        "x-ms-publictraffictype": "Email",
        "x-microsoft-exchange-diagnostics": "1; DB3PR0502MB3996;\n\t6:Fe2BCfsF2rIt/lqCObTnLp3qwmn4jiz+S+FimBMpmk+n7i4hLKztDUE9fz5HtKSwgrJ/Y8jWYYDmlZMj+Ah6iL77VYJWktjQ2d7yn3Zye7+8er0TAjH9i/EaerjbDtFGFAcEmAh5ih96m60H1Y1zRAW6NvjRJdvfEIYmwOIE7lITAnBd47Gwl2ymacNw8cWGU/lY5M965Xv7+Il52Isq1P450/CELKsB/f4I4uVUcsATiYZl+kqNHUifhLVWUOrrkpZLII7D0N3NdF2qtoqPSM0xr6++VSZ6Te3xjAWHvtHzKPCvANHuO95s3nQV+YIJlwtNa7cAWeE9r0Srb+h9QRoTt95Kguw006t4yY762ZqsBHUgbFLx/hPi+y7+IcNnabmJg0p+8A/yPjt3IK0fpQxoMr7q6yV7wUK49FyVgmjFpD+yhmqHyB+tH9jWVVe/nheFLiy1YBQH546pu+MKYA==;\n\t5:u2P+44geahNO2h7CKmmpuk+JYuznsrNlrxIEPrP52/XIu8IkF6nxNdbZjcBB+Mv79wbnU2BVe8ovTGpz99Q25wwwpXU6JzCCPTCriPpVWVMWYo8Ji43+YNh5IcNy4w7OLkgp4NVb8FlRl8wO3yws0+H+8PYDx10oHeujVi7Qe0I=;\n\t7:7DpWbqz5KRtCmbeZqWTmia61Q1rPXWiF5zK+zWzAw4kmQPpCDJwfJWB+lWxbRF5uKVkx3ixLnjLE7D2Yh3rZuoOJyWFQMbXiJQIYv0UNHXmld+/t8+uGKlfPQkiHjQDpH77yZ9GTCAsuT3zIhC8lEnsFfrvUJDWq+z8LmgNTxgqAZcTFf1B+5GQ8WM6rbmpjl1TLVz2nQIWjKqLM+fPbXn1PiwA9tYumdpG382XJnvotjDUN0tECWdRKaMy7kGMn",
        "x-ms-office365-filtering-correlation-id": "702468d0-a6c1-4c39-a112-08d62273e938",
        "x-ms-office365-filtering-ht": "Tenant",
        "x-microsoft-antispam": "BCL:0; PCL:0;\n\tRULEID:(7020095)(4652040)(8989299)(5600074)(711020)(4618075)(4534165)(4627221)(201703031133081)(201702281549075)(8990200)(2017052603328)(7153060)(7193020);\n\tSRVR:DB3PR0502MB3996; ",
        "x-ms-traffictypediagnostic": "DB3PR0502MB3996:",
        "x-ld-processed": "a652971c-7d2e-4d9b-a6a4-d149256f461b,ExtAddr",
        "x-microsoft-antispam-prvs": "<DB3PR0502MB3996D5A0AF072C6333963CDAC3170@DB3PR0502MB3996.eurprd05.prod.outlook.com>",
        "x-exchange-antispam-report-test": "UriScan:(17755550239193);",
        "x-ms-exchange-senderadcheck": "1",
        "x-exchange-antispam-report-cfa-test": "BCL:0; PCL:0;\n\tRULEID:(8211001083)(6040522)(2401047)(8121501046)(5005006)(10201501046)(3002001)(93006095)(93001095)(3231355)(944501410)(52105095)(6055026)(149066)(150027)(6041310)(201703131423095)(201702281528075)(20161123555045)(201703061421075)(201703061406153)(20161123558120)(20161123560045)(20161123562045)(20161123564045)(201708071742011)(7699051);\n\tSRVR:DB3PR0502MB3996; BCL:0; PCL:0; RULEID:; SRVR:DB3PR0502MB3996; ",
        "x-forefront-prvs": "0805EC9467",
        "x-forefront-antispam-report": "SFV:NSPM;\n\tSFS:(10009020)(396003)(376002)(346002)(366004)(39860400002)(136003)(199004)(189003)(51234002)(66066001)(2616005)(476003)(446003)(486006)(11346002)(16200700003)(6436002)(305945005)(7736002)(53946003)(575784001)(386003)(6506007)(6306002)(6486002)(6512007)(186003)(102836004)(5250100002)(76176011)(25786009)(26005)(68736007)(6116002)(54906003)(110136005)(99286004)(1076002)(3846002)(71200400001)(52116002)(86362001)(53936002)(71190400001)(97736004)(8936002)(107886003)(14444005)(256004)(4326008)(316002)(2900100001)(81156014)(8676002)(36756003)(81166006)(105586002)(2906002)(106356001)(6636002)(478600001)(5660300001)(14454004)(966005)(579004)(559001)(569006);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:DB3PR0502MB3996;\n\tH:DB3PR0502MB3980.eurprd05.prod.outlook.com; FPR:; SPF:None; LANG:en; \n\tPTR:InfoNoRecords; A:1; MX:1; ",
        "received-spf": "None (protection.outlook.com: mellanox.com does not designate\n\tpermitted sender hosts)",
        "x-microsoft-antispam-message-info": "s7bzdWzHQuHihwUtltIBltpBVBOH/qD44l+kipXsr2XS2KFspQldjpxyuX+hfQxNkYqY63Gi9vBHEmNci5js0TKm4Pfvca2P3DgqhQgo53ramt/uxkGK98ERDu0PAJhDFgKv/5bQrP7bNmEUweDTHf5tbdbUzszZoQSPNnpLFS4vrU0aIrAdW1wy2nisiZLmnGgJMtARs7Ai91zrfD56HOvWivGpNcNiBOEbutHshfcAvMIKirHTFy32gjcuDuuuouu6iNKvEW/oZu9dXEYibNKNnGB964gxxPr78ip+axyBiuUUtCReBSD87uTSe/wxy0jS2Ej0miDzRresF7bmT79ZB1gwymVE6RAuSOVPYt0=",
        "spamdiagnosticoutput": "1:99",
        "spamdiagnosticmetadata": "NSPM",
        "Content-Type": "text/plain; charset=\"iso-8859-1\"",
        "Content-Transfer-Encoding": "quoted-printable",
        "MIME-Version": "1.0",
        "X-OriginatorOrg": "Mellanox.com",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "702468d0-a6c1-4c39-a112-08d62273e938",
        "X-MS-Exchange-CrossTenant-originalarrivaltime": "24 Sep 2018 23:17:35.7483\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-fromentityheader": "Hosted",
        "X-MS-Exchange-CrossTenant-id": "a652971c-7d2e-4d9b-a6a4-d149256f461b",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DB3PR0502MB3996",
        "Subject": "[dpdk-dev] [PATCH v3 01/11] net/mlx5: split flow validation to\n\tdedicated function",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Ori Kam <orika@mellanox.com>\n\nIn current implementation the validation logic reside in the same function\nthat calculates the size of the verbs spec and also create the verbs spec.\nThis approach results in hard to maintain code which can't be shared.\nalso in current logic there is a use of parser entity that holds the\ninformation between function calls. The main problem with this parser is\nthat it assumes the connection between different functions. For example\nit assumes that the validation function was called and relevant values\nwere set. This may result in an issue if and when we for example only\ncall the validation function, or call the apply function without the\nvalidation (Currently according to RTE flow we must call validation\nbefore creating flow, but if we want to change that to save time during\nflow creation, for example the user validated some rule and just want to\nchange the IP there is no true reason the validate the rule again).\n\nThis commit address both of those issues by extracting the validation logic\ninto detected functions and remove the use of the parser object.\nThe side effect of those changes is that in some cases there will be a\nneed to traverse the item list again.\n\nSigned-off-by: Ori Kam <orika@mellanox.com>\nAcked-by: Yongseok Koh <yskoh@mellanox.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 1905 +++++++++++++++++++++++++++---------------\n 1 file changed, 1240 insertions(+), 665 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 3f548a9a4..799064c0c 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -91,6 +91,14 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n #define MLX5_FLOW_MOD_MARK (1u << 1)\n #define MLX5_FLOW_MOD_COUNT (1u << 2)\n \n+/* Actions */\n+#define MLX5_ACTION_DROP (1u << 0)\n+#define MLX5_ACTION_QUEUE (1u << 1)\n+#define MLX5_ACTION_RSS (1u << 2)\n+#define MLX5_ACTION_FLAG (1u << 3)\n+#define MLX5_ACTION_MARK (1u << 4)\n+#define MLX5_ACTION_COUNT (1u << 5)\n+\n /* possible L3 layers protocols filtering. */\n #define MLX5_IP_PROTOCOL_TCP 6\n #define MLX5_IP_PROTOCOL_UDP 17\n@@ -299,14 +307,12 @@ struct mlx5_flow_counter {\n struct rte_flow {\n \tTAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */\n \tstruct rte_flow_attr attributes; /**< User flow attribute. */\n-\tuint32_t l3_protocol_en:1; /**< Protocol filtering requested. */\n \tuint32_t layers;\n \t/**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */\n \tuint32_t modifier;\n \t/**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */\n \tuint32_t fate;\n \t/**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */\n-\tuint8_t l3_protocol; /**< valid when l3_protocol_en is set. */\n \tLIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */\n \tstruct mlx5_flow_verbs *cur_verbs;\n \t/**< Current Verbs flow structure being filled. */\n@@ -582,52 +588,23 @@ mlx5_flow_counter_release(struct mlx5_flow_counter *counter)\n  * them in the @p flow if everything is correct.\n  *\n  * @param[in] dev\n- *   Pointer to Ethernet device.\n+ *   Pointer to Ethernet device structure.\n  * @param[in] attributes\n  *   Pointer to flow attributes\n  * @param[in, out] flow\n  *   Pointer to the rte_flow structure.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ *   0 on success.\n  */\n static int\n mlx5_flow_attributes(struct rte_eth_dev *dev,\n \t\t     const struct rte_flow_attr *attributes,\n-\t\t     struct rte_flow *flow,\n-\t\t     struct rte_flow_error *error)\n+\t\t     struct rte_flow *flow)\n {\n-\tuint32_t priority_max =\n-\t\t((struct priv *)dev->data->dev_private)->config.flow_prio - 1;\n+\tstruct priv *priv = dev->data->dev_private;\n+\tuint32_t priority_max = priv->config.flow_prio - 1;\n \n-\tif (attributes->group)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"groups is not supported\");\n-\tif (attributes->priority != MLX5_FLOW_PRIO_RSVD &&\n-\t    attributes->priority >= priority_max)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"priority out of range\");\n-\tif (attributes->egress)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"egress is not supported\");\n-\tif (attributes->transfer)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"transfer is not supported\");\n-\tif (!attributes->ingress)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"ingress attribute is mandatory\");\n \tflow->attributes = *attributes;\n \tif (attributes->priority == 
MLX5_FLOW_PRIO_RSVD)\n \t\tflow->attributes.priority = priority_max;\n@@ -671,8 +648,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,\n \t\t\t\t\t\t  \" bits\");\n \tif (!item->spec && (item->mask || item->last))\n \t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n \t\t\t\t\t  \"mask/last without a spec is not\"\n \t\t\t\t\t  \" supported\");\n \tif (item->spec && item->last) {\n@@ -762,8 +738,6 @@ mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n@@ -773,37 +747,19 @@ mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,\n  */\n static int\n mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,\n-\t\t   const size_t flow_size, struct rte_flow_error *error)\n+\t\t   const size_t flow_size)\n {\n \tconst struct rte_flow_item_eth *spec = item->spec;\n \tconst struct rte_flow_item_eth *mask = item->mask;\n-\tconst struct rte_flow_item_eth nic_mask = {\n-\t\t.dst.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n-\t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n-\t\t.type = RTE_BE16(0xffff),\n-\t};\n \tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tconst unsigned int size = sizeof(struct ibv_flow_spec_eth);\n \tstruct ibv_flow_spec_eth eth = {\n \t\t.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n-\tint ret;\n \n-\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n-\t\t\t    MLX5_FLOW_LAYER_OUTER_L2))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L2 layers already configured\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_eth_mask;\n-\tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n-\t\t\t\t\t(const uint8_t *)&nic_mask,\n-\t\t\t\t\tsizeof(struct rte_flow_item_eth),\n-\t\t\t\t\terror);\n-\tif (ret)\n-\t\treturn ret;\n \tflow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n \t\tMLX5_FLOW_LAYER_OUTER_L2;\n \tif (size > flow_size)\n@@ -875,8 +831,6 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n@@ -886,47 +840,21 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,\n  */\n static int\n mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n-\t\t    const size_t flow_size, struct rte_flow_error *error)\n+\t\t    const size_t flow_size)\n {\n \tconst struct rte_flow_item_vlan *spec = item->spec;\n \tconst struct rte_flow_item_vlan *mask = item->mask;\n-\tconst struct rte_flow_item_vlan nic_mask = {\n-\t\t.tci = RTE_BE16(0x0fff),\n-\t\t.inner_type = RTE_BE16(0xffff),\n-\t};\n \tunsigned int size = sizeof(struct ibv_flow_spec_eth);\n \tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tstruct ibv_flow_spec_eth eth = {\n \t\t.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n-\tint ret;\n-\tconst uint32_t l34m = tunnel ? 
(MLX5_FLOW_LAYER_INNER_L3 |\n-\t\t\t\t\tMLX5_FLOW_LAYER_INNER_L4) :\n-\t\t(MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);\n-\tconst uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :\n-\t\tMLX5_FLOW_LAYER_OUTER_VLAN;\n \tconst uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n \t\tMLX5_FLOW_LAYER_OUTER_L2;\n \n-\tif (flow->layers & vlanm)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"VLAN layer already configured\");\n-\telse if ((flow->layers & l34m) != 0)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L2 layer cannot follow L3/L4 layer\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_vlan_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&nic_mask,\n-\t\t sizeof(struct rte_flow_item_vlan), error);\n-\tif (ret)\n-\t\treturn ret;\n \tif (spec) {\n \t\teth.val.vlan_tag = spec->tci;\n \t\teth.mask.vlan_tag = mask->tci;\n@@ -935,15 +863,6 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\teth.mask.ether_type = mask->inner_type;\n \t\teth.val.ether_type &= eth.mask.ether_type;\n \t}\n-\t/*\n-\t * From verbs perspective an empty VLAN is equivalent\n-\t * to a packet without VLAN layer.\n-\t */\n-\tif (!eth.mask.vlan_tag)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\n-\t\t\t\t\t  item->spec,\n-\t\t\t\t\t  \"VLAN cannot be empty\");\n \tif (!(flow->layers & l2m)) {\n \t\tif (size <= flow_size) {\n \t\t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;\n@@ -974,29 +893,18 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n  *   otherwise another call with this returned memory size should be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n-\t\t    const size_t flow_size, struct rte_flow_error *error)\n+\t\t    const size_t flow_size)\n {\n \tconst struct rte_flow_item_ipv4 *spec = item->spec;\n \tconst struct rte_flow_item_ipv4 *mask = item->mask;\n-\tconst struct rte_flow_item_ipv4 nic_mask = {\n-\t\t.hdr = {\n-\t\t\t.src_addr = RTE_BE32(0xffffffff),\n-\t\t\t.dst_addr = RTE_BE32(0xffffffff),\n-\t\t\t.type_of_service = 0xff,\n-\t\t\t.next_proto_id = 0xff,\n-\t\t},\n-\t};\n \tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);\n \tstruct ibv_flow_spec_ipv4_ext ipv4 = {\n@@ -1004,28 +912,9 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t\t(tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n-\tint ret;\n \n-\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n-\t\t\t    MLX5_FLOW_LAYER_OUTER_L3))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"multiple L3 layers not supported\");\n-\telse if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 :\n-\t\t\t\t MLX5_FLOW_LAYER_OUTER_L4))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 cannot follow an L4 layer.\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_ipv4_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&nic_mask,\n-\t\t sizeof(struct rte_flow_item_ipv4), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n \tflow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n \t\tMLX5_FLOW_LAYER_OUTER_L3_IPV4;\n \tif (spec) {\n@@ -1047,8 +936,6 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tipv4.val.proto &= ipv4.mask.proto;\n \t\tipv4.val.tos &= ipv4.mask.tos;\n \t}\n-\tflow->l3_protocol_en = !!ipv4.mask.proto;\n-\tflow->l3_protocol = ipv4.val.proto;\n \tif (size <= flow_size) {\n \t\tmlx5_flow_verbs_hashfields_adjust\n \t\t\t(flow, tunnel,\n@@ -1076,74 +963,27 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n  *   otherwise another call with this returned memory size should be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n-\t\t    const size_t flow_size, struct rte_flow_error *error)\n+\t\t    const size_t flow_size)\n {\n \tconst struct rte_flow_item_ipv6 *spec = item->spec;\n \tconst struct rte_flow_item_ipv6 *mask = item->mask;\n-\tconst struct rte_flow_item_ipv6 nic_mask = {\n-\t\t.hdr = {\n-\t\t\t.src_addr =\n-\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"\n-\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\",\n-\t\t\t.dst_addr =\n-\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"\n-\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\",\n-\t\t\t.vtc_flow = RTE_BE32(0xffffffff),\n-\t\t\t.proto = 0xff,\n-\t\t\t.hop_limits = 0xff,\n-\t\t},\n-\t};\n \tconst int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);\n \tunsigned int size = sizeof(struct ibv_flow_spec_ipv6);\n \tstruct ibv_flow_spec_ipv6 ipv6 = {\n \t\t.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n-\tint ret;\n \n-\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n-\t\t\t    MLX5_FLOW_LAYER_OUTER_L3))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"multiple L3 layers not supported\");\n-\telse if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n-\t\t\t\t MLX5_FLOW_LAYER_OUTER_L4))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 cannot follow an L4 layer.\");\n-\t/*\n-\t * IPv6 is not recognised by the NIC inside a GRE tunnel.\n-\t * Such support has to be disabled as the rule will be\n-\t * accepted.  
Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and\n-\t * Mellanox OFED 4.4-1.0.0.0.\n-\t */\n-\tif (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"IPv6 inside a GRE tunnel is\"\n-\t\t\t\t\t  \" not recognised.\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_ipv6_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&nic_mask,\n-\t\t sizeof(struct rte_flow_item_ipv6), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n \tflow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n \t\tMLX5_FLOW_LAYER_OUTER_L3_IPV6;\n \tif (spec) {\n@@ -1185,8 +1025,6 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\tipv6.val.next_hdr &= ipv6.mask.next_hdr;\n \t\tipv6.val.hop_limit &= ipv6.mask.hop_limit;\n \t}\n-\tflow->l3_protocol_en = !!ipv6.mask.next_hdr;\n-\tflow->l3_protocol = ipv6.val.next_hdr;\n \tif (size <= flow_size) {\n \t\tmlx5_flow_verbs_hashfields_adjust\n \t\t\t(flow, tunnel,\n@@ -1214,18 +1052,15 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n  *   otherwise another call with this returned memory size should be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n-\t\t   const size_t flow_size, struct rte_flow_error *error)\n+\t\t   const size_t flow_size)\n {\n \tconst struct rte_flow_item_udp *spec = item->spec;\n \tconst struct rte_flow_item_udp *mask = item->mask;\n@@ -1235,36 +1070,9 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n-\tint ret;\n \n-\tif (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"protocol filtering not compatible\"\n-\t\t\t\t\t  \" with UDP layer\");\n-\tif (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n-\t\t\t      MLX5_FLOW_LAYER_OUTER_L3)))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 is mandatory to filter\"\n-\t\t\t\t\t  \" on L4\");\n-\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n-\t\t\t    MLX5_FLOW_LAYER_OUTER_L4))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L4 layer is already\"\n-\t\t\t\t\t  \" present\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_udp_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&rte_flow_item_udp_mask,\n-\t\t sizeof(struct rte_flow_item_udp), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n \tflow->layers |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP :\n \t\tMLX5_FLOW_LAYER_OUTER_L4_UDP;\n \tif (spec) {\n@@ -1306,11 +1114,10 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n  *   otherwise another call with this returned memory size should be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n-\t\t   const size_t flow_size, struct rte_flow_error *error)\n+\t\t   const size_t flow_size)\n {\n \tconst struct rte_flow_item_tcp *spec = item->spec;\n \tconst struct rte_flow_item_tcp *mask = item->mask;\n@@ -1320,34 +1127,9 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),\n \t\t.size = size,\n \t};\n-\tint ret;\n \n-\tif (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"protocol filtering not compatible\"\n-\t\t\t\t\t  \" with TCP layer\");\n-\tif (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n-\t\t\t      MLX5_FLOW_LAYER_OUTER_L3)))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 is mandatory to filter on L4\");\n-\tif (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n-\t\t\t    MLX5_FLOW_LAYER_OUTER_L4))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L4 layer is already present\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_tcp_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&rte_flow_item_tcp_mask,\n-\t\t sizeof(struct rte_flow_item_tcp), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n \tflow->layers |=  tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_TCP :\n \t\tMLX5_FLOW_LAYER_OUTER_L4_TCP;\n \tif (spec) {\n@@ -1389,11 +1171,10 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n  *   otherwise another call with this returned memory size should be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n-\t\t     const size_t flow_size, struct rte_flow_error *error)\n+\t\t     const size_t flow_size)\n {\n \tconst struct rte_flow_item_vxlan *spec = item->spec;\n \tconst struct rte_flow_item_vxlan *mask = item->mask;\n@@ -1402,34 +1183,13 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,\n \t\t.size = size,\n \t};\n-\tint ret;\n \tunion vni {\n \t\tuint32_t vlan_id;\n \t\tuint8_t vni[4];\n \t} id = { .vlan_id = 0, };\n \n-\tif (flow->layers & MLX5_FLOW_LAYER_TUNNEL)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"a tunnel is already present\");\n-\t/*\n-\t * Verify only UDPv4 is present as defined in\n-\t * https://tools.ietf.org/html/rfc7348\n-\t */\n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"no outer UDP layer found\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_vxlan_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&rte_flow_item_vxlan_mask,\n-\t\t sizeof(struct rte_flow_item_vxlan), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n \tif (spec) {\n \t\tmemcpy(&id.vni[1], spec->vni, 3);\n \t\tvxlan.val.tunnel_id = id.vlan_id;\n@@ -1438,25 +1198,6 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n \t\t/* Remove unwanted bits from values. */\n \t\tvxlan.val.tunnel_id &= vxlan.mask.tunnel_id;\n \t}\n-\t/*\n-\t * Tunnel id 0 is equivalent as not adding a VXLAN layer, if\n-\t * only this layer is defined in the Verbs specification it is\n-\t * interpreted as wildcard and all packets will match this\n-\t * rule, if it follows a full stack layer (ex: eth / ipv4 /\n-\t * udp), all packets matching the layers before will also\n-\t * match this rule.  
To avoid such situation, VNI 0 is\n-\t * currently refused.\n-\t */\n-\tif (!vxlan.val.tunnel_id)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"VXLAN vni cannot be 0\");\n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER))\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"VXLAN tunnel must be fully defined\");\n \tif (size <= flow_size) {\n \t\tmlx5_flow_spec_verbs_add(flow, &vxlan, size);\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;\n@@ -1471,8 +1212,6 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n  * If the necessary size for the conversion is greater than the @p flow_size,\n  * nothing is written in @p flow, the validation is still performed.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n  * @param[in] item\n  *   Item specification.\n  * @param[in, out] flow\n@@ -1487,13 +1226,10 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n  *   otherwise another call with this returned memory size should be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n-mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,\n-\t\t\t const struct rte_flow_item *item,\n-\t\t\t struct rte_flow *flow, const size_t flow_size,\n-\t\t\t struct rte_flow_error *error)\n+mlx5_flow_item_vxlan_gpe(const struct rte_flow_item *item,\n+\t\t\t struct rte_flow *flow, const size_t flow_size)\n {\n \tconst struct rte_flow_item_vxlan_gpe *spec = item->spec;\n \tconst struct rte_flow_item_vxlan_gpe *mask = item->mask;\n@@ -1502,74 +1238,21 @@ mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,\n \t\t.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,\n \t\t.size = size,\n \t};\n-\tint ret;\n \tunion vni {\n \t\tuint32_t vlan_id;\n \t\tuint8_t vni[4];\n \t} id = { .vlan_id = 0, };\n \n-\tif (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 VXLAN is not enabled by device\"\n-\t\t\t\t\t  \" parameter and/or not configured in\"\n-\t\t\t\t\t  \" firmware\");\n-\tif (flow->layers & MLX5_FLOW_LAYER_TUNNEL)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"a tunnel is already present\");\n-\t/*\n-\t * Verify only UDPv4 is present as defined in\n-\t * https://tools.ietf.org/html/rfc7348\n-\t */\n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"no outer UDP layer found\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_vxlan_gpe_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,\n-\t\t sizeof(struct rte_flow_item_vxlan_gpe), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n \tif (spec) {\n \t\tmemcpy(&id.vni[1], spec->vni, 3);\n \t\tvxlan_gpe.val.tunnel_id = id.vlan_id;\n \t\tmemcpy(&id.vni[1], mask->vni, 3);\n \t\tvxlan_gpe.mask.tunnel_id = id.vlan_id;\n-\t\tif (spec->protocol)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, EINVAL,\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t item,\n-\t\t\t\t 
\"VxLAN-GPE protocol not supported\");\n \t\t/* Remove unwanted bits from values. */\n \t\tvxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;\n \t}\n-\t/*\n-\t * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this\n-\t * layer is defined in the Verbs specification it is interpreted as\n-\t * wildcard and all packets will match this rule, if it follows a full\n-\t * stack layer (ex: eth / ipv4 / udp), all packets matching the layers\n-\t * before will also match this rule.  To avoid such situation, VNI 0\n-\t * is currently refused.\n-\t */\n-\tif (!vxlan_gpe.val.tunnel_id)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"VXLAN-GPE vni cannot be 0\");\n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER))\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"VXLAN-GPE tunnel must be fully\"\n-\t\t\t\t\t  \" defined\");\n \tif (size <= flow_size) {\n \t\tmlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);\n \t\tflow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;\n@@ -1647,24 +1330,20 @@ mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n  *   otherwise another call with this returned memory size should be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n-mlx5_flow_item_gre(const struct rte_flow_item *item,\n-\t\t   struct rte_flow *flow, const size_t flow_size,\n-\t\t   struct rte_flow_error *error)\n+mlx5_flow_item_gre(const struct rte_flow_item *item __rte_unused,\n+\t\t   struct rte_flow *flow, const size_t flow_size)\n {\n \tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n \tconst struct rte_flow_item_gre *spec = item->spec;\n \tconst struct rte_flow_item_gre *mask = item->mask;\n-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n \tunsigned int size = sizeof(struct ibv_flow_spec_gre);\n \tstruct ibv_flow_spec_gre tunnel = {\n \t\t.type = IBV_FLOW_SPEC_GRE,\n@@ -1677,33 +1356,10 @@ mlx5_flow_item_gre(const struct rte_flow_item *item,\n \t\t.size = size,\n \t};\n #endif\n-\tint ret;\n \n-\tif (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"protocol filtering not compatible\"\n-\t\t\t\t\t  \" with this GRE layer\");\n-\tif (flow->layers & MLX5_FLOW_LAYER_TUNNEL)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"a tunnel is already present\");\n-\tif (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 Layer is missing\");\n+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n \tif (!mask)\n \t\tmask = &rte_flow_item_gre_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&rte_flow_item_gre_mask,\n-\t\t sizeof(struct rte_flow_item_gre), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n \tif 
(spec) {\n \t\ttunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;\n \t\ttunnel.val.protocol = spec->protocol;\n@@ -1715,13 +1371,6 @@ mlx5_flow_item_gre(const struct rte_flow_item *item,\n \t\ttunnel.val.key &= tunnel.mask.key;\n \t}\n #else\n-\tif (spec && (spec->protocol & mask->protocol))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"without MPLS support the\"\n-\t\t\t\t\t  \" specification cannot be used for\"\n-\t\t\t\t\t  \" filtering\");\n #endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */\n \tif (size <= flow_size) {\n \t\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)\n@@ -1775,30 +1424,9 @@ mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,\n \t\t.type = IBV_FLOW_SPEC_MPLS,\n \t\t.size = size,\n \t};\n-\tint ret;\n \n-\tif (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"protocol filtering not compatible\"\n-\t\t\t\t\t  \" with MPLS layer\");\n-\t/* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */\n-\tif (flow->layers & MLX5_FLOW_LAYER_TUNNEL &&\n-\t    (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"a tunnel is already\"\n-\t\t\t\t\t  \" present\");\n \tif (!mask)\n \t\tmask = &rte_flow_item_mpls_mask;\n-\tret = mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&rte_flow_item_mpls_mask,\n-\t\t sizeof(struct rte_flow_item_mpls), error);\n-\tif (ret < 0)\n-\t\treturn ret;\n \tif (spec) {\n \t\tmemcpy(&mpls.val.label, spec, sizeof(mpls.val.label));\n \t\tmemcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));\n@@ -1845,8 +1473,7 @@ mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,\n  *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n-mlx5_flow_items(struct rte_eth_dev *dev,\n-\t\tconst struct rte_flow_item pattern[],\n+mlx5_flow_items(const struct rte_flow_item pattern[],\n \t\tstruct rte_flow *flow, const size_t flow_size,\n \t\tstruct rte_flow_error *error)\n {\n@@ -1860,33 +1487,32 @@ mlx5_flow_items(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ITEM_TYPE_VOID:\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n-\t\t\tret = mlx5_flow_item_eth(pattern, flow, remain, error);\n+\t\t\tret = mlx5_flow_item_eth(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n-\t\t\tret = mlx5_flow_item_vlan(pattern, flow, remain, error);\n+\t\t\tret = mlx5_flow_item_vlan(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n-\t\t\tret = mlx5_flow_item_ipv4(pattern, flow, remain, error);\n+\t\t\tret = mlx5_flow_item_ipv4(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n-\t\t\tret = mlx5_flow_item_ipv6(pattern, flow, remain, error);\n+\t\t\tret = mlx5_flow_item_ipv6(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n-\t\t\tret = mlx5_flow_item_udp(pattern, flow, remain, error);\n+\t\t\tret = mlx5_flow_item_udp(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n-\t\t\tret = mlx5_flow_item_tcp(pattern, flow, remain, error);\n+\t\t\tret = mlx5_flow_item_tcp(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n-\t\t\tret = mlx5_flow_item_vxlan(pattern, flow, remain,\n-\t\t\t\t\t\t   
error);\n+\t\t\tret = mlx5_flow_item_vxlan(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n-\t\t\tret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,\n-\t\t\t\t\t\t       remain, error);\n+\t\t\tret = mlx5_flow_item_vxlan_gpe(pattern, flow,\n+\t\t\t\t\t\t       remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n-\t\t\tret = mlx5_flow_item_gre(pattern, flow, remain, error);\n+\t\t\tret = mlx5_flow_item_gre(pattern, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_MPLS:\n \t\t\tret = mlx5_flow_item_mpls(pattern, flow, remain, error);\n@@ -1910,7 +1536,7 @@ mlx5_flow_items(struct rte_eth_dev *dev,\n \t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n \t\t};\n \n-\t\treturn mlx5_flow_item_eth(&item, flow, flow_size, error);\n+\t\treturn mlx5_flow_item_eth(&item, flow, flow_size);\n \t}\n \treturn size;\n }\n@@ -1921,15 +1547,11 @@ mlx5_flow_items(struct rte_eth_dev *dev,\n  * If the necessary size for the conversion is greater than the @p flow_size,\n  * nothing is written in @p flow, the validation is still performed.\n  *\n- * @param[in] action\n- *   Action configuration.\n  * @param[in, out] flow\n  *   Pointer to flow structure.\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n@@ -1939,9 +1561,7 @@ mlx5_flow_items(struct rte_eth_dev *dev,\n  *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n-mlx5_flow_action_drop(const struct rte_flow_action *action,\n-\t\t      struct rte_flow *flow, const size_t flow_size,\n-\t\t      struct rte_flow_error *error)\n+mlx5_flow_action_drop(struct rte_flow *flow, const size_t flow_size)\n {\n \tunsigned int size = sizeof(struct ibv_flow_spec_action_drop);\n \tstruct ibv_flow_spec_action_drop drop = {\n@@ -1949,18 +1569,6 @@ mlx5_flow_action_drop(const struct rte_flow_action *action,\n \t\t\t.size = size,\n \t};\n \n-\tif (flow->fate)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"multiple fate actions are not\"\n-\t\t\t\t\t  \" supported\");\n-\tif (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"drop is not compatible with\"\n-\t\t\t\t\t  \" flag/mark action\");\n \tif (size < flow_size)\n \t\tmlx5_flow_spec_verbs_add(flow, &drop, size);\n \tflow->fate |= MLX5_FLOW_FATE_DROP;\n@@ -1971,43 +1579,20 @@ mlx5_flow_action_drop(const struct rte_flow_action *action,\n  * Convert the @p action into @p flow after ensuring the NIC will understand\n  * and process it correctly.\n  *\n- * @param[in] dev\n- *   Pointer to Ethernet device structure.\n  * @param[in] action\n  *   Action configuration.\n  * @param[in, out] flow\n  *   Pointer to flow structure.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_flow_action_queue(struct rte_eth_dev *dev,\n-\t\t       const struct rte_flow_action *action,\n-\t\t       struct rte_flow *flow,\n-\t\t       struct rte_flow_error *error)\n+mlx5_flow_action_queue(const struct rte_flow_action *action,\n+\t\t       struct rte_flow *flow)\n {\n-\tstruct priv *priv = 
dev->data->dev_private;\n \tconst struct rte_flow_action_queue *queue = action->conf;\n \n-\tif (flow->fate)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"multiple fate actions are not\"\n-\t\t\t\t\t  \" supported\");\n-\tif (queue->index >= priv->rxqs_n)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &queue->index,\n-\t\t\t\t\t  \"queue index out of range\");\n-\tif (!(*priv->rxqs)[queue->index])\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &queue->index,\n-\t\t\t\t\t  \"queue is not configured\");\n \tif (flow->queue)\n \t\t(*flow->queue)[0] = queue->index;\n \tflow->rss.queue_num = 1;\n@@ -2018,90 +1603,20 @@ mlx5_flow_action_queue(struct rte_eth_dev *dev,\n /**\n  * Ensure the @p action will be understood and used correctly by the  NIC.\n  *\n- * @param dev\n- *   Pointer to Ethernet device structure.\n- * @param action[in]\n- *   Pointer to flow actions array.\n+ * @param[in] action\n+ *   Action configuration.\n  * @param flow[in, out]\n  *   Pointer to the rte_flow structure.\n- * @param error[in, out]\n- *   Pointer to error structure.\n  *\n  * @return\n- *   On success @p flow->queue array and @p flow->rss are filled and valid.\n- *   On error, a negative errno value is returned and rte_errno is set.\n+ *   0 On success.\n  */\n static int\n-mlx5_flow_action_rss(struct rte_eth_dev *dev,\n-\t\t     const struct rte_flow_action *action,\n-\t\t     struct rte_flow *flow,\n-\t\t     struct rte_flow_error *error)\n+mlx5_flow_action_rss(const struct rte_flow_action *action,\n+\t\t\tstruct rte_flow *flow)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_action_rss *rss = action->conf;\n-\tunsigned int i;\n \n-\tif (flow->fate)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"multiple fate actions are not\"\n-\t\t\t\t\t  \" supported\");\n-\tif (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&\n-\t    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &rss->func,\n-\t\t\t\t\t  \"RSS hash function not supported\");\n-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n-\tif (rss->level > 2)\n-#else\n-\tif (rss->level > 1)\n-#endif\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &rss->level,\n-\t\t\t\t\t  \"tunnel RSS is not supported\");\n-\tif (rss->key_len < MLX5_RSS_HASH_KEY_LEN)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &rss->key_len,\n-\t\t\t\t\t  \"RSS hash key too small\");\n-\tif (rss->key_len > MLX5_RSS_HASH_KEY_LEN)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &rss->key_len,\n-\t\t\t\t\t  \"RSS hash key too large\");\n-\tif (!rss->queue_num)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  rss,\n-\t\t\t\t\t  \"no queues were provided for RSS\");\n-\tif (rss->queue_num > priv->config.ind_table_max_size)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &rss->queue_num,\n-\t\t\t\t\t  \"number of queues too large\");\n-\tif (rss->types & MLX5_RSS_HF_MASK)\n-\t\treturn 
rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &rss->types,\n-\t\t\t\t\t  \"some RSS protocols are not\"\n-\t\t\t\t\t  \" supported\");\n-\tfor (i = 0; i != rss->queue_num; ++i) {\n-\t\tif (rss->queue[i] >= priv->rxqs_n)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, EINVAL,\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t rss,\n-\t\t\t\t \"queue index out of range\");\n-\t\tif (!(*priv->rxqs)[rss->queue[i]])\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, EINVAL,\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t &rss->queue[i],\n-\t\t\t\t \"queue is not configured\");\n-\t}\n \tif (flow->queue)\n \t\tmemcpy((*flow->queue), rss->queue,\n \t\t       rss->queue_num * sizeof(uint16_t));\n@@ -2119,27 +1634,20 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,\n  * If the necessary size for the conversion is greater than the @p flow_size,\n  * nothing is written in @p flow, the validation is still performed.\n  *\n- * @param[in] action\n- *   Action configuration.\n  * @param[in, out] flow\n  *   Pointer to flow structure.\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p action has fully been\n  *   converted, otherwise another call with this returned memory size should\n  *   be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n-mlx5_flow_action_flag(const struct rte_flow_action *action,\n-\t\t      struct rte_flow *flow, const size_t flow_size,\n-\t\t      struct rte_flow_error *error)\n+mlx5_flow_action_flag(struct rte_flow *flow, const size_t flow_size)\n {\n \tunsigned int size = sizeof(struct ibv_flow_spec_action_tag);\n \tstruct ibv_flow_spec_action_tag tag = {\n@@ -2149,17 +1657,6 @@ mlx5_flow_action_flag(const struct rte_flow_action *action,\n \t};\n \tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n \n-\tif (flow->modifier & MLX5_FLOW_MOD_FLAG)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"flag action already present\");\n-\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"flag is not compatible with drop\"\n-\t\t\t\t\t  \" action\");\n \tif (flow->modifier & MLX5_FLOW_MOD_MARK)\n \t\tsize = 0;\n \telse if (size <= flow_size && verbs)\n@@ -2213,20 +1710,16 @@ mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)\n  * @param[in] flow_size\n  *   Size in bytes of the available space in @p flow, if too small, nothing is\n  *   written.\n- * @param[out] error\n- *   Pointer to error structure.\n  *\n  * @return\n  *   On success the number of bytes consumed/necessary, if the returned value\n  *   is lesser or equal to @p flow_size, the @p action has fully been\n  *   converted, otherwise another call with this returned memory size should\n  *   be done.\n- *   On error, a negative errno value is returned and rte_errno is set.\n  */\n static int\n mlx5_flow_action_mark(const struct rte_flow_action *action,\n-\t\t      struct rte_flow *flow, const size_t flow_size,\n-\t\t      struct rte_flow_error *error)\n+\t\t      struct rte_flow *flow, const size_t flow_size)\n {\n \tconst 
struct rte_flow_action_mark *mark = action->conf;\n \tunsigned int size = sizeof(struct ibv_flow_spec_action_tag);\n@@ -2236,28 +1729,6 @@ mlx5_flow_action_mark(const struct rte_flow_action *action,\n \t};\n \tstruct mlx5_flow_verbs *verbs = flow->cur_verbs;\n \n-\tif (!mark)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"configuration cannot be null\");\n-\tif (mark->id >= MLX5_FLOW_MARK_MAX)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  &mark->id,\n-\t\t\t\t\t  \"mark id must in 0 <= id < \"\n-\t\t\t\t\t  RTE_STR(MLX5_FLOW_MARK_MAX));\n-\tif (flow->modifier & MLX5_FLOW_MOD_MARK)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"mark action already present\");\n-\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"mark is not compatible with drop\"\n-\t\t\t\t\t  \" action\");\n \tif (flow->modifier & MLX5_FLOW_MOD_FLAG) {\n \t\tmlx5_flow_verbs_mark_update(verbs, mark->id);\n \t\tsize = 0;\n@@ -2318,11 +1789,6 @@ mlx5_flow_action_count(struct rte_eth_dev *dev,\n \t\t\t\t\t\t  \"cannot get counter\"\n \t\t\t\t\t\t  \" context.\");\n \t}\n-\tif (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t  action,\n-\t\t\t\t\t  \"flow counters are not supported.\");\n \tflow->modifier |= MLX5_FLOW_MOD_COUNT;\n #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT\n \tcounter.counter_set_handle = flow->counter->cs->handle;\n@@ -2375,22 +1841,19 @@ mlx5_flow_actions(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_FLAG:\n-\t\t\tret = mlx5_flow_action_flag(actions, flow, remain,\n-\t\t\t\t\t\t    error);\n+\t\t\tret = mlx5_flow_action_flag(flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_MARK:\n-\t\t\tret = mlx5_flow_action_mark(actions, flow, remain,\n-\t\t\t\t\t\t    error);\n+\t\t\tret = mlx5_flow_action_mark(actions, flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n-\t\t\tret = mlx5_flow_action_drop(actions, flow, remain,\n-\t\t\t\t\t\t    error);\n+\t\t\tret = mlx5_flow_action_drop(flow, remain);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n-\t\t\tret = mlx5_flow_action_queue(dev, actions, flow, error);\n+\t\t\tret = mlx5_flow_action_queue(actions, flow);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n-\t\t\tret = mlx5_flow_action_rss(dev, actions, flow, error);\n+\t\t\tret = mlx5_flow_action_rss(actions, flow);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n \t\t\tret = mlx5_flow_action_count(dev, actions, flow, remain,\n@@ -2585,7 +2048,7 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \tsize_t original_verbs_size = 0;\n \tuint32_t original_layers = 0;\n \tint expanded_pattern_idx = 0;\n-\tint ret;\n+\tint ret = 0;\n \tuint32_t i;\n \n \tif (attributes->transfer)\n@@ -2594,7 +2057,7 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\t\t\t      actions, error);\n \tif (size > flow_size)\n \t\tflow = &local_flow;\n-\tret = mlx5_flow_attributes(dev, attributes, flow, error);\n+\tret = mlx5_flow_attributes(dev->data->dev_private, attributes, flow);\n \tif (ret < 0)\n \t\treturn ret;\n \tret = 
mlx5_flow_actions(dev, actions, &local_flow, 0, error);\n@@ -2666,8 +2129,7 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\t}\n \t\t}\n \t\tret = mlx5_flow_items\n-\t\t\t(dev,\n-\t\t\t (const struct rte_flow_item *)\n+\t\t\t((const struct rte_flow_item *)\n \t\t\t &buf->entry[i].pattern[expanded_pattern_idx],\n \t\t\t flow,\n \t\t\t (size < flow_size) ? flow_size - size : 0, error);\n@@ -2851,99 +2313,1209 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)\n \t}\n }\n \n
-/**\n- * Validate a flow supported by the NIC.\n+/**\n+ * Validate the flag action.\n  *\n- * @see rte_flow_validate()\n- * @see rte_flow_ops\n+ * @param[in] action_flags\n+ *   Bit-fields that hold the actions detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n
-int\n-mlx5_flow_validate(struct rte_eth_dev *dev,\n-\t\t   const struct rte_flow_attr *attr,\n-\t\t   const struct rte_flow_item items[],\n-\t\t   const struct rte_flow_action actions[],\n-\t\t   struct rte_flow_error *error)\n+static int\n+mlx5_flow_validate_action_flag(uint64_t action_flags,\n+\t\t\t       struct rte_flow_error *error)\n {\n-\tint ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);\n \n-\tif (ret < 0)\n-\t\treturn ret;\n+\tif (action_flags & MLX5_ACTION_DROP)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't drop and flag in same flow\");\n+\tif (action_flags & MLX5_ACTION_MARK)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't mark and flag in same flow\");\n+\tif (action_flags & MLX5_ACTION_FLAG)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't have 2 flag\"\n+\t\t\t\t\t  \" actions in same flow\");\n \treturn 0;\n }\n \n
-/**\n- * Remove the flow.\n+/**\n+ * Validate the mark action.\n  *\n- * @param[in] dev\n- *   Pointer to Ethernet device.\n- * @param[in, out] flow\n- *   Pointer to flow structure.\n+ * @param[in] action\n+ *   Pointer to the mark action.\n+ * @param[in] action_flags\n+ *   Bit-fields that hold the actions detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n
-static void\n-mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)\n+static int\n+mlx5_flow_validate_action_mark(const struct rte_flow_action *action,\n+\t\t\t       uint64_t action_flags,\n+\t\t\t       struct rte_flow_error *error)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_verbs *verbs;\n+\tconst struct rte_flow_action_mark *mark = action->conf;\n \n-\tif (flow->nl_flow && priv->mnl_socket)\n-\t\tmlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);\n-\tLIST_FOREACH(verbs, &flow->verbs, next) {\n-\t\tif (verbs->flow) {\n-\t\t\tclaim_zero(mlx5_glue->destroy_flow(verbs->flow));\n-\t\t\tverbs->flow = NULL;\n-\t\t}\n-\t\tif (verbs->hrxq) {\n-\t\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n-\t\t\t\tmlx5_hrxq_drop_release(dev);\n-\t\t\telse\n-\t\t\t\tmlx5_hrxq_release(dev, verbs->hrxq);\n-\t\t\tverbs->hrxq = NULL;\n-\t\t}\n-\t}\n-\tif (flow->counter) {\n-\t\tmlx5_flow_counter_release(flow->counter);\n-\t\tflow->counter = NULL;\n-\t}\n+\tif (!mark)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t  action,\n+\t\t\t\t\t  \"configuration cannot be null\");\n+\tif (mark->id >= MLX5_FLOW_MARK_MAX)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &mark->id,\n+\t\t\t\t\t  \"mark id must be in 0 <= id < \"\n+\t\t\t\t\t  RTE_STR(MLX5_FLOW_MARK_MAX));\n+\tif (action_flags & MLX5_ACTION_DROP)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't drop and mark in same flow\");\n+\tif (action_flags & MLX5_ACTION_FLAG)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't flag and mark in same flow\");\n+\tif (action_flags & MLX5_ACTION_MARK)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't have 2 mark actions in same\"\n+\t\t\t\t\t  \" flow\");\n+\treturn 0;\n }\n \n
-/**\n- * Apply the flow.\n+/**\n+ * Validate the drop action.\n+ *\n+ * @param[in] action_flags\n+ *   Bit-fields that hold the actions detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_action_drop(uint64_t action_flags,\n+\t\t\t       struct rte_flow_error *error)\n+{\n+\tif (action_flags & MLX5_ACTION_FLAG)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't drop and flag in same flow\");\n+\tif (action_flags & MLX5_ACTION_MARK)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't drop and mark in same flow\");\n+\tif (action_flags &\n+\t\t(MLX5_ACTION_DROP | MLX5_ACTION_QUEUE | MLX5_ACTION_RSS))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't have 2 fate actions in\"\n+\t\t\t\t\t  \" same flow\");\n+\treturn 0;\n+}\n+\n+/**\n  *\n+ * Validate the queue action.\n+ *\n+ * @param[in] action\n+ *   Pointer to the queue action.\n+ * @param[in] action_flags\n+ *   Bit-fields that hold the actions detected until now.\n+ * @param[in] dev\n- *   Pointer to Ethernet device structure.\n- * @param[in, out] flow\n- *   Pointer to flow structure.\n+ *   Pointer to the Ethernet device structure.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n  * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n
 static int\n-mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n-\t\tstruct rte_flow_error *error)\n+mlx5_flow_validate_action_queue(const struct rte_flow_action *action,\n+\t\t\t\tuint64_t action_flags,\n+\t\t\t\tstruct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_flow_error *error)\n {\n \tstruct priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_verbs *verbs;\n-\tint err;\n-\n-\tLIST_FOREACH(verbs, &flow->verbs, next) {\n-\t\tif (flow->fate & MLX5_FLOW_FATE_DROP) {\n-\t\t\tverbs->hrxq = mlx5_hrxq_drop_new(dev);\n-\t\t\tif (!verbs->hrxq) {\n-\t\t\t\trte_flow_error_set\n-\t\t\t\t\t(error, errno,\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t NULL,\n-\t\t\t\t\t \"cannot get drop hash queue\");\n-\t\t\t\tgoto error;\n-\t\t\t}\n-\t\t} else {\n-\t\t\tstruct mlx5_hrxq *hrxq;\n+\tconst struct rte_flow_action_queue *queue = action->conf;\n \n
-\t\t\thrxq = mlx5_hrxq_get(dev, flow->key,\n-\t\t\t\t\t     MLX5_RSS_HASH_KEY_LEN,\n-\t\t\t\t\t     verbs->hash_fields,\n+\tif (action_flags &\n+\t    (MLX5_ACTION_DROP | MLX5_ACTION_QUEUE | MLX5_ACTION_RSS))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't have 2 fate actions in\"\n+\t\t\t\t\t  \" same flow\");\n+\tif (queue->index >= priv->rxqs_n)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &queue->index,\n+\t\t\t\t\t  \"queue index out of range\");\n+\tif (!(*priv->rxqs)[queue->index])\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &queue->index,\n+\t\t\t\t\t  \"queue is not configured\");\n+\treturn 0;\n+}\n+\n
+/**\n+ *\n+ * Validate the rss action.\n+ *\n+ * @param[in] action\n+ *   Pointer to the rss action.\n+ * @param[in] action_flags\n+ *   Bit-fields that hold the actions detected until now.\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n
+static int\n+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,\n+\t\t\t      uint64_t action_flags,\n+\t\t\t      struct rte_eth_dev *dev,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_rss *rss = action->conf;\n+\tunsigned int i;\n+\n+\tif (action_flags &\n+\t    (MLX5_ACTION_DROP | MLX5_ACTION_QUEUE | MLX5_ACTION_RSS))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"can't have 2 fate actions\"\n+\t\t\t\t\t  \" in same flow\");\n+\tif (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&\n+\t    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->func,\n+\t\t\t\t\t  \"RSS hash function not supported\");\n+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n+\tif (rss->level > 2)\n+#else\n+\tif (rss->level > 1)\n+#endif\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->level,\n+\t\t\t\t\t  \"tunnel RSS is not supported\");\n+\tif (rss->key_len < MLX5_RSS_HASH_KEY_LEN)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->key_len,\n+\t\t\t\t\t  \"RSS hash key too small\");\n+\tif (rss->key_len > MLX5_RSS_HASH_KEY_LEN)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->key_len,\n+\t\t\t\t\t  \"RSS hash key too large\");\n+\tif (rss->queue_num > priv->config.ind_table_max_size)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->queue_num,\n+\t\t\t\t\t  \"number of queues too large\");\n+\tif (rss->types & MLX5_RSS_HF_MASK)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->types,\n+\t\t\t\t\t  \"some RSS protocols are not\"\n+\t\t\t\t\t  \" supported\");\n+\tfor (i = 0; i != rss->queue_num; ++i) {\n+\t\tif (!(*priv->rxqs)[rss->queue[i]])\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t &rss->queue[i], \"queue is not configured\");\n+\t}\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate the count action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_action_count(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\n+\tif (!priv->config.flow_counter_en)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"flow counters are not supported.\");\n+\treturn 0;\n+}\n+\n
+/**\n+ * Verify the @p attributes will be correctly understood by the NIC and store\n+ * them in the @p flow if everything is correct.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] attributes\n+ *   Pointer to flow attributes.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_attributes(struct rte_eth_dev *dev,\n+\t\t\t      const struct rte_flow_attr *attributes,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tuint32_t priority_max = priv->config.flow_prio - 1;\n+\n+\tif (attributes->group)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t\t  NULL, \"groups are not supported\");\n+\tif (attributes->priority != MLX5_FLOW_PRIO_RSVD &&\n+\t    attributes->priority >= priority_max)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t\t  NULL, \"priority out of range\");\n+\tif (attributes->egress)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,\n+\t\t\t\t\t  \"egress is not supported\");\n+\tif (attributes->transfer)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,\n+\t\t\t\t\t  NULL, \"transfer is not supported\");\n+\tif (!attributes->ingress)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"ingress attribute is mandatory\");\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate Ethernet item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_eth(const struct rte_flow_item *item,\n+\t\t\t    uint64_t item_flags,\n+\t\t\t    struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_eth *mask = item->mask;\n+\tconst struct rte_flow_item_eth nic_mask = {\n+\t\t.dst.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n+\t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n+\t\t.type = RTE_BE16(0xffff),\n+\t};\n+\tint ret;\n+\tint tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\n+\tif (item_flags & MLX5_FLOW_LAYER_OUTER_L2)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"3 levels of l2 are not supported\");\n+\tif ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n
+\t\t\t\t\t  \"2 L2 without tunnel are not supported\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_eth_mask;\n+\tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n+\t\t\t\t\t(const uint8_t *)&nic_mask,\n+\t\t\t\t\tsizeof(struct rte_flow_item_eth),\n+\t\t\t\t\terror);\n+\treturn ret;\n+}\n+\n
+/**\n+ * Validate VLAN item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,\n+\t\t\t     uint64_t item_flags,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_vlan *spec = item->spec;\n+\tconst struct rte_flow_item_vlan *mask = item->mask;\n+\tconst struct rte_flow_item_vlan nic_mask = {\n+\t\t.tci = RTE_BE16(0x0fff),\n+\t\t.inner_type = RTE_BE16(0xffff),\n+\t};\n+\tconst int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\tint ret;\n+\tconst uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |\n+\t\t\t\t\tMLX5_FLOW_LAYER_INNER_L4) :\n+\t\t\t\t       (MLX5_FLOW_LAYER_OUTER_L3 |\n+\t\t\t\t\tMLX5_FLOW_LAYER_OUTER_L4);\n+\tconst uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :\n+\t\t\t\t\tMLX5_FLOW_LAYER_OUTER_VLAN;\n+\n+\tif (item_flags & vlanm)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"VLAN layer already configured\");\n+\telse if ((item_flags & l34m) != 0)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L2 layer cannot follow L3/L4 layer\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_vlan_mask;\n+\tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n+\t\t\t\t\t(const uint8_t *)&nic_mask,\n+\t\t\t\t\tsizeof(struct rte_flow_item_vlan),\n+\t\t\t\t\terror);\n+\tif (ret)\n+\t\treturn ret;\n+\t/*\n+\t * From verbs perspective an empty VLAN is equivalent\n+\t * to a packet without VLAN layer.\n+\t */\n+\tif (!spec->tci)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\n+\t\t\t\t\t  item->spec,\n+\t\t\t\t\t  \"VLAN cannot be empty\");\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate IPV4 item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,\n+\t\t\t     uint64_t item_flags,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_ipv4 *mask = item->mask;\n+\tconst struct rte_flow_item_ipv4 nic_mask = {\n+\t\t.hdr = {\n+\t\t\t.src_addr = RTE_BE32(0xffffffff),\n+\t\t\t.dst_addr = RTE_BE32(0xffffffff),\n+\t\t\t.type_of_service = 0xff,\n+\t\t\t.next_proto_id = 0xff,\n+\t\t},\n+\t};\n+\tconst int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\tint ret;\n+\n+\tif (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t\t   MLX5_FLOW_LAYER_OUTER_L3))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"multiple L3 layers not supported\");\n+\telse if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t\tMLX5_FLOW_LAYER_OUTER_L4))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L3 cannot follow an L4 layer.\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_ipv4_mask;\n+\tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n+\t\t\t\t\t(const uint8_t *)&nic_mask,\n+\t\t\t\t\tsizeof(struct rte_flow_item_ipv4),\n+\t\t\t\t\terror);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate IPV6 item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,\n+\t\t\t     uint64_t item_flags,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_ipv6 *mask = item->mask;\n+\tconst struct rte_flow_item_ipv6 nic_mask = {\n+\t\t.hdr = {\n+\t\t\t.src_addr =\n+\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"\n+\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\",\n+\t\t\t.dst_addr =\n+\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"\n+\t\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\",\n+\t\t\t.vtc_flow = RTE_BE32(0xffffffff),\n+\t\t\t.proto = 0xff,\n+\t\t\t.hop_limits = 0xff,\n+\t\t},\n+\t};\n+\tconst int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\tint ret;\n+\n+\tif (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t\t   MLX5_FLOW_LAYER_OUTER_L3))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"multiple L3 layers not supported\");\n+\telse if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t\tMLX5_FLOW_LAYER_OUTER_L4))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L3 cannot follow an L4 layer.\");\n+\t/*\n+\t * IPv6 is not recognised by the NIC inside a GRE tunnel.\n+\t * Such support has to be disabled as the rule will be\n+\t * accepted.  Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and\n+\t * Mellanox OFED 4.4-1.0.0.0.\n+\t */\n+\tif (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"IPv6 inside a GRE tunnel is\"\n+\t\t\t\t\t  \" not recognised.\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_ipv6_mask;\n+\tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n+\t\t\t\t\t(const uint8_t *)&nic_mask,\n+\t\t\t\t\tsizeof(struct rte_flow_item_ipv6),\n+\t\t\t\t\terror);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate UDP item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[in] target_protocol\n+ *   The next protocol in the previous item.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_udp(const struct rte_flow_item *item,\n+\t\t\t    uint64_t item_flags,\n+\t\t\t    uint8_t target_protocol,\n+\t\t\t    struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_udp *mask = item->mask;\n+\tconst int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\tint ret;\n+\n+\tif (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_UDP)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"protocol filtering not compatible\"\n+\t\t\t\t\t  \" with UDP layer\");\n+\tif (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3)))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L3 is mandatory to filter on L4\");\n+\tif (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t   MLX5_FLOW_LAYER_OUTER_L4))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L4 layer is already present\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_udp_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_udp_mask,\n+\t\t sizeof(struct rte_flow_item_udp), error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate TCP item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[in] target_protocol\n+ *   The next protocol in the previous item.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,\n+\t\t\t    uint64_t item_flags,\n+\t\t\t    uint8_t target_protocol,\n+\t\t\t    struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_tcp *mask = item->mask;\n+\tconst int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\tint ret;\n+\n+\tif (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_TCP)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"protocol filtering not compatible\"\n+\t\t\t\t\t  \" with TCP layer\");\n
+\tif (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3)))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L3 is mandatory to filter on L4\");\n+\tif (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :\n+\t\t\t\t   MLX5_FLOW_LAYER_OUTER_L4))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L4 layer is already present\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_tcp_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_tcp_mask,\n+\t\t sizeof(struct rte_flow_item_tcp), error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate VXLAN item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[in] target_protocol\n+ *   The next protocol in the previous item.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,\n+\t\t\t      uint64_t item_flags,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_vxlan *spec = item->spec;\n+\tconst struct rte_flow_item_vxlan *mask = item->mask;\n+\tint ret;\n+\tunion vni {\n+\t\tuint32_t vlan_id;\n+\t\tuint8_t vni[4];\n+\t} id = { .vlan_id = 0, };\n+\tuint32_t vlan_id = 0;\n+\n+\n+\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"a tunnel is already present\");\n+\t/*\n+\t * Verify only UDPv4 is present as defined in\n+\t * https://tools.ietf.org/html/rfc7348\n+\t */\n+\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"no outer UDP layer found\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_vxlan_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_vxlan_mask,\n+\t\t sizeof(struct rte_flow_item_vxlan),\n+\t\t error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tif (spec) {\n+\t\tmemcpy(&id.vni[1], spec->vni, 3);\n+\t\tvlan_id = id.vlan_id;\n+\t\tmemcpy(&id.vni[1], mask->vni, 3);\n+\t\tvlan_id &= id.vlan_id;\n+\t}\n+\t/*\n+\t * Tunnel id 0 is equivalent to not adding a VXLAN layer, if\n+\t * only this layer is defined in the Verbs specification it is\n+\t * interpreted as wildcard and all packets will match this\n+\t * rule, if it follows a full stack layer (ex: eth / ipv4 /\n+\t * udp), all packets matching the layers before will also\n+\t * match this rule.  To avoid such situation, VNI 0 is\n+\t * currently refused.\n+\t */\n+\tif (!vlan_id)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"VXLAN vni cannot be 0\");\n+\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"VXLAN tunnel must be fully defined\");\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate VXLAN_GPE item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] target_protocol\n+ *   The next protocol in the previous item.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,\n+\t\t\t\t  uint64_t item_flags,\n+\t\t\t\t  struct rte_eth_dev *dev,\n+\t\t\t\t  struct rte_flow_error *error)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_item_vxlan_gpe *spec = item->spec;\n+\tconst struct rte_flow_item_vxlan_gpe *mask = item->mask;\n+\tint ret;\n+\tunion vni {\n+\t\tuint32_t vlan_id;\n+\t\tuint8_t vni[4];\n+\t} id = { .vlan_id = 0, };\n+\tuint32_t vlan_id = 0;\n+\n+\tif (!priv->config.l3_vxlan_en)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L3 VXLAN is not enabled by device\"\n+\t\t\t\t\t  \" parameter and/or not configured in\"\n+\t\t\t\t\t  \" firmware\");\n+\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"a tunnel is already present\");\n+\t/*\n+\t * Verify only UDPv4 is present as defined in\n+\t * https://tools.ietf.org/html/rfc7348\n+\t */\n+\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"no outer UDP layer found\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_vxlan_gpe_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,\n+\t\t sizeof(struct rte_flow_item_vxlan_gpe),\n+\t\t error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tif (spec) {\n+\t\tif (spec->protocol)\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"VxLAN-GPE protocol\"\n+\t\t\t\t\t\t  \" not supported\");\n+\t\tmemcpy(&id.vni[1], spec->vni, 3);\n+\t\tvlan_id = id.vlan_id;\n+\t\tmemcpy(&id.vni[1], mask->vni, 3);\n+\t\tvlan_id &= id.vlan_id;\n+\t}\n+\t/*\n+\t * Tunnel id 0 is equivalent to not adding a VXLAN layer, if only this\n+\t * layer is defined in the Verbs specification it is interpreted as\n+\t * wildcard and all packets will match this rule, if it follows a full\n+\t * stack layer (ex: eth / ipv4 / udp), all packets matching the layers\n+\t * before will also match this rule.  To avoid such situation, VNI 0\n+\t * is currently refused.\n+\t */\n+\tif (!vlan_id)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"VXLAN-GPE vni cannot be 0\");\n+\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"VXLAN-GPE tunnel must be fully\"\n+\t\t\t\t\t  \" defined\");\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate GRE item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit flags to mark detected items.\n+ * @param[in] target_protocol\n+ *   The next protocol in the previous item.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_gre(const struct rte_flow_item *item,\n+\t\t\t    uint64_t item_flags,\n+\t\t\t    uint8_t target_protocol,\n+\t\t\t    struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_gre *spec __rte_unused = item->spec;\n+\tconst struct rte_flow_item_gre *mask = item->mask;\n+\tint ret;\n+\n+\tif (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_GRE)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"protocol filtering not compatible\"\n+\t\t\t\t\t  \" with this GRE layer\");\n+\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"a tunnel is already present\");\n+\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"L3 Layer is missing\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_gre_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_gre_mask,\n+\t\t sizeof(struct rte_flow_item_gre), error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT\n+\tif (spec && (spec->protocol & mask->protocol))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"without MPLS support the\"\n+\t\t\t\t\t  \" specification cannot be used for\"\n+\t\t\t\t\t  \" filtering\");\n+#endif\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate MPLS item.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that hold the items detected until now.\n+ * @param[in] target_protocol\n+ *   The next protocol in the previous item.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,\n+\t\t\t     uint64_t item_flags __rte_unused,\n+\t\t\t     uint8_t target_protocol __rte_unused,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n+\tconst struct rte_flow_item_mpls *mask = item->mask;\n+\tint ret;\n+\n+\tif (target_protocol != 0xff && target_protocol != MLX5_IP_PROTOCOL_MPLS)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"protocol filtering not compatible\"\n+\t\t\t\t\t  \" with MPLS layer\");\n+\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n
+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"a tunnel is already\"\n+\t\t\t\t\t  \" present\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_mpls_mask;\n+\tret = mlx5_flow_item_acceptable\n+\t\t(item, (const uint8_t *)mask,\n+\t\t (const uint8_t *)&rte_flow_item_mpls_mask,\n+\t\t sizeof(struct rte_flow_item_mpls), error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\treturn 0;\n+#endif /* HAVE_IBV_DEVICE_MPLS_SUPPORT */\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t  \"MPLS is not supported by Verbs, please\"\n+\t\t\t\t  \" update.\");\n+}\n+\n
+/**\n+ *\n+ * Internal validation function.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] attr\n+ *   Pointer to the flow attributes.\n+ * @param[in] items\n+ *   Pointer to the list of items.\n+ * @param[in] actions\n+ *   Pointer to the list of actions.\n+ * @param[out] error\n+ *   Pointer to the error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int mlx5_flow_verbs_validate(struct rte_eth_dev *dev,\n+\t\t\t\t    const struct rte_flow_attr *attr,\n+\t\t\t\t    const struct rte_flow_item items[],\n+\t\t\t\t    const struct rte_flow_action actions[],\n+\t\t\t\t    struct rte_flow_error *error)\n+{\n+\tint ret;\n+\tuint32_t action_flags = 0;\n+\tuint32_t item_flags = 0;\n+\tint tunnel = 0;\n+\tuint8_t next_protocol = 0xff;\n+\n+\tif (items == NULL)\n+\t\treturn -1;\n+\tret = mlx5_flow_validate_attributes(dev, attr, error);\n+\tif (ret < 0)\n+\t\treturn ret;\n
+\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\n+\t\tint ret = 0;\n+\t\tswitch (items->type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\tret = mlx5_flow_validate_item_eth(items, item_flags,\n+\t\t\t\t\t\t\t  error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\t\t\t\t       MLX5_FLOW_LAYER_OUTER_L2;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n+\t\t\tret = mlx5_flow_validate_item_vlan(items, item_flags,\n+\t\t\t\t\t\t\t   error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :\n+\t\t\t\t\t       MLX5_FLOW_LAYER_OUTER_VLAN;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\t\tret = mlx5_flow_validate_item_ipv4(items, item_flags,\n+\t\t\t\t\t\t\t   error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n+\t\t\t\t\t       MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n+\t\t\tif (items->mask != NULL &&\n+\t\t\t    ((const struct rte_flow_item_ipv4 *)\n+\t\t\t     items->mask)->hdr.next_proto_id)\n+\t\t\t\tnext_protocol =\n+\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\n+\t\t\t\t\t (items->spec))->hdr.next_proto_id;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\t\tret = mlx5_flow_validate_item_ipv6(items, item_flags,\n+\t\t\t\t\t\t\t   error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n+\t\t\t\t\t       MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n+\t\t\tif (items->mask != NULL &&\n+\t\t\t    ((const struct rte_flow_item_ipv6 *)\n+\t\t\t     items->mask)->hdr.proto)\n+\t\t\t\tnext_protocol =\n+\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\n+\t\t\t\t\t items->spec)->hdr.proto;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\tret = mlx5_flow_validate_item_udp(items, item_flags,\n+\t\t\t\t\t\t\t  next_protocol,\n+\t\t\t\t\t\t\t  error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :\n+\t\t\t\t\t       MLX5_FLOW_LAYER_OUTER_L4_UDP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\t\tret = mlx5_flow_validate_item_tcp(items, item_flags,\n+\t\t\t\t\t\t\t  next_protocol, error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :\n+\t\t\t\t\t       MLX5_FLOW_LAYER_OUTER_L4_TCP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tret = mlx5_flow_validate_item_vxlan(items, item_flags,\n+\t\t\t\t\t\t\t    error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_VXLAN;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n+\t\t\tret = mlx5_flow_validate_item_vxlan_gpe(items,\n+\t\t\t\t\t\t\t\titem_flags,\n+\t\t\t\t\t\t\t\tdev, error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n+\t\t\tret = mlx5_flow_validate_item_gre(items, item_flags,\n+\t\t\t\t\t\t\t  next_protocol, error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_GRE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_MPLS:\n+\t\t\tret = mlx5_flow_validate_item_mpls(items, item_flags,\n+\t\t\t\t\t\t\t   next_protocol,\n+\t\t\t\t\t\t\t   error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\tif (next_protocol != 0xff &&\n+\t\t\t    next_protocol != MLX5_IP_PROTOCOL_MPLS)\n+\t\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t(error, ENOTSUP,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM, items,\n+\t\t\t\t\t \"protocol filtering not compatible\"\n+\t\t\t\t\t \" with MPLS layer\");\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_MPLS;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  NULL,\n+\t\t\t\t\t\t  \"item not supported\");\n+\t\t}\n+\t}\n
+\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\n+\t\ttunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\t\tswitch (actions->type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_FLAG:\n+\t\t\tret = mlx5_flow_validate_action_flag(action_flags,\n+\t\t\t\t\t\t\t     error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_ACTION_FLAG;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_MARK:\n+\t\t\tret = mlx5_flow_validate_action_mark(actions,\n+\t\t\t\t\t\t\t     action_flags,\n+\t\t\t\t\t\t\t     error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_ACTION_MARK;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\t\tret = mlx5_flow_validate_action_drop(action_flags,\n+\t\t\t\t\t\t\t     error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_ACTION_DROP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\tret = mlx5_flow_validate_action_queue(actions,\n+\t\t\t\t\t\t\t      action_flags, dev,\n+\t\t\t\t\t\t\t      error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_ACTION_QUEUE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t\tret = mlx5_flow_validate_action_rss(actions,\n+\t\t\t\t\t\t\t    action_flags, dev,\n+\t\t\t\t\t\t\t    error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_ACTION_RSS;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n+\t\t\tret = mlx5_flow_validate_action_count(dev, error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_ACTION_COUNT;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"action not supported\");\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n
+/**\n+ * Validate a flow supported by the NIC.\n+ *\n+ * @see rte_flow_validate()\n+ * @see rte_flow_ops\n+ */\n+int\n+mlx5_flow_validate(struct rte_eth_dev *dev,\n+\t\t   const struct rte_flow_attr *attr,\n+\t\t   const struct rte_flow_item items[],\n+\t\t   const struct rte_flow_action actions[],\n+\t\t   struct rte_flow_error *error)\n+{\n+\tint ret;\n+\n+\tret = mlx5_flow_verbs_validate(dev, attr, items, actions, error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\treturn 0;\n+}\n+\n
+/**\n+ * Remove the flow.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ */\n+static void\n+mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_verbs *verbs;\n+\n+\tif (flow->nl_flow && priv->mnl_socket)\n+\t\tmlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);\n+\tLIST_FOREACH(verbs, &flow->verbs, next) {\n+\t\tif (verbs->flow) {\n+\t\t\tclaim_zero(mlx5_glue->destroy_flow(verbs->flow));\n+\t\t\tverbs->flow = NULL;\n+\t\t}\n+\t\tif (verbs->hrxq) {\n+\t\t\tif (flow->fate & MLX5_FLOW_FATE_DROP)\n+\t\t\t\tmlx5_hrxq_drop_release(dev);\n+\t\t\telse\n+\t\t\t\tmlx5_hrxq_release(dev, verbs->hrxq);\n+\t\t\tverbs->hrxq = NULL;\n+\t\t}\n+\t}\n+\tif (flow->counter) {\n+\t\tmlx5_flow_counter_release(flow->counter);\n+\t\tflow->counter = NULL;\n+\t}\n+}\n+\n
+/**\n+ * Apply the flow.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n+\t\tstruct rte_flow_error *error)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_verbs *verbs;\n+\tint err;\n+\n+\tLIST_FOREACH(verbs, &flow->verbs, next) {\n+\t\tif (flow->fate & MLX5_FLOW_FATE_DROP) {\n+\t\t\tverbs->hrxq = mlx5_hrxq_drop_new(dev);\n+\t\t\tif (!verbs->hrxq) {\n+\t\t\t\trte_flow_error_set\n+\t\t\t\t\t(error, errno,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t NULL,\n+\t\t\t\t\t \"cannot get drop hash queue\");\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tstruct mlx5_hrxq *hrxq;\n+\n+\t\t\thrxq = mlx5_hrxq_get(dev, flow->key,\n+\t\t\t\t\t     MLX5_RSS_HASH_KEY_LEN,\n+\t\t\t\t\t     verbs->hash_fields,\n \t\t\t\t\t     (*flow->queue),\n \t\t\t\t\t     flow->rss.queue_num);\n \t\t\tif (!hrxq)\n@@ -3025,6 +3597,9 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,\n \tsize_t size = 0;\n \tint ret;\n \n+\tret = mlx5_flow_validate(dev, attr, items, actions, error);\n+\tif (ret < 0)\n+\t\treturn NULL;\n \tret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);\n \tif (ret < 0)\n \t\treturn NULL;\n@@ -3233,7 +3808,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,\n \t\t},\n \t\t{\n \t\t\t.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :\n-\t\t\t\tRTE_FLOW_ITEM_TYPE_END,\n+\t\t\t\t\t      RTE_FLOW_ITEM_TYPE_END,\n \t\t\t.spec = vlan_spec,\n \t\t\t.last = NULL,\n \t\t\t.mask = vlan_mask,\n",
    "prefixes": [
        "v3",
        "01/11"
    ]
}