get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied are changed).

put:
Update a patch (full update: every writable field is replaced).
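
Read access is anonymous; PUT and PATCH require a Patchwork API token with maintainer rights on the project. A minimal sketch using the Python "requests" library (the token value and the new state are placeholders, not values taken from this response):

    import requests

    URL = "http://patches.dpdk.org/api/patches/42795/"

    # GET: show the patch. No authentication needed.
    patch = requests.get(URL).json()
    print(patch["state"])  # "superseded" in the response below

    # PATCH: partial update of a single field. "YOUR_API_TOKEN" is a
    # placeholder for a real Patchwork API token.
    resp = requests.patch(
        URL,
        headers={"Authorization": "Token YOUR_API_TOKEN"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()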

GET /api/patches/42795/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42795,
    "url": "http://patches.dpdk.org/api/patches/42795/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/40c457fa16871ead7eac9f6d594f8ecf7fbbc6b6.1531293415.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<40c457fa16871ead7eac9f6d594f8ecf7fbbc6b6.1531293415.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/40c457fa16871ead7eac9f6d594f8ecf7fbbc6b6.1531293415.git.nelio.laranjeiro@6wind.com",
    "date": "2018-07-11T07:22:34",
    "name": "[v3,01/21] net/mlx5: remove flow support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "02f50898fa59495a124e920945b4b42524553607",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/40c457fa16871ead7eac9f6d594f8ecf7fbbc6b6.1531293415.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [
        {
            "id": 512,
            "url": "http://patches.dpdk.org/api/series/512/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=512",
            "date": "2018-07-11T07:22:33",
            "name": "net/mlx5: flow rework",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/512/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42795/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/42795/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7FE441B4B5;\n\tWed, 11 Jul 2018 09:23:22 +0200 (CEST)",
            "from mail-wr1-f42.google.com (mail-wr1-f42.google.com\n\t[209.85.221.42]) by dpdk.org (Postfix) with ESMTP id 8BEAA1B466\n\tfor <dev@dpdk.org>; Wed, 11 Jul 2018 09:23:19 +0200 (CEST)",
            "by mail-wr1-f42.google.com with SMTP id b15-v6so17029680wrv.10\n\tfor <dev@dpdk.org>; Wed, 11 Jul 2018 00:23:19 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\tt10-v6sm31314212wre.95.2018.07.11.00.23.16\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tWed, 11 Jul 2018 00:23:17 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=ax8P3r+VBpEqhnaCODZO+9MA5wBs5COlrbAOevoOZu4=;\n\tb=Z5K55JgGOfohighcwiwOpV0ZAz2ghsIo9Zrlk7SoCPKlrndIf6loLHDB3vrLt93SaQ\n\tGwMHFEWPjkZUUYbKYLZNgsmPdhVkh1pSgEGn/8Sh5nCNBQhPnlf3+xMJHHFJyN9030fl\n\t/elGgvBD7mUF6kV82CGN9g1Hl65QLBFx8ULlPNHohkqVNBmAvi3NFqFU8Q4Z02DFsWEC\n\tyJzYmVFpBZ/Ys3J4JS9yrfwCR9VcWgz7ip6DYwXm19aKX8+KAda1fqv/XfKR5RZMfQ/Q\n\tfW1RIykhZAzLbrJnHuAWYSXauC5Z3p+lVsAKDWA1WJrNDTclkl7HkOpUUmczLw7wOAuO\n\tWElA==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=ax8P3r+VBpEqhnaCODZO+9MA5wBs5COlrbAOevoOZu4=;\n\tb=VCS2M86wS/73Emb4j8bSg20c07IkmF5kTlCpSlq/4LW2w3kyeSa4KoR6+0g3+PHRiw\n\tOpzlcNlXggn3EX5UIfYuEgBpbrgTN+Ax3NE+1Vmx+R5bladI5HU97RaEzvdZyIiwjfZQ\n\tGMlLObIIzzJMRV5+GKo3P2jkkfXg1mDh2m6CGo/b+noFpsdIWNx9m4bf5nvEHMftACyy\n\tFzWmq47qeevQNEAezCW2HL9ci0zzWVRynZ0p16T7TzpkBmGZIpoaT39O65+jgBlf3sf7\n\t2xe5BR0yNLtgdo8TqTyhBY08XwS77KXQAu8bkbFkbflv8LMa3V5ji9pU5oZ/xOAN7M1s\n\ttwBQ==",
        "X-Gm-Message-State": "AOUpUlGOtffbkBGhtcgTc6RrPVaJGWgGbC9T2/RcFlavZbNV9/HjlQVM\n\tRddl0zqkZ8Ef3hJdloLpcSodLX9ZAg==",
        "X-Google-Smtp-Source": "AAOMgpc23QNbV2Ecv5VqmqjEowyr033waoN4m5gSyF02CuPrmYszWkGZCsJge5Lef9xeuao9eiDAlg==",
        "X-Received": "by 2002:a5d:4b50:: with SMTP id\n\tw16-v6mr1183751wrs.87.1531293797884; \n\tWed, 11 Jul 2018 00:23:17 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org,\n\tYongseok Koh <yskoh@mellanox.com>",
        "Cc": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "Date": "Wed, 11 Jul 2018 09:22:34 +0200",
        "Message-Id": "<40c457fa16871ead7eac9f6d594f8ecf7fbbc6b6.1531293415.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1531293415.git.nelio.laranjeiro@6wind.com>",
        "References": "<cover.1530111623.git.nelio.laranjeiro@6wind.com>\n\t<cover.1531293415.git.nelio.laranjeiro@6wind.com>",
        "Subject": "[dpdk-dev] [PATCH v3 01/21] net/mlx5: remove flow support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This start a series to re-work the flow engine in mlx5 to easily support\nflow conversion to Verbs or TC.  This is necessary to handle both regular\nflows and representors flows.\n\nAs the full file needs to be clean-up to re-write all items/actions\nprocessing, this patch starts to disable the regular code and only let the\nPMD to start in isolated mode.\n\nAfter this patch flow API will not be usable.\n\nSigned-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\nAcked-by: Yongseok Koh <yskoh@mellanox.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 3095 +---------------------------------\n drivers/net/mlx5/mlx5_rxtx.h |    1 -\n 2 files changed, 80 insertions(+), 3016 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 45207d70e..a45cb06e1 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -31,2406 +31,49 @@\n #include \"mlx5_prm.h\"\n #include \"mlx5_glue.h\"\n \n-/* Flow priority for control plane flows. */\n-#define MLX5_CTRL_FLOW_PRIORITY 1\n-\n-/* Internet Protocol versions. */\n-#define MLX5_IPV4 4\n-#define MLX5_IPV6 6\n-#define MLX5_GRE 47\n-\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT\n-struct ibv_flow_spec_counter_action {\n-\tint dummy;\n-};\n-#endif\n-\n-/* Dev ops structure defined in mlx5.c */\n-extern const struct eth_dev_ops mlx5_dev_ops;\n-extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n-\n-/** Structure give to the conversion functions. */\n-struct mlx5_flow_data {\n-\tstruct rte_eth_dev *dev; /** Ethernet device. */\n-\tstruct mlx5_flow_parse *parser; /** Parser context. */\n-\tstruct rte_flow_error *error; /** Error context. */\n-};\n-\n-static int\n-mlx5_flow_create_eth(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_vlan(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_ipv4(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_ipv6(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_udp(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_tcp(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_vxlan(const struct rte_flow_item *item,\n-\t\t       const void *default_mask,\n-\t\t       struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,\n-\t\t\t   const void *default_mask,\n-\t\t\t   struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_gre(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data);\n-\n-static int\n-mlx5_flow_create_mpls(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data);\n-\n-struct mlx5_flow_parse;\n-\n-static void\n-mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,\n-\t\t      unsigned int size);\n-\n-static int\n-mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);\n-\n-static int\n-mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser);\n-\n-/* Hash RX queue types. */\n-enum hash_rxq_type {\n-\tHASH_RXQ_TCPV4,\n-\tHASH_RXQ_UDPV4,\n-\tHASH_RXQ_IPV4,\n-\tHASH_RXQ_TCPV6,\n-\tHASH_RXQ_UDPV6,\n-\tHASH_RXQ_IPV6,\n-\tHASH_RXQ_ETH,\n-\tHASH_RXQ_TUNNEL,\n-};\n-\n-/* Initialization data for hash RX queue. */\n-struct hash_rxq_init {\n-\tuint64_t hash_fields; /* Fields that participate in the hash. */\n-\tuint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */\n-\tunsigned int flow_priority; /* Flow priority to use. */\n-\tunsigned int ip_version; /* Internet protocol. */\n-};\n-\n-/* Initialization data for hash RX queues. 
*/\n-const struct hash_rxq_init hash_rxq_init[] = {\n-\t[HASH_RXQ_TCPV4] = {\n-\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV4 |\n-\t\t\t\tIBV_RX_HASH_DST_IPV4 |\n-\t\t\t\tIBV_RX_HASH_SRC_PORT_TCP |\n-\t\t\t\tIBV_RX_HASH_DST_PORT_TCP),\n-\t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,\n-\t\t.flow_priority = 0,\n-\t\t.ip_version = MLX5_IPV4,\n-\t},\n-\t[HASH_RXQ_UDPV4] = {\n-\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV4 |\n-\t\t\t\tIBV_RX_HASH_DST_IPV4 |\n-\t\t\t\tIBV_RX_HASH_SRC_PORT_UDP |\n-\t\t\t\tIBV_RX_HASH_DST_PORT_UDP),\n-\t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,\n-\t\t.flow_priority = 0,\n-\t\t.ip_version = MLX5_IPV4,\n-\t},\n-\t[HASH_RXQ_IPV4] = {\n-\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV4 |\n-\t\t\t\tIBV_RX_HASH_DST_IPV4),\n-\t\t.dpdk_rss_hf = (ETH_RSS_IPV4 |\n-\t\t\t\tETH_RSS_FRAG_IPV4),\n-\t\t.flow_priority = 1,\n-\t\t.ip_version = MLX5_IPV4,\n-\t},\n-\t[HASH_RXQ_TCPV6] = {\n-\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV6 |\n-\t\t\t\tIBV_RX_HASH_DST_IPV6 |\n-\t\t\t\tIBV_RX_HASH_SRC_PORT_TCP |\n-\t\t\t\tIBV_RX_HASH_DST_PORT_TCP),\n-\t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,\n-\t\t.flow_priority = 0,\n-\t\t.ip_version = MLX5_IPV6,\n-\t},\n-\t[HASH_RXQ_UDPV6] = {\n-\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV6 |\n-\t\t\t\tIBV_RX_HASH_DST_IPV6 |\n-\t\t\t\tIBV_RX_HASH_SRC_PORT_UDP |\n-\t\t\t\tIBV_RX_HASH_DST_PORT_UDP),\n-\t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,\n-\t\t.flow_priority = 0,\n-\t\t.ip_version = MLX5_IPV6,\n-\t},\n-\t[HASH_RXQ_IPV6] = {\n-\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV6 |\n-\t\t\t\tIBV_RX_HASH_DST_IPV6),\n-\t\t.dpdk_rss_hf = (ETH_RSS_IPV6 |\n-\t\t\t\tETH_RSS_FRAG_IPV6),\n-\t\t.flow_priority = 1,\n-\t\t.ip_version = MLX5_IPV6,\n-\t},\n-\t[HASH_RXQ_ETH] = {\n-\t\t.hash_fields = 0,\n-\t\t.dpdk_rss_hf = 0,\n-\t\t.flow_priority = 2,\n-\t},\n-};\n-\n-/* Number of entries in hash_rxq_init[]. */\n-const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);\n-\n-/** Structure for holding counter stats. */\n-struct mlx5_flow_counter_stats {\n-\tuint64_t hits; /**< Number of packets matched by the rule. */\n-\tuint64_t bytes; /**< Number of bytes matched by the rule. */\n-};\n-\n-/** Structure for Drop queue. */\n-struct mlx5_hrxq_drop {\n-\tstruct ibv_rwq_ind_table *ind_table; /**< Indirection table. */\n-\tstruct ibv_qp *qp; /**< Verbs queue pair. */\n-\tstruct ibv_wq *wq; /**< Verbs work queue. */\n-\tstruct ibv_cq *cq; /**< Verbs completion queue. */\n-};\n-\n-/* Flows structures. */\n-struct mlx5_flow {\n-\tuint64_t hash_fields; /**< Fields that participate in the hash. */\n-\tstruct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */\n-\tstruct ibv_flow *ibv_flow; /**< Verbs flow. */\n-\tstruct mlx5_hrxq *hrxq; /**< Hash Rx queues. */\n-};\n-\n-/* Drop flows structures. */\n-struct mlx5_flow_drop {\n-\tstruct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */\n-\tstruct ibv_flow *ibv_flow; /**< Verbs flow. */\n-};\n-\n-struct rte_flow {\n-\tTAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */\n-\tuint32_t mark:1; /**< Set if the flow is marked. */\n-\tuint32_t drop:1; /**< Drop queue. */\n-\tstruct rte_flow_action_rss rss_conf; /**< RSS configuration */\n-\tuint16_t (*queues)[]; /**< Queues indexes to use. */\n-\tuint8_t rss_key[40]; /**< copy of the RSS key. */\n-\tuint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */\n-\tstruct ibv_counter_set *cs; /**< Holds the counters for the rule. */\n-\tstruct mlx5_flow_counter_stats counter_stats;/**<The counter stats. 
*/\n-\tstruct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];\n-\t/**< Flow with Rx queue. */\n-};\n-\n-/** Static initializer for items. */\n-#define ITEMS(...) \\\n-\t(const enum rte_flow_item_type []){ \\\n-\t\t__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \\\n-\t}\n-\n-#define IS_TUNNEL(type) ( \\\n-\t(type) == RTE_FLOW_ITEM_TYPE_VXLAN || \\\n-\t(type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \\\n-\t(type) == RTE_FLOW_ITEM_TYPE_GRE || \\\n-\t(type) == RTE_FLOW_ITEM_TYPE_MPLS)\n-\n-const uint32_t flow_ptype[] = {\n-\t[RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,\n-\t[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,\n-\t[RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,\n-\t[RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,\n-};\n-\n-#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)\n-\n-const uint32_t ptype_ext[] = {\n-\t[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |\n-\t\t\t\t\t      RTE_PTYPE_L4_UDP,\n-\t[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)]\t= RTE_PTYPE_TUNNEL_VXLAN_GPE |\n-\t\t\t\t\t\t  RTE_PTYPE_L4_UDP,\n-\t[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,\n-\t[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] =\n-\t\tRTE_PTYPE_TUNNEL_MPLS_IN_GRE,\n-\t[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] =\n-\t\tRTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,\n-};\n-\n-/** Structure to generate a simple graph of layers supported by the NIC. */\n-struct mlx5_flow_items {\n-\t/** List of possible actions for these items. */\n-\tconst enum rte_flow_action_type *const actions;\n-\t/** Bit-masks corresponding to the possibilities for the item. */\n-\tconst void *mask;\n-\t/**\n-\t * Default bit-masks to use when item->mask is not provided. When\n-\t * \\default_mask is also NULL, the full supported bit-mask (\\mask) is\n-\t * used instead.\n-\t */\n-\tconst void *default_mask;\n-\t/** Bit-masks size in bytes. */\n-\tconst unsigned int mask_sz;\n-\t/**\n-\t * Conversion function from rte_flow to NIC specific flow.\n-\t *\n-\t * @param item\n-\t *   rte_flow item to convert.\n-\t * @param default_mask\n-\t *   Default bit-masks to use when item->mask is not provided.\n-\t * @param data\n-\t *   Internal structure to store the conversion.\n-\t *\n-\t * @return\n-\t *   0 on success, a negative errno value otherwise and rte_errno is\n-\t *   set.\n-\t */\n-\tint (*convert)(const struct rte_flow_item *item,\n-\t\t       const void *default_mask,\n-\t\t       struct mlx5_flow_data *data);\n-\t/** Size in bytes of the destination structure. */\n-\tconst unsigned int dst_sz;\n-\t/** List of possible following items.  */\n-\tconst enum rte_flow_item_type *const items;\n-};\n-\n-/** Valid action for this PMD. */\n-static const enum rte_flow_action_type valid_actions[] = {\n-\tRTE_FLOW_ACTION_TYPE_DROP,\n-\tRTE_FLOW_ACTION_TYPE_QUEUE,\n-\tRTE_FLOW_ACTION_TYPE_MARK,\n-\tRTE_FLOW_ACTION_TYPE_FLAG,\n-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT\n-\tRTE_FLOW_ACTION_TYPE_COUNT,\n-#endif\n-\tRTE_FLOW_ACTION_TYPE_END,\n-};\n-\n-/** Graph of supported items and associated actions. 
*/\n-static const struct mlx5_flow_items mlx5_flow_items[] = {\n-\t[RTE_FLOW_ITEM_TYPE_END] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_VXLAN,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_GRE),\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_ETH] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_eth){\n-\t\t\t.dst.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n-\t\t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n-\t\t\t.type = -1,\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_eth_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_eth),\n-\t\t.convert = mlx5_flow_create_eth,\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_eth),\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_VLAN] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_vlan){\n-\t\t\t.tci = -1,\n-\t\t\t.inner_type = -1,\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_vlan_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_vlan),\n-\t\t.convert = mlx5_flow_create_vlan,\n-\t\t.dst_sz = 0,\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_IPV4] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_TCP,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_GRE),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_ipv4){\n-\t\t\t.hdr = {\n-\t\t\t\t.src_addr = -1,\n-\t\t\t\t.dst_addr = -1,\n-\t\t\t\t.type_of_service = -1,\n-\t\t\t\t.next_proto_id = -1,\n-\t\t\t},\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_ipv4_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_ipv4),\n-\t\t.convert = mlx5_flow_create_ipv4,\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_IPV6] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_TCP,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_GRE),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_ipv6){\n-\t\t\t.hdr = {\n-\t\t\t\t.src_addr = {\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t},\n-\t\t\t\t.dst_addr = {\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t\t0xff, 0xff, 0xff, 0xff,\n-\t\t\t\t},\n-\t\t\t\t.vtc_flow = -1,\n-\t\t\t\t.proto = -1,\n-\t\t\t\t.hop_limits = -1,\n-\t\t\t},\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_ipv6_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_ipv6),\n-\t\t.convert = mlx5_flow_create_ipv6,\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_ipv6),\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_UDP] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_MPLS),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_udp){\n-\t\t\t.hdr = {\n-\t\t\t\t.src_port = -1,\n-\t\t\t\t.dst_port = -1,\n-\t\t\t},\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_udp_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_udp),\n-\t\t.convert = mlx5_flow_create_udp,\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_TCP] = {\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_tcp){\n-\t\t\t.hdr = {\n-\t\t\t\t.src_port = -1,\n-\t\t\t\t.dst_port = -1,\n-\t\t\t},\n-\t\t},\n-\t\t.default_mask = 
&rte_flow_item_tcp_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_tcp),\n-\t\t.convert = mlx5_flow_create_tcp,\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_GRE] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_MPLS),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_gre){\n-\t\t\t.protocol = -1,\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_gre_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_gre),\n-\t\t.convert = mlx5_flow_create_gre,\n-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_gre),\n-#else\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_tunnel),\n-#endif\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_MPLS] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_mpls){\n-\t\t\t.label_tc_s = \"\\xff\\xff\\xf0\",\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_mpls_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_mpls),\n-\t\t.convert = mlx5_flow_create_mpls,\n-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_mpls),\n-#endif\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_VXLAN] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4, /* For L3 VXLAN. */\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6), /* For L3 VXLAN. */\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_vxlan){\n-\t\t\t.vni = \"\\xff\\xff\\xff\",\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_vxlan_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_vxlan),\n-\t\t.convert = mlx5_flow_create_vxlan,\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_tunnel),\n-\t},\n-\t[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {\n-\t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV4,\n-\t\t\t       RTE_FLOW_ITEM_TYPE_IPV6),\n-\t\t.actions = valid_actions,\n-\t\t.mask = &(const struct rte_flow_item_vxlan_gpe){\n-\t\t\t.vni = \"\\xff\\xff\\xff\",\n-\t\t},\n-\t\t.default_mask = &rte_flow_item_vxlan_gpe_mask,\n-\t\t.mask_sz = sizeof(struct rte_flow_item_vxlan_gpe),\n-\t\t.convert = mlx5_flow_create_vxlan_gpe,\n-\t\t.dst_sz = sizeof(struct ibv_flow_spec_tunnel),\n-\t},\n-};\n-\n-/** Structure to pass to the conversion function. */\n-struct mlx5_flow_parse {\n-\tuint32_t inner; /**< Verbs value, set once tunnel is encountered. */\n-\tuint32_t create:1;\n-\t/**< Whether resources should remain after a validate. */\n-\tuint32_t drop:1; /**< Target is a drop queue. */\n-\tuint32_t mark:1; /**< Mark is present in the flow. */\n-\tuint32_t count:1; /**< Count is present in the flow. */\n-\tuint32_t mark_id; /**< Mark identifier. */\n-\tstruct rte_flow_action_rss rss_conf; /**< RSS configuration */\n-\tuint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */\n-\tuint8_t rss_key[40]; /**< copy of the RSS key. */\n-\tenum hash_rxq_type layer; /**< Last pattern layer detected. */\n-\tenum hash_rxq_type out_layer; /**< Last outer pattern layer detected. */\n-\tuint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */\n-\tstruct ibv_counter_set *cs; /**< Holds the counter set for the rule */\n-\tstruct {\n-\t\tstruct ibv_flow_attr *ibv_attr;\n-\t\t/**< Pointer to Verbs attributes. */\n-\t\tunsigned int offset;\n-\t\t/**< Current position or total size of the attribute. 
*/\n-\t\tuint64_t hash_fields; /**< Verbs hash fields. */\n-\t} queue[RTE_DIM(hash_rxq_init)];\n-};\n-\n-static const struct rte_flow_ops mlx5_flow_ops = {\n-\t.validate = mlx5_flow_validate,\n-\t.create = mlx5_flow_create,\n-\t.destroy = mlx5_flow_destroy,\n-\t.flush = mlx5_flow_flush,\n-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT\n-\t.query = mlx5_flow_query,\n-#else\n-\t.query = NULL,\n-#endif\n-\t.isolate = mlx5_flow_isolate,\n-};\n-\n-/* Convert FDIR request to Generic flow. */\n-struct mlx5_fdir {\n-\tstruct rte_flow_attr attr;\n-\tstruct rte_flow_action actions[2];\n-\tstruct rte_flow_item items[4];\n-\tstruct rte_flow_item_eth l2;\n-\tstruct rte_flow_item_eth l2_mask;\n-\tunion {\n-\t\tstruct rte_flow_item_ipv4 ipv4;\n-\t\tstruct rte_flow_item_ipv6 ipv6;\n-\t} l3;\n-\tunion {\n-\t\tstruct rte_flow_item_ipv4 ipv4;\n-\t\tstruct rte_flow_item_ipv6 ipv6;\n-\t} l3_mask;\n-\tunion {\n-\t\tstruct rte_flow_item_udp udp;\n-\t\tstruct rte_flow_item_tcp tcp;\n-\t} l4;\n-\tunion {\n-\t\tstruct rte_flow_item_udp udp;\n-\t\tstruct rte_flow_item_tcp tcp;\n-\t} l4_mask;\n-\tstruct rte_flow_action_queue queue;\n-};\n-\n-/* Verbs specification header. */\n-struct ibv_spec_header {\n-\tenum ibv_flow_spec_type type;\n-\tuint16_t size;\n-};\n-\n-/**\n- * Check item is fully supported by the NIC matching capability.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param mask[in]\n- *   Bit-masks covering supported fields to compare with spec, last and mask in\n- *   \\item.\n- * @param size\n- *   Bit-Mask size in bytes.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_item_validate(const struct rte_flow_item *item,\n-\t\t\tconst uint8_t *mask, unsigned int size)\n-{\n-\tunsigned int i;\n-\tconst uint8_t *spec = item->spec;\n-\tconst uint8_t *last = item->last;\n-\tconst uint8_t *m = item->mask ? 
item->mask : mask;\n-\n-\tif (!spec && (item->mask || last))\n-\t\tgoto error;\n-\tif (!spec)\n-\t\treturn 0;\n-\t/*\n-\t * Single-pass check to make sure that:\n-\t * - item->mask is supported, no bits are set outside mask.\n-\t * - Both masked item->spec and item->last are equal (no range\n-\t *   supported).\n-\t */\n-\tfor (i = 0; i < size; i++) {\n-\t\tif (!m[i])\n-\t\t\tcontinue;\n-\t\tif ((m[i] | mask[i]) != mask[i])\n-\t\t\tgoto error;\n-\t\tif (last && ((spec[i] & m[i]) != (last[i] & m[i])))\n-\t\t\tgoto error;\n-\t}\n-\treturn 0;\n-error:\n-\trte_errno = ENOTSUP;\n-\treturn -rte_errno;\n-}\n-\n-/**\n- * Extract attribute to the parser.\n- *\n- * @param[in] attr\n- *   Flow rule attributes.\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_convert_attributes(const struct rte_flow_attr *attr,\n-\t\t\t     struct rte_flow_error *error)\n-{\n-\tif (attr->group) {\n-\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n-\t\t\t\t   NULL,\n-\t\t\t\t   \"groups are not supported\");\n-\t\treturn -rte_errno;\n-\t}\n-\tif (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {\n-\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n-\t\t\t\t   NULL,\n-\t\t\t\t   \"priorities are not supported\");\n-\t\treturn -rte_errno;\n-\t}\n-\tif (attr->egress) {\n-\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n-\t\t\t\t   NULL,\n-\t\t\t\t   \"egress is not supported\");\n-\t\treturn -rte_errno;\n-\t}\n-\tif (attr->transfer) {\n-\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,\n-\t\t\t\t   NULL,\n-\t\t\t\t   \"transfer is not supported\");\n-\t\treturn -rte_errno;\n-\t}\n-\tif (!attr->ingress) {\n-\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n-\t\t\t\t   NULL,\n-\t\t\t\t   \"only ingress is supported\");\n-\t\treturn -rte_errno;\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * Extract actions request to the parser.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param[in] actions\n- *   Associated actions (list terminated by the END action).\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- * @param[in, out] parser\n- *   Internal parser structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_convert_actions(struct rte_eth_dev *dev,\n-\t\t\t  const struct rte_flow_action actions[],\n-\t\t\t  struct rte_flow_error *error,\n-\t\t\t  struct mlx5_flow_parse *parser)\n-{\n-\tenum { FATE = 1, MARK = 2, COUNT = 4, };\n-\tuint32_t overlap = 0;\n-\tstruct priv *priv = dev->data->dev_private;\n-\n-\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {\n-\t\tif (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {\n-\t\t\tcontinue;\n-\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {\n-\t\t\tif (overlap & FATE)\n-\t\t\t\tgoto exit_action_overlap;\n-\t\t\toverlap |= FATE;\n-\t\t\tparser->drop = 1;\n-\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {\n-\t\t\tconst struct rte_flow_action_queue *queue =\n-\t\t\t\t(const struct rte_flow_action_queue *)\n-\t\t\t\tactions->conf;\n-\n-\t\t\tif (overlap & FATE)\n-\t\t\t\tgoto exit_action_overlap;\n-\t\t\toverlap |= FATE;\n-\t\t\tif (!queue || (queue->index > (priv->rxqs_n - 1)))\n-\t\t\t\tgoto 
exit_action_not_supported;\n-\t\t\tparser->queues[0] = queue->index;\n-\t\t\tparser->rss_conf = (struct rte_flow_action_rss){\n-\t\t\t\t.queue_num = 1,\n-\t\t\t\t.queue = parser->queues,\n-\t\t\t};\n-\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {\n-\t\t\tconst struct rte_flow_action_rss *rss =\n-\t\t\t\t(const struct rte_flow_action_rss *)\n-\t\t\t\tactions->conf;\n-\t\t\tconst uint8_t *rss_key;\n-\t\t\tuint32_t rss_key_len;\n-\t\t\tuint16_t n;\n-\n-\t\t\tif (overlap & FATE)\n-\t\t\t\tgoto exit_action_overlap;\n-\t\t\toverlap |= FATE;\n-\t\t\tif (rss->func &&\n-\t\t\t    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"the only supported RSS hash\"\n-\t\t\t\t\t\t   \" function is Toeplitz\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n-\t\t\tif (parser->rss_conf.level > 1) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"a nonzero RSS encapsulation\"\n-\t\t\t\t\t\t   \" level is not supported\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-#endif\n-\t\t\tif (parser->rss_conf.level > 2) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"RSS encapsulation level\"\n-\t\t\t\t\t\t   \" > 1 is not supported\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (rss->types & MLX5_RSS_HF_MASK) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"unsupported RSS type\"\n-\t\t\t\t\t\t   \" requested\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (rss->key_len) {\n-\t\t\t\trss_key_len = rss->key_len;\n-\t\t\t\trss_key = rss->key;\n-\t\t\t} else {\n-\t\t\t\trss_key_len = rss_hash_default_key_len;\n-\t\t\t\trss_key = rss_hash_default_key;\n-\t\t\t}\n-\t\t\tif (rss_key_len != RTE_DIM(parser->rss_key)) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"RSS hash key must be\"\n-\t\t\t\t\t\t   \" exactly 40 bytes long\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (!rss->queue_num) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"no valid queues\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (rss->queue_num > RTE_DIM(parser->queues)) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"too many queues for RSS\"\n-\t\t\t\t\t\t   \" context\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tfor (n = 0; n < rss->queue_num; ++n) {\n-\t\t\t\tif (rss->queue[n] >= priv->rxqs_n) {\n-\t\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"queue id > number of\"\n-\t\t\t\t\t\t   \" queues\");\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\tparser->rss_conf = (struct rte_flow_action_rss){\n-\t\t\t\t.func = RTE_ETH_HASH_FUNCTION_DEFAULT,\n-\t\t\t\t.level = rss->level ? 
rss->level : 1,\n-\t\t\t\t.types = rss->types,\n-\t\t\t\t.key_len = rss_key_len,\n-\t\t\t\t.queue_num = rss->queue_num,\n-\t\t\t\t.key = memcpy(parser->rss_key, rss_key,\n-\t\t\t\t\t      sizeof(*rss_key) * rss_key_len),\n-\t\t\t\t.queue = memcpy(parser->queues, rss->queue,\n-\t\t\t\t\t\tsizeof(*rss->queue) *\n-\t\t\t\t\t\trss->queue_num),\n-\t\t\t};\n-\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {\n-\t\t\tconst struct rte_flow_action_mark *mark =\n-\t\t\t\t(const struct rte_flow_action_mark *)\n-\t\t\t\tactions->conf;\n-\n-\t\t\tif (overlap & MARK)\n-\t\t\t\tgoto exit_action_overlap;\n-\t\t\toverlap |= MARK;\n-\t\t\tif (!mark) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"mark must be defined\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t} else if (mark->id >= MLX5_FLOW_MARK_MAX) {\n-\t\t\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t   actions,\n-\t\t\t\t\t\t   \"mark must be between 0\"\n-\t\t\t\t\t\t   \" and 16777199\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tparser->mark = 1;\n-\t\t\tparser->mark_id = mark->id;\n-\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {\n-\t\t\tif (overlap & MARK)\n-\t\t\t\tgoto exit_action_overlap;\n-\t\t\toverlap |= MARK;\n-\t\t\tparser->mark = 1;\n-\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&\n-\t\t\t   priv->config.flow_counter_en) {\n-\t\t\tif (overlap & COUNT)\n-\t\t\t\tgoto exit_action_overlap;\n-\t\t\toverlap |= COUNT;\n-\t\t\tparser->count = 1;\n-\t\t} else {\n-\t\t\tgoto exit_action_not_supported;\n-\t\t}\n-\t}\n-\t/* When fate is unknown, drop traffic. */\n-\tif (!(overlap & FATE))\n-\t\tparser->drop = 1;\n-\tif (parser->drop && parser->mark)\n-\t\tparser->mark = 0;\n-\tif (!parser->rss_conf.queue_num && !parser->drop) {\n-\t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,\n-\t\t\t\t   NULL, \"no valid action\");\n-\t\treturn -rte_errno;\n-\t}\n-\treturn 0;\n-exit_action_not_supported:\n-\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t   actions, \"action not supported\");\n-\treturn -rte_errno;\n-exit_action_overlap:\n-\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t   actions, \"overlapping actions are not supported\");\n-\treturn -rte_errno;\n-}\n-\n-/**\n- * Validate items.\n- *\n- * @param[in] items\n- *   Pattern specification (list terminated by the END pattern item).\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- * @param[in, out] parser\n- *   Internal parser structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,\n-\t\t\t\t const struct rte_flow_item items[],\n-\t\t\t\t struct rte_flow_error *error,\n-\t\t\t\t struct mlx5_flow_parse *parser)\n-{\n-\tstruct priv *priv = dev->data->dev_private;\n-\tconst struct mlx5_flow_items *cur_item = mlx5_flow_items;\n-\tunsigned int i;\n-\tunsigned int last_voids = 0;\n-\tint ret = 0;\n-\n-\t/* Initialise the offsets to start after verbs attribute. 
*/\n-\tfor (i = 0; i != hash_rxq_init_n; ++i)\n-\t\tparser->queue[i].offset = sizeof(struct ibv_flow_attr);\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {\n-\t\tconst struct mlx5_flow_items *token = NULL;\n-\t\tunsigned int n;\n-\n-\t\tif (items->type == RTE_FLOW_ITEM_TYPE_VOID) {\n-\t\t\tlast_voids++;\n-\t\t\tcontinue;\n-\t\t}\n-\t\tfor (i = 0;\n-\t\t     cur_item->items &&\n-\t\t     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;\n-\t\t     ++i) {\n-\t\t\tif (cur_item->items[i] == items->type) {\n-\t\t\t\ttoken = &mlx5_flow_items[items->type];\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\t\tif (!token) {\n-\t\t\tret = -ENOTSUP;\n-\t\t\tgoto exit_item_not_supported;\n-\t\t}\n-\t\tcur_item = token;\n-\t\tret = mlx5_flow_item_validate(items,\n-\t\t\t\t\t      (const uint8_t *)cur_item->mask,\n-\t\t\t\t\t      cur_item->mask_sz);\n-\t\tif (ret)\n-\t\t\tgoto exit_item_not_supported;\n-\t\tif (IS_TUNNEL(items->type)) {\n-\t\t\tif (parser->tunnel &&\n-\t\t\t    !((items - last_voids - 1)->type ==\n-\t\t\t      RTE_FLOW_ITEM_TYPE_GRE && items->type ==\n-\t\t\t      RTE_FLOW_ITEM_TYPE_MPLS)) {\n-\t\t\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   items,\n-\t\t\t\t\t\t   \"Cannot recognize multiple\"\n-\t\t\t\t\t\t   \" tunnel encapsulations.\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (items->type == RTE_FLOW_ITEM_TYPE_MPLS &&\n-\t\t\t    !priv->config.mpls_en) {\n-\t\t\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   items,\n-\t\t\t\t\t\t   \"MPLS not supported or\"\n-\t\t\t\t\t\t   \" disabled in firmware\"\n-\t\t\t\t\t\t   \" configuration.\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (!priv->config.tunnel_en &&\n-\t\t\t    parser->rss_conf.level > 1) {\n-\t\t\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\titems,\n-\t\t\t\t\t\"RSS on tunnel is not supported\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tparser->inner = IBV_FLOW_SPEC_INNER;\n-\t\t\tparser->tunnel = flow_ptype[items->type];\n-\t\t}\n-\t\tif (parser->drop) {\n-\t\t\tparser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;\n-\t\t} else {\n-\t\t\tfor (n = 0; n != hash_rxq_init_n; ++n)\n-\t\t\t\tparser->queue[n].offset += cur_item->dst_sz;\n-\t\t}\n-\t\tlast_voids = 0;\n-\t}\n-\tif (parser->drop) {\n-\t\tparser->queue[HASH_RXQ_ETH].offset +=\n-\t\t\tsizeof(struct ibv_flow_spec_action_drop);\n-\t}\n-\tif (parser->mark) {\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i)\n-\t\t\tparser->queue[i].offset +=\n-\t\t\t\tsizeof(struct ibv_flow_spec_action_tag);\n-\t}\n-\tif (parser->count) {\n-\t\tunsigned int size = sizeof(struct ibv_flow_spec_counter_action);\n-\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i)\n-\t\t\tparser->queue[i].offset += size;\n-\t}\n-\treturn 0;\n-exit_item_not_supported:\n-\treturn rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t  items, \"item not supported\");\n-}\n-\n-/**\n- * Allocate memory space to store verbs flow attributes.\n- *\n- * @param[in] size\n- *   Amount of byte to allocate.\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- *\n- * @return\n- *   A verbs flow attribute on success, NULL otherwise and rte_errno is set.\n- */\n-static struct ibv_flow_attr *\n-mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error)\n-{\n-\tstruct ibv_flow_attr *ibv_attr;\n-\n-\tibv_attr = rte_calloc(__func__, 1, size, 0);\n-\tif (!ibv_attr) {\n-\t\trte_flow_error_set(error, ENOMEM,\n-\t\t\t\t  
 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t   NULL,\n-\t\t\t\t   \"cannot allocate verbs spec attributes\");\n-\t\treturn NULL;\n-\t}\n-\treturn ibv_attr;\n-}\n-\n-/**\n- * Make inner packet matching with an higher priority from the non Inner\n- * matching.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param[in, out] parser\n- *   Internal parser structure.\n- * @param attr\n- *   User flow attribute.\n- */\n-static void\n-mlx5_flow_update_priority(struct rte_eth_dev *dev,\n-\t\t\t  struct mlx5_flow_parse *parser,\n-\t\t\t  const struct rte_flow_attr *attr)\n-{\n-\tstruct priv *priv = dev->data->dev_private;\n-\tunsigned int i;\n-\tuint16_t priority;\n-\n-\t/*\t\t\t8 priorities\t>= 16 priorities\n-\t * Control flow:\t4-7\t\t8-15\n-\t * User normal flow:\t1-3\t\t4-7\n-\t * User tunnel flow:\t0-2\t\t0-3\n-\t */\n-\tpriority = attr->priority * MLX5_VERBS_FLOW_PRIO_8;\n-\tif (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)\n-\t\tpriority /= 2;\n-\t/*\n-\t * Lower non-tunnel flow Verbs priority 1 if only support 8 Verbs\n-\t * priorities, lower 4 otherwise.\n-\t */\n-\tif (!parser->inner) {\n-\t\tif (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)\n-\t\t\tpriority += 1;\n-\t\telse\n-\t\t\tpriority += MLX5_VERBS_FLOW_PRIO_8 / 2;\n-\t}\n-\tif (parser->drop) {\n-\t\tparser->queue[HASH_RXQ_ETH].ibv_attr->priority = priority +\n-\t\t\t\thash_rxq_init[HASH_RXQ_ETH].flow_priority;\n-\t\treturn;\n-\t}\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\tcontinue;\n-\t\tparser->queue[i].ibv_attr->priority = priority +\n-\t\t\t\thash_rxq_init[i].flow_priority;\n-\t}\n-}\n-\n-/**\n- * Finalise verbs flow attributes.\n- *\n- * @param[in, out] parser\n- *   Internal parser structure.\n- */\n-static void\n-mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser)\n-{\n-\tunsigned int i;\n-\tuint32_t inner = parser->inner;\n-\n-\t/* Don't create extra flows for outer RSS. 
*/\n-\tif (parser->tunnel && parser->rss_conf.level < 2)\n-\t\treturn;\n-\t/*\n-\t * Fill missing layers in verbs specifications, or compute the correct\n-\t * offset to allocate the memory space for the attributes and\n-\t * specifications.\n-\t */\n-\tfor (i = 0; i != hash_rxq_init_n - 1; ++i) {\n-\t\tunion {\n-\t\t\tstruct ibv_flow_spec_ipv4_ext ipv4;\n-\t\t\tstruct ibv_flow_spec_ipv6 ipv6;\n-\t\t\tstruct ibv_flow_spec_tcp_udp udp_tcp;\n-\t\t\tstruct ibv_flow_spec_eth eth;\n-\t\t} specs;\n-\t\tvoid *dst;\n-\t\tuint16_t size;\n-\n-\t\tif (i == parser->layer)\n-\t\t\tcontinue;\n-\t\tif (parser->layer == HASH_RXQ_ETH ||\n-\t\t    parser->layer == HASH_RXQ_TUNNEL) {\n-\t\t\tif (hash_rxq_init[i].ip_version == MLX5_IPV4) {\n-\t\t\t\tsize = sizeof(struct ibv_flow_spec_ipv4_ext);\n-\t\t\t\tspecs.ipv4 = (struct ibv_flow_spec_ipv4_ext){\n-\t\t\t\t\t.type = inner | IBV_FLOW_SPEC_IPV4_EXT,\n-\t\t\t\t\t.size = size,\n-\t\t\t\t};\n-\t\t\t} else {\n-\t\t\t\tsize = sizeof(struct ibv_flow_spec_ipv6);\n-\t\t\t\tspecs.ipv6 = (struct ibv_flow_spec_ipv6){\n-\t\t\t\t\t.type = inner | IBV_FLOW_SPEC_IPV6,\n-\t\t\t\t\t.size = size,\n-\t\t\t\t};\n-\t\t\t}\n-\t\t\tif (parser->queue[i].ibv_attr) {\n-\t\t\t\tdst = (void *)((uintptr_t)\n-\t\t\t\t\t       parser->queue[i].ibv_attr +\n-\t\t\t\t\t       parser->queue[i].offset);\n-\t\t\t\tmemcpy(dst, &specs, size);\n-\t\t\t\t++parser->queue[i].ibv_attr->num_of_specs;\n-\t\t\t}\n-\t\t\tparser->queue[i].offset += size;\n-\t\t}\n-\t\tif ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) ||\n-\t\t    (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {\n-\t\t\tsize = sizeof(struct ibv_flow_spec_tcp_udp);\n-\t\t\tspecs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {\n-\t\t\t\t.type = inner | ((i == HASH_RXQ_UDPV4 ||\n-\t\t\t\t\t  i == HASH_RXQ_UDPV6) ?\n-\t\t\t\t\t IBV_FLOW_SPEC_UDP :\n-\t\t\t\t\t IBV_FLOW_SPEC_TCP),\n-\t\t\t\t.size = size,\n-\t\t\t};\n-\t\t\tif (parser->queue[i].ibv_attr) {\n-\t\t\t\tdst = (void *)((uintptr_t)\n-\t\t\t\t\t       parser->queue[i].ibv_attr +\n-\t\t\t\t\t       parser->queue[i].offset);\n-\t\t\t\tmemcpy(dst, &specs, size);\n-\t\t\t\t++parser->queue[i].ibv_attr->num_of_specs;\n-\t\t\t}\n-\t\t\tparser->queue[i].offset += size;\n-\t\t}\n-\t}\n-}\n-\n-/**\n- * Update flows according to pattern and RSS hash fields.\n- *\n- * @param[in, out] parser\n- *   Internal parser structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_convert_rss(struct mlx5_flow_parse *parser)\n-{\n-\tunsigned int i;\n-\tenum hash_rxq_type start;\n-\tenum hash_rxq_type layer;\n-\tint outer = parser->tunnel && parser->rss_conf.level < 2;\n-\tuint64_t rss = parser->rss_conf.types;\n-\n-\tlayer = outer ? parser->out_layer : parser->layer;\n-\tif (layer == HASH_RXQ_TUNNEL)\n-\t\tlayer = HASH_RXQ_ETH;\n-\tif (outer) {\n-\t\t/* Only one hash type for outer RSS. */\n-\t\tif (rss && layer == HASH_RXQ_ETH) {\n-\t\t\tstart = HASH_RXQ_TCPV4;\n-\t\t} else if (rss && layer != HASH_RXQ_ETH &&\n-\t\t\t   !(rss & hash_rxq_init[layer].dpdk_rss_hf)) {\n-\t\t\t/* If RSS not match L4 pattern, try L3 RSS. */\n-\t\t\tif (layer < HASH_RXQ_IPV4)\n-\t\t\t\tlayer = HASH_RXQ_IPV4;\n-\t\t\telse if (layer > HASH_RXQ_IPV4 && layer < HASH_RXQ_IPV6)\n-\t\t\t\tlayer = HASH_RXQ_IPV6;\n-\t\t\tstart = layer;\n-\t\t} else {\n-\t\t\tstart = layer;\n-\t\t}\n-\t\t/* Scan first valid hash type. 
*/\n-\t\tfor (i = start; rss && i <= layer; ++i) {\n-\t\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\t\tcontinue;\n-\t\t\tif (hash_rxq_init[i].dpdk_rss_hf & rss)\n-\t\t\t\tbreak;\n-\t\t}\n-\t\tif (rss && i <= layer)\n-\t\t\tparser->queue[layer].hash_fields =\n-\t\t\t\t\thash_rxq_init[i].hash_fields;\n-\t\t/* Trim unused hash types. */\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tif (parser->queue[i].ibv_attr && i != layer) {\n-\t\t\t\trte_free(parser->queue[i].ibv_attr);\n-\t\t\t\tparser->queue[i].ibv_attr = NULL;\n-\t\t\t}\n-\t\t}\n-\t} else {\n-\t\t/* Expand for inner or normal RSS. */\n-\t\tif (rss && (layer == HASH_RXQ_ETH || layer == HASH_RXQ_IPV4))\n-\t\t\tstart = HASH_RXQ_TCPV4;\n-\t\telse if (rss && layer == HASH_RXQ_IPV6)\n-\t\t\tstart = HASH_RXQ_TCPV6;\n-\t\telse\n-\t\t\tstart = layer;\n-\t\t/* For L4 pattern, try L3 RSS if no L4 RSS. */\n-\t\t/* Trim unused hash types. */\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\t\tcontinue;\n-\t\t\tif (i < start || i > layer) {\n-\t\t\t\trte_free(parser->queue[i].ibv_attr);\n-\t\t\t\tparser->queue[i].ibv_attr = NULL;\n-\t\t\t\tcontinue;\n-\t\t\t}\n-\t\t\tif (!rss)\n-\t\t\t\tcontinue;\n-\t\t\tif (hash_rxq_init[i].dpdk_rss_hf & rss) {\n-\t\t\t\tparser->queue[i].hash_fields =\n-\t\t\t\t\t\thash_rxq_init[i].hash_fields;\n-\t\t\t} else if (i != layer) {\n-\t\t\t\t/* Remove unused RSS expansion. */\n-\t\t\t\trte_free(parser->queue[i].ibv_attr);\n-\t\t\t\tparser->queue[i].ibv_attr = NULL;\n-\t\t\t} else if (layer < HASH_RXQ_IPV4 &&\n-\t\t\t\t   (hash_rxq_init[HASH_RXQ_IPV4].dpdk_rss_hf &\n-\t\t\t\t    rss)) {\n-\t\t\t\t/* Allow IPv4 RSS on L4 pattern. */\n-\t\t\t\tparser->queue[i].hash_fields =\n-\t\t\t\t\thash_rxq_init[HASH_RXQ_IPV4]\n-\t\t\t\t\t\t.hash_fields;\n-\t\t\t} else if (i > HASH_RXQ_IPV4 && i < HASH_RXQ_IPV6 &&\n-\t\t\t\t   (hash_rxq_init[HASH_RXQ_IPV6].dpdk_rss_hf &\n-\t\t\t\t    rss)) {\n-\t\t\t\t/* Allow IPv4 RSS on L4 pattern. */\n-\t\t\t\tparser->queue[i].hash_fields =\n-\t\t\t\t\thash_rxq_init[HASH_RXQ_IPV6]\n-\t\t\t\t\t\t.hash_fields;\n-\t\t\t}\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * Validate and convert a flow supported by the NIC.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param[in] attr\n- *   Flow rule attributes.\n- * @param[in] pattern\n- *   Pattern specification (list terminated by the END pattern item).\n- * @param[in] actions\n- *   Associated actions (list terminated by the END action).\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- * @param[in, out] parser\n- *   Internal parser structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_convert(struct rte_eth_dev *dev,\n-\t\t  const struct rte_flow_attr *attr,\n-\t\t  const struct rte_flow_item items[],\n-\t\t  const struct rte_flow_action actions[],\n-\t\t  struct rte_flow_error *error,\n-\t\t  struct mlx5_flow_parse *parser)\n-{\n-\tconst struct mlx5_flow_items *cur_item = mlx5_flow_items;\n-\tunsigned int i;\n-\tint ret;\n-\n-\t/* First step. Validate the attributes, items and actions. 
*/\n-\t*parser = (struct mlx5_flow_parse){\n-\t\t.create = parser->create,\n-\t\t.layer = HASH_RXQ_ETH,\n-\t\t.mark_id = MLX5_FLOW_MARK_DEFAULT,\n-\t};\n-\tret = mlx5_flow_convert_attributes(attr, error);\n-\tif (ret)\n-\t\treturn ret;\n-\tret = mlx5_flow_convert_actions(dev, actions, error, parser);\n-\tif (ret)\n-\t\treturn ret;\n-\tret = mlx5_flow_convert_items_validate(dev, items, error, parser);\n-\tif (ret)\n-\t\treturn ret;\n-\tmlx5_flow_convert_finalise(parser);\n-\t/*\n-\t * Second step.\n-\t * Allocate the memory space to store verbs specifications.\n-\t */\n-\tif (parser->drop) {\n-\t\tunsigned int offset = parser->queue[HASH_RXQ_ETH].offset;\n-\n-\t\tparser->queue[HASH_RXQ_ETH].ibv_attr =\n-\t\t\tmlx5_flow_convert_allocate(offset, error);\n-\t\tif (!parser->queue[HASH_RXQ_ETH].ibv_attr)\n-\t\t\tgoto exit_enomem;\n-\t\tparser->queue[HASH_RXQ_ETH].offset =\n-\t\t\tsizeof(struct ibv_flow_attr);\n-\t} else {\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tunsigned int offset;\n-\n-\t\t\toffset = parser->queue[i].offset;\n-\t\t\tparser->queue[i].ibv_attr =\n-\t\t\t\tmlx5_flow_convert_allocate(offset, error);\n-\t\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\t\tgoto exit_enomem;\n-\t\t\tparser->queue[i].offset = sizeof(struct ibv_flow_attr);\n-\t\t}\n-\t}\n-\t/* Third step. Conversion parse, fill the specifications. */\n-\tparser->inner = 0;\n-\tparser->tunnel = 0;\n-\tparser->layer = HASH_RXQ_ETH;\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {\n-\t\tstruct mlx5_flow_data data = {\n-\t\t\t.dev = dev,\n-\t\t\t.parser = parser,\n-\t\t\t.error = error,\n-\t\t};\n-\n-\t\tif (items->type == RTE_FLOW_ITEM_TYPE_VOID)\n-\t\t\tcontinue;\n-\t\tcur_item = &mlx5_flow_items[items->type];\n-\t\tret = cur_item->convert(items,\n-\t\t\t\t\t(cur_item->default_mask ?\n-\t\t\t\t\t cur_item->default_mask :\n-\t\t\t\t\t cur_item->mask),\n-\t\t\t\t\t &data);\n-\t\tif (ret)\n-\t\t\tgoto exit_free;\n-\t}\n-\tif (!parser->drop) {\n-\t\t/* RSS check, remove unused hash types. */\n-\t\tret = mlx5_flow_convert_rss(parser);\n-\t\tif (ret)\n-\t\t\tgoto exit_free;\n-\t\t/* Complete missing specification. */\n-\t\tmlx5_flow_convert_finalise(parser);\n-\t}\n-\tmlx5_flow_update_priority(dev, parser, attr);\n-\tif (parser->mark)\n-\t\tmlx5_flow_create_flag_mark(parser, parser->mark_id);\n-\tif (parser->count && parser->create) {\n-\t\tmlx5_flow_create_count(dev, parser);\n-\t\tif (!parser->cs)\n-\t\t\tgoto exit_count_error;\n-\t}\n-exit_free:\n-\t/* Only verification is expected, all resources should be released. 
*/\n-\tif (!parser->create) {\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tif (parser->queue[i].ibv_attr) {\n-\t\t\t\trte_free(parser->queue[i].ibv_attr);\n-\t\t\t\tparser->queue[i].ibv_attr = NULL;\n-\t\t\t}\n-\t\t}\n-\t}\n-\treturn ret;\n-exit_enomem:\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (parser->queue[i].ibv_attr) {\n-\t\t\trte_free(parser->queue[i].ibv_attr);\n-\t\t\tparser->queue[i].ibv_attr = NULL;\n-\t\t}\n-\t}\n-\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t   NULL, \"cannot allocate verbs spec attributes\");\n-\treturn -rte_errno;\n-exit_count_error:\n-\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t   NULL, \"cannot create counter\");\n-\treturn -rte_errno;\n-}\n-\n-/**\n- * Copy the specification created into the flow.\n- *\n- * @param parser\n- *   Internal parser structure.\n- * @param src\n- *   Create specification.\n- * @param size\n- *   Size in bytes of the specification to copy.\n- */\n-static void\n-mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,\n-\t\t      unsigned int size)\n-{\n-\tunsigned int i;\n-\tvoid *dst;\n-\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\tcontinue;\n-\t\tdst = (void *)((uintptr_t)parser->queue[i].ibv_attr +\n-\t\t\t\tparser->queue[i].offset);\n-\t\tmemcpy(dst, src, size);\n-\t\t++parser->queue[i].ibv_attr->num_of_specs;\n-\t\tparser->queue[i].offset += size;\n-\t}\n-}\n-\n-/**\n- * Convert Ethernet item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_eth(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data)\n-{\n-\tconst struct rte_flow_item_eth *spec = item->spec;\n-\tconst struct rte_flow_item_eth *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tconst unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);\n-\tstruct ibv_flow_spec_eth eth = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_ETH,\n-\t\t.size = eth_size,\n-\t};\n-\n-\tparser->layer = HASH_RXQ_ETH;\n-\tif (spec) {\n-\t\tunsigned int i;\n-\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\tmemcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);\n-\t\tmemcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);\n-\t\teth.val.ether_type = spec->type;\n-\t\tmemcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);\n-\t\tmemcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);\n-\t\teth.mask.ether_type = mask->type;\n-\t\t/* Remove unwanted bits from values. 
*/\n-\t\tfor (i = 0; i < ETHER_ADDR_LEN; ++i) {\n-\t\t\teth.val.dst_mac[i] &= eth.mask.dst_mac[i];\n-\t\t\teth.val.src_mac[i] &= eth.mask.src_mac[i];\n-\t\t}\n-\t\teth.val.ether_type &= eth.mask.ether_type;\n-\t}\n-\tmlx5_flow_create_copy(parser, &eth, eth_size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert VLAN item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_vlan(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data)\n-{\n-\tconst struct rte_flow_item_vlan *spec = item->spec;\n-\tconst struct rte_flow_item_vlan *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tstruct ibv_flow_spec_eth *eth;\n-\tconst unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);\n-\tconst char *msg = \"VLAN cannot be empty\";\n-\n-\tif (spec) {\n-\t\tunsigned int i;\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\t\tcontinue;\n-\n-\t\t\teth = (void *)((uintptr_t)parser->queue[i].ibv_attr +\n-\t\t\t\t       parser->queue[i].offset - eth_size);\n-\t\t\teth->val.vlan_tag = spec->tci;\n-\t\t\teth->mask.vlan_tag = mask->tci;\n-\t\t\teth->val.vlan_tag &= eth->mask.vlan_tag;\n-\t\t\t/*\n-\t\t\t * From verbs perspective an empty VLAN is equivalent\n-\t\t\t * to a packet without VLAN layer.\n-\t\t\t */\n-\t\t\tif (!eth->mask.vlan_tag)\n-\t\t\t\tgoto error;\n-\t\t\t/* Outer TPID cannot be matched. */\n-\t\t\tif (eth->mask.ether_type) {\n-\t\t\t\tmsg = \"VLAN TPID matching is not supported\";\n-\t\t\t\tgoto error;\n-\t\t\t}\n-\t\t\teth->val.ether_type = spec->inner_type;\n-\t\t\teth->mask.ether_type = mask->inner_type;\n-\t\t\teth->val.ether_type &= eth->mask.ether_type;\n-\t\t}\n-\t\treturn 0;\n-\t}\n-error:\n-\treturn rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t  item, msg);\n-}\n-\n-/**\n- * Convert IPv4 item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_ipv4(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data)\n-{\n-\tstruct priv *priv = data->dev->data->dev_private;\n-\tconst struct rte_flow_item_ipv4 *spec = item->spec;\n-\tconst struct rte_flow_item_ipv4 *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tunsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);\n-\tstruct ibv_flow_spec_ipv4_ext ipv4 = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,\n-\t\t.size = ipv4_size,\n-\t};\n-\n-\tif (parser->layer == HASH_RXQ_TUNNEL &&\n-\t    parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&\n-\t    !priv->config.l3_vxlan_en)\n-\t\treturn rte_flow_error_set(data->error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 VXLAN not enabled by device\"\n-\t\t\t\t\t  \" parameter and/or not configured\"\n-\t\t\t\t\t  \" in firmware\");\n-\tparser->layer = 
HASH_RXQ_IPV4;\n-\tif (spec) {\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\tipv4.val = (struct ibv_flow_ipv4_ext_filter){\n-\t\t\t.src_ip = spec->hdr.src_addr,\n-\t\t\t.dst_ip = spec->hdr.dst_addr,\n-\t\t\t.proto = spec->hdr.next_proto_id,\n-\t\t\t.tos = spec->hdr.type_of_service,\n-\t\t};\n-\t\tipv4.mask = (struct ibv_flow_ipv4_ext_filter){\n-\t\t\t.src_ip = mask->hdr.src_addr,\n-\t\t\t.dst_ip = mask->hdr.dst_addr,\n-\t\t\t.proto = mask->hdr.next_proto_id,\n-\t\t\t.tos = mask->hdr.type_of_service,\n-\t\t};\n-\t\t/* Remove unwanted bits from values. */\n-\t\tipv4.val.src_ip &= ipv4.mask.src_ip;\n-\t\tipv4.val.dst_ip &= ipv4.mask.dst_ip;\n-\t\tipv4.val.proto &= ipv4.mask.proto;\n-\t\tipv4.val.tos &= ipv4.mask.tos;\n-\t}\n-\tmlx5_flow_create_copy(parser, &ipv4, ipv4_size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert IPv6 item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_ipv6(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data)\n-{\n-\tstruct priv *priv = data->dev->data->dev_private;\n-\tconst struct rte_flow_item_ipv6 *spec = item->spec;\n-\tconst struct rte_flow_item_ipv6 *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tunsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);\n-\tstruct ibv_flow_spec_ipv6 ipv6 = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_IPV6,\n-\t\t.size = ipv6_size,\n-\t};\n-\n-\tif (parser->layer == HASH_RXQ_TUNNEL &&\n-\t    parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&\n-\t    !priv->config.l3_vxlan_en)\n-\t\treturn rte_flow_error_set(data->error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 VXLAN not enabled by device\"\n-\t\t\t\t\t  \" parameter and/or not configured\"\n-\t\t\t\t\t  \" in firmware\");\n-\tparser->layer = HASH_RXQ_IPV6;\n-\tif (spec) {\n-\t\tunsigned int i;\n-\t\tuint32_t vtc_flow_val;\n-\t\tuint32_t vtc_flow_mask;\n-\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\tmemcpy(&ipv6.val.src_ip, spec->hdr.src_addr,\n-\t\t       RTE_DIM(ipv6.val.src_ip));\n-\t\tmemcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,\n-\t\t       RTE_DIM(ipv6.val.dst_ip));\n-\t\tmemcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,\n-\t\t       RTE_DIM(ipv6.mask.src_ip));\n-\t\tmemcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,\n-\t\t       RTE_DIM(ipv6.mask.dst_ip));\n-\t\tvtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);\n-\t\tvtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);\n-\t\tipv6.val.flow_label =\n-\t\t\trte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>\n-\t\t\t\t\t IPV6_HDR_FL_SHIFT);\n-\t\tipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>\n-\t\t\t\t\t IPV6_HDR_TC_SHIFT;\n-\t\tipv6.val.next_hdr = spec->hdr.proto;\n-\t\tipv6.val.hop_limit = spec->hdr.hop_limits;\n-\t\tipv6.mask.flow_label =\n-\t\t\trte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>\n-\t\t\t\t\t IPV6_HDR_FL_SHIFT);\n-\t\tipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>\n-\t\t\t\t\t  IPV6_HDR_TC_SHIFT;\n-\t\tipv6.mask.next_hdr = mask->hdr.proto;\n-\t\tipv6.mask.hop_limit = mask->hdr.hop_limits;\n-\t\t/* Remove unwanted bits from values. 
*/\n-\t\tfor (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {\n-\t\t\tipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];\n-\t\t\tipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];\n-\t\t}\n-\t\tipv6.val.flow_label &= ipv6.mask.flow_label;\n-\t\tipv6.val.traffic_class &= ipv6.mask.traffic_class;\n-\t\tipv6.val.next_hdr &= ipv6.mask.next_hdr;\n-\t\tipv6.val.hop_limit &= ipv6.mask.hop_limit;\n-\t}\n-\tmlx5_flow_create_copy(parser, &ipv6, ipv6_size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert UDP item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_udp(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data)\n-{\n-\tconst struct rte_flow_item_udp *spec = item->spec;\n-\tconst struct rte_flow_item_udp *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tunsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);\n-\tstruct ibv_flow_spec_tcp_udp udp = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_UDP,\n-\t\t.size = udp_size,\n-\t};\n-\n-\tif (parser->layer == HASH_RXQ_IPV4)\n-\t\tparser->layer = HASH_RXQ_UDPV4;\n-\telse\n-\t\tparser->layer = HASH_RXQ_UDPV6;\n-\tif (spec) {\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\tudp.val.dst_port = spec->hdr.dst_port;\n-\t\tudp.val.src_port = spec->hdr.src_port;\n-\t\tudp.mask.dst_port = mask->hdr.dst_port;\n-\t\tudp.mask.src_port = mask->hdr.src_port;\n-\t\t/* Remove unwanted bits from values. */\n-\t\tudp.val.src_port &= udp.mask.src_port;\n-\t\tudp.val.dst_port &= udp.mask.dst_port;\n-\t}\n-\tmlx5_flow_create_copy(parser, &udp, udp_size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert TCP item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_tcp(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data)\n-{\n-\tconst struct rte_flow_item_tcp *spec = item->spec;\n-\tconst struct rte_flow_item_tcp *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tunsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);\n-\tstruct ibv_flow_spec_tcp_udp tcp = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_TCP,\n-\t\t.size = tcp_size,\n-\t};\n-\n-\tif (parser->layer == HASH_RXQ_IPV4)\n-\t\tparser->layer = HASH_RXQ_TCPV4;\n-\telse\n-\t\tparser->layer = HASH_RXQ_TCPV6;\n-\tif (spec) {\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\ttcp.val.dst_port = spec->hdr.dst_port;\n-\t\ttcp.val.src_port = spec->hdr.src_port;\n-\t\ttcp.mask.dst_port = mask->hdr.dst_port;\n-\t\ttcp.mask.src_port = mask->hdr.src_port;\n-\t\t/* Remove unwanted bits from values. 
*/\n-\t\ttcp.val.src_port &= tcp.mask.src_port;\n-\t\ttcp.val.dst_port &= tcp.mask.dst_port;\n-\t}\n-\tmlx5_flow_create_copy(parser, &tcp, tcp_size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert VXLAN item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_vxlan(const struct rte_flow_item *item,\n-\t\t       const void *default_mask,\n-\t\t       struct mlx5_flow_data *data)\n-{\n-\tconst struct rte_flow_item_vxlan *spec = item->spec;\n-\tconst struct rte_flow_item_vxlan *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tunsigned int size = sizeof(struct ibv_flow_spec_tunnel);\n-\tstruct ibv_flow_spec_tunnel vxlan = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,\n-\t\t.size = size,\n-\t};\n-\tunion vni {\n-\t\tuint32_t vlan_id;\n-\t\tuint8_t vni[4];\n-\t} id;\n-\n-\tid.vni[0] = 0;\n-\tparser->inner = IBV_FLOW_SPEC_INNER;\n-\tparser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)];\n-\tparser->out_layer = parser->layer;\n-\tparser->layer = HASH_RXQ_TUNNEL;\n-\tif (spec) {\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\tmemcpy(&id.vni[1], spec->vni, 3);\n-\t\tvxlan.val.tunnel_id = id.vlan_id;\n-\t\tmemcpy(&id.vni[1], mask->vni, 3);\n-\t\tvxlan.mask.tunnel_id = id.vlan_id;\n-\t\t/* Remove unwanted bits from values. */\n-\t\tvxlan.val.tunnel_id &= vxlan.mask.tunnel_id;\n-\t}\n-\t/*\n-\t * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this\n-\t * layer is defined in the Verbs specification it is interpreted as\n-\t * wildcard and all packets will match this rule, if it follows a full\n-\t * stack layer (ex: eth / ipv4 / udp), all packets matching the layers\n-\t * before will also match this rule.\n-\t * To avoid such situation, VNI 0 is currently refused.\n-\t */\n-\t/* Only allow tunnel w/o tunnel id pattern after proper outer spec. 
*/\n-\tif (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)\n-\t\treturn rte_flow_error_set(data->error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"VxLAN vni cannot be 0\");\n-\tmlx5_flow_create_copy(parser, &vxlan, size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert VXLAN-GPE item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,\n-\t\t\t   const void *default_mask,\n-\t\t\t   struct mlx5_flow_data *data)\n-{\n-\tstruct priv *priv = data->dev->data->dev_private;\n-\tconst struct rte_flow_item_vxlan_gpe *spec = item->spec;\n-\tconst struct rte_flow_item_vxlan_gpe *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tunsigned int size = sizeof(struct ibv_flow_spec_tunnel);\n-\tstruct ibv_flow_spec_tunnel vxlan = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,\n-\t\t.size = size,\n-\t};\n-\tunion vni {\n-\t\tuint32_t vlan_id;\n-\t\tuint8_t vni[4];\n-\t} id;\n-\n-\tif (!priv->config.l3_vxlan_en)\n-\t\treturn rte_flow_error_set(data->error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"L3 VXLAN not enabled by device\"\n-\t\t\t\t\t  \" parameter and/or not configured\"\n-\t\t\t\t\t  \" in firmware\");\n-\tid.vni[0] = 0;\n-\tparser->inner = IBV_FLOW_SPEC_INNER;\n-\tparser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)];\n-\tparser->out_layer = parser->layer;\n-\tparser->layer = HASH_RXQ_TUNNEL;\n-\tif (spec) {\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\tmemcpy(&id.vni[1], spec->vni, 3);\n-\t\tvxlan.val.tunnel_id = id.vlan_id;\n-\t\tmemcpy(&id.vni[1], mask->vni, 3);\n-\t\tvxlan.mask.tunnel_id = id.vlan_id;\n-\t\tif (spec->protocol)\n-\t\t\treturn rte_flow_error_set(data->error, EINVAL,\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t  item,\n-\t\t\t\t\t\t  \"VxLAN-GPE protocol not\"\n-\t\t\t\t\t\t  \" supported\");\n-\t\t/* Remove unwanted bits from values. */\n-\t\tvxlan.val.tunnel_id &= vxlan.mask.tunnel_id;\n-\t}\n-\t/*\n-\t * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this\n-\t * layer is defined in the Verbs specification it is interpreted as\n-\t * wildcard and all packets will match this rule, if it follows a full\n-\t * stack layer (ex: eth / ipv4 / udp), all packets matching the layers\n-\t * before will also match this rule.\n-\t * To avoid such situation, VNI 0 is currently refused.\n-\t */\n-\t/* Only allow tunnel w/o tunnel id pattern after proper outer spec. 
*/\n-\tif (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)\n-\t\treturn rte_flow_error_set(data->error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"VxLAN-GPE vni cannot be 0\");\n-\tmlx5_flow_create_copy(parser, &vxlan, size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert GRE item to Verbs specification.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_gre(const struct rte_flow_item *item,\n-\t\t     const void *default_mask,\n-\t\t     struct mlx5_flow_data *data)\n-{\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT\n-\t(void)default_mask;\n-\tunsigned int size = sizeof(struct ibv_flow_spec_tunnel);\n-\tstruct ibv_flow_spec_tunnel tunnel = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,\n-\t\t.size = size,\n-\t};\n-#else\n-\tconst struct rte_flow_item_gre *spec = item->spec;\n-\tconst struct rte_flow_item_gre *mask = item->mask;\n-\tunsigned int size = sizeof(struct ibv_flow_spec_gre);\n-\tstruct ibv_flow_spec_gre tunnel = {\n-\t\t.type = parser->inner | IBV_FLOW_SPEC_GRE,\n-\t\t.size = size,\n-\t};\n-#endif\n-\tstruct ibv_flow_spec_ipv4_ext *ipv4;\n-\tstruct ibv_flow_spec_ipv6 *ipv6;\n-\tunsigned int i;\n-\n-\tparser->inner = IBV_FLOW_SPEC_INNER;\n-\tparser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];\n-\tparser->out_layer = parser->layer;\n-\tparser->layer = HASH_RXQ_TUNNEL;\n-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT\n-\tif (spec) {\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\ttunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;\n-\t\ttunnel.val.protocol = spec->protocol;\n-\t\ttunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;\n-\t\ttunnel.mask.protocol = mask->protocol;\n-\t\t/* Remove unwanted bits from values. */\n-\t\ttunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;\n-\t\ttunnel.val.protocol &= tunnel.mask.protocol;\n-\t\ttunnel.val.key &= tunnel.mask.key;\n-\t}\n-#endif\n-\t/* Update encapsulation IP layer protocol. 
*/\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\tcontinue;\n-\t\tif (parser->out_layer == HASH_RXQ_IPV4) {\n-\t\t\tipv4 = (void *)((uintptr_t)parser->queue[i].ibv_attr +\n-\t\t\t\tparser->queue[i].offset -\n-\t\t\t\tsizeof(struct ibv_flow_spec_ipv4_ext));\n-\t\t\tif (ipv4->mask.proto && ipv4->val.proto != MLX5_GRE)\n-\t\t\t\tbreak;\n-\t\t\tipv4->val.proto = MLX5_GRE;\n-\t\t\tipv4->mask.proto = 0xff;\n-\t\t} else if (parser->out_layer == HASH_RXQ_IPV6) {\n-\t\t\tipv6 = (void *)((uintptr_t)parser->queue[i].ibv_attr +\n-\t\t\t\tparser->queue[i].offset -\n-\t\t\t\tsizeof(struct ibv_flow_spec_ipv6));\n-\t\t\tif (ipv6->mask.next_hdr &&\n-\t\t\t    ipv6->val.next_hdr != MLX5_GRE)\n-\t\t\t\tbreak;\n-\t\t\tipv6->val.next_hdr = MLX5_GRE;\n-\t\t\tipv6->mask.next_hdr = 0xff;\n-\t\t}\n-\t}\n-\tif (i != hash_rxq_init_n)\n-\t\treturn rte_flow_error_set(data->error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t  item,\n-\t\t\t\t\t  \"IP protocol of GRE must be 47\");\n-\tmlx5_flow_create_copy(parser, &tunnel, size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert MPLS item to Verbs specification.\n- * MPLS tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP.\n- *\n- * @param item[in]\n- *   Item specification.\n- * @param default_mask[in]\n- *   Default bit-masks to use when item->mask is not provided.\n- * @param data[in, out]\n- *   User structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_mpls(const struct rte_flow_item *item,\n-\t\t      const void *default_mask,\n-\t\t      struct mlx5_flow_data *data)\n-{\n-#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT\n-\t(void)default_mask;\n-\treturn rte_flow_error_set(data->error, ENOTSUP,\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t  item,\n-\t\t\t\t  \"MPLS is not supported by driver\");\n-#else\n-\tconst struct rte_flow_item_mpls *spec = item->spec;\n-\tconst struct rte_flow_item_mpls *mask = item->mask;\n-\tstruct mlx5_flow_parse *parser = data->parser;\n-\tunsigned int size = sizeof(struct ibv_flow_spec_mpls);\n-\tstruct ibv_flow_spec_mpls mpls = {\n-\t\t.type = IBV_FLOW_SPEC_MPLS,\n-\t\t.size = size,\n-\t};\n-\n-\tparser->inner = IBV_FLOW_SPEC_INNER;\n-\tif (parser->layer == HASH_RXQ_UDPV4 ||\n-\t    parser->layer == HASH_RXQ_UDPV6) {\n-\t\tparser->tunnel =\n-\t\t\tptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)];\n-\t\tparser->out_layer = parser->layer;\n-\t} else {\n-\t\tparser->tunnel =\n-\t\t\tptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)];\n-\t\t/* parser->out_layer stays as in GRE out_layer. */\n-\t}\n-\tparser->layer = HASH_RXQ_TUNNEL;\n-\tif (spec) {\n-\t\tif (!mask)\n-\t\t\tmask = default_mask;\n-\t\t/*\n-\t\t * The verbs label field includes the entire MPLS header:\n-\t\t * bits 0:19 - label value field.\n-\t\t * bits 20:22 - traffic class field.\n-\t\t * bits 23 - bottom of stack bit.\n-\t\t * bits 24:31 - ttl field.\n-\t\t */\n-\t\tmpls.val.label = *(const uint32_t *)spec;\n-\t\tmpls.mask.label = *(const uint32_t *)mask;\n-\t\t/* Remove unwanted bits from values. 
*/\n-\t\tmpls.val.label &= mpls.mask.label;\n-\t}\n-\tmlx5_flow_create_copy(parser, &mpls, size);\n-\treturn 0;\n-#endif\n-}\n-\n-/**\n- * Convert mark/flag action to Verbs specification.\n- *\n- * @param parser\n- *   Internal parser structure.\n- * @param mark_id\n- *   Mark identifier.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)\n-{\n-\tunsigned int size = sizeof(struct ibv_flow_spec_action_tag);\n-\tstruct ibv_flow_spec_action_tag tag = {\n-\t\t.type = IBV_FLOW_SPEC_ACTION_TAG,\n-\t\t.size = size,\n-\t\t.tag_id = mlx5_flow_mark_set(mark_id),\n-\t};\n-\n-\tassert(parser->mark);\n-\tmlx5_flow_create_copy(parser, &tag, size);\n-\treturn 0;\n-}\n-\n-/**\n- * Convert count action to Verbs specification.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param parser\n- *   Pointer to MLX5 flow parser structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,\n-\t\t       struct mlx5_flow_parse *parser __rte_unused)\n-{\n-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT\n-\tstruct priv *priv = dev->data->dev_private;\n-\tunsigned int size = sizeof(struct ibv_flow_spec_counter_action);\n-\tstruct ibv_counter_set_init_attr init_attr = {0};\n-\tstruct ibv_flow_spec_counter_action counter = {\n-\t\t.type = IBV_FLOW_SPEC_ACTION_COUNT,\n-\t\t.size = size,\n-\t\t.counter_set_handle = 0,\n-\t};\n-\n-\tinit_attr.counter_set_id = 0;\n-\tparser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);\n-\tif (!parser->cs) {\n-\t\trte_errno = EINVAL;\n-\t\treturn -rte_errno;\n-\t}\n-\tcounter.counter_set_handle = parser->cs->handle;\n-\tmlx5_flow_create_copy(parser, &counter, size);\n-#endif\n-\treturn 0;\n-}\n-\n-/**\n- * Complete flow rule creation with a drop queue.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param parser\n- *   Internal parser structure.\n- * @param flow\n- *   Pointer to the rte_flow.\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,\n-\t\t\t\t   struct mlx5_flow_parse *parser,\n-\t\t\t\t   struct rte_flow *flow,\n-\t\t\t\t   struct rte_flow_error *error)\n-{\n-\tstruct priv *priv = dev->data->dev_private;\n-\tstruct ibv_flow_spec_action_drop *drop;\n-\tunsigned int size = sizeof(struct ibv_flow_spec_action_drop);\n-\n-\tassert(priv->pd);\n-\tassert(priv->ctx);\n-\tflow->drop = 1;\n-\tdrop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr +\n-\t\t\tparser->queue[HASH_RXQ_ETH].offset);\n-\t*drop = (struct ibv_flow_spec_action_drop){\n-\t\t\t.type = IBV_FLOW_SPEC_ACTION_DROP,\n-\t\t\t.size = size,\n-\t};\n-\t++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs;\n-\tparser->queue[HASH_RXQ_ETH].offset += size;\n-\tflow->frxq[HASH_RXQ_ETH].ibv_attr =\n-\t\tparser->queue[HASH_RXQ_ETH].ibv_attr;\n-\tif (parser->count)\n-\t\tflow->cs = parser->cs;\n-\tif (!dev->data->dev_started)\n-\t\treturn 0;\n-\tparser->queue[HASH_RXQ_ETH].ibv_attr = NULL;\n-\tflow->frxq[HASH_RXQ_ETH].ibv_flow =\n-\t\tmlx5_glue->create_flow(priv->flow_drop_queue->qp,\n-\t\t\t\t       flow->frxq[HASH_RXQ_ETH].ibv_attr);\n-\tif (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {\n-\t\trte_flow_error_set(error, ENOMEM, 
RTE_FLOW_ERROR_TYPE_HANDLE,\n-\t\t\t\t   NULL, \"flow rule creation failure\");\n-\t\tgoto error;\n-\t}\n-\treturn 0;\n-error:\n-\tassert(flow);\n-\tif (flow->frxq[HASH_RXQ_ETH].ibv_flow) {\n-\t\tclaim_zero(mlx5_glue->destroy_flow\n-\t\t\t   (flow->frxq[HASH_RXQ_ETH].ibv_flow));\n-\t\tflow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;\n-\t}\n-\tif (flow->frxq[HASH_RXQ_ETH].ibv_attr) {\n-\t\trte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);\n-\t\tflow->frxq[HASH_RXQ_ETH].ibv_attr = NULL;\n-\t}\n-\tif (flow->cs) {\n-\t\tclaim_zero(mlx5_glue->destroy_counter_set(flow->cs));\n-\t\tflow->cs = NULL;\n-\t\tparser->cs = NULL;\n-\t}\n-\treturn -rte_errno;\n-}\n-\n-/**\n- * Create hash Rx queues when RSS is enabled.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param parser\n- *   Internal parser structure.\n- * @param flow\n- *   Pointer to the rte_flow.\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,\n-\t\t\t\t  struct mlx5_flow_parse *parser,\n-\t\t\t\t  struct rte_flow *flow,\n-\t\t\t\t  struct rte_flow_error *error)\n-{\n-\tunsigned int i;\n-\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (!parser->queue[i].ibv_attr)\n-\t\t\tcontinue;\n-\t\tflow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;\n-\t\tparser->queue[i].ibv_attr = NULL;\n-\t\tflow->frxq[i].hash_fields = parser->queue[i].hash_fields;\n-\t\tif (!dev->data->dev_started)\n-\t\t\tcontinue;\n-\t\tflow->frxq[i].hrxq =\n-\t\t\tmlx5_hrxq_get(dev,\n-\t\t\t\t      parser->rss_conf.key,\n-\t\t\t\t      parser->rss_conf.key_len,\n-\t\t\t\t      flow->frxq[i].hash_fields,\n-\t\t\t\t      parser->rss_conf.queue,\n-\t\t\t\t      parser->rss_conf.queue_num,\n-\t\t\t\t      parser->tunnel,\n-\t\t\t\t      parser->rss_conf.level);\n-\t\tif (flow->frxq[i].hrxq)\n-\t\t\tcontinue;\n-\t\tflow->frxq[i].hrxq =\n-\t\t\tmlx5_hrxq_new(dev,\n-\t\t\t\t      parser->rss_conf.key,\n-\t\t\t\t      parser->rss_conf.key_len,\n-\t\t\t\t      flow->frxq[i].hash_fields,\n-\t\t\t\t      parser->rss_conf.queue,\n-\t\t\t\t      parser->rss_conf.queue_num,\n-\t\t\t\t      parser->tunnel,\n-\t\t\t\t      parser->rss_conf.level);\n-\t\tif (!flow->frxq[i].hrxq) {\n-\t\t\treturn rte_flow_error_set(error, ENOMEM,\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE,\n-\t\t\t\t\t\t  NULL,\n-\t\t\t\t\t\t  \"cannot create hash rxq\");\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * RXQ update after flow rule creation.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param flow\n- *   Pointer to the flow rule.\n- */\n-static void\n-mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)\n-{\n-\tstruct priv *priv = dev->data->dev_private;\n-\tunsigned int i;\n-\tunsigned int j;\n-\n-\tif (!dev->data->dev_started)\n-\t\treturn;\n-\tfor (i = 0; i != flow->rss_conf.queue_num; ++i) {\n-\t\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)\n-\t\t\t\t\t\t [(*flow->queues)[i]];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n-\t\tuint8_t tunnel = PTYPE_IDX(flow->tunnel);\n-\n-\t\trxq_data->mark |= flow->mark;\n-\t\tif (!tunnel)\n-\t\t\tcontinue;\n-\t\trxq_ctrl->tunnel_types[tunnel] += 1;\n-\t\t/* Clear tunnel type if more than one tunnel types set. 
*/\n-\t\tfor (j = 0; j != RTE_DIM(rxq_ctrl->tunnel_types); ++j) {\n-\t\t\tif (j == tunnel)\n-\t\t\t\tcontinue;\n-\t\t\tif (rxq_ctrl->tunnel_types[j] > 0) {\n-\t\t\t\trxq_data->tunnel = 0;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\t\tif (j == RTE_DIM(rxq_ctrl->tunnel_types))\n-\t\t\trxq_data->tunnel = flow->tunnel;\n-\t}\n-}\n-\n-/**\n- * Dump flow hash RX queue detail.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param flow\n- *   Pointer to the rte_flow.\n- * @param hrxq_idx\n- *   Hash RX queue index.\n- */\n-static void\n-mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused,\n-\t       struct rte_flow *flow __rte_unused,\n-\t       unsigned int hrxq_idx __rte_unused)\n-{\n-#ifndef NDEBUG\n-\tuintptr_t spec_ptr;\n-\tuint16_t j;\n-\tchar buf[256];\n-\tuint8_t off;\n-\tuint64_t extra_hash_fields = 0;\n+/* Dev ops structure defined in mlx5.c */\n+extern const struct eth_dev_ops mlx5_dev_ops;\n+extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \n-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n-\tif (flow->tunnel && flow->rss_conf.level > 1)\n-\t\textra_hash_fields = (uint32_t)IBV_RX_HASH_INNER;\n-#endif\n-\tspec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1);\n-\tfor (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs;\n-\t     j++) {\n-\t\tstruct ibv_flow_spec *spec = (void *)spec_ptr;\n-\t\toff += sprintf(buf + off, \" %x(%hu)\", spec->hdr.type,\n-\t\t\t       spec->hdr.size);\n-\t\tspec_ptr += spec->hdr.size;\n-\t}\n-\tDRV_LOG(DEBUG,\n-\t\t\"port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p,\"\n-\t\t\" hash:%\" PRIx64 \"/%u specs:%hhu(%hu), priority:%hu, type:%d,\"\n-\t\t\" flags:%x, comp_mask:%x specs:%s\",\n-\t\tdev->data->port_id, (void *)flow, hrxq_idx,\n-\t\t(void *)flow->frxq[hrxq_idx].hrxq,\n-\t\t(void *)flow->frxq[hrxq_idx].hrxq->qp,\n-\t\t(void *)flow->frxq[hrxq_idx].hrxq->ind_table,\n-\t\t(flow->frxq[hrxq_idx].hash_fields | extra_hash_fields),\n-\t\tflow->rss_conf.queue_num,\n-\t\tflow->frxq[hrxq_idx].ibv_attr->num_of_specs,\n-\t\tflow->frxq[hrxq_idx].ibv_attr->size,\n-\t\tflow->frxq[hrxq_idx].ibv_attr->priority,\n-\t\tflow->frxq[hrxq_idx].ibv_attr->type,\n-\t\tflow->frxq[hrxq_idx].ibv_attr->flags,\n-\t\tflow->frxq[hrxq_idx].ibv_attr->comp_mask,\n-\t\tbuf);\n-#endif\n-}\n+struct rte_flow {\n+\tTAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. 
*/\n+};\n \n-/**\n- * Complete flow rule creation.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param parser\n- *   Internal parser structure.\n- * @param flow\n- *   Pointer to the rte_flow.\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_create_action_queue(struct rte_eth_dev *dev,\n-\t\t\t      struct mlx5_flow_parse *parser,\n-\t\t\t      struct rte_flow *flow,\n-\t\t\t      struct rte_flow_error *error)\n-{\n-\tstruct priv *priv __rte_unused = dev->data->dev_private;\n-\tint ret;\n-\tunsigned int i;\n-\tunsigned int flows_n = 0;\n+static const struct rte_flow_ops mlx5_flow_ops = {\n+\t.isolate = mlx5_flow_isolate,\n+};\n \n-\tassert(priv->pd);\n-\tassert(priv->ctx);\n-\tassert(!parser->drop);\n-\tret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);\n-\tif (ret)\n-\t\tgoto error;\n-\tif (parser->count)\n-\t\tflow->cs = parser->cs;\n-\tif (!dev->data->dev_started)\n-\t\treturn 0;\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (!flow->frxq[i].hrxq)\n-\t\t\tcontinue;\n-\t\tflow->frxq[i].ibv_flow =\n-\t\t\tmlx5_glue->create_flow(flow->frxq[i].hrxq->qp,\n-\t\t\t\t\t       flow->frxq[i].ibv_attr);\n-\t\tmlx5_flow_dump(dev, flow, i);\n-\t\tif (!flow->frxq[i].ibv_flow) {\n-\t\t\trte_flow_error_set(error, ENOMEM,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE,\n-\t\t\t\t\t   NULL, \"flow rule creation failure\");\n-\t\t\tgoto error;\n-\t\t}\n-\t\t++flows_n;\n-\t}\n-\tif (!flows_n) {\n-\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,\n-\t\t\t\t   NULL, \"internal error in flow creation\");\n-\t\tgoto error;\n-\t}\n-\tmlx5_flow_create_update_rxqs(dev, flow);\n-\treturn 0;\n-error:\n-\tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tassert(flow);\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (flow->frxq[i].ibv_flow) {\n-\t\t\tstruct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;\n+/* Convert FDIR request to Generic flow. */\n+struct mlx5_fdir {\n+\tstruct rte_flow_attr attr;\n+\tstruct rte_flow_action actions[2];\n+\tstruct rte_flow_item items[4];\n+\tstruct rte_flow_item_eth l2;\n+\tstruct rte_flow_item_eth l2_mask;\n+\tunion {\n+\t\tstruct rte_flow_item_ipv4 ipv4;\n+\t\tstruct rte_flow_item_ipv6 ipv6;\n+\t} l3;\n+\tunion {\n+\t\tstruct rte_flow_item_ipv4 ipv4;\n+\t\tstruct rte_flow_item_ipv6 ipv6;\n+\t} l3_mask;\n+\tunion {\n+\t\tstruct rte_flow_item_udp udp;\n+\t\tstruct rte_flow_item_tcp tcp;\n+\t} l4;\n+\tunion {\n+\t\tstruct rte_flow_item_udp udp;\n+\t\tstruct rte_flow_item_tcp tcp;\n+\t} l4_mask;\n+\tstruct rte_flow_action_queue queue;\n+};\n \n-\t\t\tclaim_zero(mlx5_glue->destroy_flow(ibv_flow));\n-\t\t}\n-\t\tif (flow->frxq[i].hrxq)\n-\t\t\tmlx5_hrxq_release(dev, flow->frxq[i].hrxq);\n-\t\tif (flow->frxq[i].ibv_attr)\n-\t\t\trte_free(flow->frxq[i].ibv_attr);\n-\t}\n-\tif (flow->cs) {\n-\t\tclaim_zero(mlx5_glue->destroy_counter_set(flow->cs));\n-\t\tflow->cs = NULL;\n-\t\tparser->cs = NULL;\n-\t}\n-\trte_errno = ret; /* Restore rte_errno. */\n-\treturn -rte_errno;\n-}\n+/* Verbs specification header. 
*/\n+struct ibv_spec_header {\n+\tenum ibv_flow_spec_type type;\n+\tuint16_t size;\n+};\n \n /**\n  * Convert a flow.\n@@ -2452,69 +95,17 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,\n  *   A flow on success, NULL otherwise and rte_errno is set.\n  */\n static struct rte_flow *\n-mlx5_flow_list_create(struct rte_eth_dev *dev,\n-\t\t      struct mlx5_flows *list,\n-\t\t      const struct rte_flow_attr *attr,\n-\t\t      const struct rte_flow_item items[],\n-\t\t      const struct rte_flow_action actions[],\n+mlx5_flow_list_create(struct rte_eth_dev *dev __rte_unused,\n+\t\t      struct mlx5_flows *list __rte_unused,\n+\t\t      const struct rte_flow_attr *attr __rte_unused,\n+\t\t      const struct rte_flow_item items[] __rte_unused,\n+\t\t      const struct rte_flow_action actions[] __rte_unused,\n \t\t      struct rte_flow_error *error)\n {\n-\tstruct mlx5_flow_parse parser = { .create = 1, };\n-\tstruct rte_flow *flow = NULL;\n-\tunsigned int i;\n-\tint ret;\n-\n-\tret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);\n-\tif (ret)\n-\t\tgoto exit;\n-\tflow = rte_calloc(__func__, 1,\n-\t\t\t  sizeof(*flow) +\n-\t\t\t  parser.rss_conf.queue_num * sizeof(uint16_t),\n-\t\t\t  0);\n-\tif (!flow) {\n-\t\trte_flow_error_set(error, ENOMEM,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t   NULL,\n-\t\t\t\t   \"cannot allocate flow memory\");\n-\t\treturn NULL;\n-\t}\n-\t/* Copy configuration. */\n-\tflow->queues = (uint16_t (*)[])(flow + 1);\n-\tflow->tunnel = parser.tunnel;\n-\tflow->rss_conf = (struct rte_flow_action_rss){\n-\t\t.func = RTE_ETH_HASH_FUNCTION_DEFAULT,\n-\t\t.level = parser.rss_conf.level,\n-\t\t.types = parser.rss_conf.types,\n-\t\t.key_len = parser.rss_conf.key_len,\n-\t\t.queue_num = parser.rss_conf.queue_num,\n-\t\t.key = memcpy(flow->rss_key, parser.rss_conf.key,\n-\t\t\t      sizeof(*parser.rss_conf.key) *\n-\t\t\t      parser.rss_conf.key_len),\n-\t\t.queue = memcpy(flow->queues, parser.rss_conf.queue,\n-\t\t\t\tsizeof(*parser.rss_conf.queue) *\n-\t\t\t\tparser.rss_conf.queue_num),\n-\t};\n-\tflow->mark = parser.mark;\n-\t/* finalise the flow. 
*/\n-\tif (parser.drop)\n-\t\tret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,\n-\t\t\t\t\t\t\t error);\n-\telse\n-\t\tret = mlx5_flow_create_action_queue(dev, &parser, flow, error);\n-\tif (ret)\n-\t\tgoto exit;\n-\tTAILQ_INSERT_TAIL(list, flow, next);\n-\tDRV_LOG(DEBUG, \"port %u flow created %p\", dev->data->port_id,\n-\t\t(void *)flow);\n-\treturn flow;\n-exit:\n-\tDRV_LOG(ERR, \"port %u flow creation error: %s\", dev->data->port_id,\n-\t\terror->message);\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (parser.queue[i].ibv_attr)\n-\t\t\trte_free(parser.queue[i].ibv_attr);\n-\t}\n-\trte_free(flow);\n+\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t   NULL,\n+\t\t\t   \"action not supported\");\n \treturn NULL;\n }\n \n@@ -2525,15 +116,16 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,\n  * @see rte_flow_ops\n  */\n int\n-mlx5_flow_validate(struct rte_eth_dev *dev,\n-\t\t   const struct rte_flow_attr *attr,\n-\t\t   const struct rte_flow_item items[],\n-\t\t   const struct rte_flow_action actions[],\n+mlx5_flow_validate(struct rte_eth_dev *dev __rte_unused,\n+\t\t   const struct rte_flow_attr *attr __rte_unused,\n+\t\t   const struct rte_flow_item items[] __rte_unused,\n+\t\t   const struct rte_flow_action actions[] __rte_unused,\n \t\t   struct rte_flow_error *error)\n {\n-\tstruct mlx5_flow_parse parser = { .create = 0, };\n-\n-\treturn mlx5_flow_convert(dev, attr, items, actions, error, &parser);\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL,\n+\t\t\t\t  \"action not supported\");\n }\n \n /**\n@@ -2543,16 +135,17 @@ mlx5_flow_validate(struct rte_eth_dev *dev,\n  * @see rte_flow_ops\n  */\n struct rte_flow *\n-mlx5_flow_create(struct rte_eth_dev *dev,\n-\t\t const struct rte_flow_attr *attr,\n-\t\t const struct rte_flow_item items[],\n-\t\t const struct rte_flow_action actions[],\n+mlx5_flow_create(struct rte_eth_dev *dev __rte_unused,\n+\t\t const struct rte_flow_attr *attr __rte_unused,\n+\t\t const struct rte_flow_item items[] __rte_unused,\n+\t\t const struct rte_flow_action actions[] __rte_unused,\n \t\t struct rte_flow_error *error)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\n-\treturn mlx5_flow_list_create(dev, &priv->flows, attr, items, actions,\n-\t\t\t\t     error);\n+\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t   NULL,\n+\t\t\t   \"action not supported\");\n+\treturn NULL;\n }\n \n /**\n@@ -2566,99 +159,10 @@ mlx5_flow_create(struct rte_eth_dev *dev,\n  *   Flow to destroy.\n  */\n static void\n-mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,\n-\t\t       struct rte_flow *flow)\n+mlx5_flow_list_destroy(struct rte_eth_dev *dev __rte_unused,\n+\t\t       struct mlx5_flows *list __rte_unused,\n+\t\t       struct rte_flow *flow __rte_unused)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tunsigned int i;\n-\n-\tif (flow->drop || !dev->data->dev_started)\n-\t\tgoto free;\n-\tfor (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) {\n-\t\t/* Update queue tunnel type. 
*/\n-\t\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)\n-\t\t\t\t\t\t [(*flow->queues)[i]];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n-\t\tuint8_t tunnel = PTYPE_IDX(flow->tunnel);\n-\n-\t\tassert(rxq_ctrl->tunnel_types[tunnel] > 0);\n-\t\trxq_ctrl->tunnel_types[tunnel] -= 1;\n-\t\tif (!rxq_ctrl->tunnel_types[tunnel]) {\n-\t\t\t/* Update tunnel type. */\n-\t\t\tuint8_t j;\n-\t\t\tuint8_t types = 0;\n-\t\t\tuint8_t last;\n-\n-\t\t\tfor (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++)\n-\t\t\t\tif (rxq_ctrl->tunnel_types[j]) {\n-\t\t\t\t\ttypes += 1;\n-\t\t\t\t\tlast = j;\n-\t\t\t\t}\n-\t\t\t/* Keep same if more than one tunnel types left. */\n-\t\t\tif (types == 1)\n-\t\t\t\trxq_data->tunnel = ptype_ext[last];\n-\t\t\telse if (types == 0)\n-\t\t\t\t/* No tunnel type left. */\n-\t\t\t\trxq_data->tunnel = 0;\n-\t\t}\n-\t}\n-\tfor (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) {\n-\t\tstruct rte_flow *tmp;\n-\t\tint mark = 0;\n-\n-\t\t/*\n-\t\t * To remove the mark from the queue, the queue must not be\n-\t\t * present in any other marked flow (RSS or not).\n-\t\t */\n-\t\tTAILQ_FOREACH(tmp, list, next) {\n-\t\t\tunsigned int j;\n-\t\t\tuint16_t *tqs = NULL;\n-\t\t\tuint16_t tq_n = 0;\n-\n-\t\t\tif (!tmp->mark)\n-\t\t\t\tcontinue;\n-\t\t\tfor (j = 0; j != hash_rxq_init_n; ++j) {\n-\t\t\t\tif (!tmp->frxq[j].hrxq)\n-\t\t\t\t\tcontinue;\n-\t\t\t\ttqs = tmp->frxq[j].hrxq->ind_table->queues;\n-\t\t\t\ttq_n = tmp->frxq[j].hrxq->ind_table->queues_n;\n-\t\t\t}\n-\t\t\tif (!tq_n)\n-\t\t\t\tcontinue;\n-\t\t\tfor (j = 0; (j != tq_n) && !mark; j++)\n-\t\t\t\tif (tqs[j] == (*flow->queues)[i])\n-\t\t\t\t\tmark = 1;\n-\t\t}\n-\t\t(*priv->rxqs)[(*flow->queues)[i]]->mark = mark;\n-\t}\n-free:\n-\tif (flow->drop) {\n-\t\tif (flow->frxq[HASH_RXQ_ETH].ibv_flow)\n-\t\t\tclaim_zero(mlx5_glue->destroy_flow\n-\t\t\t\t   (flow->frxq[HASH_RXQ_ETH].ibv_flow));\n-\t\trte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);\n-\t} else {\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tstruct mlx5_flow *frxq = &flow->frxq[i];\n-\n-\t\t\tif (frxq->ibv_flow)\n-\t\t\t\tclaim_zero(mlx5_glue->destroy_flow\n-\t\t\t\t\t   (frxq->ibv_flow));\n-\t\t\tif (frxq->hrxq)\n-\t\t\t\tmlx5_hrxq_release(dev, frxq->hrxq);\n-\t\t\tif (frxq->ibv_attr)\n-\t\t\t\trte_free(frxq->ibv_attr);\n-\t\t}\n-\t}\n-\tif (flow->cs) {\n-\t\tclaim_zero(mlx5_glue->destroy_counter_set(flow->cs));\n-\t\tflow->cs = NULL;\n-\t}\n-\tTAILQ_REMOVE(list, flow, next);\n-\tDRV_LOG(DEBUG, \"port %u flow destroyed %p\", dev->data->port_id,\n-\t\t(void *)flow);\n-\trte_free(flow);\n }\n \n /**\n@@ -2690,97 +194,9 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n int\n-mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)\n+mlx5_flow_create_drop_queue(struct rte_eth_dev *dev __rte_unused)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hrxq_drop *fdq = NULL;\n-\n-\tassert(priv->pd);\n-\tassert(priv->ctx);\n-\tfdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);\n-\tif (!fdq) {\n-\t\tDRV_LOG(WARNING,\n-\t\t\t\"port %u cannot allocate memory for drop queue\",\n-\t\t\tdev->data->port_id);\n-\t\trte_errno = ENOMEM;\n-\t\treturn -rte_errno;\n-\t}\n-\tfdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);\n-\tif (!fdq->cq) {\n-\t\tDRV_LOG(WARNING, \"port %u cannot allocate CQ for drop queue\",\n-\t\t\tdev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\tfdq->wq = 
mlx5_glue->create_wq\n-\t\t(priv->ctx,\n-\t\t &(struct ibv_wq_init_attr){\n-\t\t\t.wq_type = IBV_WQT_RQ,\n-\t\t\t.max_wr = 1,\n-\t\t\t.max_sge = 1,\n-\t\t\t.pd = priv->pd,\n-\t\t\t.cq = fdq->cq,\n-\t\t });\n-\tif (!fdq->wq) {\n-\t\tDRV_LOG(WARNING, \"port %u cannot allocate WQ for drop queue\",\n-\t\t\tdev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\tfdq->ind_table = mlx5_glue->create_rwq_ind_table\n-\t\t(priv->ctx,\n-\t\t &(struct ibv_rwq_ind_table_init_attr){\n-\t\t\t.log_ind_tbl_size = 0,\n-\t\t\t.ind_tbl = &fdq->wq,\n-\t\t\t.comp_mask = 0,\n-\t\t });\n-\tif (!fdq->ind_table) {\n-\t\tDRV_LOG(WARNING,\n-\t\t\t\"port %u cannot allocate indirection table for drop\"\n-\t\t\t\" queue\",\n-\t\t\tdev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\tfdq->qp = mlx5_glue->create_qp_ex\n-\t\t(priv->ctx,\n-\t\t &(struct ibv_qp_init_attr_ex){\n-\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n-\t\t\t.comp_mask =\n-\t\t\t\tIBV_QP_INIT_ATTR_PD |\n-\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n-\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n-\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n-\t\t\t\t.rx_hash_function =\n-\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n-\t\t\t\t.rx_hash_key_len = rss_hash_default_key_len,\n-\t\t\t\t.rx_hash_key = rss_hash_default_key,\n-\t\t\t\t.rx_hash_fields_mask = 0,\n-\t\t\t\t},\n-\t\t\t.rwq_ind_tbl = fdq->ind_table,\n-\t\t\t.pd = priv->pd\n-\t\t });\n-\tif (!fdq->qp) {\n-\t\tDRV_LOG(WARNING, \"port %u cannot allocate QP for drop queue\",\n-\t\t\tdev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\tpriv->flow_drop_queue = fdq;\n \treturn 0;\n-error:\n-\tif (fdq->qp)\n-\t\tclaim_zero(mlx5_glue->destroy_qp(fdq->qp));\n-\tif (fdq->ind_table)\n-\t\tclaim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));\n-\tif (fdq->wq)\n-\t\tclaim_zero(mlx5_glue->destroy_wq(fdq->wq));\n-\tif (fdq->cq)\n-\t\tclaim_zero(mlx5_glue->destroy_cq(fdq->cq));\n-\tif (fdq)\n-\t\trte_free(fdq);\n-\tpriv->flow_drop_queue = NULL;\n-\treturn -rte_errno;\n }\n \n /**\n@@ -2790,23 +206,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)\n  *   Pointer to Ethernet device.\n  */\n void\n-mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev)\n+mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev __rte_unused)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;\n-\n-\tif (!fdq)\n-\t\treturn;\n-\tif (fdq->qp)\n-\t\tclaim_zero(mlx5_glue->destroy_qp(fdq->qp));\n-\tif (fdq->ind_table)\n-\t\tclaim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));\n-\tif (fdq->wq)\n-\t\tclaim_zero(mlx5_glue->destroy_wq(fdq->wq));\n-\tif (fdq->cq)\n-\t\tclaim_zero(mlx5_glue->destroy_cq(fdq->cq));\n-\trte_free(fdq);\n-\tpriv->flow_drop_queue = NULL;\n }\n \n /**\n@@ -2818,70 +219,9 @@ mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev)\n  *   Pointer to a TAILQ flow list.\n  */\n void\n-mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)\n+mlx5_flow_stop(struct rte_eth_dev *dev __rte_unused,\n+\t       struct mlx5_flows *list __rte_unused)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tstruct rte_flow *flow;\n-\tunsigned int i;\n-\n-\tTAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {\n-\t\tstruct mlx5_ind_table_ibv *ind_tbl = NULL;\n-\n-\t\tif (flow->drop) {\n-\t\t\tif (!flow->frxq[HASH_RXQ_ETH].ibv_flow)\n-\t\t\t\tcontinue;\n-\t\t\tclaim_zero(mlx5_glue->destroy_flow\n-\t\t\t\t   (flow->frxq[HASH_RXQ_ETH].ibv_flow));\n-\t\t\tflow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;\n-\t\t\tDRV_LOG(DEBUG, \"port %u flow %p 
removed\",\n-\t\t\t\tdev->data->port_id, (void *)flow);\n-\t\t\t/* Next flow. */\n-\t\t\tcontinue;\n-\t\t}\n-\t\t/* Verify the flow has not already been cleaned. */\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tif (!flow->frxq[i].ibv_flow)\n-\t\t\t\tcontinue;\n-\t\t\t/*\n-\t\t\t * Indirection table may be necessary to remove the\n-\t\t\t * flags in the Rx queues.\n-\t\t\t * This helps to speed-up the process by avoiding\n-\t\t\t * another loop.\n-\t\t\t */\n-\t\t\tind_tbl = flow->frxq[i].hrxq->ind_table;\n-\t\t\tbreak;\n-\t\t}\n-\t\tif (i == hash_rxq_init_n)\n-\t\t\treturn;\n-\t\tif (flow->mark) {\n-\t\t\tassert(ind_tbl);\n-\t\t\tfor (i = 0; i != ind_tbl->queues_n; ++i)\n-\t\t\t\t(*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;\n-\t\t}\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tif (!flow->frxq[i].ibv_flow)\n-\t\t\t\tcontinue;\n-\t\t\tclaim_zero(mlx5_glue->destroy_flow\n-\t\t\t\t   (flow->frxq[i].ibv_flow));\n-\t\t\tflow->frxq[i].ibv_flow = NULL;\n-\t\t\tmlx5_hrxq_release(dev, flow->frxq[i].hrxq);\n-\t\t\tflow->frxq[i].hrxq = NULL;\n-\t\t}\n-\t\tDRV_LOG(DEBUG, \"port %u flow %p removed\", dev->data->port_id,\n-\t\t\t(void *)flow);\n-\t}\n-\t/* Cleanup Rx queue tunnel info. */\n-\tfor (i = 0; i != priv->rxqs_n; ++i) {\n-\t\tstruct mlx5_rxq_data *q = (*priv->rxqs)[i];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(q, struct mlx5_rxq_ctrl, rxq);\n-\n-\t\tif (!q)\n-\t\t\tcontinue;\n-\t\tmemset((void *)rxq_ctrl->tunnel_types, 0,\n-\t\t       sizeof(rxq_ctrl->tunnel_types));\n-\t\tq->tunnel = 0;\n-\t}\n }\n \n /**\n@@ -2896,76 +236,9 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n int\n-mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)\n+mlx5_flow_start(struct rte_eth_dev *dev __rte_unused,\n+\t\tstruct mlx5_flows *list __rte_unused)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tstruct rte_flow *flow;\n-\n-\tTAILQ_FOREACH(flow, list, next) {\n-\t\tunsigned int i;\n-\n-\t\tif (flow->drop) {\n-\t\t\tflow->frxq[HASH_RXQ_ETH].ibv_flow =\n-\t\t\t\tmlx5_glue->create_flow\n-\t\t\t\t(priv->flow_drop_queue->qp,\n-\t\t\t\t flow->frxq[HASH_RXQ_ETH].ibv_attr);\n-\t\t\tif (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {\n-\t\t\t\tDRV_LOG(DEBUG,\n-\t\t\t\t\t\"port %u flow %p cannot be applied\",\n-\t\t\t\t\tdev->data->port_id, (void *)flow);\n-\t\t\t\trte_errno = EINVAL;\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tDRV_LOG(DEBUG, \"port %u flow %p applied\",\n-\t\t\t\tdev->data->port_id, (void *)flow);\n-\t\t\t/* Next flow. 
*/\n-\t\t\tcontinue;\n-\t\t}\n-\t\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\t\tif (!flow->frxq[i].ibv_attr)\n-\t\t\t\tcontinue;\n-\t\t\tflow->frxq[i].hrxq =\n-\t\t\t\tmlx5_hrxq_get(dev, flow->rss_conf.key,\n-\t\t\t\t\t      flow->rss_conf.key_len,\n-\t\t\t\t\t      flow->frxq[i].hash_fields,\n-\t\t\t\t\t      flow->rss_conf.queue,\n-\t\t\t\t\t      flow->rss_conf.queue_num,\n-\t\t\t\t\t      flow->tunnel,\n-\t\t\t\t\t      flow->rss_conf.level);\n-\t\t\tif (flow->frxq[i].hrxq)\n-\t\t\t\tgoto flow_create;\n-\t\t\tflow->frxq[i].hrxq =\n-\t\t\t\tmlx5_hrxq_new(dev, flow->rss_conf.key,\n-\t\t\t\t\t      flow->rss_conf.key_len,\n-\t\t\t\t\t      flow->frxq[i].hash_fields,\n-\t\t\t\t\t      flow->rss_conf.queue,\n-\t\t\t\t\t      flow->rss_conf.queue_num,\n-\t\t\t\t\t      flow->tunnel,\n-\t\t\t\t\t      flow->rss_conf.level);\n-\t\t\tif (!flow->frxq[i].hrxq) {\n-\t\t\t\tDRV_LOG(DEBUG,\n-\t\t\t\t\t\"port %u flow %p cannot create hash\"\n-\t\t\t\t\t\" rxq\",\n-\t\t\t\t\tdev->data->port_id, (void *)flow);\n-\t\t\t\trte_errno = EINVAL;\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-flow_create:\n-\t\t\tmlx5_flow_dump(dev, flow, i);\n-\t\t\tflow->frxq[i].ibv_flow =\n-\t\t\t\tmlx5_glue->create_flow(flow->frxq[i].hrxq->qp,\n-\t\t\t\t\t\t       flow->frxq[i].ibv_attr);\n-\t\t\tif (!flow->frxq[i].ibv_flow) {\n-\t\t\t\tDRV_LOG(DEBUG,\n-\t\t\t\t\t\"port %u flow %p type %u cannot be\"\n-\t\t\t\t\t\" applied\",\n-\t\t\t\t\tdev->data->port_id, (void *)flow, i);\n-\t\t\t\trte_errno = EINVAL;\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t}\n-\t\tmlx5_flow_create_update_rxqs(dev, flow);\n-\t}\n \treturn 0;\n }\n \n@@ -3019,7 +292,6 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,\n \tstruct priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_attr attr = {\n \t\t.ingress = 1,\n-\t\t.priority = MLX5_CTRL_FLOW_PRIORITY,\n \t};\n \tstruct rte_flow_item items[] = {\n \t\t{\n@@ -3129,83 +401,6 @@ mlx5_flow_flush(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT\n-/**\n- * Query flow counter.\n- *\n- * @param cs\n- *   the counter set.\n- * @param counter_value\n- *   returned data from the counter.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-static int\n-mlx5_flow_query_count(struct ibv_counter_set *cs,\n-\t\t      struct mlx5_flow_counter_stats *counter_stats,\n-\t\t      struct rte_flow_query_count *query_count,\n-\t\t      struct rte_flow_error *error)\n-{\n-\tuint64_t counters[2];\n-\tstruct ibv_query_counter_set_attr query_cs_attr = {\n-\t\t.cs = cs,\n-\t\t.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,\n-\t};\n-\tstruct ibv_counter_set_data query_out = {\n-\t\t.out = counters,\n-\t\t.outlen = 2 * sizeof(uint64_t),\n-\t};\n-\tint err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);\n-\n-\tif (err)\n-\t\treturn rte_flow_error_set(error, err,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"cannot read counter\");\n-\tquery_count->hits_set = 1;\n-\tquery_count->bytes_set = 1;\n-\tquery_count->hits = counters[0] - counter_stats->hits;\n-\tquery_count->bytes = counters[1] - counter_stats->bytes;\n-\tif (query_count->reset) {\n-\t\tcounter_stats->hits = counters[0];\n-\t\tcounter_stats->bytes = counters[1];\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * Query a flows.\n- *\n- * @see rte_flow_query()\n- * @see rte_flow_ops\n- */\n-int\n-mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,\n-\t\tstruct rte_flow *flow,\n-\t\tconst struct rte_flow_action *action 
__rte_unused,\n-\t\tvoid *data,\n-\t\tstruct rte_flow_error *error)\n-{\n-\tif (flow->cs) {\n-\t\tint ret;\n-\n-\t\tret = mlx5_flow_query_count(flow->cs,\n-\t\t\t\t\t    &flow->counter_stats,\n-\t\t\t\t\t    (struct rte_flow_query_count *)data,\n-\t\t\t\t\t    error);\n-\t\tif (ret)\n-\t\t\treturn ret;\n-\t} else {\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"no counter found for flow\");\n-\t}\n-\treturn 0;\n-}\n-#endif\n-\n /**\n  * Isolated mode.\n  *\n@@ -3445,18 +640,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,\n \t\t\t.type = 0,\n \t\t},\n \t};\n-\tstruct mlx5_flow_parse parser = {\n-\t\t.layer = HASH_RXQ_ETH,\n-\t};\n \tstruct rte_flow_error error;\n \tstruct rte_flow *flow;\n \tint ret;\n \n \tret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);\n-\tif (ret)\n-\t\treturn ret;\n-\tret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,\n-\t\t\t\tattributes.actions, &error, &parser);\n \tif (ret)\n \t\treturn ret;\n \tflow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,\n@@ -3482,96 +670,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_fdir_filter_delete(struct rte_eth_dev *dev,\n-\t\t\tconst struct rte_eth_fdir_filter *fdir_filter)\n+mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,\n+\t\t\tconst struct rte_eth_fdir_filter *fdir_filter\n+\t\t\t__rte_unused)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tstruct mlx5_fdir attributes = {\n-\t\t.attr.group = 0,\n-\t};\n-\tstruct mlx5_flow_parse parser = {\n-\t\t.create = 1,\n-\t\t.layer = HASH_RXQ_ETH,\n-\t};\n-\tstruct rte_flow_error error;\n-\tstruct rte_flow *flow;\n-\tunsigned int i;\n-\tint ret;\n-\n-\tret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);\n-\tif (ret)\n-\t\treturn ret;\n-\tret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,\n-\t\t\t\tattributes.actions, &error, &parser);\n-\tif (ret)\n-\t\tgoto exit;\n-\t/*\n-\t * Special case for drop action which is only set in the\n-\t * specifications when the flow is created.  In this situation the\n-\t * drop specification is missing.\n-\t */\n-\tif (parser.drop) {\n-\t\tstruct ibv_flow_spec_action_drop *drop;\n-\n-\t\tdrop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr +\n-\t\t\t\tparser.queue[HASH_RXQ_ETH].offset);\n-\t\t*drop = (struct ibv_flow_spec_action_drop){\n-\t\t\t.type = IBV_FLOW_SPEC_ACTION_DROP,\n-\t\t\t.size = sizeof(struct ibv_flow_spec_action_drop),\n-\t\t};\n-\t\tparser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++;\n-\t}\n-\tTAILQ_FOREACH(flow, &priv->flows, next) {\n-\t\tstruct ibv_flow_attr *attr;\n-\t\tstruct ibv_spec_header *attr_h;\n-\t\tvoid *spec;\n-\t\tstruct ibv_flow_attr *flow_attr;\n-\t\tstruct ibv_spec_header *flow_h;\n-\t\tvoid *flow_spec;\n-\t\tunsigned int specs_n;\n-\t\tunsigned int queue_id = parser.drop ? HASH_RXQ_ETH :\n-\t\t\t\t\t\t      parser.layer;\n-\n-\t\tattr = parser.queue[queue_id].ibv_attr;\n-\t\tflow_attr = flow->frxq[queue_id].ibv_attr;\n-\t\t/* Compare first the attributes. 
*/\n-\t\tif (!flow_attr ||\n-\t\t    memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))\n-\t\t\tcontinue;\n-\t\tif (attr->num_of_specs == 0)\n-\t\t\tcontinue;\n-\t\tspec = (void *)((uintptr_t)attr +\n-\t\t\t\tsizeof(struct ibv_flow_attr));\n-\t\tflow_spec = (void *)((uintptr_t)flow_attr +\n-\t\t\t\t     sizeof(struct ibv_flow_attr));\n-\t\tspecs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs);\n-\t\tfor (i = 0; i != specs_n; ++i) {\n-\t\t\tattr_h = spec;\n-\t\t\tflow_h = flow_spec;\n-\t\t\tif (memcmp(spec, flow_spec,\n-\t\t\t\t   RTE_MIN(attr_h->size, flow_h->size)))\n-\t\t\t\tgoto wrong_flow;\n-\t\t\tspec = (void *)((uintptr_t)spec + attr_h->size);\n-\t\t\tflow_spec = (void *)((uintptr_t)flow_spec +\n-\t\t\t\t\t     flow_h->size);\n-\t\t}\n-\t\t/* At this point, the flow match. */\n-\t\tbreak;\n-wrong_flow:\n-\t\t/* The flow does not match. */\n-\t\tcontinue;\n-\t}\n-\tif (flow)\n-\t\tmlx5_flow_list_destroy(dev, &priv->flows, flow);\n-exit:\n-\tif (ret)\n-\t\tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tfor (i = 0; i != hash_rxq_init_n; ++i) {\n-\t\tif (parser.queue[i].ibv_attr)\n-\t\t\trte_free(parser.queue[i].ibv_attr);\n-\t}\n-\tif (ret)\n-\t\trte_errno = ret; /* Restore rte_errno. */\n+\trte_errno = ENOTSUP;\n \treturn -rte_errno;\n }\n \n@@ -3738,45 +841,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,\n  *   number of supported Verbs flow priority.\n  */\n unsigned int\n-mlx5_get_max_verbs_prio(struct rte_eth_dev *dev)\n+mlx5_get_max_verbs_prio(struct rte_eth_dev *dev __rte_unused)\n {\n-\tstruct priv *priv = dev->data->dev_private;\n-\tunsigned int verb_priorities = MLX5_VERBS_FLOW_PRIO_8;\n-\tstruct {\n-\t\tstruct ibv_flow_attr attr;\n-\t\tstruct ibv_flow_spec_eth eth;\n-\t\tstruct ibv_flow_spec_action_drop drop;\n-\t} flow_attr = {\n-\t\t.attr = {\n-\t\t\t.num_of_specs = 2,\n-\t\t},\n-\t\t.eth = {\n-\t\t\t.type = IBV_FLOW_SPEC_ETH,\n-\t\t\t.size = sizeof(struct ibv_flow_spec_eth),\n-\t\t},\n-\t\t.drop = {\n-\t\t\t.size = sizeof(struct ibv_flow_spec_action_drop),\n-\t\t\t.type = IBV_FLOW_SPEC_ACTION_DROP,\n-\t\t},\n-\t};\n-\tstruct ibv_flow *flow;\n-\n-\tdo {\n-\t\tflow_attr.attr.priority = verb_priorities - 1;\n-\t\tflow = mlx5_glue->create_flow(priv->flow_drop_queue->qp,\n-\t\t\t\t\t      &flow_attr.attr);\n-\t\tif (flow) {\n-\t\t\tclaim_zero(mlx5_glue->destroy_flow(flow));\n-\t\t\t/* Try more priorities. */\n-\t\t\tverb_priorities *= 2;\n-\t\t} else {\n-\t\t\t/* Failed, restore last right number. */\n-\t\t\tverb_priorities /= 2;\n-\t\t\tbreak;\n-\t\t}\n-\t} while (1);\n-\tDRV_LOG(DEBUG, \"port %u Verbs flow priorities: %d,\"\n-\t\t\" user flow priorities: %d\",\n-\t\tdev->data->port_id, verb_priorities, MLX5_CTRL_FLOW_PRIORITY);\n-\treturn verb_priorities;\n+\treturn 8;\n }\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex 0007be08b..97b4d9eb6 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -136,7 +136,6 @@ struct mlx5_rxq_ctrl {\n \tstruct priv *priv; /* Back pointer to private data. */\n \tstruct mlx5_rxq_data rxq; /* Data path structure. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n-\tuint32_t tunnel_types[16]; /* Tunnel type counter. */\n \tunsigned int irq:1; /* Whether IRQ is enabled. */\n \tuint16_t idx; /* Queue index. */\n };\n",
    "prefixes": [
        "v3",
        "01/21"
    ]
}