get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
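
A minimal sketch of driving these operations from Python with the requests library. The endpoint URL and the "state" field are taken from the sample response below; the PATCHWORK_TOKEN environment variable and the token-style Authorization header are assumptions about how write access is typically granted and may not match this server's configuration.

import os

import requests

URL = "http://patches.dpdk.org/api/patches/55403/"

# Read the patch; GET needs no authentication on a public instance.
resp = requests.get(URL)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# Partially update the patch with PATCH. Write access is assumed to need
# an API token, read here from a hypothetical PATCHWORK_TOKEN variable.
token = os.environ.get("PATCHWORK_TOKEN")
if token:
    update = requests.patch(
        URL,
        headers={"Authorization": "Token " + token},
        json={"state": "superseded"},
    )
    update.raise_for_status()
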

GET /api/patches/55403/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 55403,
    "url": "http://patches.dpdk.org/api/patches/55403/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190626085809.5974-3-qiming.yang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190626085809.5974-3-qiming.yang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190626085809.5974-3-qiming.yang@intel.com",
    "date": "2019-06-26T08:58:07",
    "name": "[v8,2/4] net/ice: add generic flow API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0e0cf7235609fa96db9f3634894fa3315ad5d3c6",
    "submitter": {
        "id": 522,
        "url": "http://patches.dpdk.org/api/people/522/?format=api",
        "name": "Qiming Yang",
        "email": "qiming.yang@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190626085809.5974-3-qiming.yang@intel.com/mbox/",
    "series": [
        {
            "id": 5174,
            "url": "http://patches.dpdk.org/api/series/5174/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=5174",
            "date": "2019-06-26T08:58:05",
            "name": "Enable rte_flow API in ice driver",
            "version": 8,
            "mbox": "http://patches.dpdk.org/series/5174/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/55403/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/55403/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id D35582C6D;\n\tWed, 26 Jun 2019 11:00:14 +0200 (CEST)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n\tby dpdk.org (Postfix) with ESMTP id DDAB32BE6\n\tfor <dev@dpdk.org>; Wed, 26 Jun 2019 11:00:04 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n\tby fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t26 Jun 2019 02:00:03 -0700",
            "from map1.sh.intel.com ([10.67.111.124])\n\tby orsmga001.jf.intel.com with ESMTP; 26 Jun 2019 02:00:00 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.63,419,1557212400\"; d=\"scan'208\";a=\"245363661\"",
        "From": "Qiming Yang <qiming.yang@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Qiming Yang <qiming.yang@intel.com>",
        "Date": "Wed, 26 Jun 2019 16:58:07 +0800",
        "Message-Id": "<20190626085809.5974-3-qiming.yang@intel.com>",
        "X-Mailer": "git-send-email 2.9.5",
        "In-Reply-To": "<20190626085809.5974-1-qiming.yang@intel.com>",
        "References": "<1559552722-8970-1-git-send-email-qiming.yang@intel.com>\n\t<20190626085809.5974-1-qiming.yang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v8 2/4] net/ice: add generic flow API",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds ice_flow_create, ice_flow_destroy,\nice_flow_flush and ice_flow_validate support,\nthese are used to handle all the generic filters.\n\nSigned-off-by: Qiming Yang <qiming.yang@intel.com>\n---\n drivers/net/ice/Makefile           |   1 +\n drivers/net/ice/ice_ethdev.c       |  44 +++\n drivers/net/ice/ice_ethdev.h       |   5 +\n drivers/net/ice/ice_generic_flow.c | 696 +++++++++++++++++++++++++++++++++++++\n drivers/net/ice/ice_generic_flow.h | 614 ++++++++++++++++++++++++++++++++\n drivers/net/ice/meson.build        |   3 +-\n 6 files changed, 1362 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/ice/ice_generic_flow.c\n create mode 100644 drivers/net/ice/ice_generic_flow.h",
    "diff": "diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile\nindex b10d826..32abeb6 100644\n--- a/drivers/net/ice/Makefile\n+++ b/drivers/net/ice/Makefile\n@@ -79,5 +79,6 @@ endif\n ifeq ($(CC_AVX2_SUPPORT), 1)\n \tSRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c\n endif\n+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex a94aa7e..8ee06d1 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -15,6 +15,7 @@\n #include \"base/ice_dcb.h\"\n #include \"ice_ethdev.h\"\n #include \"ice_rxtx.h\"\n+#include \"ice_switch_filter.h\"\n \n #define ICE_MAX_QP_NUM \"max_queue_pair_num\"\n #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100\n@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,\n static int ice_xstats_get_names(struct rte_eth_dev *dev,\n \t\t\t\tstruct rte_eth_xstat_name *xstats_names,\n \t\t\t\tunsigned int limit);\n+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,\n+\t\t\tenum rte_filter_type filter_type,\n+\t\t\tenum rte_filter_op filter_op,\n+\t\t\tvoid *arg);\n \n static const struct rte_pci_id pci_id_ice_map[] = {\n \t{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },\n@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {\n \t.xstats_get                   = ice_xstats_get,\n \t.xstats_get_names             = ice_xstats_get_names,\n \t.xstats_reset                 = ice_stats_reset,\n+\t.filter_ctrl                  = ice_dev_filter_ctrl,\n };\n \n /* store statistics names and its offset in stats structure */\n@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)\n \t/* get base queue pairs index  in the device */\n \tice_base_queue_get(pf);\n \n+\tTAILQ_INIT(&pf->flow_list);\n+\n \treturn 0;\n \n err_pf_setup:\n@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)\n {\n \tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct rte_flow *p_flow;\n \n \tice_dev_close(dev);\n \n@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)\n \trte_intr_callback_unregister(intr_handle,\n \t\t\t\t     ice_interrupt_handler, dev);\n \n+\t/* Remove all flows */\n+\twhile ((p_flow = TAILQ_FIRST(&pf->flow_list))) {\n+\t\tTAILQ_REMOVE(&pf->flow_list, p_flow, node);\n+\t\tice_free_switch_filter_rule(p_flow->rule);\n+\t\trte_free(p_flow);\n+\t}\n+\n \treturn 0;\n }\n \n@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,\n }\n \n static int\n+ice_dev_filter_ctrl(struct rte_eth_dev *dev,\n+\t\t     enum rte_filter_type filter_type,\n+\t\t     enum rte_filter_op filter_op,\n+\t\t     void *arg)\n+{\n+\tint ret = 0;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tswitch (filter_type) {\n+\tcase RTE_ETH_FILTER_GENERIC:\n+\t\tif (filter_op != RTE_ETH_FILTER_GET)\n+\t\t\treturn -EINVAL;\n+\t\t*(const void **)arg = &ice_flow_ops;\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_DRV_LOG(WARNING, \"Filter type (%d) not supported\",\n+\t\t\t\t\tfilter_type);\n+\t\tret = -EINVAL;\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t      struct rte_pci_device *pci_dev)\n {\ndiff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h\nindex 50b966c..8a52239 100644\n--- 
a/drivers/net/ice/ice_ethdev.h\n+++ b/drivers/net/ice/ice_ethdev.h\n@@ -234,12 +234,16 @@ struct ice_vsi {\n \tbool offset_loaded;\n };\n \n+extern const struct rte_flow_ops ice_flow_ops;\n+\n /* Struct to store flow created. */\n struct rte_flow {\n \tTAILQ_ENTRY(rte_flow) node;\n \tvoid *rule;\n };\n \n+TAILQ_HEAD(ice_flow_list, rte_flow);\n+\n struct ice_pf {\n \tstruct ice_adapter *adapter; /* The adapter this PF associate to */\n \tstruct ice_vsi *main_vsi; /* pointer to main VSI structure */\n@@ -266,6 +270,7 @@ struct ice_pf {\n \tstruct ice_eth_stats internal_stats;\n \tbool offset_loaded;\n \tbool adapter_stopped;\n+\tstruct ice_flow_list flow_list;\n };\n \n /**\ndiff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c\nnew file mode 100644\nindex 0000000..d5ff278\n--- /dev/null\n+++ b/drivers/net/ice/ice_generic_flow.c\n@@ -0,0 +1,696 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019 Intel Corporation\n+ */\n+\n+#include <sys/queue.h>\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <stdarg.h>\n+\n+#include <rte_ether.h>\n+#include <rte_ethdev_driver.h>\n+#include <rte_malloc.h>\n+\n+#include \"ice_ethdev.h\"\n+#include \"ice_generic_flow.h\"\n+#include \"ice_switch_filter.h\"\n+\n+static int ice_flow_validate(struct rte_eth_dev *dev,\n+\t\tconst struct rte_flow_attr *attr,\n+\t\tconst struct rte_flow_item pattern[],\n+\t\tconst struct rte_flow_action actions[],\n+\t\tstruct rte_flow_error *error);\n+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,\n+\t\tconst struct rte_flow_attr *attr,\n+\t\tconst struct rte_flow_item pattern[],\n+\t\tconst struct rte_flow_action actions[],\n+\t\tstruct rte_flow_error *error);\n+static int ice_flow_destroy(struct rte_eth_dev *dev,\n+\t\tstruct rte_flow *flow,\n+\t\tstruct rte_flow_error *error);\n+static int ice_flow_flush(struct rte_eth_dev *dev,\n+\t\tstruct rte_flow_error *error);\n+\n+const struct rte_flow_ops ice_flow_ops = {\n+\t.validate = ice_flow_validate,\n+\t.create = ice_flow_create,\n+\t.destroy = ice_flow_destroy,\n+\t.flush = ice_flow_flush,\n+};\n+\n+static int\n+ice_flow_valid_attr(const struct rte_flow_attr *attr,\n+\t\t     struct rte_flow_error *error)\n+{\n+\t/* Must be input direction */\n+\tif (!attr->ingress) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t   attr, \"Only support ingress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->egress) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\t   attr, \"Not support egress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->priority) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t   attr, \"Not support priority.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->group) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t   attr, \"Not support group.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Find the first VOID or non-VOID item pointer */\n+static const struct rte_flow_item *\n+ice_find_first_item(const struct rte_flow_item *item, bool is_void)\n+{\n+\tbool is_find;\n+\n+\twhile (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tif (is_void)\n+\t\t\tis_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;\n+\t\telse\n+\t\t\tis_find = item->type != 
RTE_FLOW_ITEM_TYPE_VOID;\n+\t\tif (is_find)\n+\t\t\tbreak;\n+\t\titem++;\n+\t}\n+\treturn item;\n+}\n+\n+/* Skip all VOID items of the pattern */\n+static void\n+ice_pattern_skip_void_item(struct rte_flow_item *items,\n+\t\t\t    const struct rte_flow_item *pattern)\n+{\n+\tuint32_t cpy_count = 0;\n+\tconst struct rte_flow_item *pb = pattern, *pe = pattern;\n+\n+\tfor (;;) {\n+\t\t/* Find a non-void item first */\n+\t\tpb = ice_find_first_item(pb, false);\n+\t\tif (pb->type == RTE_FLOW_ITEM_TYPE_END) {\n+\t\t\tpe = pb;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* Find a void item */\n+\t\tpe = ice_find_first_item(pb + 1, true);\n+\n+\t\tcpy_count = pe - pb;\n+\t\trte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);\n+\n+\t\titems += cpy_count;\n+\n+\t\tif (pe->type == RTE_FLOW_ITEM_TYPE_END) {\n+\t\t\tpb = pe;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tpb = pe + 1;\n+\t}\n+\t/* Copy the END item. */\n+\trte_memcpy(items, pe, sizeof(struct rte_flow_item));\n+}\n+\n+/* Check if the pattern matches a supported item type array */\n+static bool\n+ice_match_pattern(enum rte_flow_item_type *item_array,\n+\t\tconst struct rte_flow_item *pattern)\n+{\n+\tconst struct rte_flow_item *item = pattern;\n+\n+\twhile ((*item_array == item->type) &&\n+\t       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {\n+\t\titem_array++;\n+\t\titem++;\n+\t}\n+\n+\treturn (*item_array == RTE_FLOW_ITEM_TYPE_END &&\n+\t\titem->type == RTE_FLOW_ITEM_TYPE_END);\n+}\n+\n+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],\n+\t\tstruct rte_flow_error *error)\n+{\n+\tuint16_t i = 0;\n+\tuint64_t inset;\n+\tstruct rte_flow_item *items; /* used for pattern without VOID items */\n+\tuint32_t item_num = 0; /* non-void item number */\n+\n+\t/* Get the non-void item number of pattern */\n+\twhile ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tif ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)\n+\t\t\titem_num++;\n+\t\ti++;\n+\t}\n+\titem_num++;\n+\n+\titems = rte_zmalloc(\"ice_pattern\",\n+\t\t\t    item_num * sizeof(struct rte_flow_item), 0);\n+\tif (!items) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,\n+\t\t\t\t   NULL, \"No memory for PMD internal items.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tice_pattern_skip_void_item(items, pattern);\n+\n+\tfor (i = 0; i < RTE_DIM(ice_supported_patterns); i++)\n+\t\tif (ice_match_pattern(ice_supported_patterns[i].items,\n+\t\t\t\t      items)) {\n+\t\t\tinset = ice_supported_patterns[i].sw_fields;\n+\t\t\trte_free(items);\n+\t\t\treturn inset;\n+\t\t}\n+\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t   pattern, \"Unsupported pattern\");\n+\n+\trte_free(items);\n+\treturn 0;\n+}\n+\n+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item = pattern;\n+\tconst struct rte_flow_item_eth *eth_spec, *eth_mask;\n+\tconst struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;\n+\tconst struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;\n+\tconst struct rte_flow_item_tcp *tcp_spec, *tcp_mask;\n+\tconst struct rte_flow_item_udp *udp_spec, *udp_mask;\n+\tconst struct rte_flow_item_sctp *sctp_spec, *sctp_mask;\n+\tconst struct rte_flow_item_icmp *icmp_mask;\n+\tconst struct rte_flow_item_icmp6 *icmp6_mask;\n+\tconst struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;\n+\tconst struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;\n+\tenum rte_flow_item_type item_type;\n+\tuint8_t  ipv6_addr_mask[16] = {\n+\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, 0xFF,\n+\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };\n+\tuint64_t input_set = ICE_INSET_NONE;\n+\tbool outer_ip = true;\n+\tbool outer_l4 = true;\n+\n+\tfor (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {\n+\t\tif (item->last) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Not support range\");\n+\t\t\treturn 0;\n+\t\t}\n+\t\titem_type = item->type;\n+\t\tswitch (item_type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\teth_spec = item->spec;\n+\t\t\teth_mask = item->mask;\n+\n+\t\t\tif (eth_spec && eth_mask) {\n+\t\t\t\tif (rte_is_broadcast_ether_addr(&eth_mask->src))\n+\t\t\t\t\tinput_set |= ICE_INSET_SMAC;\n+\t\t\t\tif (rte_is_broadcast_ether_addr(&eth_mask->dst))\n+\t\t\t\t\tinput_set |= ICE_INSET_DMAC;\n+\t\t\t\tif (eth_mask->type == RTE_BE16(0xffff))\n+\t\t\t\t\tinput_set |= ICE_INSET_ETHERTYPE;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\t\tipv4_spec = item->spec;\n+\t\t\tipv4_mask = item->mask;\n+\n+\t\t\tif (!(ipv4_spec && ipv4_mask)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Invalid IPv4 spec or mask.\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\t/* Check IPv4 mask and update input set */\n+\t\t\tif (ipv4_mask->hdr.version_ihl ||\n+\t\t\t    ipv4_mask->hdr.total_length ||\n+\t\t\t    ipv4_mask->hdr.packet_id ||\n+\t\t\t    ipv4_mask->hdr.hdr_checksum) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Invalid IPv4 mask.\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (outer_ip) {\n+\t\t\t\tif (ipv4_mask->hdr.src_addr == UINT32_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV4_SRC;\n+\t\t\t\tif (ipv4_mask->hdr.dst_addr == UINT32_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV4_DST;\n+\t\t\t\tif (ipv4_mask->hdr.type_of_service == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV4_TOS;\n+\t\t\t\tif (ipv4_mask->hdr.time_to_live == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV4_TTL;\n+\t\t\t\tif (ipv4_mask->hdr.next_proto_id == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV4_PROTO;\n+\t\t\t\touter_ip = false;\n+\t\t\t} else {\n+\t\t\t\tif (ipv4_mask->hdr.src_addr == UINT32_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV4_SRC;\n+\t\t\t\tif (ipv4_mask->hdr.dst_addr == UINT32_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV4_DST;\n+\t\t\t\tif (ipv4_mask->hdr.time_to_live == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV4_TTL;\n+\t\t\t\tif (ipv4_mask->hdr.next_proto_id == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV4_PROTO;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\t\tipv6_spec = item->spec;\n+\t\t\tipv6_mask = item->mask;\n+\n+\t\t\tif (!(ipv6_spec && ipv6_mask)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\titem, \"Invalid IPv6 spec or mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (ipv6_mask->hdr.payload_len ||\n+\t\t\t    ipv6_mask->hdr.vtc_flow) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Invalid IPv6 mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (outer_ip) {\n+\t\t\t\tif (!memcmp(ipv6_mask->hdr.src_addr,\n+\t\t\t\t\t    ipv6_addr_mask,\n+\t\t\t\t\t    RTE_DIM(ipv6_mask->hdr.src_addr)))\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV6_SRC;\n+\t\t\t\tif (!memcmp(ipv6_mask->hdr.dst_addr,\n+\t\t\t\t\t    ipv6_addr_mask,\n+\t\t\t\t\t    
RTE_DIM(ipv6_mask->hdr.dst_addr)))\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV6_DST;\n+\t\t\t\tif (ipv6_mask->hdr.proto == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV6_PROTO;\n+\t\t\t\tif (ipv6_mask->hdr.hop_limits == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_IPV6_HOP_LIMIT;\n+\t\t\t\touter_ip = false;\n+\t\t\t} else {\n+\t\t\t\tif (!memcmp(ipv6_mask->hdr.src_addr,\n+\t\t\t\t\t    ipv6_addr_mask,\n+\t\t\t\t\t    RTE_DIM(ipv6_mask->hdr.src_addr)))\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV6_SRC;\n+\t\t\t\tif (!memcmp(ipv6_mask->hdr.dst_addr,\n+\t\t\t\t\t    ipv6_addr_mask,\n+\t\t\t\t\t    RTE_DIM(ipv6_mask->hdr.dst_addr)))\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV6_DST;\n+\t\t\t\tif (ipv6_mask->hdr.proto == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV6_PROTO;\n+\t\t\t\tif (ipv6_mask->hdr.hop_limits == UINT8_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_IPV6_TTL;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\tudp_spec = item->spec;\n+\t\t\tudp_mask = item->mask;\n+\n+\t\t\tif (!(udp_spec && udp_mask)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item, \"Invalid UDP mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\t/* Check UDP mask and update input set*/\n+\t\t\tif (udp_mask->hdr.dgram_len ||\n+\t\t\t    udp_mask->hdr.dgram_cksum) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid UDP mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (outer_l4) {\n+\t\t\t\tif (udp_mask->hdr.src_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_SRC_PORT;\n+\t\t\t\tif (udp_mask->hdr.dst_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_DST_PORT;\n+\t\t\t\touter_l4 = false;\n+\t\t\t} else {\n+\t\t\t\tif (udp_mask->hdr.src_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_SRC_PORT;\n+\t\t\t\tif (udp_mask->hdr.dst_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_DST_PORT;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\t\ttcp_spec = item->spec;\n+\t\t\ttcp_mask = item->mask;\n+\n+\t\t\tif (!(tcp_spec && tcp_mask)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item, \"Invalid TCP mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\t/* Check TCP mask and update input set */\n+\t\t\tif (tcp_mask->hdr.sent_seq ||\n+\t\t\t    tcp_mask->hdr.recv_ack ||\n+\t\t\t    tcp_mask->hdr.data_off ||\n+\t\t\t    tcp_mask->hdr.tcp_flags ||\n+\t\t\t    tcp_mask->hdr.rx_win ||\n+\t\t\t    tcp_mask->hdr.cksum ||\n+\t\t\t    tcp_mask->hdr.tcp_urp) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid TCP mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (outer_l4) {\n+\t\t\t\tif (tcp_mask->hdr.src_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_SRC_PORT;\n+\t\t\t\tif (tcp_mask->hdr.dst_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_DST_PORT;\n+\t\t\t\touter_l4 = false;\n+\t\t\t} else {\n+\t\t\t\tif (tcp_mask->hdr.src_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_SRC_PORT;\n+\t\t\t\tif (tcp_mask->hdr.dst_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_DST_PORT;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_SCTP:\n+\t\t\tsctp_spec = item->spec;\n+\t\t\tsctp_mask = item->mask;\n+\n+\t\t\tif (!(sctp_spec && sctp_mask)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   
RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item, \"Invalid SCTP mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\t/* Check SCTP mask and update input set */\n+\t\t\tif (sctp_mask->hdr.cksum) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Invalid SCTP mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (outer_l4) {\n+\t\t\t\tif (sctp_mask->hdr.src_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_SRC_PORT;\n+\t\t\t\tif (sctp_mask->hdr.dst_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_DST_PORT;\n+\t\t\t\touter_l4 = false;\n+\t\t\t} else {\n+\t\t\t\tif (sctp_mask->hdr.src_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_SRC_PORT;\n+\t\t\t\tif (sctp_mask->hdr.dst_port == UINT16_MAX)\n+\t\t\t\t\tinput_set |= ICE_INSET_TUN_DST_PORT;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n+\t\t\ticmp_mask = item->mask;\n+\t\t\tif (icmp_mask->hdr.icmp_code ||\n+\t\t\t    icmp_mask->hdr.icmp_cksum ||\n+\t\t\t    icmp_mask->hdr.icmp_ident ||\n+\t\t\t    icmp_mask->hdr.icmp_seq_nb) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid ICMP mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (icmp_mask->hdr.icmp_type == UINT8_MAX)\n+\t\t\t\tinput_set |= ICE_INSET_ICMP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n+\t\t\ticmp6_mask = item->mask;\n+\t\t\tif (icmp6_mask->code ||\n+\t\t\t    icmp6_mask->checksum) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid ICMP6 mask\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (icmp6_mask->type == UINT8_MAX)\n+\t\t\t\tinput_set |= ICE_INSET_ICMP6;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tvxlan_spec = item->spec;\n+\t\t\tvxlan_mask = item->mask;\n+\t\t\t/* Check if VXLAN item is used to describe protocol.\n+\t\t\t * If yes, both spec and mask should be NULL.\n+\t\t\t * If no, both spec and mask shouldn't be NULL.\n+\t\t\t */\n+\t\t\tif ((!vxlan_spec && vxlan_mask) ||\n+\t\t\t    (vxlan_spec && !vxlan_mask)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Invalid VXLAN item\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n+\t\t\tnvgre_spec = item->spec;\n+\t\t\tnvgre_mask = item->mask;\n+\t\t\t/* Check if VXLAN item is used to describe protocol.\n+\t\t\t * If yes, both spec and mask should be NULL.\n+\t\t\t * If no, both spec and mask shouldn't be NULL.\n+\t\t\t */\n+\t\t\tif ((!nvgre_spec && nvgre_mask) ||\n+\t\t\t    (nvgre_spec && !nvgre_mask)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Invalid NVGRE item\");\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"Invalid mask no exist\");\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\treturn input_set;\n+}\n+\n+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],\n+\t\t\tuint64_t inset, struct rte_flow_error *error)\n+{\n+\tuint64_t fields;\n+\n+\t/* get valid field */\n+\tfields = ice_get_flow_field(pattern, error);\n+\tif (!fields || fields & (~inset)) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\n+\t\t\t\t   pattern,\n+\t\t\t\t   
\"Invalid input set\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int ice_flow_valid_action(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct rte_flow_action *actions,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_action_queue *act_q;\n+\tuint16_t queue;\n+\n+\tswitch (actions->type) {\n+\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\tact_q = actions->conf;\n+\t\tqueue = act_q->index;\n+\t\tif (queue >= dev->data->nb_rx_queues) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   actions, \"Invalid queue ID for\"\n+\t\t\t\t\t   \" ethertype_filter.\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\tbreak;\n+\tdefault:\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, actions,\n+\t\t\t\t   \"Invalid action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_flow_validate(struct rte_eth_dev *dev,\n+\t\t   const struct rte_flow_attr *attr,\n+\t\t   const struct rte_flow_item pattern[],\n+\t\t   const struct rte_flow_action actions[],\n+\t\t   struct rte_flow_error *error)\n+{\n+\tuint64_t inset = 0;\n+\tint ret = ICE_ERR_NOT_SUPPORTED;\n+\n+\tif (!pattern) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,\n+\t\t\t\t   NULL, \"NULL pattern.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!actions) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION_NUM,\n+\t\t\t\t   NULL, \"NULL action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!attr) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t   NULL, \"NULL attribute.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tret = ice_flow_valid_attr(attr, error);\n+\tif (!ret)\n+\t\treturn ret;\n+\n+\tinset = ice_flow_valid_pattern(pattern, error);\n+\tif (!inset)\n+\t\treturn -rte_errno;\n+\n+\tret = ice_flow_valid_inset(pattern, inset, error);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = ice_flow_valid_action(dev, actions, error);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\treturn 0;\n+}\n+\n+static struct rte_flow *\n+ice_flow_create(struct rte_eth_dev *dev,\n+\t\t const struct rte_flow_attr *attr,\n+\t\t const struct rte_flow_item pattern[],\n+\t\t const struct rte_flow_action actions[],\n+\t\t struct rte_flow_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct rte_flow *flow = NULL;\n+\tint ret;\n+\n+\tflow = rte_zmalloc(\"ice_flow\", sizeof(struct rte_flow), 0);\n+\tif (!flow) {\n+\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t   \"Failed to allocate memory\");\n+\t\treturn flow;\n+\t}\n+\n+\tret = ice_flow_validate(dev, attr, pattern, actions, error);\n+\tif (ret < 0)\n+\t\tgoto free_flow;\n+\n+\tret = ice_create_switch_filter(pf, pattern, actions, flow, error);\n+\tif (ret)\n+\t\tgoto free_flow;\n+\n+\tTAILQ_INSERT_TAIL(&pf->flow_list, flow, node);\n+\treturn flow;\n+\n+free_flow:\n+\trte_flow_error_set(error, -ret,\n+\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t   \"Failed to create flow.\");\n+\trte_free(flow);\n+\treturn NULL;\n+}\n+\n+static int\n+ice_flow_destroy(struct rte_eth_dev *dev,\n+\t\t struct rte_flow *flow,\n+\t\t struct rte_flow_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tint ret = 0;\n+\n+\tret = ice_destroy_switch_filter(pf, flow, error);\n+\n+\tif (!ret) {\n+\t\tTAILQ_REMOVE(&pf->flow_list, flow, 
node);\n+\t\trte_free(flow);\n+\t} else {\n+\t\trte_flow_error_set(error, -ret,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t   \"Failed to destroy flow.\");\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+ice_flow_flush(struct rte_eth_dev *dev,\n+\t       struct rte_flow_error *error)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct rte_flow *p_flow;\n+\tint ret = 0;\n+\n+\tTAILQ_FOREACH(p_flow, &pf->flow_list, node) {\n+\t\tret = ice_flow_destroy(dev, p_flow, error);\n+\t\tif (ret) {\n+\t\t\trte_flow_error_set(error, -ret,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t\t   \"Failed to flush SW flows.\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\ndiff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h\nnew file mode 100644\nindex 0000000..2e43a29\n--- /dev/null\n+++ b/drivers/net/ice/ice_generic_flow.h\n@@ -0,0 +1,614 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019 Intel Corporation\n+ */\n+\n+#ifndef _ICE_GENERIC_FLOW_H_\n+#define _ICE_GENERIC_FLOW_H_\n+\n+#include <rte_flow_driver.h>\n+\n+struct ice_flow_pattern {\n+\tenum rte_flow_item_type *items;\n+\tuint64_t sw_fields;\n+};\n+\n+#define ICE_INSET_NONE            0x00000000000000000ULL\n+\n+/* bit0 ~ bit 7 */\n+#define ICE_INSET_SMAC            0x0000000000000001ULL\n+#define ICE_INSET_DMAC            0x0000000000000002ULL\n+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL\n+\n+/* bit 8 ~ bit 15 */\n+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL\n+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL\n+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL\n+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL\n+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL\n+#define ICE_INSET_DST_PORT        0x0000000000002000ULL\n+#define ICE_INSET_ARP             0x0000000000004000ULL\n+\n+/* bit 16 ~ bit 31 */\n+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL\n+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL\n+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL\n+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL\n+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL\n+#define ICE_INSET_ICMP            0x0000000001000000ULL\n+#define ICE_INSET_ICMP6           0x0000000002000000ULL\n+\n+/* bit 32 ~ bit 47, tunnel fields */\n+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL\n+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL\n+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL\n+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL\n+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL\n+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL\n+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL\n+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL\n+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL\n+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL\n+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL\n+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL\n+#define ICE_INSET_TUN_ID             0x0000100000000000ULL\n+\n+/* bit 48 ~ bit 55 */\n+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL\n+\n+#define ICE_FLAG_VLAN_INNER  0x00000001ULL\n+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL\n+\n+#define INSET_ETHER ( \\\n+\tICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)\n+#define INSET_MAC_IPV4 ( \\\n+\tICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | 
\\\n+\tICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)\n+#define INSET_MAC_IPV4_L4 ( \\\n+\tICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \\\n+\tICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \\\n+\tICE_INSET_SRC_PORT)\n+#define INSET_MAC_IPV4_ICMP ( \\\n+\tICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \\\n+\tICE_INSET_IPV4_TOS | ICE_INSET_ICMP)\n+#define INSET_MAC_IPV6 ( \\\n+\tICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \\\n+\tICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)\n+#define INSET_MAC_IPV6_L4 ( \\\n+\tICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \\\n+\tICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \\\n+\tICE_INSET_SRC_PORT)\n+#define INSET_MAC_IPV6_ICMP ( \\\n+\tICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \\\n+\tICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)\n+#define INSET_TUNNEL_IPV4_TYPE1 ( \\\n+\tICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \\\n+\tICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)\n+#define INSET_TUNNEL_IPV4_TYPE2 ( \\\n+\tICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \\\n+\tICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \\\n+\tICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)\n+#define INSET_TUNNEL_IPV4_TYPE3 ( \\\n+\tICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \\\n+\tICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)\n+#define INSET_TUNNEL_IPV6_TYPE1 ( \\\n+\tICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \\\n+\tICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)\n+#define INSET_TUNNEL_IPV6_TYPE2 ( \\\n+\tICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \\\n+\tICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \\\n+\tICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)\n+#define INSET_TUNNEL_IPV6_TYPE3 ( \\\n+\tICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \\\n+\tICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)\n+\n+/* L2 */\n+static enum rte_flow_item_type pattern_ethertype[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* non-tunnel IPv4 */\n+static enum rte_flow_item_type pattern_ipv4[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_icmp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_ICMP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* non-tunnel IPv6 */\n+static enum rte_flow_item_type pattern_ipv6[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv6_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv6_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv6_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv6_icmp6[] = 
{\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_ICMP6,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* IPv4 VXLAN IPv4 */\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_ICMP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* IPv4 VXLAN MAC IPv4 */\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_ICMP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* IPv4 VXLAN IPv6 */\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = 
{\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_ICMP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* IPv4 VXLAN MAC IPv6 */\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_VXLAN,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_ICMP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* IPv4 NVGRE IPv4 */\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = 
{\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_ICMP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* IPv4 NVGRE MAC IPv4 */\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_ICMP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+/* IPv4 NVGRE IPv6 */\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+\n+/* IPv4 NVGRE MAC IPv6 */\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_UDP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = 
{\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_TCP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV4,\n+\tRTE_FLOW_ITEM_TYPE_NVGRE,\n+\tRTE_FLOW_ITEM_TYPE_ETH,\n+\tRTE_FLOW_ITEM_TYPE_IPV6,\n+\tRTE_FLOW_ITEM_TYPE_SCTP,\n+\tRTE_FLOW_ITEM_TYPE_END,\n+};\n+\n+static struct ice_flow_pattern ice_supported_patterns[] = {\n+\t{pattern_ethertype, INSET_ETHER},\n+\t{pattern_ipv4, INSET_MAC_IPV4},\n+\t{pattern_ipv4_udp, INSET_MAC_IPV4_L4},\n+\t{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},\n+\t{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},\n+\t{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},\n+\t{pattern_ipv6, INSET_MAC_IPV6},\n+\t{pattern_ipv6_udp, INSET_MAC_IPV6_L4},\n+\t{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},\n+\t{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},\n+\t{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},\n+\t{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},\n+\t{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},\n+\t{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},\n+\t{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},\n+\t{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},\n+\t{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},\n+\t{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},\n+\t{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},\n+\t{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},\n+\t{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},\n+\t{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},\n+\t{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},\n+\t{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},\n+\t{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},\n+\t{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},\n+\t{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},\n+\t{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},\n+};\n+\n+#endif\ndiff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build\nindex 8697676..7f16647 100644\n--- a/drivers/net/ice/meson.build\n+++ b/drivers/net/ice/meson.build\n@@ -7,7 
+7,8 @@ objs = [base_objs]\n sources = files(\n \t'ice_ethdev.c',\n \t'ice_rxtx.c',\n-\t'ice_switch_filter.c'\n+\t'ice_switch_filter.c',\n+\t'ice_generic_flow.c'\n \t)\n \n deps += ['hash']\n",
    "prefixes": [
        "v8",
        "2/4"
    ]
}
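
The "mbox" and "checks" URLs in the response point to the raw patch and its per-context CI results behind the aggregate "check": "fail". A minimal sketch of consuming them, assuming the check objects expose "context", "state", and "description" fields as in the usual Patchwork check schema:

import subprocess

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/55403/").json()

# Save the raw patch as an mbox and apply it to a local DPDK checkout.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("55403.mbox", "wb") as f:
    f.write(mbox.content)
subprocess.run(["git", "am", "55403.mbox"], check=True)

# List the individual CI checks that make up the overall result.
for check in requests.get(patch["checks"]).json():
    print(check.get("context"), check.get("state"), check.get("description"))
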