From patchwork Mon Sep 16 23:06:11 2019
X-Patchwork-Submitter: Ying Wang
X-Patchwork-Id: 59300
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Ying Wang
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, qiming.yang@intel.com, wei.zhao1@intel.com, ying.a.wang@intel.com
Date: Tue, 17 Sep 2019 07:06:11 +0800
Message-Id: <20190916230615.411726-2-ying.a.wang@intel.com>
In-Reply-To: <20190916230615.411726-1-ying.a.wang@intel.com>
References: <20190903221522.151382-2-ying.a.wang@intel.com> <20190916230615.411726-1-ying.a.wang@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/5] net/ice: minor code clean

The patch removes redundant code and cleans up some incorrect indentation.
Signed-off-by: Ying Wang --- drivers/net/ice/ice_generic_flow.c | 39 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c index 1c0adc779..44dbccd3d 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -44,21 +44,21 @@ const struct rte_flow_ops ice_flow_ops = { static int ice_flow_valid_attr(const struct rte_flow_attr *attr, - struct rte_flow_error *error) + struct rte_flow_error *error) { /* Must be input direction */ if (!attr->ingress) { rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - attr, "Only support ingress."); + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); return -rte_errno; } /* Not supported */ if (attr->egress) { rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - attr, "Not support egress."); + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); return -rte_errno; } @@ -73,8 +73,8 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr, /* Not supported */ if (attr->group) { rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - attr, "Not support group."); + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); return -rte_errno; } @@ -102,7 +102,7 @@ ice_find_first_item(const struct rte_flow_item *item, bool is_void) /* Skip all VOID items of the pattern */ static void ice_pattern_skip_void_item(struct rte_flow_item *items, - const struct rte_flow_item *pattern) + const struct rte_flow_item *pattern) { uint32_t cpy_count = 0; const struct rte_flow_item *pb = pattern, *pe = pattern; @@ -124,7 +124,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items, items += cpy_count; if (pe->type == RTE_FLOW_ITEM_TYPE_END) { - pb = pe; break; } @@ -560,10 +559,10 @@ static int ice_flow_valid_action(struct rte_eth_dev *dev, static int ice_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { uint64_t inset = 0; int ret = ICE_ERR_NOT_SUPPORTED; @@ -609,10 +608,10 @@ ice_flow_validate(struct rte_eth_dev *dev, static struct rte_flow * ice_flow_create(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct rte_flow *flow = NULL; @@ -647,8 +646,8 @@ ice_flow_create(struct rte_eth_dev *dev, static int ice_flow_destroy(struct rte_eth_dev *dev, - struct rte_flow *flow, - struct rte_flow_error *error) + struct rte_flow *flow, + struct rte_flow_error *error) { struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); int ret = 0; @@ -669,7 +668,7 @@ ice_flow_destroy(struct rte_eth_dev *dev, static int ice_flow_flush(struct rte_eth_dev *dev, - struct rte_flow_error *error) + struct rte_flow_error *error) { struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct rte_flow *p_flow; From patchwork Mon Sep 16 23:06:12 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 
X-Patchwork-Submitter: Ying Wang
X-Patchwork-Id: 59301
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Ying Wang
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, qiming.yang@intel.com, wei.zhao1@intel.com, ying.a.wang@intel.com
Date: Tue, 17 Sep 2019 07:06:12 +0800
Message-Id: <20190916230615.411726-3-ying.a.wang@intel.com>
In-Reply-To: <20190916230615.411726-1-ying.a.wang@intel.com>
References: <20190903221522.151382-2-ying.a.wang@intel.com> <20190916230615.411726-1-ying.a.wang@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/5] net/ice: add devargs to control pipeline mode

From: Qiming Yang

Added a devargs parameter to control the mode of the generic flow API. Non-pipeline mode is used by default.

Signed-off-by: Qiming Yang
---
 doc/guides/nics/ice.rst                | 19 +++++++++++++++++++
 doc/guides/rel_notes/release_19_11.rst |  2 ++
 drivers/net/ice/ice_ethdev.c           | 11 ++++++++++-
 drivers/net/ice/ice_ethdev.h           |  1 +
 4 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 59a222e28..37d9054ad 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -61,6 +61,25 @@ Runtime Config Options
   NOTE: In Safe mode, only very limited features are available, features like
   RSS, checksum, fdir, tunneling ... are all disabled.
 
+- ``Generic Flow Pipeline Mode Support`` (default ``0``)
+
+  In pipeline mode, a flow can be set at one specific stage by setting the
+  ``priority`` parameter. Currently, two stages are supported: priority = 0
+  or != 0. Flows with priority 0 are located at the first pipeline stage,
+  which is typically used as a firewall to drop packets on a blacklist (we
+  call it the permission stage). At this stage, flow rules are created for
+  the device's exact match engine: switch. Flows with priority != 0 are
+  located at the second stage, where packets are typically classified and
+  steered to a specific queue or queue group (we call it the distribution
+  stage). At this stage, flow rules are created for the device's flow
+  director engine.
+  In non-pipeline mode, ``priority`` is ignored; a flow rule can be created
+  as either a flow director rule or a switch rule, depending on its
+  pattern/action and the resource allocation situation, and all flows are
+  virtually at the same pipeline stage.
+  By default, the generic flow API is enabled in non-pipeline mode; the user
+  can choose pipeline mode by setting the ``devargs`` parameter
+  ``pipeline-mode-support``, for example::
+
+    -w 80:00.0,pipeline-mode-support=1
+
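To make the stage selection concrete, the following is a minimal sketch (an illustration only, not part of this patch) of how an application could target either stage through the standard ``rte_flow`` API once ``pipeline-mode-support=1`` is set; the helper name ``create_rule_at_stage`` is hypothetical::

    #include <rte_flow.h>

    /* Minimal sketch, assuming the port was started with
     * pipeline-mode-support=1: attr.priority selects the stage
     * the rule is programmed into.
     */
    static struct rte_flow *
    create_rule_at_stage(uint16_t port_id, uint32_t priority,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_flow_error *error)
    {
            const struct rte_flow_attr attr = {
                    .ingress = 1,          /* only ingress is supported */
                    .priority = priority,  /* 0: permission stage (switch),
                                            * non-zero: distribution stage (fdir)
                                            */
            };

            return rte_flow_create(port_id, &attr, pattern, actions, error);
    }

A rule created with priority 0 would then be programmed by the switch engine at the permission stage, while a non-zero priority would place it in the flow director at the distribution stage.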
 Driver compilation and testing
 ------------------------------
 
diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index 91c31a611..7dc4f004e 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -61,6 +61,8 @@ New Features
   Updated the Intel ice driver with new features and improvements, including:
 
   * Supported device-specific DDP package loading.
+  * Generic filter enhancement
+    - Supported pipeline mode.
 
 Removed Items
 -------------

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index e60cc1943..740c30b1b 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -19,9 +19,11 @@
 
 /* devargs */
 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
+#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support"
 
 static const char * const ice_valid_args[] = {
 	ICE_SAFE_MODE_SUPPORT_ARG,
+	ICE_PIPELINE_MODE_SUPPORT_ARG,
 	NULL
 };
 
@@ -1510,7 +1512,13 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
 				 &parse_bool, &ad->devargs.safe_mode_support);
+	if (ret)
+		goto err_devargs;
+
+	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
+				 &parse_bool, &ad->devargs.pipeline_mode_support);
 
+err_devargs:
 	rte_kvargs_free(kvlist);
 	return ret;
 }
@@ -3925,7 +3933,8 @@
 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
-	ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>");
+	ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
+	ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
 
 RTE_INIT(ice_init_log)
 {

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 926db23d2..9bf5de08d 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -285,6 +285,7 @@ struct ice_pf {
  */
 struct ice_devargs {
 	int safe_mode_support;
+	int pipeline_mode_support;
 };
 
 /**

From patchwork Mon Sep 16 23:06:13 2019
X-Patchwork-Submitter: Ying Wang
X-Patchwork-Id: 59302
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Ying Wang
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, qiming.yang@intel.com, wei.zhao1@intel.com, ying.a.wang@intel.com
Date: Tue, 17 Sep 2019 07:06:13 +0800
Message-Id: <20190916230615.411726-4-ying.a.wang@intel.com>
In-Reply-To: <20190916230615.411726-1-ying.a.wang@intel.com>
References: <20190903221522.151382-2-ying.a.wang@intel.com> <20190916230615.411726-1-ying.a.wang@intel.com>
Subject: [dpdk-dev] [PATCH v2 3/5] net/ice: rework for generic flow enabling

The patch reworks the generic flow API (rte_flow) implementation. It introduces an abstraction layer which provides a unified interface for the low-level filter engines (switch, fdir, hash) to register their supported patterns and actions and to implement the flow validate/create/destroy/flush/query operations.

The patch also removes the existing switch filter implementation to avoid compile errors. The switch filter implementation for the new framework will be added in the following patch.

Signed-off-by: Ying Wang
---
 drivers/net/ice/ice_ethdev.c        |  21 +-
 drivers/net/ice/ice_ethdev.h        |  15 +-
 drivers/net/ice/ice_generic_flow.c  | 747 ++++++++++++++++------------
 drivers/net/ice/ice_generic_flow.h  | 785 ++++++++----------------------
 drivers/net/ice/ice_switch_filter.c | 511 -----------------------
 drivers/net/ice/ice_switch_filter.h |  18 -
 6 files changed, 534 insertions(+), 1563 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 740c30b1b..bf6b5bf44 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,7 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
-#include "ice_switch_filter.h"
+#include "ice_generic_flow.h"
 
 /* devargs */
 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
@@ -1672,7 +1672,11 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index in the device */
 	ice_base_queue_get(pf);
 
-	TAILQ_INIT(&pf->flow_list);
+	ret = ice_flow_init(ad);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to initialize flow");
+		return ret;
+	}
 
 	return 0;
 
@@ -1791,6 +1795,8 @@ ice_dev_close(struct rte_eth_dev *dev)
 {
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
 	/* Since stop will make link down, then the link event will be
 	 * triggered, disable the irq firstly to avoid the port_infoe etc
@@ -1801,6 +1807,8 @@ ice_dev_close(struct rte_eth_dev *dev)
 
 	ice_dev_stop(dev);
 
+	ice_flow_uninit(ad);
+
 	/* release all queue resource */
 	ice_free_queues(dev);
 
@@ -1817,8 +1825,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1836,13 +1842,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
-	/* Remove all flows */
-	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
-		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
-		ice_free_switch_filter_rule(p_flow->rule);
-		rte_free(p_flow);
-	}
-
 	return 0;
 }
 
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 9bf5de08d..0e8c3502c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -241,16 +241,12 @@ struct ice_vsi {
 	bool
offset_loaded; }; -extern const struct rte_flow_ops ice_flow_ops; - -/* Struct to store flow created. */ -struct rte_flow { - TAILQ_ENTRY(rte_flow) node; - void *rule; -}; - +struct rte_flow; TAILQ_HEAD(ice_flow_list, rte_flow); +struct ice_flow_parser_node; +TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -278,6 +274,9 @@ struct ice_pf { bool offset_loaded; bool adapter_stopped; struct ice_flow_list flow_list; + struct ice_parser_list rss_parser_list; + struct ice_parser_list perm_parser_list; + struct ice_parser_list dist_parser_list; }; /** diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c index 44dbccd3d..28a53cf66 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -17,7 +17,19 @@ #include "ice_ethdev.h" #include "ice_generic_flow.h" -#include "ice_switch_filter.h" + +/** + * Non-pipeline mode, fdir and switch both used as distributor, + * fdir used first, switch used as fdir's backup. + */ +#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0 +/*Pipeline mode, switch used at permission stage*/ +#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1 +/*Pipeline mode, fdir used at distributor stage*/ +#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2 + +static struct ice_engine_list engine_list = + TAILQ_HEAD_INITIALIZER(engine_list); static int ice_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -34,16 +46,176 @@ static int ice_flow_destroy(struct rte_eth_dev *dev, struct rte_flow_error *error); static int ice_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +static int ice_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error); const struct rte_flow_ops ice_flow_ops = { .validate = ice_flow_validate, .create = ice_flow_create, .destroy = ice_flow_destroy, .flush = ice_flow_flush, + .query = ice_flow_query, }; + +void +ice_register_flow_engine(struct ice_flow_engine *engine) +{ + TAILQ_INSERT_TAIL(&engine_list, engine, node); +} + +int +ice_flow_init(struct ice_adapter *ad) +{ + int ret; + struct ice_pf *pf = &ad->pf; + void *temp; + struct ice_flow_engine *engine; + + TAILQ_INIT(&pf->flow_list); + TAILQ_INIT(&pf->rss_parser_list); + TAILQ_INIT(&pf->perm_parser_list); + TAILQ_INIT(&pf->dist_parser_list); + + TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->init == NULL) { + PMD_INIT_LOG(ERR, "Invalid engine type (%d)", + engine->type); + return -ENOTSUP; + } + + ret = engine->init(ad); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize engine %d", + engine->type); + return ret; + } + } + return 0; +} + +void +ice_flow_uninit(struct ice_adapter *ad) +{ + struct ice_pf *pf = &ad->pf; + struct ice_flow_engine *engine; + struct rte_flow *p_flow; + struct ice_flow_parser_node *p_parser; + void *temp; + + TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->uninit) + engine->uninit(ad); + } + + /* Remove all flows */ + while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { + TAILQ_REMOVE(&pf->flow_list, p_flow, node); + if (p_flow->engine->free) + p_flow->engine->free(p_flow); + rte_free(p_flow); + } + + /* Cleanup parser list */ + while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list))) { + TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node); + rte_free(p_parser); + } + + while ((p_parser = 
TAILQ_FIRST(&pf->perm_parser_list))) { + TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node); + rte_free(p_parser); + } + + while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list))) { + TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node); + rte_free(p_parser); + } +} + +static struct ice_parser_list * +ice_get_parser_list(struct ice_flow_parser *parser, + struct ice_adapter *ad) +{ + struct ice_parser_list *list; + struct ice_pf *pf = &ad->pf; + + switch (parser->stage) { + case ICE_FLOW_STAGE_RSS: + list = &pf->rss_parser_list; + break; + case ICE_FLOW_STAGE_PERMISSION: + list = &pf->perm_parser_list; + break; + case ICE_FLOW_STAGE_DISTRIBUTOR: + list = &pf->dist_parser_list; + break; + default: + return NULL; + } + + return list; +} + +int +ice_register_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad) +{ + struct ice_parser_list *list; + struct ice_flow_parser_node *parser_node; + + parser_node = rte_zmalloc("ice_parser", sizeof(*parser_node), 0); + if (parser_node == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory."); + return -ENOMEM; + } + parser_node->parser = parser; + + list = ice_get_parser_list(parser, ad); + if (list == NULL) + return -EINVAL; + + if (ad->devargs.pipeline_mode_support) { + TAILQ_INSERT_TAIL(list, parser_node, node); + } else { + if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH || + parser->engine->type == ICE_FLOW_ENGINE_HASH) + TAILQ_INSERT_TAIL(list, parser_node, node); + else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR) + TAILQ_INSERT_HEAD(list, parser_node, node); + else + return -EINVAL; + } + return 0; +} + +void +ice_unregister_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad) +{ + struct ice_parser_list *list; + struct ice_flow_parser_node *p_parser; + void *temp; + + list = ice_get_parser_list(parser, ad); + if (list == NULL) + return; + + TAILQ_FOREACH_SAFE(p_parser, list, node, temp) { + if (p_parser->parser->engine->type == parser->engine->type) { + TAILQ_REMOVE(list, p_parser, node); + rte_free(p_parser); + } + } +} + static int -ice_flow_valid_attr(const struct rte_flow_attr *attr, +ice_flow_valid_attr(struct ice_adapter *ad, + const struct rte_flow_attr *attr, + int *ice_pipeline_stage, struct rte_flow_error *error) { /* Must be input direction */ @@ -62,12 +234,24 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr, return -rte_errno; } - /* Not supported */ - if (attr->priority) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - attr, "Not support priority."); - return -rte_errno; + /* Check pipeline mode support to set classification stage */ + if (ad->devargs.pipeline_mode_support) { + if (attr->priority == 0) + *ice_pipeline_stage = + ICE_FLOW_CLASSIFY_STAGE_PERMISSION; + else + *ice_pipeline_stage = + ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR; + } else { + *ice_pipeline_stage = + ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY; + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } } /* Not supported */ @@ -150,11 +334,15 @@ ice_match_pattern(enum rte_flow_item_type *item_array, item->type == RTE_FLOW_ITEM_TYPE_END); } -static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[], +struct ice_pattern_match_item * +ice_search_pattern_match_item(const struct rte_flow_item pattern[], + struct ice_pattern_match_item *array, + uint32_t array_len, struct rte_flow_error *error) { uint16_t i = 0; - uint64_t inset; + struct ice_pattern_match_item 
*pattern_match_item; + /* need free by each filter */ struct rte_flow_item *items; /* used for pattern without VOID items */ uint32_t item_num = 0; /* non-void item number */ @@ -171,401 +359,76 @@ static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[], if (!items) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, "No memory for PMD internal items."); - return -ENOMEM; + return NULL; + } + pattern_match_item = rte_zmalloc("ice_pattern_match_item", + sizeof(struct ice_pattern_match_item), 0); + if (!pattern_match_item) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to allocate memory."); + return NULL; } ice_pattern_skip_void_item(items, pattern); - for (i = 0; i < RTE_DIM(ice_supported_patterns); i++) - if (ice_match_pattern(ice_supported_patterns[i].items, - items)) { - inset = ice_supported_patterns[i].sw_fields; + for (i = 0; i < array_len; i++) + if (ice_match_pattern(array[i].pattern_list, + items)) { + pattern_match_item->input_set_mask = + array[i].input_set_mask; + pattern_match_item->pattern_list = + array[i].pattern_list; + pattern_match_item->meta = array[i].meta; rte_free(items); - return inset; + return pattern_match_item; } rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, pattern, "Unsupported pattern"); rte_free(items); - return 0; -} - -static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[], - struct rte_flow_error *error) -{ - const struct rte_flow_item *item = pattern; - const struct rte_flow_item_eth *eth_spec, *eth_mask; - const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; - const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; - const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; - const struct rte_flow_item_udp *udp_spec, *udp_mask; - const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; - const struct rte_flow_item_icmp *icmp_mask; - const struct rte_flow_item_icmp6 *icmp6_mask; - const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; - const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask; - enum rte_flow_item_type item_type; - uint8_t ipv6_addr_mask[16] = { - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; - uint64_t input_set = ICE_INSET_NONE; - bool is_tunnel = false; - - for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { - if (item->last) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Not support range"); - return 0; - } - item_type = item->type; - switch (item_type) { - case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = item->spec; - eth_mask = item->mask; - - if (eth_spec && eth_mask) { - if (rte_is_broadcast_ether_addr(ð_mask->src)) - input_set |= ICE_INSET_SMAC; - if (rte_is_broadcast_ether_addr(ð_mask->dst)) - input_set |= ICE_INSET_DMAC; - if (eth_mask->type == RTE_BE16(0xffff)) - input_set |= ICE_INSET_ETHERTYPE; - } - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - ipv4_spec = item->spec; - ipv4_mask = item->mask; - - if (!(ipv4_spec && ipv4_mask)) - break; - - /* Check IPv4 mask and update input set */ - if (ipv4_mask->hdr.version_ihl || - ipv4_mask->hdr.total_length || - ipv4_mask->hdr.packet_id || - ipv4_mask->hdr.hdr_checksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid IPv4 mask."); - return 0; - } - - if (is_tunnel) { - if (ipv4_mask->hdr.src_addr == UINT32_MAX) - input_set |= ICE_INSET_TUN_IPV4_SRC; - if (ipv4_mask->hdr.dst_addr == UINT32_MAX) - input_set |= ICE_INSET_TUN_IPV4_DST; - if (ipv4_mask->hdr.time_to_live 
== UINT8_MAX) - input_set |= ICE_INSET_TUN_IPV4_TTL; - if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) - input_set |= ICE_INSET_TUN_IPV4_PROTO; - } else { - if (ipv4_mask->hdr.src_addr == UINT32_MAX) - input_set |= ICE_INSET_IPV4_SRC; - if (ipv4_mask->hdr.dst_addr == UINT32_MAX) - input_set |= ICE_INSET_IPV4_DST; - if (ipv4_mask->hdr.time_to_live == UINT8_MAX) - input_set |= ICE_INSET_IPV4_TTL; - if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) - input_set |= ICE_INSET_IPV4_PROTO; - if (ipv4_mask->hdr.type_of_service == UINT8_MAX) - input_set |= ICE_INSET_IPV4_TOS; - } - break; - case RTE_FLOW_ITEM_TYPE_IPV6: - ipv6_spec = item->spec; - ipv6_mask = item->mask; - - if (!(ipv6_spec && ipv6_mask)) - break; - - if (ipv6_mask->hdr.payload_len) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid IPv6 mask"); - return 0; - } - - if (is_tunnel) { - if (!memcmp(ipv6_mask->hdr.src_addr, - ipv6_addr_mask, - RTE_DIM(ipv6_mask->hdr.src_addr))) - input_set |= ICE_INSET_TUN_IPV6_SRC; - if (!memcmp(ipv6_mask->hdr.dst_addr, - ipv6_addr_mask, - RTE_DIM(ipv6_mask->hdr.dst_addr))) - input_set |= ICE_INSET_TUN_IPV6_DST; - if (ipv6_mask->hdr.proto == UINT8_MAX) - input_set |= ICE_INSET_TUN_IPV6_PROTO; - if (ipv6_mask->hdr.hop_limits == UINT8_MAX) - input_set |= ICE_INSET_TUN_IPV6_TTL; - } else { - if (!memcmp(ipv6_mask->hdr.src_addr, - ipv6_addr_mask, - RTE_DIM(ipv6_mask->hdr.src_addr))) - input_set |= ICE_INSET_IPV6_SRC; - if (!memcmp(ipv6_mask->hdr.dst_addr, - ipv6_addr_mask, - RTE_DIM(ipv6_mask->hdr.dst_addr))) - input_set |= ICE_INSET_IPV6_DST; - if (ipv6_mask->hdr.proto == UINT8_MAX) - input_set |= ICE_INSET_IPV6_PROTO; - if (ipv6_mask->hdr.hop_limits == UINT8_MAX) - input_set |= ICE_INSET_IPV6_HOP_LIMIT; - if ((ipv6_mask->hdr.vtc_flow & - rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) - == rte_cpu_to_be_32 - (RTE_IPV6_HDR_TC_MASK)) - input_set |= ICE_INSET_IPV6_TOS; - } - - break; - case RTE_FLOW_ITEM_TYPE_UDP: - udp_spec = item->spec; - udp_mask = item->mask; - - if (!(udp_spec && udp_mask)) - break; - - /* Check UDP mask and update input set*/ - if (udp_mask->hdr.dgram_len || - udp_mask->hdr.dgram_cksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid UDP mask"); - return 0; - } - - if (is_tunnel) { - if (udp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_TUN_SRC_PORT; - if (udp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_TUN_DST_PORT; - } else { - if (udp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_SRC_PORT; - if (udp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_DST_PORT; - } - - break; - case RTE_FLOW_ITEM_TYPE_TCP: - tcp_spec = item->spec; - tcp_mask = item->mask; - - if (!(tcp_spec && tcp_mask)) - break; - - /* Check TCP mask and update input set */ - if (tcp_mask->hdr.sent_seq || - tcp_mask->hdr.recv_ack || - tcp_mask->hdr.data_off || - tcp_mask->hdr.tcp_flags || - tcp_mask->hdr.rx_win || - tcp_mask->hdr.cksum || - tcp_mask->hdr.tcp_urp) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid TCP mask"); - return 0; - } - - if (is_tunnel) { - if (tcp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_TUN_SRC_PORT; - if (tcp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_TUN_DST_PORT; - } else { - if (tcp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_SRC_PORT; - if (tcp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_DST_PORT; - } - - break; - case RTE_FLOW_ITEM_TYPE_SCTP: - sctp_spec = item->spec; - sctp_mask = 
item->mask; - - if (!(sctp_spec && sctp_mask)) - break; - - /* Check SCTP mask and update input set */ - if (sctp_mask->hdr.cksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid SCTP mask"); - return 0; - } - - if (is_tunnel) { - if (sctp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_TUN_SRC_PORT; - if (sctp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_TUN_DST_PORT; - } else { - if (sctp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_SRC_PORT; - if (sctp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_DST_PORT; - } - - break; - case RTE_FLOW_ITEM_TYPE_ICMP: - icmp_mask = item->mask; - if (icmp_mask->hdr.icmp_code || - icmp_mask->hdr.icmp_cksum || - icmp_mask->hdr.icmp_ident || - icmp_mask->hdr.icmp_seq_nb) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid ICMP mask"); - return 0; - } - - if (icmp_mask->hdr.icmp_type == UINT8_MAX) - input_set |= ICE_INSET_ICMP; - break; - case RTE_FLOW_ITEM_TYPE_ICMP6: - icmp6_mask = item->mask; - if (icmp6_mask->code || - icmp6_mask->checksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid ICMP6 mask"); - return 0; - } - - if (icmp6_mask->type == UINT8_MAX) - input_set |= ICE_INSET_ICMP6; - break; - case RTE_FLOW_ITEM_TYPE_VXLAN: - vxlan_spec = item->spec; - vxlan_mask = item->mask; - /* Check if VXLAN item is used to describe protocol. - * If yes, both spec and mask should be NULL. - * If no, both spec and mask shouldn't be NULL. - */ - if ((!vxlan_spec && vxlan_mask) || - (vxlan_spec && !vxlan_mask)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid VXLAN item"); - return 0; - } - if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX && - vxlan_mask->vni[1] == UINT8_MAX && - vxlan_mask->vni[2] == UINT8_MAX) - input_set |= ICE_INSET_TUN_ID; - is_tunnel = 1; - - break; - case RTE_FLOW_ITEM_TYPE_NVGRE: - nvgre_spec = item->spec; - nvgre_mask = item->mask; - /* Check if NVGRE item is used to describe protocol. - * If yes, both spec and mask should be NULL. - * If no, both spec and mask shouldn't be NULL. 
- */ - if ((!nvgre_spec && nvgre_mask) || - (nvgre_spec && !nvgre_mask)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid NVGRE item"); - return 0; - } - if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX && - nvgre_mask->tni[1] == UINT8_MAX && - nvgre_mask->tni[2] == UINT8_MAX) - input_set |= ICE_INSET_TUN_ID; - is_tunnel = 1; - - break; - case RTE_FLOW_ITEM_TYPE_VOID: - break; - default: - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid pattern"); - break; - } - } - return input_set; + rte_free(pattern_match_item); + return NULL; } -static int ice_flow_valid_inset(const struct rte_flow_item pattern[], - uint64_t inset, struct rte_flow_error *error) +static struct ice_flow_engine * +ice_parse_engine(struct ice_adapter *ad, + struct ice_parser_list *parser_list, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) { - uint64_t fields; - - /* get valid field */ - fields = ice_get_flow_field(pattern, error); - if (!fields || fields & (~inset)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM_SPEC, - pattern, - "Invalid input set"); - return -rte_errno; - } + struct ice_flow_engine *engine = NULL; + struct ice_flow_parser_node *parser_node; + void *temp; - return 0; -} + TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) { + if (parser_node->parser->parse_pattern_action(ad, + parser_node->parser->array, + parser_node->parser->array_len, + pattern, actions, meta, error) < 0) + continue; -static int ice_flow_valid_action(struct rte_eth_dev *dev, - const struct rte_flow_action *actions, - struct rte_flow_error *error) -{ - const struct rte_flow_action_queue *act_q; - uint16_t queue; - const struct rte_flow_action *action; - for (action = actions; action->type != - RTE_FLOW_ACTION_TYPE_END; action++) { - switch (action->type) { - case RTE_FLOW_ACTION_TYPE_QUEUE: - act_q = action->conf; - queue = act_q->index; - if (queue >= dev->data->nb_rx_queues) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, "Invalid queue ID for" - " switch filter."); - return -rte_errno; - } - break; - case RTE_FLOW_ACTION_TYPE_DROP: - case RTE_FLOW_ACTION_TYPE_VOID: - break; - default: - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, actions, - "Invalid action."); - return -rte_errno; - } + engine = parser_node->parser->engine; + break; } - return 0; + return engine; } static int -ice_flow_validate(struct rte_eth_dev *dev, +ice_flow_validate_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + struct ice_flow_engine **engine, + void **meta, struct rte_flow_error *error) { - uint64_t inset = 0; int ret = ICE_ERR_NOT_SUPPORTED; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ice_pipeline_stage = 0; if (!pattern) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, @@ -587,25 +450,49 @@ ice_flow_validate(struct rte_eth_dev *dev, return -rte_errno; } - ret = ice_flow_valid_attr(attr, error); + ret = ice_flow_valid_attr(ad, attr, &ice_pipeline_stage, error); if (ret) return ret; - inset = ice_flow_valid_pattern(pattern, error); - if (!inset) - return -rte_errno; - - ret = ice_flow_valid_inset(pattern, inset, error); - if (ret) - return ret; + *engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, 
actions, + meta, error); + if (*engine != NULL) + return 0; + + switch (ice_pipeline_stage) { + case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY: + case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR: + *engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern, + actions, meta, error); + break; + case ICE_FLOW_CLASSIFY_STAGE_PERMISSION: + *engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern, + actions, meta, error); + break; + default: + return -EINVAL; + } - ret = ice_flow_valid_action(dev, actions, error); - if (ret) - return ret; + if (*engine == NULL) + return -EINVAL; return 0; } +static int +ice_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + void *meta; + struct ice_flow_engine *engine; + + return ice_flow_validate_filter(dev, attr, pattern, actions, + &engine, &meta, error); +} + static struct rte_flow * ice_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -616,6 +503,10 @@ ice_flow_create(struct rte_eth_dev *dev, struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct rte_flow *flow = NULL; int ret; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct ice_flow_engine *engine = NULL; + void *meta; flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0); if (!flow) { @@ -625,21 +516,28 @@ ice_flow_create(struct rte_eth_dev *dev, return flow; } - ret = ice_flow_validate(dev, attr, pattern, actions, error); + ret = ice_flow_validate_filter(dev, attr, pattern, actions, + &engine, &meta, error); if (ret < 0) goto free_flow; - ret = ice_create_switch_filter(pf, pattern, actions, flow, error); + if (engine->create == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid engine"); + goto free_flow; + } + + ret = engine->create(ad, flow, meta, error); if (ret) goto free_flow; + flow->engine = engine; TAILQ_INSERT_TAIL(&pf->flow_list, flow, node); return flow; free_flow: - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to create flow."); + PMD_DRV_LOG(ERR, "Failed to create flow"); rte_free(flow); return NULL; } @@ -650,17 +548,24 @@ ice_flow_destroy(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); int ret = 0; - ret = ice_destroy_switch_filter(pf, flow, error); + if (!flow || !flow->engine || !flow->engine->destroy) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + ret = flow->engine->destroy(ad, flow, error); if (!ret) { TAILQ_REMOVE(&pf->flow_list, flow, node); rte_free(flow); } else { - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to destroy flow."); + PMD_DRV_LOG(ERR, "Failed to destroy flow"); } return ret; @@ -678,12 +583,46 @@ ice_flow_flush(struct rte_eth_dev *dev, TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) { ret = ice_flow_destroy(dev, p_flow, error); if (ret) { - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to flush SW flows."); - return -rte_errno; + PMD_DRV_LOG(ERR, "Failed to flush flows"); + return -EINVAL; } } return ret; } + +static int +ice_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error 
*error) +{ + int ret = -EINVAL; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_flow_query_count *count = data; + + if (!flow || !flow->engine || !flow->engine->query_count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow->engine->query_count(ad, flow, count, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + return ret; +} diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h index 1953905f7..9f6d8f2b7 100644 --- a/drivers/net/ice/ice_generic_flow.h +++ b/drivers/net/ice/ice_generic_flow.h @@ -7,615 +7,178 @@ #include -struct ice_flow_pattern { - enum rte_flow_item_type *items; - uint64_t sw_fields; -}; - -#define ICE_INSET_NONE 0x00000000000000000ULL - -/* bit0 ~ bit 7 */ -#define ICE_INSET_SMAC 0x0000000000000001ULL -#define ICE_INSET_DMAC 0x0000000000000002ULL -#define ICE_INSET_ETHERTYPE 0x0000000000000020ULL - -/* bit 8 ~ bit 15 */ -#define ICE_INSET_IPV4_SRC 0x0000000000000100ULL -#define ICE_INSET_IPV4_DST 0x0000000000000200ULL -#define ICE_INSET_IPV6_SRC 0x0000000000000400ULL -#define ICE_INSET_IPV6_DST 0x0000000000000800ULL -#define ICE_INSET_SRC_PORT 0x0000000000001000ULL -#define ICE_INSET_DST_PORT 0x0000000000002000ULL -#define ICE_INSET_ARP 0x0000000000004000ULL - -/* bit 16 ~ bit 31 */ -#define ICE_INSET_IPV4_TOS 0x0000000000010000ULL -#define ICE_INSET_IPV4_PROTO 0x0000000000020000ULL -#define ICE_INSET_IPV4_TTL 0x0000000000040000ULL -#define ICE_INSET_IPV6_TOS 0x0000000000100000ULL -#define ICE_INSET_IPV6_PROTO 0x0000000000200000ULL -#define ICE_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL -#define ICE_INSET_ICMP 0x0000000001000000ULL -#define ICE_INSET_ICMP6 0x0000000002000000ULL - -/* bit 32 ~ bit 47, tunnel fields */ -#define ICE_INSET_TUN_SMAC 0x0000000100000000ULL -#define ICE_INSET_TUN_DMAC 0x0000000200000000ULL -#define ICE_INSET_TUN_IPV4_SRC 0x0000000400000000ULL -#define ICE_INSET_TUN_IPV4_DST 0x0000000800000000ULL -#define ICE_INSET_TUN_IPV4_TTL 0x0000001000000000ULL -#define ICE_INSET_TUN_IPV4_PROTO 0x0000002000000000ULL -#define ICE_INSET_TUN_IPV6_SRC 0x0000004000000000ULL -#define ICE_INSET_TUN_IPV6_DST 0x0000008000000000ULL -#define ICE_INSET_TUN_IPV6_TTL 0x0000010000000000ULL -#define ICE_INSET_TUN_IPV6_PROTO 0x0000020000000000ULL -#define ICE_INSET_TUN_SRC_PORT 0x0000040000000000ULL -#define ICE_INSET_TUN_DST_PORT 0x0000080000000000ULL -#define ICE_INSET_TUN_ID 0x0000100000000000ULL - -/* bit 48 ~ bit 55 */ -#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL - -#define ICE_FLAG_VLAN_INNER 0x00000001ULL -#define ICE_FLAG_VLAN_OUTER 0x00000002ULL - -#define INSET_ETHER ( \ - ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) -#define INSET_MAC_IPV4 ( \ - ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ - ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS) -#define INSET_MAC_IPV4_L4 ( \ - ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ - ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \ - ICE_INSET_SRC_PORT) -#define INSET_MAC_IPV4_ICMP ( \ - ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ - ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_ICMP) -#define INSET_MAC_IPV6 ( \ - 
ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ - ICE_INSET_IPV6_TOS | ICE_INSET_IPV6_HOP_LIMIT) -#define INSET_MAC_IPV6_L4 ( \ - ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ - ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | \ - ICE_INSET_DST_PORT | ICE_INSET_SRC_PORT) -#define INSET_MAC_IPV6_ICMP ( \ - ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ - ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | ICE_INSET_ICMP6) -#define INSET_TUNNEL_IPV4_TYPE1 ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \ - ICE_INSET_TUN_ID) -#define INSET_TUNNEL_IPV4_TYPE2 ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \ - ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \ - ICE_INSET_TUN_ID) -#define INSET_TUNNEL_IPV4_TYPE3 ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP | \ - ICE_INSET_TUN_ID) -#define INSET_TUNNEL_IPV6_TYPE1 ( \ - ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \ - ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \ - ICE_INSET_TUN_ID) -#define INSET_TUNNEL_IPV6_TYPE2 ( \ - ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \ - ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \ - ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \ - ICE_INSET_TUN_ID) -#define INSET_TUNNEL_IPV6_TYPE3 ( \ - ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \ - ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6 | \ - ICE_INSET_TUN_ID) - -/* L2 */ -static enum rte_flow_item_type pattern_ethertype[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* non-tunnel IPv4 */ -static enum rte_flow_item_type pattern_ipv4[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_icmp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_ICMP, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* non-tunnel IPv6 */ -static enum rte_flow_item_type pattern_ipv6[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv6_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv6_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv6_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv6_icmp6[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_ICMP6, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* IPv4 VXLAN IPv4 */ -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV4, 
- RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_ICMP, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* IPv4 VXLAN MAC IPv4 */ -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_ICMP, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* IPv4 VXLAN IPv6 */ -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum 
rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_ICMP, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* IPv4 VXLAN MAC IPv6 */ -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_ICMP, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* IPv4 NVGRE IPv4 */ -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_ICMP, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* IPv4 NVGRE MAC IPv4 */ -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - 
RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_ICMP, - RTE_FLOW_ITEM_TYPE_END, -}; - -/* IPv4 NVGRE IPv6 */ -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - - -/* IPv4 NVGRE MAC IPv6 */ -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = { - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_SCTP, - RTE_FLOW_ITEM_TYPE_END, -}; - -static struct ice_flow_pattern ice_supported_patterns[] = { - {pattern_ethertype, INSET_ETHER}, - {pattern_ipv4, INSET_MAC_IPV4}, - {pattern_ipv4_udp, INSET_MAC_IPV4_L4}, - {pattern_ipv4_sctp, INSET_MAC_IPV4_L4}, - {pattern_ipv4_tcp, INSET_MAC_IPV4_L4}, - {pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP}, - {pattern_ipv6, INSET_MAC_IPV6}, - {pattern_ipv6_udp, INSET_MAC_IPV6_L4}, - {pattern_ipv6_sctp, INSET_MAC_IPV6_L4}, - {pattern_ipv6_tcp, INSET_MAC_IPV6_L4}, - {pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP}, - {pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1}, - {pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3}, - {pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1}, - {pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2}, 
- {pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3}, - {pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1}, - {pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3}, - {pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1}, - {pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3}, - {pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1}, - {pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3}, - {pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1}, - {pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2}, - {pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3}, - {pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1}, - {pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1}, - {pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2}, - {pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2}, -}; - +#define ICE_INSET_NONE 0ULL + +/* bit0 ~ bit 11 */ +#define ICE_INSET_SMAC (1ULL << 0) +#define ICE_INSET_DMAC (1ULL << 1) +#define ICE_INSET_VLAN_INNER (1ULL << 2) +#define ICE_INSET_VLAN_OUTER (1ULL << 3) +#define ICE_INSET_ETHERTYPE (1ULL << 4) +#define ICE_INSET_ARP_SHA (1ULL << 5) +#define ICE_INSET_ARP_SPA (1ULL << 6) +#define ICE_INSET_ARP_THA (1ULL << 7) +#define ICE_INSET_ARP_TPA (1ULL << 8) +#define ICE_INSET_ARP_OP (1ULL << 9) + +/* bit 12 ~ bit 23 */ +#define ICE_INSET_IPV4_SRC (1ULL << 12) +#define ICE_INSET_IPV4_DST (1ULL << 13) +#define ICE_INSET_IPV4_TOS (1ULL << 14) +#define ICE_INSET_IPV4_PROTO (1ULL << 15) +#define ICE_INSET_IPV4_TTL (1ULL << 16) +#define ICE_INSET_IPV6_SRC (1ULL << 17) +#define ICE_INSET_IPV6_DST (1ULL << 18) +#define ICE_INSET_IPV6_NEXT_HDR (1ULL << 19) +#define ICE_INSET_IPV6_HOP_LIMIT (1ULL << 20) +#define ICE_INSET_IPV6_TC (1ULL << 21) +#define ICE_INSET_TCP_FLAGS (1ULL << 22) + +/* bit 24 ~ bit 35 */ +#define ICE_INSET_ICMP_TYPE (1ULL << 24) +#define ICE_INSET_ICMP_CODE (1ULL << 25) +#define ICE_INSET_ICMP6_TYPE (1ULL << 26) +#define ICE_INSET_ICMP6_CODE (1ULL << 27) +#define ICE_INSET_TCP_SRC_PORT (1ULL << 28) +#define ICE_INSET_TCP_DST_PORT (1ULL << 29) +#define ICE_INSET_UDP_SRC_PORT (1ULL << 30) +#define ICE_INSET_UDP_DST_PORT (1ULL << 31) +#define ICE_INSET_SCTP_SRC_PORT (1ULL << 32) +#define ICE_INSET_SCTP_DST_PORT (1ULL << 33) +#define ICE_INSET_ICMP_SRC_PORT (1ULL << 34) +#define ICE_INSET_ICMP_DST_PORT (1ULL << 35) + +/* bit 36 ~ bit 59, tunnel fields */ +#define ICE_INSET_TUN_SMAC (1ULL << 36) +#define ICE_INSET_TUN_DMAC (1ULL << 37) +#define ICE_INSET_TUN_IPV4_SRC (1ULL << 38) +#define ICE_INSET_TUN_IPV4_DST (1ULL << 39) +#define ICE_INSET_TUN_IPV4_TTL (1ULL << 40) +#define 
ICE_INSET_TUN_IPV4_PROTO (1ULL << 41) +#define ICE_INSET_TUN_IPV4_TOS (1ULL << 42) +#define ICE_INSET_TUN_IPV6_SRC (1ULL << 43) +#define ICE_INSET_TUN_IPV6_DST (1ULL << 44) +#define ICE_INSET_TUN_IPV6_HOP_LIMIT (1ULL << 45) +#define ICE_INSET_TUN_IPV6_NEXT_HDR (1ULL << 46) +#define ICE_INSET_TUN_IPV6_TC (1ULL << 47) +#define ICE_INSET_TUN_SRC_PORT (1ULL << 48) +#define ICE_INSET_TUN_DST_PORT (1ULL << 49) +#define ICE_INSET_TUN_ICMP_TYPE (1ULL << 50) +#define ICE_INSET_TUN_ICMP_CODE (1ULL << 51) +#define ICE_INSET_TUN_ICMP6_TYPE (1ULL << 52) +#define ICE_INSET_TUN_ICMP6_CODE (1ULL << 53) +#define ICE_INSET_TUN_ID (1ULL << 54) +#define ICE_INSET_TUN_TYPE (1ULL << 55) +#define ICE_INSET_GTPU_TEID (1ULL << 56) +#define ICE_INSET_GTPU_QFI (1ULL << 57) +#define ICE_INSET_GTP_EH_PDU (1ULL << 58) +#define ICE_INSET_TUN_TCP_FLAGS (1ULL << 59) + +/* bit 60 ~ bit 63 */ +#define ICE_INSET_LAST_ETHER_TYPE (1ULL << 60) + + +struct ice_adapter; + +extern const struct rte_flow_ops ice_flow_ops; + +/* Engine types. */ +enum ice_flow_engine_type { + ICE_FLOW_ENGINE_NONE = 0, + ICE_FLOW_ENGINE_FDIR, + ICE_FLOW_ENGINE_SWITCH, + ICE_FLOW_ENGINE_HASH, + ICE_FLOW_ENGINE_ACL, + ICE_FLOW_ENGINE_MAX, +}; + +/** + * Classification stages. + * For non-pipeline mode, there are two classification stages: Distributor/RSS. + * For pipeline mode, there are three classification stages: + * Permission/Distributor/RSS. + */ +enum ice_flow_classification_stage { + ICE_FLOW_STAGE_NONE = 0, + ICE_FLOW_STAGE_RSS, + ICE_FLOW_STAGE_PERMISSION, + ICE_FLOW_STAGE_DISTRIBUTOR, + ICE_FLOW_STAGE_MAX, +}; +/* Pattern structure */ +struct ice_pattern_match_item { + enum rte_flow_item_type *pattern_list; + /* pattern_list must end with RTE_FLOW_ITEM_TYPE_END */ + uint64_t input_set_mask; + uint64_t meta; +}; + +typedef int (*engine_init_t)(struct ice_adapter *ad); +typedef void (*engine_uninit_t)(struct ice_adapter *ad); +typedef int (*engine_create_t)(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); +typedef int (*engine_destroy_t)(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error); +typedef int (*engine_query_t)(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_query_count *count, + struct rte_flow_error *error); +typedef void (*engine_free_t)(struct rte_flow *flow); +typedef int (*parse_pattern_action_t)(struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error); + +/* Struct to store a created engine. */ +struct ice_flow_engine { + TAILQ_ENTRY(ice_flow_engine) node; + engine_init_t init; + engine_uninit_t uninit; + engine_create_t create; + engine_destroy_t destroy; + engine_query_t query_count; + engine_free_t free; + enum ice_flow_engine_type type; +}; +TAILQ_HEAD(ice_engine_list, ice_flow_engine); + +/* Struct to store a created flow. */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) node; + struct ice_flow_engine *engine; + void *rule; +}; + +struct ice_flow_parser { + struct ice_flow_engine *engine; + struct ice_pattern_match_item *array; + uint32_t array_len; + parse_pattern_action_t parse_pattern_action; + enum ice_flow_classification_stage stage; +}; + +/* Struct to store a created parser. 
*/ +struct ice_flow_parser_node { + TAILQ_ENTRY(ice_flow_parser_node) node; + struct ice_flow_parser *parser; +}; + +void ice_register_flow_engine(struct ice_flow_engine *engine); +int ice_flow_init(struct ice_adapter *ad); +void ice_flow_uninit(struct ice_adapter *ad); +int ice_register_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad); +void ice_unregister_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad); +struct ice_pattern_match_item * +ice_search_pattern_match_item(const struct rte_flow_item pattern[], + struct ice_pattern_match_item *array, + uint32_t array_len, + struct rte_flow_error *error); #endif diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index b88b4f59a..6b72bf252 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -2,515 +2,4 @@ * Copyright(c) 2019 Intel Corporation */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ice_logs.h" -#include "base/ice_type.h" -#include "ice_switch_filter.h" - -static int -ice_parse_switch_filter(const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error, - struct ice_adv_lkup_elem *list, - uint16_t *lkups_num, - enum ice_sw_tunnel_type tun_type) -{ - const struct rte_flow_item *item = pattern; - enum rte_flow_item_type item_type; - const struct rte_flow_item_eth *eth_spec, *eth_mask; - const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; - const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; - const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; - const struct rte_flow_item_udp *udp_spec, *udp_mask; - const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; - const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask; - const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; - uint16_t j, t = 0; - uint16_t tunnel_valid = 0; - - for (item = pattern; item->type != - RTE_FLOW_ITEM_TYPE_END; item++) { - item_type = item->type; - - switch (item_type) { - case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = item->spec; - eth_mask = item->mask; - if (eth_spec && eth_mask) { - list[t].type = (tun_type == ICE_NON_TUN) ? - ICE_MAC_OFOS : ICE_MAC_IL; - struct ice_ether_hdr *h; - struct ice_ether_hdr *m; - uint16_t i = 0; - h = &list[t].h_u.eth_hdr; - m = &list[t].m_u.eth_hdr; - for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { - if (eth_mask->src.addr_bytes[j] == - UINT8_MAX) { - h->src_addr[j] = - eth_spec->src.addr_bytes[j]; - m->src_addr[j] = - eth_mask->src.addr_bytes[j]; - i = 1; - } - if (eth_mask->dst.addr_bytes[j] == - UINT8_MAX) { - h->dst_addr[j] = - eth_spec->dst.addr_bytes[j]; - m->dst_addr[j] = - eth_mask->dst.addr_bytes[j]; - i = 1; - } - } - if (i) - t++; - if (eth_mask->type == UINT16_MAX) { - list[t].type = ICE_ETYPE_OL; - list[t].h_u.ethertype.ethtype_id = - eth_spec->type; - list[t].m_u.ethertype.ethtype_id = - UINT16_MAX; - t++; - } - } else if (!eth_spec && !eth_mask) { - list[t].type = (tun_type == ICE_NON_TUN) ? - ICE_MAC_OFOS : ICE_MAC_IL; - } - break; - - case RTE_FLOW_ITEM_TYPE_IPV4: - ipv4_spec = item->spec; - ipv4_mask = item->mask; - if (ipv4_spec && ipv4_mask) { - list[t].type = (tun_type == ICE_NON_TUN) ? 
- ICE_IPV4_OFOS : ICE_IPV4_IL; - if (ipv4_mask->hdr.src_addr == UINT32_MAX) { - list[t].h_u.ipv4_hdr.src_addr = - ipv4_spec->hdr.src_addr; - list[t].m_u.ipv4_hdr.src_addr = - UINT32_MAX; - } - if (ipv4_mask->hdr.dst_addr == UINT32_MAX) { - list[t].h_u.ipv4_hdr.dst_addr = - ipv4_spec->hdr.dst_addr; - list[t].m_u.ipv4_hdr.dst_addr = - UINT32_MAX; - } - if (ipv4_mask->hdr.time_to_live == UINT8_MAX) { - list[t].h_u.ipv4_hdr.time_to_live = - ipv4_spec->hdr.time_to_live; - list[t].m_u.ipv4_hdr.time_to_live = - UINT8_MAX; - } - if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) { - list[t].h_u.ipv4_hdr.protocol = - ipv4_spec->hdr.next_proto_id; - list[t].m_u.ipv4_hdr.protocol = - UINT8_MAX; - } - if (ipv4_mask->hdr.type_of_service == - UINT8_MAX) { - list[t].h_u.ipv4_hdr.tos = - ipv4_spec->hdr.type_of_service; - list[t].m_u.ipv4_hdr.tos = UINT8_MAX; - } - t++; - } else if (!ipv4_spec && !ipv4_mask) { - list[t].type = (tun_type == ICE_NON_TUN) ? - ICE_IPV4_OFOS : ICE_IPV4_IL; - } - break; - - case RTE_FLOW_ITEM_TYPE_IPV6: - ipv6_spec = item->spec; - ipv6_mask = item->mask; - if (ipv6_spec && ipv6_mask) { - list[t].type = (tun_type == ICE_NON_TUN) ? - ICE_IPV6_OFOS : ICE_IPV6_IL; - struct ice_ipv6_hdr *f; - struct ice_ipv6_hdr *s; - f = &list[t].h_u.ipv6_hdr; - s = &list[t].m_u.ipv6_hdr; - for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) { - if (ipv6_mask->hdr.src_addr[j] == - UINT8_MAX) { - f->src_addr[j] = - ipv6_spec->hdr.src_addr[j]; - s->src_addr[j] = - ipv6_mask->hdr.src_addr[j]; - } - if (ipv6_mask->hdr.dst_addr[j] == - UINT8_MAX) { - f->dst_addr[j] = - ipv6_spec->hdr.dst_addr[j]; - s->dst_addr[j] = - ipv6_mask->hdr.dst_addr[j]; - } - } - if (ipv6_mask->hdr.proto == UINT8_MAX) { - f->next_hdr = - ipv6_spec->hdr.proto; - s->next_hdr = UINT8_MAX; - } - if (ipv6_mask->hdr.hop_limits == UINT8_MAX) { - f->hop_limit = - ipv6_spec->hdr.hop_limits; - s->hop_limit = UINT8_MAX; - } - t++; - } else if (!ipv6_spec && !ipv6_mask) { - list[t].type = (tun_type == ICE_NON_TUN) ? 
- ICE_IPV4_OFOS : ICE_IPV4_IL; - } - break; - - case RTE_FLOW_ITEM_TYPE_UDP: - udp_spec = item->spec; - udp_mask = item->mask; - if (udp_spec && udp_mask) { - if (tun_type == ICE_SW_TUN_VXLAN && - tunnel_valid == 0) - list[t].type = ICE_UDP_OF; - else - list[t].type = ICE_UDP_ILOS; - if (udp_mask->hdr.src_port == UINT16_MAX) { - list[t].h_u.l4_hdr.src_port = - udp_spec->hdr.src_port; - list[t].m_u.l4_hdr.src_port = - udp_mask->hdr.src_port; - } - if (udp_mask->hdr.dst_port == UINT16_MAX) { - list[t].h_u.l4_hdr.dst_port = - udp_spec->hdr.dst_port; - list[t].m_u.l4_hdr.dst_port = - udp_mask->hdr.dst_port; - } - t++; - } else if (!udp_spec && !udp_mask) { - list[t].type = ICE_UDP_ILOS; - } - break; - - case RTE_FLOW_ITEM_TYPE_TCP: - tcp_spec = item->spec; - tcp_mask = item->mask; - if (tcp_spec && tcp_mask) { - list[t].type = ICE_TCP_IL; - if (tcp_mask->hdr.src_port == UINT16_MAX) { - list[t].h_u.l4_hdr.src_port = - tcp_spec->hdr.src_port; - list[t].m_u.l4_hdr.src_port = - tcp_mask->hdr.src_port; - } - if (tcp_mask->hdr.dst_port == UINT16_MAX) { - list[t].h_u.l4_hdr.dst_port = - tcp_spec->hdr.dst_port; - list[t].m_u.l4_hdr.dst_port = - tcp_mask->hdr.dst_port; - } - t++; - } else if (!tcp_spec && !tcp_mask) { - list[t].type = ICE_TCP_IL; - } - break; - - case RTE_FLOW_ITEM_TYPE_SCTP: - sctp_spec = item->spec; - sctp_mask = item->mask; - if (sctp_spec && sctp_mask) { - list[t].type = ICE_SCTP_IL; - if (sctp_mask->hdr.src_port == UINT16_MAX) { - list[t].h_u.sctp_hdr.src_port = - sctp_spec->hdr.src_port; - list[t].m_u.sctp_hdr.src_port = - sctp_mask->hdr.src_port; - } - if (sctp_mask->hdr.dst_port == UINT16_MAX) { - list[t].h_u.sctp_hdr.dst_port = - sctp_spec->hdr.dst_port; - list[t].m_u.sctp_hdr.dst_port = - sctp_mask->hdr.dst_port; - } - t++; - } else if (!sctp_spec && !sctp_mask) { - list[t].type = ICE_SCTP_IL; - } - break; - - case RTE_FLOW_ITEM_TYPE_VXLAN: - vxlan_spec = item->spec; - vxlan_mask = item->mask; - tunnel_valid = 1; - if (vxlan_spec && vxlan_mask) { - list[t].type = ICE_VXLAN; - if (vxlan_mask->vni[0] == UINT8_MAX && - vxlan_mask->vni[1] == UINT8_MAX && - vxlan_mask->vni[2] == UINT8_MAX) { - list[t].h_u.tnl_hdr.vni = - (vxlan_spec->vni[2] << 16) | - (vxlan_spec->vni[1] << 8) | - vxlan_spec->vni[0]; - list[t].m_u.tnl_hdr.vni = - UINT32_MAX; - } - t++; - } else if (!vxlan_spec && !vxlan_mask) { - list[t].type = ICE_VXLAN; - } - break; - - case RTE_FLOW_ITEM_TYPE_NVGRE: - nvgre_spec = item->spec; - nvgre_mask = item->mask; - tunnel_valid = 1; - if (nvgre_spec && nvgre_mask) { - list[t].type = ICE_NVGRE; - if (nvgre_mask->tni[0] == UINT8_MAX && - nvgre_mask->tni[1] == UINT8_MAX && - nvgre_mask->tni[2] == UINT8_MAX) { - list[t].h_u.nvgre_hdr.tni_flow = - (nvgre_spec->tni[2] << 16) | - (nvgre_spec->tni[1] << 8) | - nvgre_spec->tni[0]; - list[t].m_u.nvgre_hdr.tni_flow = - UINT32_MAX; - } - t++; - } else if (!nvgre_spec && !nvgre_mask) { - list[t].type = ICE_NVGRE; - } - break; - - case RTE_FLOW_ITEM_TYPE_VOID: - case RTE_FLOW_ITEM_TYPE_END: - break; - - default: - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, actions, - "Invalid pattern item."); - goto out; - } - } - - *lkups_num = t; - - return 0; -out: - return -rte_errno; -} - -/* By now ice switch filter action code implement only - * supports QUEUE or DROP. 
- */ -static int -ice_parse_switch_action(struct ice_pf *pf, - const struct rte_flow_action *actions, - struct rte_flow_error *error, - struct ice_adv_rule_info *rule_info) -{ - struct ice_vsi *vsi = pf->main_vsi; - const struct rte_flow_action_queue *act_q; - uint16_t base_queue; - const struct rte_flow_action *action; - enum rte_flow_action_type action_type; - - base_queue = pf->base_queue; - for (action = actions; action->type != - RTE_FLOW_ACTION_TYPE_END; action++) { - action_type = action->type; - switch (action_type) { - case RTE_FLOW_ACTION_TYPE_QUEUE: - act_q = action->conf; - rule_info->sw_act.fltr_act = - ICE_FWD_TO_Q; - rule_info->sw_act.fwd_id.q_id = - base_queue + act_q->index; - break; - - case RTE_FLOW_ACTION_TYPE_DROP: - rule_info->sw_act.fltr_act = - ICE_DROP_PACKET; - break; - - case RTE_FLOW_ACTION_TYPE_VOID: - break; - - default: - rte_flow_error_set(error, - EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - actions, - "Invalid action type"); - return -rte_errno; - } - } - - rule_info->sw_act.vsi_handle = vsi->idx; - rule_info->rx = 1; - rule_info->sw_act.src = vsi->idx; - rule_info->priority = 5; - - return 0; -} - -static int -ice_switch_rule_set(struct ice_pf *pf, - struct ice_adv_lkup_elem *list, - uint16_t lkups_cnt, - struct ice_adv_rule_info *rule_info, - struct rte_flow *flow, - struct rte_flow_error *error) -{ - struct ice_hw *hw = ICE_PF_TO_HW(pf); - int ret; - struct ice_rule_query_data rule_added = {0}; - struct ice_rule_query_data *filter_ptr; - - if (lkups_cnt > ICE_MAX_CHAIN_WORDS) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, NULL, - "item number too large for rule"); - return -rte_errno; - } - if (!list) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, NULL, - "lookup list should not be NULL"); - return -rte_errno; - } - - ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added); - - if (!ret) { - filter_ptr = rte_zmalloc("ice_switch_filter", - sizeof(struct ice_rule_query_data), 0); - if (!filter_ptr) { - PMD_DRV_LOG(ERR, "failed to allocate memory"); - return -EINVAL; - } - flow->rule = filter_ptr; - rte_memcpy(filter_ptr, - &rule_added, - sizeof(struct ice_rule_query_data)); - } - - return ret; -} - -int -ice_create_switch_filter(struct ice_pf *pf, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow *flow, - struct rte_flow_error *error) -{ - int ret = 0; - struct ice_adv_rule_info rule_info = {0}; - struct ice_adv_lkup_elem *list = NULL; - uint16_t lkups_num = 0; - const struct rte_flow_item *item = pattern; - uint16_t item_num = 0; - enum ice_sw_tunnel_type tun_type = ICE_NON_TUN; - - for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { - item_num++; - if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) - tun_type = ICE_SW_TUN_VXLAN; - if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) - tun_type = ICE_SW_TUN_NVGRE; - /* reserve one more memory slot for ETH which may - * consume 2 lookup items. 
- */ - if (item->type == RTE_FLOW_ITEM_TYPE_ETH) - item_num++; - } - rule_info.tun_type = tun_type; - - list = rte_zmalloc(NULL, item_num * sizeof(*list), 0); - if (!list) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "No memory for PMD internal items"); - return -rte_errno; - } - - ret = ice_parse_switch_filter(pattern, actions, error, - list, &lkups_num, tun_type); - if (ret) - goto error; - - ret = ice_parse_switch_action(pf, actions, error, &rule_info); - if (ret) - goto error; - - ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error); - if (ret) - goto error; - - rte_free(list); - return 0; - -error: - rte_free(list); - - return -rte_errno; -} - -int -ice_destroy_switch_filter(struct ice_pf *pf, - struct rte_flow *flow, - struct rte_flow_error *error) -{ - struct ice_hw *hw = ICE_PF_TO_HW(pf); - int ret; - struct ice_rule_query_data *filter_ptr; - - filter_ptr = (struct ice_rule_query_data *) - flow->rule; - - if (!filter_ptr) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "no such flow" - " create by switch filter"); - return -rte_errno; - } - - ret = ice_rem_adv_rule_by_id(hw, filter_ptr); - if (ret) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "fail to destroy switch filter rule"); - return -rte_errno; - } - - rte_free(filter_ptr); - return ret; -} - -void -ice_free_switch_filter_rule(void *rule) -{ - struct ice_rule_query_data *filter_ptr; - - filter_ptr = (struct ice_rule_query_data *)rule; - - rte_free(filter_ptr); -} diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h index cea47990e..5afcddeaf 100644 --- a/drivers/net/ice/ice_switch_filter.h +++ b/drivers/net/ice/ice_switch_filter.h @@ -2,23 +2,5 @@ * Copyright(c) 2019 Intel Corporation */ -#ifndef _ICE_SWITCH_FILTER_H_ -#define _ICE_SWITCH_FILTER_H_ -#include "base/ice_switch.h" -#include "base/ice_type.h" -#include "ice_ethdev.h" -int -ice_create_switch_filter(struct ice_pf *pf, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow *flow, - struct rte_flow_error *error); -int -ice_destroy_switch_filter(struct ice_pf *pf, - struct rte_flow *flow, - struct rte_flow_error *error); -void -ice_free_switch_filter_rule(void *rule); -#endif /* _ICE_SWITCH_FILTER_H_ */ From patchwork Mon Sep 16 23:06:14 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ying Wang X-Patchwork-Id: 59303 X-Patchwork-Delegate: xiaolong.ye@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 75C1A1BFCD; Tue, 17 Sep 2019 09:29:03 +0200 (CEST) Received: from mga06.intel.com (mga06.intel.com [134.134.136.31]) by dpdk.org (Postfix) with ESMTP id E66821BFC2 for ; Tue, 17 Sep 2019 09:28:57 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 17 Sep 2019 00:28:57 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,515,1559545200"; d="scan'208";a="187384660" Received: from unknown (HELO npg-dpdk-cvl-yingwang-117d84.sh.intel.com) ([10.67.117.84]) by fmsmga007.fm.intel.com with ESMTP; 17 Sep 2019 00:28:55 -0700 From: Ying Wang To: xiaolong.ye@intel.com, qi.z.zhang@intel.com Cc: dev@dpdk.org, qiming.yang@intel.com, 
wei.zhao1@intel.com, ying.a.wang@intel.com Date: Tue, 17 Sep 2019 07:06:14 +0800 Message-Id: <20190916230615.411726-5-ying.a.wang@intel.com> X-Mailer: git-send-email 2.15.1 In-Reply-To: <20190916230615.411726-1-ying.a.wang@intel.com> References: <20190903221522.151382-2-ying.a.wang@intel.com> <20190916230615.411726-1-ying.a.wang@intel.com> Subject: [dpdk-dev] [PATCH v2 4/5] net/ice: add pattern manifest X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The patch adds a manifest of all supported patterns, from which each low-level filter engine can select entries to compose its supported pattern list. Signed-off-by: Ying Wang --- drivers/net/ice/ice_generic_flow.c | 1287 ++++++++++++++++++++++++++++++++++++ drivers/net/ice/ice_generic_flow.h | 200 ++++++ 2 files changed, 1487 insertions(+) diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c index 28a53cf66..4c3d31fac 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -61,6 +61,1293 @@ const struct rte_flow_ops ice_flow_ops = { }; +/* empty pattern */ +enum rte_flow_item_type pattern_empty[] = { + RTE_FLOW_ITEM_TYPE_END, +}; + +/* L2 */ +enum rte_flow_item_type pattern_ethertype[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_ethertype_vlan[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_ethertype_qinq[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* ARP */ +enum rte_flow_item_type pattern_eth_arp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* non-tunnel IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum 
rte_flow_item_type pattern_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* non-tunnel IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, 
+}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + 
RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + 
RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + 
RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, 
+}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + 
RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* GTPU */ +enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum 
rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, + +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* PPPoE */ +enum rte_flow_item_type pattern_eth_pppoed[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOED, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoed[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOED, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoed[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOED, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum 
rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + 
RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + void ice_register_flow_engine(struct ice_flow_engine *engine) { diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h index 9f6d8f2b7..d8da0cea4 100644 --- a/drivers/net/ice/ice_generic_flow.h +++ b/drivers/net/ice/ice_generic_flow.h @@ -77,6 +77,206 @@ /* bit 60 ~ bit 63 */ #define ICE_INSET_LAST_ETHER_TYPE (1ULL << 60) +/* empty pattern */ +extern enum rte_flow_item_type pattern_empty[]; + +/* L2 */ +extern enum rte_flow_item_type pattern_ethertype[]; +extern enum rte_flow_item_type pattern_ethertype_vlan[]; +extern enum rte_flow_item_type pattern_ethertype_qinq[]; + +/* ARP */ +extern enum rte_flow_item_type pattern_eth_arp[]; + +/* non-tunnel IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_icmp[]; + +/* non-tunnel IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_tcp[]; +extern enum 
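+/* Each pattern_* template declared here is an RTE_FLOW_ITEM_TYPE_END
+ * terminated array naming the protocol layers outermost first. A sketch
+ * of an application-side pattern such a template would accept, e.g. for
+ * pattern_eth_pppoes_ipv6_udp (illustrative only, field specs omitted):
+ *
+ *	struct rte_flow_item p[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_PPPOES },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ */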
rte_flow_item_type pattern_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_icmp6[]; + +/* IPv4 VXLAN IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_icmp[]; + +/* IPv4 VXLAN MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_icmp[]; + +/* IPv6 VXLAN IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_icmp[]; + +/* IPv6 VXLAN MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_icmp[]; + +/* IPv4 VXLAN IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_icmp6[]; + +/* IPv4 VXLAN MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_icmp6[]; + +/* IPv6 VXLAN IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_icmp6[]; + +/* IPv6 VXLAN MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_icmp6[]; + +/* IPv4 NVGRE IPv4 */ +extern enum rte_flow_item_type 
pattern_eth_ipv4_nvgre_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_icmp[]; + +/* IPv4 NVGRE MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_icmp[]; + +/* IPv6 NVGRE IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_icmp[]; + +/* IPv6 NVGRE MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_icmp[]; + +/* IPv4 NVGRE IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_icmp6[]; + +/* IPv4 NVGRE MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_icmp6[]; + +/* IPv6 NVGRE IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_icmp6[]; + +/* IPv6 NVGRE MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[]; + +/* GTPU */ +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_icmp[]; + +/* PPPoE */ +extern enum rte_flow_item_type pattern_eth_pppoed[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoed[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoed[]; +extern enum rte_flow_item_type pattern_eth_pppoes[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes[]; +extern enum 
rte_flow_item_type pattern_eth_pppoes_ipv4[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[]; struct ice_adapter; From patchwork Mon Sep 16 23:06:15 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ying Wang X-Patchwork-Id: 59304 X-Patchwork-Delegate: xiaolong.ye@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 35E391BFF4; Tue, 17 Sep 2019 09:29:08 +0200 (CEST) Received: from mga06.intel.com (mga06.intel.com [134.134.136.31]) by dpdk.org (Postfix) with ESMTP id 50B801BFC8 for ; Tue, 17 Sep 2019 09:29:02 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 17 Sep 2019 00:29:01 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,515,1559545200"; d="scan'208";a="187384666" Received: from unknown (HELO npg-dpdk-cvl-yingwang-117d84.sh.intel.com) ([10.67.117.84]) by fmsmga007.fm.intel.com with ESMTP; 17 Sep 2019 00:29:00 -0700 From: Ying Wang To: xiaolong.ye@intel.com, qi.z.zhang@intel.com Cc: dev@dpdk.org, qiming.yang@intel.com, wei.zhao1@intel.com, ying.a.wang@intel.com Date: Tue, 17 Sep 2019 07:06:15 +0800 Message-Id: <20190916230615.411726-6-ying.a.wang@intel.com> X-Mailer: git-send-email 2.15.1 In-Reply-To: <20190916230615.411726-1-ying.a.wang@intel.com> References: <20190903221522.151382-2-ying.a.wang@intel.com> <20190916230615.411726-1-ying.a.wang@intel.com> Subject: [dpdk-dev] [PATCH v2 5/5] 
net/ice: rework switch filter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Wei Zhao The patch reworks the packet processing engine's binary classifier (switch) for the new framework. It also adds support for new packet types, such as PPPoE, to the switch filter. Signed-off-by: Wei Zhao --- doc/guides/rel_notes/release_19_11.rst | 1 + drivers/net/ice/ice_switch_filter.c | 1146 ++++++++++++++++++++++++++++++++ drivers/net/ice/ice_switch_filter.h | 6 - 3 files changed, 1147 insertions(+), 6 deletions(-) delete mode 100644 drivers/net/ice/ice_switch_filter.h diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst index 7dc4f004e..c81e984f2 100644 --- a/doc/guides/rel_notes/release_19_11.rst +++ b/doc/guides/rel_notes/release_19_11.rst @@ -63,6 +63,7 @@ New Features * Supported device-specific DDP package loading. * Generic filter enhancement - Supported pipeline mode. + - Supported new packet types such as PPPoE for switch filter. Removed Items ------------- diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index 6b72bf252..9a0b42d30 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -2,4 +2,1150 @@ * Copyright(c) 2019 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "base/ice_type.h" +#include "base/ice_switch.h" +#include "ice_logs.h" +#include "ice_ethdev.h" +#include "ice_generic_flow.h" + +#define MAX_QGRP_NUM_TYPE 7 + +#define ICE_SW_INSET_ETHER ( \ + ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) +#define ICE_SW_INSET_MAC_IPV4 ( \ + ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ + ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS) +#define ICE_SW_INSET_MAC_IPV4_TCP ( \ + ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ + ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \ + ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT) +#define ICE_SW_INSET_MAC_IPV4_UDP ( \ + ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ + ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \ + ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT) +#define ICE_SW_INSET_MAC_IPV6 ( \ + ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ + ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \ + ICE_INSET_IPV6_NEXT_HDR) +#define ICE_SW_INSET_MAC_IPV6_TCP ( \ + ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ + ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \ + ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT) +#define ICE_SW_INSET_MAC_IPV6_UDP ( \ + ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ + ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \ + ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT) +#define ICE_SW_INSET_TUNNEL_IPV4_TYPE3 ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_ID | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_TUNNEL_IPV4_TYPE4 ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_ID | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_TUNNEL_IPV4_TYPE5 ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_IPV4_PROTO | 
ICE_INSET_TUN_IPV4_TOS) +#define ICE_SW_INSET_TUNNEL_IPV4_TYPE6 ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \ + ICE_INSET_TUN_IPV4_TOS) +#define ICE_SW_INSET_MAC_PPPOE ( \ + ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \ + ICE_INSET_DMAC) + +struct sw_meta { + struct ice_adv_lkup_elem *list; + uint16_t lkups_num; + struct ice_adv_rule_info rule_info; +}; + +static struct ice_flow_parser ice_switch_dist_parser_os; +static struct ice_flow_parser ice_switch_dist_parser_comms; +static struct ice_flow_parser ice_switch_perm_parser; + +static struct +ice_pattern_match_item ice_switch_pattern_dist_comms[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, + ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, + ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6, + ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, + ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, + ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_SW_INSET_TUNNEL_IPV4_TYPE3, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, + ICE_SW_INSET_TUNNEL_IPV4_TYPE3, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, + {pattern_eth_pppoed, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoed, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, +}; + +static struct +ice_pattern_match_item ice_switch_pattern_dist_os[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, + {pattern_eth_arp, + ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, + ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, + ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6, + ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, + ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, + ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_SW_INSET_TUNNEL_IPV4_TYPE3, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, + ICE_SW_INSET_TUNNEL_IPV4_TYPE3, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE4, ICE_INSET_NONE}, +}; + +static struct +ice_pattern_match_item ice_switch_pattern_perm[] = { + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, + ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, + ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6, + ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, + ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, + 
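+/* Each match entry pairs a pattern template with the input-set bits the
+ * switch engine can offload for it. ice_switch_parse_pattern_action()
+ * later rejects any flow whose collected input set falls outside that
+ * mask, essentially (a sketch, not literal code):
+ *
+ *	if (!inputset || (inputset & ~pattern_match_item->input_set_mask))
+ *		-> "Invalid input set"
+ */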
{pattern_eth_ipv6_tcp, + ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_SW_INSET_TUNNEL_IPV4_TYPE5, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE6, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE6, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, + ICE_SW_INSET_TUNNEL_IPV4_TYPE5, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE6, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, + ICE_SW_INSET_TUNNEL_IPV4_TYPE6, ICE_INSET_NONE}, +}; + +static int +ice_switch_create(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + int ret = 0; + struct ice_pf *pf = &ad->pf; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_rule_query_data rule_added = {0}; + struct ice_rule_query_data *filter_ptr; + struct ice_adv_lkup_elem *list = + ((struct sw_meta *)meta)->list; + uint16_t lkups_cnt = + ((struct sw_meta *)meta)->lkups_num; + struct ice_adv_rule_info *rule_info = + &((struct sw_meta *)meta)->rule_info; + + if (lkups_cnt > ICE_MAX_CHAIN_WORDS) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "item number too large for rule"); + goto error; + } + if (!list) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "lookup list should not be NULL"); + goto error; + } + ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added); + if (!ret) { + filter_ptr = rte_zmalloc("ice_switch_filter", + sizeof(struct ice_rule_query_data), 0); + if (!filter_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for ice_switch_filter"); + goto error; + } + flow->rule = filter_ptr; + rte_memcpy(filter_ptr, + &rule_added, + sizeof(struct ice_rule_query_data)); + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "failed to create switch filter rule"); + goto error; + } + + rte_free(list); + rte_free(meta); + return 0; + +error: + rte_free(list); + rte_free(meta); + + return -rte_errno; +} + +static int +ice_switch_destroy(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct ice_hw *hw = &ad->hw; + int ret; + struct ice_rule_query_data *filter_ptr; + + filter_ptr = (struct ice_rule_query_data *) + flow->rule; + + if (!filter_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "no such flow" + " created by switch filter"); + return -rte_errno; + } + + ret = ice_rem_adv_rule_by_id(hw, filter_ptr); + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "failed to destroy switch filter rule"); + return -rte_errno; + } + + rte_free(filter_ptr); + return ret; +} + +static void +ice_switch_filter_rule_free(struct rte_flow *flow) +{ + rte_free(flow->rule); +} + +static uint64_t +ice_switch_inset_get(const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct ice_adv_lkup_elem *list, + uint16_t *lkups_num, + enum ice_sw_tunnel_type tun_type) +{ + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct 
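+/* Ownership note: the lookup list and the sw_meta wrapper produced by the
+ * parser are consumed by ice_switch_create() on both the success and the
+ * failure path, while the rule id returned by ice_add_adv_rule() is kept
+ * in flow->rule for ice_switch_destroy()/ice_switch_filter_rule_free(). */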
rte_flow_item_nvgre *nvgre_spec, *nvgre_mask; + const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask; + uint8_t ipv6_addr_mask[16] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + uint64_t input_set = ICE_INSET_NONE; + uint16_t j, t = 0; + uint16_t tunnel_valid = 0; + + + for (item = pattern; item->type != + RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return 0; + } + item_type = item->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + if (eth_spec && eth_mask) { + if (rte_is_broadcast_ether_addr(&eth_mask->src)) + input_set |= ICE_INSET_SMAC; + if (rte_is_broadcast_ether_addr(&eth_mask->dst)) + input_set |= ICE_INSET_DMAC; + if (eth_mask->type == RTE_BE16(0xffff)) + input_set |= ICE_INSET_ETHERTYPE; + list[t].type = (tunnel_valid == 0) ? + ICE_MAC_OFOS : ICE_MAC_IL; + struct ice_ether_hdr *h; + struct ice_ether_hdr *m; + uint16_t i = 0; + h = &list[t].h_u.eth_hdr; + m = &list[t].m_u.eth_hdr; + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (eth_mask->src.addr_bytes[j] == + UINT8_MAX) { + h->src_addr[j] = + eth_spec->src.addr_bytes[j]; + m->src_addr[j] = + eth_mask->src.addr_bytes[j]; + i = 1; + } + if (eth_mask->dst.addr_bytes[j] == + UINT8_MAX) { + h->dst_addr[j] = + eth_spec->dst.addr_bytes[j]; + m->dst_addr[j] = + eth_mask->dst.addr_bytes[j]; + i = 1; + } + } + if (i) + t++; + if (eth_mask->type == UINT16_MAX) { + list[t].type = ICE_ETYPE_OL; + list[t].h_u.ethertype.ethtype_id = + eth_spec->type; + list[t].m_u.ethertype.ethtype_id = + UINT16_MAX; + t++; + } + } else if (!eth_spec && !eth_mask) { + list[t].type = (tun_type == ICE_NON_TUN) ? + ICE_MAC_OFOS : ICE_MAC_IL; + } + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4_spec = item->spec; + ipv4_mask = item->mask; + if (ipv4_spec && ipv4_mask) { + /* Check IPv4 mask and update input set */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return 0; + } + + if (tunnel_valid) { + if (ipv4_mask->hdr.type_of_service == + UINT8_MAX) + input_set |= ICE_INSET_IPV4_TOS; + if (ipv4_mask->hdr.src_addr == + UINT32_MAX) + input_set |= + ICE_INSET_TUN_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr == + UINT32_MAX) + input_set |= + ICE_INSET_TUN_IPV4_DST; + if (ipv4_mask->hdr.time_to_live == + UINT8_MAX) + input_set |= + ICE_INSET_TUN_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id == + UINT8_MAX) + input_set |= + ICE_INSET_TUN_IPV4_PROTO; + } else { + if (ipv4_mask->hdr.src_addr == + UINT32_MAX) + input_set |= ICE_INSET_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr == + UINT32_MAX) + input_set |= ICE_INSET_IPV4_DST; + if (ipv4_mask->hdr.time_to_live == + UINT8_MAX) + input_set |= ICE_INSET_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id == + UINT8_MAX) + input_set |= + ICE_INSET_IPV4_PROTO; + } + list[t].type = (tunnel_valid == 0) ? 
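+/* Until a tunnel item is seen (tunnel_valid == 0) headers are encoded as
+ * outer ("OFOS") lookup elements; after a VXLAN/NVGRE item they become
+ * inner ("IL") elements. E.g. an eth item whose dst mask is all-ones
+ * contributes ICE_INSET_DMAC and fills one ICE_MAC_OFOS element above. */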
+ ICE_IPV4_OFOS : ICE_IPV4_IL; + if (ipv4_mask->hdr.src_addr == UINT32_MAX) { + list[t].h_u.ipv4_hdr.src_addr = + ipv4_spec->hdr.src_addr; + list[t].m_u.ipv4_hdr.src_addr = + UINT32_MAX; + } + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) { + list[t].h_u.ipv4_hdr.dst_addr = + ipv4_spec->hdr.dst_addr; + list[t].m_u.ipv4_hdr.dst_addr = + UINT32_MAX; + } + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) { + list[t].h_u.ipv4_hdr.time_to_live = + ipv4_spec->hdr.time_to_live; + list[t].m_u.ipv4_hdr.time_to_live = + UINT8_MAX; + } + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) { + list[t].h_u.ipv4_hdr.protocol = + ipv4_spec->hdr.next_proto_id; + list[t].m_u.ipv4_hdr.protocol = + UINT8_MAX; + } + if (ipv4_mask->hdr.type_of_service == + UINT8_MAX) { + list[t].h_u.ipv4_hdr.tos = + ipv4_spec->hdr.type_of_service; + list[t].m_u.ipv4_hdr.tos = UINT8_MAX; + } + t++; + } else if (!ipv4_spec && !ipv4_mask) { + list[t].type = (tunnel_valid == 0) ? + ICE_IPV4_OFOS : ICE_IPV4_IL; + } + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6_spec = item->spec; + ipv6_mask = item->mask; + if (ipv6_spec && ipv6_mask) { + if (ipv6_mask->hdr.payload_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask"); + return 0; + } + + if (tunnel_valid) { + if (!memcmp(ipv6_mask->hdr.src_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.src_addr))) + input_set |= + ICE_INSET_TUN_IPV6_SRC; + if (!memcmp(ipv6_mask->hdr.dst_addr, + ipv6_addr_mask, RTE_DIM + (ipv6_mask->hdr.dst_addr))) + input_set |= + ICE_INSET_TUN_IPV6_DST; + if (ipv6_mask->hdr.proto == UINT8_MAX) + input_set |= + ICE_INSET_TUN_IPV6_NEXT_HDR; + if (ipv6_mask->hdr.hop_limits == + UINT8_MAX) + input_set |= + ICE_INSET_TUN_IPV6_HOP_LIMIT; + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) + == rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) + input_set |= ICE_INSET_IPV6_TC; + } else { + if (!memcmp(ipv6_mask->hdr.src_addr, + ipv6_addr_mask, RTE_DIM + (ipv6_mask->hdr.src_addr))) + input_set |= ICE_INSET_IPV6_SRC; + if (!memcmp(ipv6_mask->hdr.dst_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.dst_addr))) + input_set |= ICE_INSET_IPV6_DST; + if (ipv6_mask->hdr.proto == UINT8_MAX) + input_set |= + ICE_INSET_IPV6_NEXT_HDR; + if (ipv6_mask->hdr.hop_limits == + UINT8_MAX) + input_set |= + ICE_INSET_IPV6_HOP_LIMIT; + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) + == rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) + input_set |= ICE_INSET_IPV6_TC; + } + list[t].type = (tunnel_valid == 0) ? 
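+/* The IPv6 traffic class sits in the upper bits of vtc_flow, so TC
+ * matching is enabled only when every RTE_IPV6_HDR_TC_MASK bit is set in
+ * the mask; a sketch of a spec/mask pair that selects TC matching:
+ *
+ *	mask.hdr.vtc_flow = rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK);
+ *	spec.hdr.vtc_flow = rte_cpu_to_be_32(tc << RTE_IPV6_HDR_TC_SHIFT);
+ */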
+ ICE_IPV6_OFOS : ICE_IPV6_IL; + struct ice_ipv6_hdr *f; + struct ice_ipv6_hdr *s; + f = &list[t].h_u.ipv6_hdr; + s = &list[t].m_u.ipv6_hdr; + for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) { + if (ipv6_mask->hdr.src_addr[j] == + UINT8_MAX) { + f->src_addr[j] = + ipv6_spec->hdr.src_addr[j]; + s->src_addr[j] = + ipv6_mask->hdr.src_addr[j]; + } + if (ipv6_mask->hdr.dst_addr[j] == + UINT8_MAX) { + f->dst_addr[j] = + ipv6_spec->hdr.dst_addr[j]; + s->dst_addr[j] = + ipv6_mask->hdr.dst_addr[j]; + } + } + if (ipv6_mask->hdr.proto == UINT8_MAX) { + f->next_hdr = + ipv6_spec->hdr.proto; + s->next_hdr = UINT8_MAX; + } + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) { + f->hop_limit = + ipv6_spec->hdr.hop_limits; + s->hop_limit = UINT8_MAX; + } + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) + == rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) { + f->tc = (rte_be_to_cpu_32 + (ipv6_spec->hdr.vtc_flow) & + RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT; + s->tc = UINT8_MAX; + } + t++; + } else if (!ipv6_spec && !ipv6_mask) { + list[t].type = (tun_type == ICE_NON_TUN) ? + ICE_IPV6_OFOS : ICE_IPV6_IL; + } + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + if (udp_spec && udp_mask) { + /* Check UDP mask and update input set*/ + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return 0; + } + + if (tunnel_valid) { + if (udp_mask->hdr.src_port == + UINT16_MAX) + input_set |= + ICE_INSET_TUN_SRC_PORT; + if (udp_mask->hdr.dst_port == + UINT16_MAX) + input_set |= + ICE_INSET_TUN_DST_PORT; + } else { + if (udp_mask->hdr.src_port == + UINT16_MAX) + input_set |= + ICE_INSET_UDP_SRC_PORT; + if (udp_mask->hdr.dst_port == + UINT16_MAX) + input_set |= + ICE_INSET_UDP_DST_PORT; + } + if (tun_type == ICE_SW_TUN_VXLAN && + tunnel_valid == 0) + list[t].type = ICE_UDP_OF; + else + list[t].type = ICE_UDP_ILOS; + if (udp_mask->hdr.src_port == UINT16_MAX) { + list[t].h_u.l4_hdr.src_port = + udp_spec->hdr.src_port; + list[t].m_u.l4_hdr.src_port = + udp_mask->hdr.src_port; + } + if (udp_mask->hdr.dst_port == UINT16_MAX) { + list[t].h_u.l4_hdr.dst_port = + udp_spec->hdr.dst_port; + list[t].m_u.l4_hdr.dst_port = + udp_mask->hdr.dst_port; + } + t++; + } else if (!udp_spec && !udp_mask) { + list[t].type = ICE_UDP_ILOS; + } + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + if (tcp_spec && tcp_mask) { + /* Check TCP mask and update input set */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return 0; + } + + if (tunnel_valid) { + if (tcp_mask->hdr.src_port == + UINT16_MAX) + input_set |= + ICE_INSET_TUN_SRC_PORT; + if (tcp_mask->hdr.dst_port == + UINT16_MAX) + input_set |= + ICE_INSET_TUN_DST_PORT; + } else { + if (tcp_mask->hdr.src_port == + UINT16_MAX) + input_set |= + ICE_INSET_TCP_SRC_PORT; + if (tcp_mask->hdr.dst_port == + UINT16_MAX) + input_set |= + ICE_INSET_TCP_DST_PORT; + } + list[t].type = ICE_TCP_IL; + if (tcp_mask->hdr.src_port == UINT16_MAX) { + list[t].h_u.l4_hdr.src_port = + tcp_spec->hdr.src_port; + list[t].m_u.l4_hdr.src_port = + tcp_mask->hdr.src_port; + } + if (tcp_mask->hdr.dst_port == UINT16_MAX) { + list[t].h_u.l4_hdr.dst_port = + tcp_spec->hdr.dst_port; + 
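+/* Each lookup element pairs header values (h_u) with a byte-wise mask
+ * (m_u); only masked bytes take part in the hardware match. Note the
+ * UDP special case above: for a VXLAN flow the outer UDP header becomes
+ * ICE_UDP_OF, any other UDP header ICE_UDP_ILOS. */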
list[t].m_u.l4_hdr.dst_port = + tcp_mask->hdr.dst_port; + } + t++; + } else if (!tcp_spec && !tcp_mask) { + list[t].type = ICE_TCP_IL; + } + break; + + case RTE_FLOW_ITEM_TYPE_SCTP: + sctp_spec = item->spec; + sctp_mask = item->mask; + if (sctp_spec && sctp_mask) { + /* Check SCTP mask and update input set */ + if (sctp_mask->hdr.cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid SCTP mask"); + return 0; + } + + if (tunnel_valid) { + if (sctp_mask->hdr.src_port == + UINT16_MAX) + input_set |= + ICE_INSET_TUN_SRC_PORT; + if (sctp_mask->hdr.dst_port == + UINT16_MAX) + input_set |= + ICE_INSET_TUN_DST_PORT; + } else { + if (sctp_mask->hdr.src_port == + UINT16_MAX) + input_set |= + ICE_INSET_SCTP_SRC_PORT; + if (sctp_mask->hdr.dst_port == + UINT16_MAX) + input_set |= + ICE_INSET_SCTP_DST_PORT; + } + list[t].type = ICE_SCTP_IL; + if (sctp_mask->hdr.src_port == UINT16_MAX) { + list[t].h_u.sctp_hdr.src_port = + sctp_spec->hdr.src_port; + list[t].m_u.sctp_hdr.src_port = + sctp_mask->hdr.src_port; + } + if (sctp_mask->hdr.dst_port == UINT16_MAX) { + list[t].h_u.sctp_hdr.dst_port = + sctp_spec->hdr.dst_port; + list[t].m_u.sctp_hdr.dst_port = + sctp_mask->hdr.dst_port; + } + t++; + } else if (!sctp_spec && !sctp_mask) { + list[t].type = ICE_SCTP_IL; + } + break; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan_spec = item->spec; + vxlan_mask = item->mask; + /* Check if VXLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!vxlan_spec && vxlan_mask) || + (vxlan_spec && !vxlan_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return 0; + } + + tunnel_valid = 1; + if (vxlan_spec && vxlan_mask) { + list[t].type = ICE_VXLAN; + if (vxlan_mask->vni[0] == UINT8_MAX && + vxlan_mask->vni[1] == UINT8_MAX && + vxlan_mask->vni[2] == UINT8_MAX) { + list[t].h_u.tnl_hdr.vni = + (vxlan_spec->vni[2] << 16) | + (vxlan_spec->vni[1] << 8) | + vxlan_spec->vni[0]; + list[t].m_u.tnl_hdr.vni = + UINT32_MAX; + input_set |= ICE_INSET_TUN_ID; + } + t++; + } else if (!vxlan_spec && !vxlan_mask) { + list[t].type = ICE_VXLAN; + } + break; + + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre_spec = item->spec; + nvgre_mask = item->mask; + /* Check if NVGRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!nvgre_spec && nvgre_mask) || + (nvgre_spec && !nvgre_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return 0; + } + tunnel_valid = 1; + if (nvgre_spec && nvgre_mask) { + list[t].type = ICE_NVGRE; + if (nvgre_mask->tni[0] == UINT8_MAX && + nvgre_mask->tni[1] == UINT8_MAX && + nvgre_mask->tni[2] == UINT8_MAX) { + list[t].h_u.nvgre_hdr.tni_flow = + (nvgre_spec->tni[2] << 16) | + (nvgre_spec->tni[1] << 8) | + nvgre_spec->tni[0]; + list[t].m_u.nvgre_hdr.tni_flow = + UINT32_MAX; + input_set |= ICE_INSET_TUN_ID; + } + t++; + } else if (!nvgre_spec && !nvgre_mask) { + list[t].type = ICE_NVGRE; + } + break; + + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + /* Check if VLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
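+ * (An item carrying neither spec nor mask only asserts the protocol's
+ * presence: an empty VLAN item matches any tag, while a tci mask of
+ * 0xffff below pins one specific tag and sets ICE_INSET_VLAN_OUTER.)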
+ */ + if ((!vlan_spec && vlan_mask) || + (vlan_spec && !vlan_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VLAN item"); + return 0; + } + if (vlan_spec && vlan_mask) { + list[t].type = ICE_VLAN_OFOS; + if (vlan_mask->tci == UINT16_MAX) { + list[t].h_u.vlan_hdr.vlan = + vlan_spec->tci; + list[t].m_u.vlan_hdr.vlan = + UINT16_MAX; + input_set |= ICE_INSET_VLAN_OUTER; + } + if (vlan_mask->inner_type == UINT16_MAX) { + list[t].h_u.vlan_hdr.type = + vlan_spec->inner_type; + list[t].m_u.vlan_hdr.type = + UINT16_MAX; + input_set |= ICE_INSET_VLAN_OUTER; + } + t++; + } else if (!vlan_spec && !vlan_mask) { + list[t].type = ICE_VLAN_OFOS; + } + break; + + case RTE_FLOW_ITEM_TYPE_PPPOED: + case RTE_FLOW_ITEM_TYPE_PPPOES: + pppoe_spec = item->spec; + pppoe_mask = item->mask; + /* Check if PPPoE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + */ + if (pppoe_spec || pppoe_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pppoe item"); + return 0; + } + break; + + case RTE_FLOW_ITEM_TYPE_VOID: + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, pattern, + "Invalid pattern item."); + goto out; + } + } + + *lkups_num = t; + + return input_set; +out: + return 0; +} + + +static int +ice_switch_parse_action(struct ice_pf *pf, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct ice_adv_rule_info *rule_info) +{ + struct ice_vsi *vsi = pf->main_vsi; + struct rte_eth_dev *dev = pf->adapter->eth_dev; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_rss *act_qgrop; + uint16_t base_queue, i; + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = { + 2, 4, 8, 16, 32, 64, 128}; + + base_queue = pf->base_queue; + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_RSS: + act_qgrop = action->conf; + rule_info->sw_act.fltr_act = + ICE_FWD_TO_QGRP; + rule_info->sw_act.fwd_id.q_id = + base_queue + act_qgrop->queue[0]; + for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) { + if (act_qgrop->queue_num == + valid_qgrop_number[i]) + break; + } + if (i == MAX_QGRP_NUM_TYPE) + goto error; + if ((act_qgrop->queue[0] + + act_qgrop->queue_num) > + dev->data->nb_rx_queues) + goto error; + for (i = 0; i < act_qgrop->queue_num - 1; i++) + if (act_qgrop->queue[i + 1] != + act_qgrop->queue[i] + 1) + goto error; + rule_info->sw_act.qgrp_size = + act_qgrop->queue_num; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + act_q = action->conf; + if (act_q->index >= dev->data->nb_rx_queues) + goto error; + rule_info->sw_act.fltr_act = + ICE_FWD_TO_Q; + rule_info->sw_act.fwd_id.q_id = + base_queue + act_q->index; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + rule_info->sw_act.fltr_act = + ICE_DROP_PACKET; + break; + + case RTE_FLOW_ACTION_TYPE_VOID: + break; + + default: + goto error; + } + } + + rule_info->sw_act.vsi_handle = vsi->idx; + rule_info->rx = 1; + rule_info->sw_act.src = vsi->idx; + rule_info->priority = 5; + + return 0; + +error: + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid action type or queue number"); + return -rte_errno; +} + +static int +ice_switch_parse_pattern_action(struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct 
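+/* ice_switch_parse_action() above accepts a queue-group (RSS) action only
+ * for 2/4/8/16/32/64/128 contiguous queues within the Rx queue range; a
+ * sketch of a conforming action list (queue ids illustrative):
+ *
+ *	uint16_t q[4] = {0, 1, 2, 3};
+ *	struct rte_flow_action_rss rss = { .queue = q, .queue_num = 4 };
+ *	struct rte_flow_action acts[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ */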
rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + uint64_t inputset = 0; + int ret = 0; + struct sw_meta *sw_meta_ptr = NULL; + struct ice_adv_rule_info rule_info; + struct ice_adv_lkup_elem *list = NULL; + uint16_t lkups_num = 0; + const struct rte_flow_item *item = pattern; + uint16_t item_num = 0; + enum ice_sw_tunnel_type tun_type = ICE_NON_TUN; + struct ice_pattern_match_item *pattern_match_item = NULL; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + item_num++; + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) + tun_type = ICE_SW_TUN_VXLAN; + if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) + tun_type = ICE_SW_TUN_NVGRE; + if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED || + item->type == RTE_FLOW_ITEM_TYPE_PPPOES) + tun_type = ICE_SW_TUN_PPPOE; + /* reserve one more memory slot for ETH which may + * consume 2 lookup items. + */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) + item_num++; + } + + list = rte_zmalloc(NULL, item_num * sizeof(*list), 0); + if (!list) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for PMD internal items"); + return -rte_errno; + } + + rule_info.tun_type = tun_type; + + sw_meta_ptr = + rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0); + if (!sw_meta_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for sw_pattern_meta_ptr"); + goto error; + } + + pattern_match_item = + ice_search_pattern_match_item(pattern, array, array_len, error); + if (!pattern_match_item) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid input pattern"); + goto error; + } + + inputset = ice_switch_inset_get + (pattern, error, list, &lkups_num, tun_type); + if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + pattern, + "Invalid input set"); + goto error; + } + + ret = ice_switch_parse_action(pf, actions, error, &rule_info); + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid input action"); + goto error; + } + *meta = sw_meta_ptr; + ((struct sw_meta *)*meta)->list = list; + ((struct sw_meta *)*meta)->lkups_num = lkups_num; + ((struct sw_meta *)*meta)->rule_info = rule_info; + rte_free(pattern_match_item); + + return 0; + +error: + rte_free(list); + rte_free(sw_meta_ptr); + rte_free(pattern_match_item); + + return -rte_errno; +} + +static int +ice_switch_query(struct ice_adapter *ad __rte_unused, + struct rte_flow *flow __rte_unused, + struct rte_flow_query_count *count __rte_unused, + struct rte_flow_error *error) +{ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "count action not supported by switch filter"); + + return -rte_errno; +} + +static int +ice_switch_init(struct ice_adapter *ad) +{ + int ret = 0; + struct ice_flow_parser *dist_parser; + struct ice_flow_parser *perm_parser = &ice_switch_perm_parser; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + dist_parser = &ice_switch_dist_parser_comms; + else + dist_parser = &ice_switch_dist_parser_os; + + if (ad->devargs.pipeline_mode_support) + ret = ice_register_parser(perm_parser, ad); + else + ret = ice_register_parser(dist_parser, ad); + return ret; +} + +static void +ice_switch_uninit(struct ice_adapter *ad) +{ + struct ice_flow_parser *dist_parser; + struct ice_flow_parser *perm_parser = &ice_switch_perm_parser; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + dist_parser = 
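+/* Mirrors ice_switch_init(): the loaded DDP package type picks the
+ * distributor-stage pattern table (COMMS vs OS), while the pipeline mode
+ * devarg moves the driver to the permission-stage parser instead, e.g.
+ * (assumed devarg spelling): -w 0000:18:00.0,pipeline-mode-support=1 */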
&ice_switch_dist_parser_comms; + else + dist_parser = &ice_switch_dist_parser_os; + + if (ad->devargs.pipeline_mode_support) + ice_unregister_parser(perm_parser, ad); + else + ice_unregister_parser(dist_parser, ad); +} + +static struct +ice_flow_engine ice_switch_engine = { + .init = ice_switch_init, + .uninit = ice_switch_uninit, + .create = ice_switch_create, + .destroy = ice_switch_destroy, + .query_count = ice_switch_query, + .free = ice_switch_filter_rule_free, + .type = ICE_FLOW_ENGINE_SWITCH, +}; + +static struct +ice_flow_parser ice_switch_dist_parser_os = { + .engine = &ice_switch_engine, + .array = ice_switch_pattern_dist_os, + .array_len = RTE_DIM(ice_switch_pattern_dist_os), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_DISTRIBUTOR, +}; + +static struct +ice_flow_parser ice_switch_dist_parser_comms = { + .engine = &ice_switch_engine, + .array = ice_switch_pattern_dist_comms, + .array_len = RTE_DIM(ice_switch_pattern_dist_comms), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_DISTRIBUTOR, +}; + +static struct +ice_flow_parser ice_switch_perm_parser = { + .engine = &ice_switch_engine, + .array = ice_switch_pattern_perm, + .array_len = RTE_DIM(ice_switch_pattern_perm), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_PERMISSION, +}; + +RTE_INIT(ice_sw_engine_init) +{ + struct ice_flow_engine *engine = &ice_switch_engine; + ice_register_flow_engine(engine); +} diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h deleted file mode 100644 index 5afcddeaf..000000000 --- a/drivers/net/ice/ice_switch_filter.h +++ /dev/null @@ -1,6 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation - */ - - -
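
A short usage sketch of the reworked engine through the generic rte_flow API follows; the MAC address, port id and queue index are illustrative only, and error handling is trimmed to the minimum:

	#include <rte_flow.h>

	/* Sketch: steer PPPoE session traffic with a known dst MAC on an ice
	 * port to Rx queue 3 (assumes the port has at least 4 Rx queues). */
	static struct rte_flow *
	pppoe_rule(uint16_t port_id)
	{
		struct rte_flow_error err;
		const struct rte_flow_attr attr = { .ingress = 1 };
		const struct rte_flow_item_eth eth_spec = {
			.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		};
		const struct rte_flow_item_eth eth_mask = {
			.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		};
		const struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH,
			  .spec = &eth_spec, .mask = &eth_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_PPPOES }, /* no spec/mask */
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		const struct rte_flow_action_queue q = { .index = 3 };
		const struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
			return NULL;
		return rte_flow_create(port_id, &attr, pattern, actions, &err);
	}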