[2/4] net/ice: rework for generic flow enabling
Commit Message
The patch reworks the generic flow API (rte_flow) implementation.
It introduces an abstraction layer that provides a unified interface
for the low-level filter engines (switch, fdir, hash) to register
supported patterns and actions and to implement the flow
validate/create/destroy/flush/query operations.
The patch also removes the existing switch filter implementation to
avoid compile errors. Switch filter implementation for the new
framework will be added in the following patch.
Signed-off-by: Ying Wang <ying.a.wang@intel.com>
---
drivers/net/ice/ice_ethdev.c | 22 +-
drivers/net/ice/ice_ethdev.h | 15 +-
drivers/net/ice/ice_generic_flow.c | 768 +++++++++++++++--------------------
drivers/net/ice/ice_generic_flow.h | 782 ++++++++----------------------------
drivers/net/ice/ice_switch_filter.c | 511 -----------------------
drivers/net/ice/ice_switch_filter.h | 18 -
6 files changed, 525 insertions(+), 1591 deletions(-)
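To illustrate the design, here is a minimal sketch of how a low-level filter engine plugs into the new abstraction layer. The callback and type names follow this patch's ice_generic_flow.h; the example_* engine itself is hypothetical:

        /* Hypothetical engine skeleton; only the hooks exercised by the
         * framework in this patch are shown.
         */
        static int example_init(struct ice_adapter *ad);
        static int example_create(struct ice_adapter *ad, struct rte_flow *flow,
                                  void *meta, struct rte_flow_error *error);
        static int example_destroy(struct ice_adapter *ad, struct rte_flow *flow,
                                   struct rte_flow_error *error);

        static struct ice_flow_engine example_engine = {
                .init = example_init,
                .create = example_create,
                .destroy = example_destroy,
                .type = ICE_FLOW_ENGINE_SWITCH,
        };

ice_flow_init() walks the registered engine list and invokes each engine's init hook; flow create/destroy/query then dispatch through the engine pointer stored in the rte_flow handle.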
Comments
On 09/04, Ying Wang wrote:
>The patch reworks the generic flow API (rte_flow) implementation.
>It introduces an abstraction layer that provides a unified interface
>for the low-level filter engines (switch, fdir, hash) to register
>supported patterns and actions and to implement the flow
>validate/create/destroy/flush/query operations.
>
>The patch also removes the existing switch filter implementation to
>avoid compile errors. Switch filter implementation for the new
>framework will be added in the following patch.
>
>Signed-off-by: Ying Wang <ying.a.wang@intel.com>
>---
> drivers/net/ice/ice_ethdev.c | 22 +-
> drivers/net/ice/ice_ethdev.h | 15 +-
> drivers/net/ice/ice_generic_flow.c | 768 +++++++++++++++--------------------
> drivers/net/ice/ice_generic_flow.h | 782 ++++++++----------------------------
> drivers/net/ice/ice_switch_filter.c | 511 -----------------------
> drivers/net/ice/ice_switch_filter.h | 18 -
> 6 files changed, 525 insertions(+), 1591 deletions(-)
Please also update the documentation and release notes.
>
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 4e0645db1..647aca3ed 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
[snip]
>+int
>+ice_flow_init(struct ice_adapter *ad)
>+{
>+ int ret = 0;
>+ struct ice_pf *pf = &ad->pf;
>+ void *temp;
>+ struct ice_flow_engine *engine = NULL;
>+
>+ TAILQ_INIT(&pf->flow_list);
>+ TAILQ_INIT(&pf->rss_parser_list);
>+ TAILQ_INIT(&pf->perm_parser_list);
>+ TAILQ_INIT(&pf->dist_parser_list);
>+
>+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+ if (engine->init == NULL)
>+ return -EINVAL;
I think ENOTSUP is preferable here.
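That is:

        if (engine->init == NULL)
                return -ENOTSUP;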
>+
>+ ret = engine->init(ad);
>+ if (ret)
>+ return ret;
>+ }
>+ return 0;
>+}
>+
>+void
>+ice_flow_uninit(struct ice_adapter *ad)
>+{
>+ struct ice_pf *pf = &ad->pf;
>+ struct ice_flow_engine *engine;
>+ struct rte_flow *p_flow;
>+ struct ice_flow_parser *p_parser;
>+ void *temp;
>+
>+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+ if (engine->uninit)
>+ engine->uninit(ad);
>+ }
>+
>+ /* Remove all flows */
>+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>+ if (p_flow->engine->free)
>+ p_flow->engine->free(p_flow);
>+ rte_free(p_flow);
>+ }
>+
>+ /* Cleanup parser list */
>+ while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
>+ TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
>+
>+ while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
>+ TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
>+
>+ while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
>+ TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
>+}
>+
>+int
>+ice_register_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad)
>+{
>+ struct ice_parser_list *list = NULL;
>+ struct ice_pf *pf = &ad->pf;
>+
>+ switch (parser->stage) {
>+ case ICE_FLOW_STAGE_RSS:
>+ list = &pf->rss_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_PERMISSION:
>+ list = &pf->perm_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_DISTRIBUTOR:
>+ list = &pf->dist_parser_list;
>+ break;
>+ default:
>+ return -EINVAL;
>+ }
>+
>+ if (ad->devargs.pipeline_mode_support)
>+ TAILQ_INSERT_TAIL(list, parser, node);
>+ else {
>+ if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
>+ || parser->engine->type == ICE_FLOW_ENGINE_HASH)
>+ TAILQ_INSERT_TAIL(list, parser, node);
>+ else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
>+ TAILQ_INSERT_HEAD(list, parser, node);
>+ else
>+ return -EINVAL;
>+ }
>+ return 0;
>+}
>+
>+void
>+ice_unregister_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad)
>+{
>+ struct ice_pf *pf = &ad->pf;
>+ struct ice_parser_list *list;
>+ struct ice_flow_parser *p_parser;
>+ void *temp;
>+
>+ switch (parser->stage) {
>+ case ICE_FLOW_STAGE_RSS:
>+ list = &pf->rss_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_PERMISSION:
>+ list = &pf->perm_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_DISTRIBUTOR:
>+ list = &pf->dist_parser_list;
>+ break;
>+ default:
>+ return;
>+ }
>+
>+ TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
>+ if (p_parser->engine->type == parser->engine->type)
>+ TAILQ_REMOVE(list, p_parser, node);
>+ }
>+
>+}
>+
> static int
>-ice_flow_valid_attr(const struct rte_flow_attr *attr,
>- struct rte_flow_error *error)
>+ice_flow_valid_attr(struct ice_adapter *ad,
>+ const struct rte_flow_attr *attr,
>+ struct rte_flow_error *error)
> {
> /* Must be input direction */
> if (!attr->ingress) {
>@@ -61,15 +212,25 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr,
> attr, "Not support egress.");
> return -rte_errno;
> }
>-
>- /* Not supported */
>- if (attr->priority) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
>- attr, "Not support priority.");
>- return -rte_errno;
>+ /* Check pipeline mode support to set classification stage */
>+ if (ad->devargs.pipeline_mode_support) {
>+ if (0 == attr->priority)
>+ ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
>+ else
>+ ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
>+ } else {
>+ ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
>+ /* Not supported */
>+ if (attr->priority) {
>+ rte_flow_error_set(error, EINVAL,
>+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
>+ attr, "Not support priority.");
>+ return -rte_errno;
>+ }
> }
>-
> /* Not supported */
> if (attr->group) {
> rte_flow_error_set(error, EINVAL,
>@@ -102,7 +263,7 @@ ice_find_first_item(const struct rte_flow_item *item, bool is_void)
> /* Skip all VOID items of the pattern */
> static void
> ice_pattern_skip_void_item(struct rte_flow_item *items,
>- const struct rte_flow_item *pattern)
>+ const struct rte_flow_item *pattern)
Unnecessary change here, only indentation changes.
> {
> uint32_t cpy_count = 0;
> const struct rte_flow_item *pb = pattern, *pe = pattern;
>@@ -124,7 +285,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
> items += cpy_count;
>
> if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
>- pb = pe;
This seems to be code cleanup; I'd prefer a separate patch, though that's not a strong opinion.
> break;
> }
>
>@@ -151,11 +311,15 @@ ice_match_pattern(enum rte_flow_item_type *item_array,
> item->type == RTE_FLOW_ITEM_TYPE_END);
> }
>
>-static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
>+struct ice_pattern_match_item *
>+ice_search_pattern_match_item(const struct rte_flow_item pattern[],
>+ struct ice_pattern_match_item *array,
>+ uint32_t array_len,
> struct rte_flow_error *error)
> {
> uint16_t i = 0;
>- uint64_t inset;
>+ struct ice_pattern_match_item *pattern_match_item;
>+ /* need free by each filter */
> struct rte_flow_item *items; /* used for pattern without VOID items */
> uint32_t item_num = 0; /* non-void item number */
>
>@@ -172,451 +336,149 @@ static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
> if (!items) {
> rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> NULL, "No memory for PMD internal items.");
>- return -ENOMEM;
>+ return NULL;
>+ }
>+ pattern_match_item = rte_zmalloc("ice_pattern_match_item",
>+ sizeof(struct ice_pattern_match_item), 0);
>+ if (!pattern_match_item) {
>+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
>+ return NULL;
> }
>-
> ice_pattern_skip_void_item(items, pattern);
>
>- for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
[snip]
>
>+static int
>+ice_flow_validate(struct rte_eth_dev *dev,
>+ const struct rte_flow_attr *attr,
>+ const struct rte_flow_item pattern[],
>+ const struct rte_flow_action actions[],
>+ struct rte_flow_error *error)
>+{
>+ int ret = ICE_ERR_NOT_SUPPORTED;
Unnecessary initialization.
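Since ret is assigned unconditionally before it is read, a plain declaration suffices:

        int ret;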
>+ void *meta = NULL;
>+ struct ice_flow_engine *engine = NULL;
>+
>+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
>+ &engine, &meta, error);
>+ return ret;
>+}
>+
> static struct rte_flow *
> ice_flow_create(struct rte_eth_dev *dev,
>- const struct rte_flow_attr *attr,
>- const struct rte_flow_item pattern[],
>- const struct rte_flow_action actions[],
>- struct rte_flow_error *error)
>+ const struct rte_flow_attr *attr,
>+ const struct rte_flow_item pattern[],
>+ const struct rte_flow_action actions[],
>+ struct rte_flow_error *error)
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct rte_flow *flow = NULL;
>- int ret;
>+ int ret = 0;
Unnecessary initialization.
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>+ struct ice_flow_engine *engine = NULL;
>+ void *meta = NULL;
>
> flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
> if (!flow) {
>@@ -626,65 +488,105 @@ ice_flow_create(struct rte_eth_dev *dev,
> return flow;
> }
>
>- ret = ice_flow_validate(dev, attr, pattern, actions, error);
>+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
>+ &engine, &meta, error);
> if (ret < 0)
> goto free_flow;
>
>- ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
>+ if (engine->create == NULL)
>+ goto free_flow;
>+
>+ ret = engine->create(ad, flow, meta, error);
> if (ret)
> goto free_flow;
>
>+ flow->engine = engine;
> TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> return flow;
>
> free_flow:
>- rte_flow_error_set(error, -ret,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "Failed to create flow.");
>+ PMD_DRV_LOG(ERR, "Failed to create flow");
> rte_free(flow);
> return NULL;
> }
>
> static int
> ice_flow_destroy(struct rte_eth_dev *dev,
>- struct rte_flow *flow,
>- struct rte_flow_error *error)
>+ struct rte_flow *flow,
>+ struct rte_flow_error *error)
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> int ret = 0;
>
>- ret = ice_destroy_switch_filter(pf, flow, error);
>-
>+ if (!flow || !flow->engine->destroy) {
>+ rte_flow_error_set(error, EINVAL,
>+ RTE_FLOW_ERROR_TYPE_HANDLE,
>+ NULL, "NULL flow or NULL destroy");
>+ return -rte_errno;
>+ }
>+ ret = flow->engine->destroy(ad, flow, error);
> if (!ret) {
> TAILQ_REMOVE(&pf->flow_list, flow, node);
> rte_free(flow);
>- } else {
>- rte_flow_error_set(error, -ret,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "Failed to destroy flow.");
>- }
>+ } else
>+ PMD_DRV_LOG(ERR, "Failed to destroy flow");
>
> return ret;
> }
>
> static int
> ice_flow_flush(struct rte_eth_dev *dev,
>- struct rte_flow_error *error)
>+ struct rte_flow_error *error)
Unnecessary change.
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>- struct rte_flow *p_flow;
>+ struct rte_flow *p_flow = NULL;
Unnecessary initialization.
> void *temp;
> int ret = 0;
>
> TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
> ret = ice_flow_destroy(dev, p_flow, error);
> if (ret) {
>- rte_flow_error_set(error, -ret,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "Failed to flush SW flows.");
>- return -rte_errno;
>+ PMD_DRV_LOG(ERR, "Failed to flush flows");
>+ return -EINVAL;
> }
> }
>
> return ret;
> }
>+
>+static int
>+ice_flow_query_count(struct rte_eth_dev *dev,
>+ struct rte_flow *flow,
>+ const struct rte_flow_action *actions,
>+ void *data,
>+ struct rte_flow_error *error)
>+{
>+ int ret = -EINVAL;
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>+
>+ if (!flow || !flow->engine->query) {
>+ rte_flow_error_set(error, EINVAL,
>+ RTE_FLOW_ERROR_TYPE_HANDLE,
>+ NULL, "NULL flow or NULL query");
>+ return -rte_errno;
>+ }
>+
>+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
>+ switch (actions->type) {
>+ case RTE_FLOW_ACTION_TYPE_VOID:
>+ break;
>+ case RTE_FLOW_ACTION_TYPE_COUNT:
>+ ret = flow->engine->query(ad, flow, data, error);
>+ break;
>+ default:
>+ return rte_flow_error_set(error, ENOTSUP,
>+ RTE_FLOW_ERROR_TYPE_ACTION,
>+ actions,
>+ "action not supported");
>+ }
>+ }
>+ return ret;
>+}
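For context, applications reach this hook through the standard rte_flow query call; a minimal usage sketch (port_id and flow are assumed to exist already):

        #include <stdio.h>
        #include <inttypes.h>
        #include <rte_flow.h>

        static const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };

        struct rte_flow_query_count count = { .reset = 1 };
        struct rte_flow_error flow_err;

        /* 'flow' was returned by rte_flow_create() on 'port_id' */
        if (rte_flow_query(port_id, flow, &count_action, &count, &flow_err) == 0)
                printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
                        count.hits, count.bytes);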
[snip]
>+TAILQ_HEAD(ice_engine_list, ice_flow_engine);
>+
>+/* Struct to store flow created. */
>+struct rte_flow {
>+TAILQ_ENTRY(rte_flow) node;
Indentation is needed here.
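i.e., with the entry aligned to the other members:

        struct rte_flow {
                TAILQ_ENTRY(rte_flow) node;
                struct ice_flow_engine *engine;
                void *rule;
        };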
>+ struct ice_flow_engine *engine;
>+ void *rule;
>+};
>+
>+/* Struct to store parser created. */
>+struct ice_flow_parser {
>+ TAILQ_ENTRY(ice_flow_parser) node;
>+ struct ice_flow_engine *engine;
>+ struct ice_pattern_match_item *array;
>+ uint32_t array_len;
>+ parse_pattern_action_t parse_pattern_action;
>+ enum ice_flow_classification_stage stage;
>+};
>+
>+void ice_register_flow_engine(struct ice_flow_engine *engine);
>+int ice_flow_init(struct ice_adapter *ad);
>+void ice_flow_uninit(struct ice_adapter *ad);
>+int ice_register_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad);
>+void ice_unregister_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad);
>+struct ice_pattern_match_item *
>+ice_search_pattern_match_item(
>+ const struct rte_flow_item pattern[],
>+ struct ice_pattern_match_item *array,
>+ uint32_t array_len,
>+ struct rte_flow_error *error);
>
> #endif
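For reference, a hypothetical sketch of how an engine would use these entry points, continuing the example_engine sketch above: register the engine at load time via an RTE_INIT constructor, then hook a parser into one of the stage lists from its init callback (the example_* names and the pattern table are made up; the real engines follow in later patches of this set):

        RTE_INIT(example_engine_register)
        {
                ice_register_flow_engine(&example_engine);
        }

        static int
        example_init(struct ice_adapter *ad)
        {
                /* example_pattern_array: hypothetical table of
                 * ice_pattern_match_item entries supported by this engine.
                 */
                static struct ice_flow_parser example_parser = {
                        .engine = &example_engine,
                        .array = example_pattern_array,
                        .array_len = RTE_DIM(example_pattern_array),
                        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
                };

                return ice_register_parser(&example_parser, ad);
        }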
>diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
>index b88b4f59a..6b72bf252 100644
>--- a/drivers/net/ice/ice_switch_filter.c
>+++ b/drivers/net/ice/ice_switch_filter.c
>@@ -2,515 +2,4 @@
> * Copyright(c) 2019 Intel Corporation
> */
>
>-#include <sys/queue.h>
>-#include <stdio.h>
>-#include <errno.h>
>-#include <stdint.h>
>-#include <string.h>
>-#include <unistd.h>
>-#include <stdarg.h>
>
>-#include <rte_debug.h>
>-#include <rte_ether.h>
>-#include <rte_ethdev_driver.h>
>-#include <rte_log.h>
>-#include <rte_malloc.h>
>-#include <rte_eth_ctrl.h>
>-#include <rte_tailq.h>
>-#include <rte_flow_driver.h>
>-
>-#include "ice_logs.h"
>-#include "base/ice_type.h"
>-#include "ice_switch_filter.h"
>-
>-static int
>-ice_parse_switch_filter(const struct rte_flow_item pattern[],
>- const struct rte_flow_action actions[],
>- struct rte_flow_error *error,
>- struct ice_adv_lkup_elem *list,
>- uint16_t *lkups_num,
>- enum ice_sw_tunnel_type tun_type)
>-{
>- const struct rte_flow_item *item = pattern;
>- enum rte_flow_item_type item_type;
>- const struct rte_flow_item_eth *eth_spec, *eth_mask;
>- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
>- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
>- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
>- const struct rte_flow_item_udp *udp_spec, *udp_mask;
>- const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
>- const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
>- const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
>- uint16_t j, t = 0;
>- uint16_t tunnel_valid = 0;
>-
>- for (item = pattern; item->type !=
>- RTE_FLOW_ITEM_TYPE_END; item++) {
>- item_type = item->type;
>-
>- switch (item_type) {
>- case RTE_FLOW_ITEM_TYPE_ETH:
>- eth_spec = item->spec;
>- eth_mask = item->mask;
>- if (eth_spec && eth_mask) {
>- list[t].type = (tun_type == ICE_NON_TUN) ?
>- ICE_MAC_OFOS : ICE_MAC_IL;
>- struct ice_ether_hdr *h;
>- struct ice_ether_hdr *m;
>- uint16_t i = 0;
>- h = &list[t].h_u.eth_hdr;
>- m = &list[t].m_u.eth_hdr;
>- for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
>- if (eth_mask->src.addr_bytes[j] ==
>- UINT8_MAX) {
>- h->src_addr[j] =
>- eth_spec->src.addr_bytes[j];
>- m->src_addr[j] =
>- eth_mask->src.addr_bytes[j];
>- i = 1;
>- }
>- if (eth_mask->dst.addr_bytes[j] ==
>- UINT8_MAX) {
>- h->dst_addr[j] =
>- eth_spec->dst.addr_bytes[j];
>- m->dst_addr[j] =
>- eth_mask->dst.addr_bytes[j];
>- i = 1;
>- }
>- }
>- if (i)
>- t++;
>- if (eth_mask->type == UINT16_MAX) {
>- list[t].type = ICE_ETYPE_OL;
>- list[t].h_u.ethertype.ethtype_id =
>- eth_spec->type;
>- list[t].m_u.ethertype.ethtype_id =
>- UINT16_MAX;
>- t++;
>- }
>- } else if (!eth_spec && !eth_mask) {
>- list[t].type = (tun_type == ICE_NON_TUN) ?
>- ICE_MAC_OFOS : ICE_MAC_IL;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_IPV4:
>- ipv4_spec = item->spec;
>- ipv4_mask = item->mask;
>- if (ipv4_spec && ipv4_mask) {
>- list[t].type = (tun_type == ICE_NON_TUN) ?
>- ICE_IPV4_OFOS : ICE_IPV4_IL;
>- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
>- list[t].h_u.ipv4_hdr.src_addr =
>- ipv4_spec->hdr.src_addr;
>- list[t].m_u.ipv4_hdr.src_addr =
>- UINT32_MAX;
>- }
>- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
>- list[t].h_u.ipv4_hdr.dst_addr =
>- ipv4_spec->hdr.dst_addr;
>- list[t].m_u.ipv4_hdr.dst_addr =
>- UINT32_MAX;
>- }
>- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
>- list[t].h_u.ipv4_hdr.time_to_live =
>- ipv4_spec->hdr.time_to_live;
>- list[t].m_u.ipv4_hdr.time_to_live =
>- UINT8_MAX;
>- }
>- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
>- list[t].h_u.ipv4_hdr.protocol =
>- ipv4_spec->hdr.next_proto_id;
>- list[t].m_u.ipv4_hdr.protocol =
>- UINT8_MAX;
>- }
>- if (ipv4_mask->hdr.type_of_service ==
>- UINT8_MAX) {
>- list[t].h_u.ipv4_hdr.tos =
>- ipv4_spec->hdr.type_of_service;
>- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
>- }
>- t++;
>- } else if (!ipv4_spec && !ipv4_mask) {
>- list[t].type = (tun_type == ICE_NON_TUN) ?
>- ICE_IPV4_OFOS : ICE_IPV4_IL;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_IPV6:
>- ipv6_spec = item->spec;
>- ipv6_mask = item->mask;
>- if (ipv6_spec && ipv6_mask) {
>- list[t].type = (tun_type == ICE_NON_TUN) ?
>- ICE_IPV6_OFOS : ICE_IPV6_IL;
>- struct ice_ipv6_hdr *f;
>- struct ice_ipv6_hdr *s;
>- f = &list[t].h_u.ipv6_hdr;
>- s = &list[t].m_u.ipv6_hdr;
>- for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
>- if (ipv6_mask->hdr.src_addr[j] ==
>- UINT8_MAX) {
>- f->src_addr[j] =
>- ipv6_spec->hdr.src_addr[j];
>- s->src_addr[j] =
>- ipv6_mask->hdr.src_addr[j];
>- }
>- if (ipv6_mask->hdr.dst_addr[j] ==
>- UINT8_MAX) {
>- f->dst_addr[j] =
>- ipv6_spec->hdr.dst_addr[j];
>- s->dst_addr[j] =
>- ipv6_mask->hdr.dst_addr[j];
>- }
>- }
>- if (ipv6_mask->hdr.proto == UINT8_MAX) {
>- f->next_hdr =
>- ipv6_spec->hdr.proto;
>- s->next_hdr = UINT8_MAX;
>- }
>- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
>- f->hop_limit =
>- ipv6_spec->hdr.hop_limits;
>- s->hop_limit = UINT8_MAX;
>- }
>- t++;
>- } else if (!ipv6_spec && !ipv6_mask) {
>- list[t].type = (tun_type == ICE_NON_TUN) ?
>- ICE_IPV4_OFOS : ICE_IPV4_IL;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_UDP:
>- udp_spec = item->spec;
>- udp_mask = item->mask;
>- if (udp_spec && udp_mask) {
>- if (tun_type == ICE_SW_TUN_VXLAN &&
>- tunnel_valid == 0)
>- list[t].type = ICE_UDP_OF;
>- else
>- list[t].type = ICE_UDP_ILOS;
>- if (udp_mask->hdr.src_port == UINT16_MAX) {
>- list[t].h_u.l4_hdr.src_port =
>- udp_spec->hdr.src_port;
>- list[t].m_u.l4_hdr.src_port =
>- udp_mask->hdr.src_port;
>- }
>- if (udp_mask->hdr.dst_port == UINT16_MAX) {
>- list[t].h_u.l4_hdr.dst_port =
>- udp_spec->hdr.dst_port;
>- list[t].m_u.l4_hdr.dst_port =
>- udp_mask->hdr.dst_port;
>- }
>- t++;
>- } else if (!udp_spec && !udp_mask) {
>- list[t].type = ICE_UDP_ILOS;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_TCP:
>- tcp_spec = item->spec;
>- tcp_mask = item->mask;
>- if (tcp_spec && tcp_mask) {
>- list[t].type = ICE_TCP_IL;
>- if (tcp_mask->hdr.src_port == UINT16_MAX) {
>- list[t].h_u.l4_hdr.src_port =
>- tcp_spec->hdr.src_port;
>- list[t].m_u.l4_hdr.src_port =
>- tcp_mask->hdr.src_port;
>- }
>- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
>- list[t].h_u.l4_hdr.dst_port =
>- tcp_spec->hdr.dst_port;
>- list[t].m_u.l4_hdr.dst_port =
>- tcp_mask->hdr.dst_port;
>- }
>- t++;
>- } else if (!tcp_spec && !tcp_mask) {
>- list[t].type = ICE_TCP_IL;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_SCTP:
>- sctp_spec = item->spec;
>- sctp_mask = item->mask;
>- if (sctp_spec && sctp_mask) {
>- list[t].type = ICE_SCTP_IL;
>- if (sctp_mask->hdr.src_port == UINT16_MAX) {
>- list[t].h_u.sctp_hdr.src_port =
>- sctp_spec->hdr.src_port;
>- list[t].m_u.sctp_hdr.src_port =
>- sctp_mask->hdr.src_port;
>- }
>- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
>- list[t].h_u.sctp_hdr.dst_port =
>- sctp_spec->hdr.dst_port;
>- list[t].m_u.sctp_hdr.dst_port =
>- sctp_mask->hdr.dst_port;
>- }
>- t++;
>- } else if (!sctp_spec && !sctp_mask) {
>- list[t].type = ICE_SCTP_IL;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_VXLAN:
>- vxlan_spec = item->spec;
>- vxlan_mask = item->mask;
>- tunnel_valid = 1;
>- if (vxlan_spec && vxlan_mask) {
>- list[t].type = ICE_VXLAN;
>- if (vxlan_mask->vni[0] == UINT8_MAX &&
>- vxlan_mask->vni[1] == UINT8_MAX &&
>- vxlan_mask->vni[2] == UINT8_MAX) {
>- list[t].h_u.tnl_hdr.vni =
>- (vxlan_spec->vni[2] << 16) |
>- (vxlan_spec->vni[1] << 8) |
>- vxlan_spec->vni[0];
>- list[t].m_u.tnl_hdr.vni =
>- UINT32_MAX;
>- }
>- t++;
>- } else if (!vxlan_spec && !vxlan_mask) {
>- list[t].type = ICE_VXLAN;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_NVGRE:
>- nvgre_spec = item->spec;
>- nvgre_mask = item->mask;
>- tunnel_valid = 1;
>- if (nvgre_spec && nvgre_mask) {
>- list[t].type = ICE_NVGRE;
>- if (nvgre_mask->tni[0] == UINT8_MAX &&
>- nvgre_mask->tni[1] == UINT8_MAX &&
>- nvgre_mask->tni[2] == UINT8_MAX) {
>- list[t].h_u.nvgre_hdr.tni_flow =
>- (nvgre_spec->tni[2] << 16) |
>- (nvgre_spec->tni[1] << 8) |
>- nvgre_spec->tni[0];
>- list[t].m_u.nvgre_hdr.tni_flow =
>- UINT32_MAX;
>- }
>- t++;
>- } else if (!nvgre_spec && !nvgre_mask) {
>- list[t].type = ICE_NVGRE;
>- }
>- break;
>-
>- case RTE_FLOW_ITEM_TYPE_VOID:
>- case RTE_FLOW_ITEM_TYPE_END:
>- break;
>-
>- default:
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM, actions,
>- "Invalid pattern item.");
>- goto out;
>- }
>- }
>-
>- *lkups_num = t;
>-
>- return 0;
>-out:
>- return -rte_errno;
>-}
>-
>-/* By now ice switch filter action code implement only
>- * supports QUEUE or DROP.
>- */
>-static int
>-ice_parse_switch_action(struct ice_pf *pf,
>- const struct rte_flow_action *actions,
>- struct rte_flow_error *error,
>- struct ice_adv_rule_info *rule_info)
>-{
>- struct ice_vsi *vsi = pf->main_vsi;
>- const struct rte_flow_action_queue *act_q;
>- uint16_t base_queue;
>- const struct rte_flow_action *action;
>- enum rte_flow_action_type action_type;
>-
>- base_queue = pf->base_queue;
>- for (action = actions; action->type !=
>- RTE_FLOW_ACTION_TYPE_END; action++) {
>- action_type = action->type;
>- switch (action_type) {
>- case RTE_FLOW_ACTION_TYPE_QUEUE:
>- act_q = action->conf;
>- rule_info->sw_act.fltr_act =
>- ICE_FWD_TO_Q;
>- rule_info->sw_act.fwd_id.q_id =
>- base_queue + act_q->index;
>- break;
>-
>- case RTE_FLOW_ACTION_TYPE_DROP:
>- rule_info->sw_act.fltr_act =
>- ICE_DROP_PACKET;
>- break;
>-
>- case RTE_FLOW_ACTION_TYPE_VOID:
>- break;
>-
>- default:
>- rte_flow_error_set(error,
>- EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- actions,
>- "Invalid action type");
>- return -rte_errno;
>- }
>- }
>-
>- rule_info->sw_act.vsi_handle = vsi->idx;
>- rule_info->rx = 1;
>- rule_info->sw_act.src = vsi->idx;
>- rule_info->priority = 5;
>-
>- return 0;
>-}
>-
>-static int
>-ice_switch_rule_set(struct ice_pf *pf,
>- struct ice_adv_lkup_elem *list,
>- uint16_t lkups_cnt,
>- struct ice_adv_rule_info *rule_info,
>- struct rte_flow *flow,
>- struct rte_flow_error *error)
>-{
>- struct ice_hw *hw = ICE_PF_TO_HW(pf);
>- int ret;
>- struct ice_rule_query_data rule_added = {0};
>- struct ice_rule_query_data *filter_ptr;
>-
>- if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
>- "item number too large for rule");
>- return -rte_errno;
>- }
>- if (!list) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
>- "lookup list should not be NULL");
>- return -rte_errno;
>- }
>-
>- ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
>-
>- if (!ret) {
>- filter_ptr = rte_zmalloc("ice_switch_filter",
>- sizeof(struct ice_rule_query_data), 0);
>- if (!filter_ptr) {
>- PMD_DRV_LOG(ERR, "failed to allocate memory");
>- return -EINVAL;
>- }
>- flow->rule = filter_ptr;
>- rte_memcpy(filter_ptr,
>- &rule_added,
>- sizeof(struct ice_rule_query_data));
>- }
>-
>- return ret;
>-}
>-
>-int
>-ice_create_switch_filter(struct ice_pf *pf,
>- const struct rte_flow_item pattern[],
>- const struct rte_flow_action actions[],
>- struct rte_flow *flow,
>- struct rte_flow_error *error)
>-{
>- int ret = 0;
>- struct ice_adv_rule_info rule_info = {0};
>- struct ice_adv_lkup_elem *list = NULL;
>- uint16_t lkups_num = 0;
>- const struct rte_flow_item *item = pattern;
>- uint16_t item_num = 0;
>- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
>-
>- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
>- item_num++;
>- if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
>- tun_type = ICE_SW_TUN_VXLAN;
>- if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
>- tun_type = ICE_SW_TUN_NVGRE;
>- /* reserve one more memory slot for ETH which may
>- * consume 2 lookup items.
>- */
>- if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
>- item_num++;
>- }
>- rule_info.tun_type = tun_type;
>-
>- list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
>- if (!list) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "No memory for PMD internal items");
>- return -rte_errno;
>- }
>-
>- ret = ice_parse_switch_filter(pattern, actions, error,
>- list, &lkups_num, tun_type);
>- if (ret)
>- goto error;
>-
>- ret = ice_parse_switch_action(pf, actions, error, &rule_info);
>- if (ret)
>- goto error;
>-
>- ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
>- if (ret)
>- goto error;
>-
>- rte_free(list);
>- return 0;
>-
>-error:
>- rte_free(list);
>-
>- return -rte_errno;
>-}
>-
>-int
>-ice_destroy_switch_filter(struct ice_pf *pf,
>- struct rte_flow *flow,
>- struct rte_flow_error *error)
>-{
>- struct ice_hw *hw = ICE_PF_TO_HW(pf);
>- int ret;
>- struct ice_rule_query_data *filter_ptr;
>-
>- filter_ptr = (struct ice_rule_query_data *)
>- flow->rule;
>-
>- if (!filter_ptr) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "no such flow"
>- " create by switch filter");
>- return -rte_errno;
>- }
>-
>- ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
>- if (ret) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "fail to destroy switch filter rule");
>- return -rte_errno;
>- }
>-
>- rte_free(filter_ptr);
>- return ret;
>-}
>-
>-void
>-ice_free_switch_filter_rule(void *rule)
>-{
>- struct ice_rule_query_data *filter_ptr;
>-
>- filter_ptr = (struct ice_rule_query_data *)rule;
>-
>- rte_free(filter_ptr);
>-}
>diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
>index cea47990e..5afcddeaf 100644
>--- a/drivers/net/ice/ice_switch_filter.h
>+++ b/drivers/net/ice/ice_switch_filter.h
>@@ -2,23 +2,5 @@
> * Copyright(c) 2019 Intel Corporation
> */
>
>-#ifndef _ICE_SWITCH_FILTER_H_
>-#define _ICE_SWITCH_FILTER_H_
>
>-#include "base/ice_switch.h"
>-#include "base/ice_type.h"
>-#include "ice_ethdev.h"
>
>-int
>-ice_create_switch_filter(struct ice_pf *pf,
>- const struct rte_flow_item pattern[],
>- const struct rte_flow_action actions[],
>- struct rte_flow *flow,
>- struct rte_flow_error *error);
>-int
>-ice_destroy_switch_filter(struct ice_pf *pf,
>- struct rte_flow *flow,
>- struct rte_flow_error *error);
>-void
>-ice_free_switch_filter_rule(void *rule);
>-#endif /* _ICE_SWITCH_FILTER_H_ */
>--
>2.15.1
>
Hi, Xiaolong
> -----Original Message-----
> From: Ye, Xiaolong
> Sent: Wednesday, September 4, 2019 10:45 PM
> To: Wang, Ying A <ying.a.wang@intel.com>
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; dev@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: Re: [PATCH 2/4] net/ice: rework for generic flow enabling
>
> On 09/04, Ying Wang wrote:
[snip]
>
> Please also update the documentation and release notes.
OK, will add it in v2.
>
> >
> >diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
> >index 4e0645db1..647aca3ed 100644
> >--- a/drivers/net/ice/ice_ethdev.c
> >+++ b/drivers/net/ice/ice_ethdev.c
> [snip]
> >+int
> >+ice_flow_init(struct ice_adapter *ad)
> >+{
> >+ int ret = 0;
> >+ struct ice_pf *pf = &ad->pf;
> >+ void *temp;
> >+ struct ice_flow_engine *engine = NULL;
> >+
> >+ TAILQ_INIT(&pf->flow_list);
> >+ TAILQ_INIT(&pf->rss_parser_list);
> >+ TAILQ_INIT(&pf->perm_parser_list);
> >+ TAILQ_INIT(&pf->dist_parser_list);
> >+
> >+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> >+ if (engine->init == NULL)
> >+ return -EINVAL;
>
> I think ENOTSUP is preferable here.
OK, will fix it in v2.
>
[snip]
> > /* Skip all VOID items of the pattern */
> > static void
> >ice_pattern_skip_void_item(struct rte_flow_item *items,
> >- const struct rte_flow_item *pattern)
> >+ const struct rte_flow_item *pattern)
>
> Unnecessary change here, only indentation changes.
Since the previous indentation was not tab-aligned, I will add a separate code cleanup patch for these changes.
>
> > {
> > uint32_t cpy_count = 0;
> > const struct rte_flow_item *pb = pattern, *pe = pattern;
> >@@ -124,7 +285,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
> > items += cpy_count;
> >
> > if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
> >- pb = pe;
>
> This seems to be code cleanup; I'd prefer a separate patch, though that's
> not a strong opinion.
OK, will add a separate patch for code cleanup.
>
[snip]
> >+static int
> >+ice_flow_validate(struct rte_eth_dev *dev,
> >+ const struct rte_flow_attr *attr,
> >+ const struct rte_flow_item pattern[],
> >+ const struct rte_flow_action actions[],
> >+ struct rte_flow_error *error)
> >+{
> >+ int ret = ICE_ERR_NOT_SUPPORTED;
>
> Unnecessary initialization.
OK, will fix it in v2.
>
> >+ void *meta = NULL;
> >+ struct ice_flow_engine *engine = NULL;
> >+
> >+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
> >+ &engine, &meta, error);
> >+ return ret;
> >+}
> >+
> > static struct rte_flow *
> > ice_flow_create(struct rte_eth_dev *dev,
> >- const struct rte_flow_attr *attr,
> >- const struct rte_flow_item pattern[],
> >- const struct rte_flow_action actions[],
> >- struct rte_flow_error *error)
> >+ const struct rte_flow_attr *attr,
> >+ const struct rte_flow_item pattern[],
> >+ const struct rte_flow_action actions[],
> >+ struct rte_flow_error *error)
> > {
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > struct rte_flow *flow = NULL;
> >- int ret;
> >+ int ret = 0;
>
> Unnecessary initialization.
OK, will fix it in v2.
>
[snip]
> > static int
> > ice_flow_flush(struct rte_eth_dev *dev,
> >- struct rte_flow_error *error)
> >+ struct rte_flow_error *error)
>
> Unnecessary change.
Will add a separate code cleanup patch for this change, since the previous indentation is not tab-aligned.
>
> > {
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> >- struct rte_flow *p_flow;
> >+ struct rte_flow *p_flow = NULL;
>
> Unnecessary initialization.
OK, will fix it in v2.
>
[snip]
> >+TAILQ_HEAD(ice_engine_list, ice_flow_engine);
> >+
> >+/* Struct to store flow created. */
> >+struct rte_flow {
> >+TAILQ_ENTRY(rte_flow) node;
>
> Indentation is needed here.
OK, will fix it in v2.
>
> >+ struct ice_flow_engine *engine;
> >+ void *rule;
> >+};
> >+
> >+/* Struct to store parser created. */
> >+struct ice_flow_parser {
> >+ TAILQ_ENTRY(ice_flow_parser) node;
> >+ struct ice_flow_engine *engine;
> >+ struct ice_pattern_match_item *array;
> >+ uint32_t array_len;
> >+ parse_pattern_action_t parse_pattern_action;
> >+ enum ice_flow_classification_stage stage; };
> >+
> >+void ice_register_flow_engine(struct ice_flow_engine *engine); int
> >+ice_flow_init(struct ice_adapter *ad); void ice_flow_uninit(struct
> >+ice_adapter *ad); int ice_register_parser(struct ice_flow_parser
> >+*parser,
> >+ struct ice_adapter *ad);
> >+void ice_unregister_parser(struct ice_flow_parser *parser,
> >+ struct ice_adapter *ad);
> >+struct ice_pattern_match_item *
> >+ice_search_pattern_match_item(
> >+ const struct rte_flow_item pattern[],
> >+ struct ice_pattern_match_item *array,
> >+ uint32_t array_len,
> >+ struct rte_flow_error *error);
> >
> > #endif
> >diff --git a/drivers/net/ice/ice_switch_filter.c
> >b/drivers/net/ice/ice_switch_filter.c
> >index b88b4f59a..6b72bf252 100644
> >--- a/drivers/net/ice/ice_switch_filter.c
> >+++ b/drivers/net/ice/ice_switch_filter.c
> >@@ -2,515 +2,4 @@
> > * Copyright(c) 2019 Intel Corporation
> > */
> >
> >-#include <sys/queue.h>
> >-#include <stdio.h>
> >-#include <errno.h>
> >-#include <stdint.h>
> >-#include <string.h>
> >-#include <unistd.h>
> >-#include <stdarg.h>
> >
> >-#include <rte_debug.h>
> >-#include <rte_ether.h>
> >-#include <rte_ethdev_driver.h>
> >-#include <rte_log.h>
> >-#include <rte_malloc.h>
> >-#include <rte_eth_ctrl.h>
> >-#include <rte_tailq.h>
> >-#include <rte_flow_driver.h>
> >-
> >-#include "ice_logs.h"
> >-#include "base/ice_type.h"
> >-#include "ice_switch_filter.h"
> >-
> >-static int
> >-ice_parse_switch_filter(const struct rte_flow_item pattern[],
> >- const struct rte_flow_action actions[],
> >- struct rte_flow_error *error,
> >- struct ice_adv_lkup_elem *list,
> >- uint16_t *lkups_num,
> >- enum ice_sw_tunnel_type tun_type)
> >-{
> >- const struct rte_flow_item *item = pattern;
> >- enum rte_flow_item_type item_type;
> >- const struct rte_flow_item_eth *eth_spec, *eth_mask;
> >- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> >- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> >- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> >- const struct rte_flow_item_udp *udp_spec, *udp_mask;
> >- const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> >- const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
> >- const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
> >- uint16_t j, t = 0;
> >- uint16_t tunnel_valid = 0;
> >-
> >- for (item = pattern; item->type !=
> >- RTE_FLOW_ITEM_TYPE_END; item++) {
> >- item_type = item->type;
> >-
> >- switch (item_type) {
> >- case RTE_FLOW_ITEM_TYPE_ETH:
> >- eth_spec = item->spec;
> >- eth_mask = item->mask;
> >- if (eth_spec && eth_mask) {
> >- list[t].type = (tun_type == ICE_NON_TUN) ?
> >- ICE_MAC_OFOS : ICE_MAC_IL;
> >- struct ice_ether_hdr *h;
> >- struct ice_ether_hdr *m;
> >- uint16_t i = 0;
> >- h = &list[t].h_u.eth_hdr;
> >- m = &list[t].m_u.eth_hdr;
> >- for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
> >- if (eth_mask->src.addr_bytes[j] ==
> >- UINT8_MAX) {
> >- h->src_addr[j] =
> >- eth_spec->src.addr_bytes[j];
> >- m->src_addr[j] =
> >- eth_mask->src.addr_bytes[j];
> >- i = 1;
> >- }
> >- if (eth_mask->dst.addr_bytes[j] ==
> >- UINT8_MAX) {
> >- h->dst_addr[j] =
> >- eth_spec->dst.addr_bytes[j];
> >- m->dst_addr[j] =
> >- eth_mask->dst.addr_bytes[j];
> >- i = 1;
> >- }
> >- }
> >- if (i)
> >- t++;
> >- if (eth_mask->type == UINT16_MAX) {
> >- list[t].type = ICE_ETYPE_OL;
> >- list[t].h_u.ethertype.ethtype_id =
> >- eth_spec->type;
> >- list[t].m_u.ethertype.ethtype_id =
> >- UINT16_MAX;
> >- t++;
> >- }
> >- } else if (!eth_spec && !eth_mask) {
> >- list[t].type = (tun_type == ICE_NON_TUN) ?
> >- ICE_MAC_OFOS : ICE_MAC_IL;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_IPV4:
> >- ipv4_spec = item->spec;
> >- ipv4_mask = item->mask;
> >- if (ipv4_spec && ipv4_mask) {
> >- list[t].type = (tun_type == ICE_NON_TUN) ?
> >- ICE_IPV4_OFOS : ICE_IPV4_IL;
> >- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> >- list[t].h_u.ipv4_hdr.src_addr =
> >- ipv4_spec->hdr.src_addr;
> >- list[t].m_u.ipv4_hdr.src_addr =
> >- UINT32_MAX;
> >- }
> >- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> >- list[t].h_u.ipv4_hdr.dst_addr =
> >- ipv4_spec->hdr.dst_addr;
> >- list[t].m_u.ipv4_hdr.dst_addr =
> >- UINT32_MAX;
> >- }
> >- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> {
> >- list[t].h_u.ipv4_hdr.time_to_live =
> >- ipv4_spec->hdr.time_to_live;
> >- list[t].m_u.ipv4_hdr.time_to_live =
> >- UINT8_MAX;
> >- }
> >- if (ipv4_mask->hdr.next_proto_id ==
> UINT8_MAX) {
> >- list[t].h_u.ipv4_hdr.protocol =
> >- ipv4_spec->hdr.next_proto_id;
> >- list[t].m_u.ipv4_hdr.protocol =
> >- UINT8_MAX;
> >- }
> >- if (ipv4_mask->hdr.type_of_service ==
> >- UINT8_MAX) {
> >- list[t].h_u.ipv4_hdr.tos =
> >- ipv4_spec-
> >hdr.type_of_service;
> >- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
> >- }
> >- t++;
> >- } else if (!ipv4_spec && !ipv4_mask) {
> >- list[t].type = (tun_type == ICE_NON_TUN) ?
> >- ICE_IPV4_OFOS : ICE_IPV4_IL;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_IPV6:
> >- ipv6_spec = item->spec;
> >- ipv6_mask = item->mask;
> >- if (ipv6_spec && ipv6_mask) {
> >- list[t].type = (tun_type == ICE_NON_TUN) ?
> >- ICE_IPV6_OFOS : ICE_IPV6_IL;
> >- struct ice_ipv6_hdr *f;
> >- struct ice_ipv6_hdr *s;
> >- f = &list[t].h_u.ipv6_hdr;
> >- s = &list[t].m_u.ipv6_hdr;
> >- for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
> >- if (ipv6_mask->hdr.src_addr[j] ==
> >- UINT8_MAX) {
> >- f->src_addr[j] =
> >- ipv6_spec->hdr.src_addr[j];
> >- s->src_addr[j] =
> >- ipv6_mask->hdr.src_addr[j];
> >- }
> >- if (ipv6_mask->hdr.dst_addr[j] ==
> >- UINT8_MAX) {
> >- f->dst_addr[j] =
> >- ipv6_spec->hdr.dst_addr[j];
> >- s->dst_addr[j] =
> >- ipv6_mask->hdr.dst_addr[j];
> >- }
> >- }
> >- if (ipv6_mask->hdr.proto == UINT8_MAX) {
> >- f->next_hdr =
> >- ipv6_spec->hdr.proto;
> >- s->next_hdr = UINT8_MAX;
> >- }
> >- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> >- f->hop_limit =
> >- ipv6_spec->hdr.hop_limits;
> >- s->hop_limit = UINT8_MAX;
> >- }
> >- t++;
> >- } else if (!ipv6_spec && !ipv6_mask) {
> >- list[t].type = (tun_type == ICE_NON_TUN) ?
> >- ICE_IPV4_OFOS : ICE_IPV4_IL;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_UDP:
> >- udp_spec = item->spec;
> >- udp_mask = item->mask;
> >- if (udp_spec && udp_mask) {
> >- if (tun_type == ICE_SW_TUN_VXLAN &&
> >- tunnel_valid == 0)
> >- list[t].type = ICE_UDP_OF;
> >- else
> >- list[t].type = ICE_UDP_ILOS;
> >- if (udp_mask->hdr.src_port == UINT16_MAX) {
> >- list[t].h_u.l4_hdr.src_port =
> >- udp_spec->hdr.src_port;
> >- list[t].m_u.l4_hdr.src_port =
> >- udp_mask->hdr.src_port;
> >- }
> >- if (udp_mask->hdr.dst_port == UINT16_MAX) {
> >- list[t].h_u.l4_hdr.dst_port =
> >- udp_spec->hdr.dst_port;
> >- list[t].m_u.l4_hdr.dst_port =
> >- udp_mask->hdr.dst_port;
> >- }
> >- t++;
> >- } else if (!udp_spec && !udp_mask) {
> >- list[t].type = ICE_UDP_ILOS;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_TCP:
> >- tcp_spec = item->spec;
> >- tcp_mask = item->mask;
> >- if (tcp_spec && tcp_mask) {
> >- list[t].type = ICE_TCP_IL;
> >- if (tcp_mask->hdr.src_port == UINT16_MAX) {
> >- list[t].h_u.l4_hdr.src_port =
> >- tcp_spec->hdr.src_port;
> >- list[t].m_u.l4_hdr.src_port =
> >- tcp_mask->hdr.src_port;
> >- }
> >- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> >- list[t].h_u.l4_hdr.dst_port =
> >- tcp_spec->hdr.dst_port;
> >- list[t].m_u.l4_hdr.dst_port =
> >- tcp_mask->hdr.dst_port;
> >- }
> >- t++;
> >- } else if (!tcp_spec && !tcp_mask) {
> >- list[t].type = ICE_TCP_IL;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_SCTP:
> >- sctp_spec = item->spec;
> >- sctp_mask = item->mask;
> >- if (sctp_spec && sctp_mask) {
> >- list[t].type = ICE_SCTP_IL;
> >- if (sctp_mask->hdr.src_port == UINT16_MAX) {
> >- list[t].h_u.sctp_hdr.src_port =
> >- sctp_spec->hdr.src_port;
> >- list[t].m_u.sctp_hdr.src_port =
> >- sctp_mask->hdr.src_port;
> >- }
> >- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
> >- list[t].h_u.sctp_hdr.dst_port =
> >- sctp_spec->hdr.dst_port;
> >- list[t].m_u.sctp_hdr.dst_port =
> >- sctp_mask->hdr.dst_port;
> >- }
> >- t++;
> >- } else if (!sctp_spec && !sctp_mask) {
> >- list[t].type = ICE_SCTP_IL;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_VXLAN:
> >- vxlan_spec = item->spec;
> >- vxlan_mask = item->mask;
> >- tunnel_valid = 1;
> >- if (vxlan_spec && vxlan_mask) {
> >- list[t].type = ICE_VXLAN;
> >- if (vxlan_mask->vni[0] == UINT8_MAX &&
> >- vxlan_mask->vni[1] == UINT8_MAX &&
> >- vxlan_mask->vni[2] == UINT8_MAX) {
> >- list[t].h_u.tnl_hdr.vni =
> >- (vxlan_spec->vni[2] << 16) |
> >- (vxlan_spec->vni[1] << 8) |
> >- vxlan_spec->vni[0];
> >- list[t].m_u.tnl_hdr.vni =
> >- UINT32_MAX;
> >- }
> >- t++;
> >- } else if (!vxlan_spec && !vxlan_mask) {
> >- list[t].type = ICE_VXLAN;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_NVGRE:
> >- nvgre_spec = item->spec;
> >- nvgre_mask = item->mask;
> >- tunnel_valid = 1;
> >- if (nvgre_spec && nvgre_mask) {
> >- list[t].type = ICE_NVGRE;
> >- if (nvgre_mask->tni[0] == UINT8_MAX &&
> >- nvgre_mask->tni[1] == UINT8_MAX &&
> >- nvgre_mask->tni[2] == UINT8_MAX) {
> >- list[t].h_u.nvgre_hdr.tni_flow =
> >- (nvgre_spec->tni[2] << 16) |
> >- (nvgre_spec->tni[1] << 8) |
> >- nvgre_spec->tni[0];
> >- list[t].m_u.nvgre_hdr.tni_flow =
> >- UINT32_MAX;
> >- }
> >- t++;
> >- } else if (!nvgre_spec && !nvgre_mask) {
> >- list[t].type = ICE_NVGRE;
> >- }
> >- break;
> >-
> >- case RTE_FLOW_ITEM_TYPE_VOID:
> >- case RTE_FLOW_ITEM_TYPE_END:
> >- break;
> >-
> >- default:
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM, actions,
> >- "Invalid pattern item.");
> >- goto out;
> >- }
> >- }
> >-
> >- *lkups_num = t;
> >-
> >- return 0;
> >-out:
> >- return -rte_errno;
> >-}
> >-
> >-/* By now ice switch filter action code implement only
> >- * supports QUEUE or DROP.
> >- */
> >-static int
> >-ice_parse_switch_action(struct ice_pf *pf,
> >- const struct rte_flow_action *actions,
> >- struct rte_flow_error *error,
> >- struct ice_adv_rule_info *rule_info)
> >-{
> >- struct ice_vsi *vsi = pf->main_vsi;
> >- const struct rte_flow_action_queue *act_q;
> >- uint16_t base_queue;
> >- const struct rte_flow_action *action;
> >- enum rte_flow_action_type action_type;
> >-
> >- base_queue = pf->base_queue;
> >- for (action = actions; action->type !=
> >- RTE_FLOW_ACTION_TYPE_END; action++) {
> >- action_type = action->type;
> >- switch (action_type) {
> >- case RTE_FLOW_ACTION_TYPE_QUEUE:
> >- act_q = action->conf;
> >- rule_info->sw_act.fltr_act =
> >- ICE_FWD_TO_Q;
> >- rule_info->sw_act.fwd_id.q_id =
> >- base_queue + act_q->index;
> >- break;
> >-
> >- case RTE_FLOW_ACTION_TYPE_DROP:
> >- rule_info->sw_act.fltr_act =
> >- ICE_DROP_PACKET;
> >- break;
> >-
> >- case RTE_FLOW_ACTION_TYPE_VOID:
> >- break;
> >-
> >- default:
> >- rte_flow_error_set(error,
> >- EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- actions,
> >- "Invalid action type");
> >- return -rte_errno;
> >- }
> >- }
> >-
> >- rule_info->sw_act.vsi_handle = vsi->idx;
> >- rule_info->rx = 1;
> >- rule_info->sw_act.src = vsi->idx;
> >- rule_info->priority = 5;
> >-
> >- return 0;
> >-}
> >-
> >-static int
> >-ice_switch_rule_set(struct ice_pf *pf,
> >- struct ice_adv_lkup_elem *list,
> >- uint16_t lkups_cnt,
> >- struct ice_adv_rule_info *rule_info,
> >- struct rte_flow *flow,
> >- struct rte_flow_error *error)
> >-{
> >- struct ice_hw *hw = ICE_PF_TO_HW(pf);
> >- int ret;
> >- struct ice_rule_query_data rule_added = {0};
> >- struct ice_rule_query_data *filter_ptr;
> >-
> >- if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> >- "item number too large for rule");
> >- return -rte_errno;
> >- }
> >- if (!list) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> >- "lookup list should not be NULL");
> >- return -rte_errno;
> >- }
> >-
> >- ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
> >-
> >- if (!ret) {
> >- filter_ptr = rte_zmalloc("ice_switch_filter",
> >- sizeof(struct ice_rule_query_data), 0);
> >- if (!filter_ptr) {
> >- PMD_DRV_LOG(ERR, "failed to allocate memory");
> >- return -EINVAL;
> >- }
> >- flow->rule = filter_ptr;
> >- rte_memcpy(filter_ptr,
> >- &rule_added,
> >- sizeof(struct ice_rule_query_data));
> >- }
> >-
> >- return ret;
> >-}
> >-
> >-int
> >-ice_create_switch_filter(struct ice_pf *pf,
> >- const struct rte_flow_item pattern[],
> >- const struct rte_flow_action actions[],
> >- struct rte_flow *flow,
> >- struct rte_flow_error *error)
> >-{
> >- int ret = 0;
> >- struct ice_adv_rule_info rule_info = {0};
> >- struct ice_adv_lkup_elem *list = NULL;
> >- uint16_t lkups_num = 0;
> >- const struct rte_flow_item *item = pattern;
> >- uint16_t item_num = 0;
> >- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
> >-
> >- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> >- item_num++;
> >- if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
> >- tun_type = ICE_SW_TUN_VXLAN;
> >- if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> >- tun_type = ICE_SW_TUN_NVGRE;
> >- /* reserve one more memory slot for ETH which may
> >- * consume 2 lookup items.
> >- */
> >- if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
> >- item_num++;
> >- }
> >- rule_info.tun_type = tun_type;
> >-
> >- list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
> >- if (!list) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >- "No memory for PMD internal items");
> >- return -rte_errno;
> >- }
> >-
> >- ret = ice_parse_switch_filter(pattern, actions, error,
> >- list, &lkups_num, tun_type);
> >- if (ret)
> >- goto error;
> >-
> >- ret = ice_parse_switch_action(pf, actions, error, &rule_info);
> >- if (ret)
> >- goto error;
> >-
> >- ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
> >- if (ret)
> >- goto error;
> >-
> >- rte_free(list);
> >- return 0;
> >-
> >-error:
> >- rte_free(list);
> >-
> >- return -rte_errno;
> >-}
> >-
> >-int
> >-ice_destroy_switch_filter(struct ice_pf *pf,
> >- struct rte_flow *flow,
> >- struct rte_flow_error *error)
> >-{
> >- struct ice_hw *hw = ICE_PF_TO_HW(pf);
> >- int ret;
> >- struct ice_rule_query_data *filter_ptr;
> >-
> >- filter_ptr = (struct ice_rule_query_data *)
> >- flow->rule;
> >-
> >- if (!filter_ptr) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >- "no such flow"
> >- " create by switch filter");
> >- return -rte_errno;
> >- }
> >-
> >- ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
> >- if (ret) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >- "fail to destroy switch filter rule");
> >- return -rte_errno;
> >- }
> >-
> >- rte_free(filter_ptr);
> >- return ret;
> >-}
> >-
> >-void
> >-ice_free_switch_filter_rule(void *rule) -{
> >- struct ice_rule_query_data *filter_ptr;
> >-
> >- filter_ptr = (struct ice_rule_query_data *)rule;
> >-
> >- rte_free(filter_ptr);
> >-}
> >diff --git a/drivers/net/ice/ice_switch_filter.h
> >b/drivers/net/ice/ice_switch_filter.h
> >index cea47990e..5afcddeaf 100644
> >--- a/drivers/net/ice/ice_switch_filter.h
> >+++ b/drivers/net/ice/ice_switch_filter.h
> >@@ -2,23 +2,5 @@
> > * Copyright(c) 2019 Intel Corporation
> > */
> >
> >-#ifndef _ICE_SWITCH_FILTER_H_
> >-#define _ICE_SWITCH_FILTER_H_
> >
> >-#include "base/ice_switch.h"
> >-#include "base/ice_type.h"
> >-#include "ice_ethdev.h"
> >
> >-int
> >-ice_create_switch_filter(struct ice_pf *pf,
> >- const struct rte_flow_item pattern[],
> >- const struct rte_flow_action actions[],
> >- struct rte_flow *flow,
> >- struct rte_flow_error *error);
> >-int
> >-ice_destroy_switch_filter(struct ice_pf *pf,
> >- struct rte_flow *flow,
> >- struct rte_flow_error *error);
> >-void
> >-ice_free_switch_filter_rule(void *rule);
> >-#endif /* _ICE_SWITCH_FILTER_H_ */
> >--
> >2.15.1
> >
On 09/05, Wang, Ying A wrote:
>
>Since the previous indentation is not tab-aligned, I will add a separate code cleanup patch for these changes.
>
>>
>> > {
>> > uint32_t cpy_count = 0;
>> > const struct rte_flow_item *pb = pattern, *pe = pattern;
>> >@@ -124,7 +285,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
>> > items += cpy_count;
>> >
>> > if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
>> >- pb = pe;
>>
>> This seems to be some code cleanup; I'd prefer a separate patch, not a strong opinion
>> though.
>
>OK, will add a separate patch for code cleanup.
Thanks, and please put the cleanup/fix patches at the beginning of your patchset.
On 09/04, Ying Wang wrote:
>The patch reworks the generic flow API (rte_flow) implementation.
>It introduces an abstract layer which provides a unified interface
>for low-level filter engine (switch, fdir, hash) to register supported
>patterns and actions and implement flow validate/create/destroy/flush/
>query activities.
>
>The patch also removes the existing switch filter implementation to
>avoid compile error. Switch filter implementation for the new framework
>will be added in the following patch.
>
>Signed-off-by: Ying Wang <ying.a.wang@intel.com>
>---
> drivers/net/ice/ice_ethdev.c | 22 +-
> drivers/net/ice/ice_ethdev.h | 15 +-
> drivers/net/ice/ice_generic_flow.c | 768 +++++++++++++++--------------------
> drivers/net/ice/ice_generic_flow.h | 782 ++++++++----------------------------
> drivers/net/ice/ice_switch_filter.c | 511 -----------------------
> drivers/net/ice/ice_switch_filter.h | 18 -
> 6 files changed, 525 insertions(+), 1591 deletions(-)
>
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 4e0645db1..647aca3ed 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -15,7 +15,7 @@
> #include "base/ice_dcb.h"
> #include "ice_ethdev.h"
> #include "ice_rxtx.h"
>-#include "ice_switch_filter.h"
>+#include "ice_generic_flow.h"
>
> /* devargs */
> #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
>@@ -1677,7 +1677,11 @@ ice_dev_init(struct rte_eth_dev *dev)
> /* get base queue pairs index in the device */
> ice_base_queue_get(pf);
>
>- TAILQ_INIT(&pf->flow_list);
>+ ret = ice_flow_init(ad);
>+ if (ret) {
>+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
>+ return ret;
>+ }
>
> return 0;
>
>@@ -1796,6 +1800,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>
> /* Since stop will make link down, then the link event will be
> * triggered, disable the irq firstly to avoid the port_infoe etc
>@@ -1806,6 +1812,8 @@ ice_dev_close(struct rte_eth_dev *dev)
>
> ice_dev_stop(dev);
>
>+ ice_flow_uninit(ad);
>+
> /* release all queue resource */
> ice_free_queues(dev);
>
>@@ -1822,8 +1830,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> {
> struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
>- struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>- struct rte_flow *p_flow;
>
> ice_dev_close(dev);
>
>@@ -1840,14 +1846,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> /* unregister callback func from eal lib */
> rte_intr_callback_unregister(intr_handle,
> ice_interrupt_handler, dev);
>-
>- /* Remove all flows */
>- while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>- TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>- ice_free_switch_filter_rule(p_flow->rule);
>- rte_free(p_flow);
>- }
>-
> return 0;
> }
>
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index 9bf5de08d..d1d07641d 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -241,16 +241,14 @@ struct ice_vsi {
> bool offset_loaded;
> };
>
>-extern const struct rte_flow_ops ice_flow_ops;
>-
>-/* Struct to store flow created. */
>-struct rte_flow {
>- TAILQ_ENTRY(rte_flow) node;
>- void *rule;
>-};
>
>+struct rte_flow;
> TAILQ_HEAD(ice_flow_list, rte_flow);
>
>+
>+struct ice_flow_parser;
>+TAILQ_HEAD(ice_parser_list, ice_flow_parser);
>+
> struct ice_pf {
> struct ice_adapter *adapter; /* The adapter this PF associate to */
> struct ice_vsi *main_vsi; /* pointer to main VSI structure */
>@@ -278,6 +276,9 @@ struct ice_pf {
> bool offset_loaded;
> bool adapter_stopped;
> struct ice_flow_list flow_list;
>+ struct ice_parser_list rss_parser_list;
>+ struct ice_parser_list perm_parser_list;
>+ struct ice_parser_list dist_parser_list;
> };
>
> /**
>diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
>index 1c0adc779..aa11d6170 100644
>--- a/drivers/net/ice/ice_generic_flow.c
>+++ b/drivers/net/ice/ice_generic_flow.c
>@@ -17,7 +17,22 @@
>
> #include "ice_ethdev.h"
> #include "ice_generic_flow.h"
>-#include "ice_switch_filter.h"
>+
>+/**
>+ * Non-pipeline mode, fdir and swith both used as distributor,
s/swith/switch
>+ * fdir used first, switch used as fdir's backup.
>+ */
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
>+/*Pipeline mode, switch used at permission stage*/
>+#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
>+/*Pipeline mode, fdir used at distributor stage*/
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
>+
>+static int ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
>+
>+static struct ice_engine_list engine_list =
>+ TAILQ_HEAD_INITIALIZER(engine_list);
>
> static int ice_flow_validate(struct rte_eth_dev *dev,
> const struct rte_flow_attr *attr,
>@@ -34,17 +49,153 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
> struct rte_flow_error *error);
> static int ice_flow_flush(struct rte_eth_dev *dev,
> struct rte_flow_error *error);
>+static int ice_flow_query_count(struct rte_eth_dev *dev,
>+ struct rte_flow *flow,
>+ const struct rte_flow_action *actions,
>+ void *data,
>+ struct rte_flow_error *error);
>
> const struct rte_flow_ops ice_flow_ops = {
> .validate = ice_flow_validate,
> .create = ice_flow_create,
> .destroy = ice_flow_destroy,
> .flush = ice_flow_flush,
>+ .query = ice_flow_query_count,
> };
>
>+
>+void
>+ice_register_flow_engine(struct ice_flow_engine *engine)
>+{
>+ TAILQ_INSERT_TAIL(&engine_list, engine, node);
>+}
>+
>+int
>+ice_flow_init(struct ice_adapter *ad)
>+{
>+ int ret = 0;
>+ struct ice_pf *pf = &ad->pf;
>+ void *temp;
>+ struct ice_flow_engine *engine = NULL;
>+
>+ TAILQ_INIT(&pf->flow_list);
>+ TAILQ_INIT(&pf->rss_parser_list);
>+ TAILQ_INIT(&pf->perm_parser_list);
>+ TAILQ_INIT(&pf->dist_parser_list);
>+
>+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+ if (engine->init == NULL)
>+ return -EINVAL;
>+
>+ ret = engine->init(ad);
>+ if (ret)
>+ return ret;
>+ }
>+ return 0;
>+}
>+
>+void
>+ice_flow_uninit(struct ice_adapter *ad)
>+{
>+ struct ice_pf *pf = &ad->pf;
>+ struct ice_flow_engine *engine;
>+ struct rte_flow *p_flow;
>+ struct ice_flow_parser *p_parser;
>+ void *temp;
>+
>+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+ if (engine->uninit)
>+ engine->uninit(ad);
>+ }
>+
>+ /* Remove all flows */
>+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>+ if (p_flow->engine->free)
>+ p_flow->engine->free(p_flow);
>+ rte_free(p_flow);
>+ }
>+
>+ /* Cleanup parser list */
>+ while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
>+ TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
>+
>+ while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
>+ TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
>+
>+ while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
>+ TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
>+}
>+
>+int
>+ice_register_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad)
>+{
>+ struct ice_parser_list *list = NULL;
>+ struct ice_pf *pf = &ad->pf;
>+
>+ switch (parser->stage) {
>+ case ICE_FLOW_STAGE_RSS:
>+ list = &pf->rss_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_PERMISSION:
>+ list = &pf->perm_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_DISTRIBUTOR:
>+ list = &pf->dist_parser_list;
>+ break;
>+ default:
>+ return -EINVAL;
>+ }
>+
>+ if (ad->devargs.pipeline_mode_support)
>+ TAILQ_INSERT_TAIL(list, parser, node);
>+ else {
>+ if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
>+ || parser->engine->type == ICE_FLOW_ENGINE_HASH)
>+ TAILQ_INSERT_TAIL(list, parser, node);
>+ else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
>+ TAILQ_INSERT_HEAD(list, parser, node);
>+ else
>+ return -EINVAL;
>+ }
>+ return 0;
>+}
>+
>+void
>+ice_unregister_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad)
>+{
>+ struct ice_pf *pf = &ad->pf;
>+ struct ice_parser_list *list;
>+ struct ice_flow_parser *p_parser;
>+ void *temp;
>+
>+ switch (parser->stage) {
>+ case ICE_FLOW_STAGE_RSS:
>+ list = &pf->rss_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_PERMISSION:
>+ list = &pf->perm_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_DISTRIBUTOR:
>+ list = &pf->dist_parser_list;
>+ break;
>+ default:
>+ return;
>+ }
The switch blocks in the above functions are the same; it's better to use a common
function to reduce the duplicated code.
Thanks,
Xiaolong
On 09/04, Ying Wang wrote:
>The patch reworks the generic flow API (rte_flow) implementation.
>It introduces an abstract layer which provides a unified interface
>for low-level filter engine (switch, fdir, hash) to register supported
>patterns and actions and implement flow validate/create/destroy/flush/
>query activities.
>
>The patch also removes the existing switch filter implementation to
>avoid compile error. Switch filter implementation for the new framework
>will be added in the following patch.
>
>Signed-off-by: Ying Wang <ying.a.wang@intel.com>
>---
> drivers/net/ice/ice_ethdev.c | 22 +-
> drivers/net/ice/ice_ethdev.h | 15 +-
> drivers/net/ice/ice_generic_flow.c | 768 +++++++++++++++--------------------
> drivers/net/ice/ice_generic_flow.h | 782 ++++++++----------------------------
> drivers/net/ice/ice_switch_filter.c | 511 -----------------------
> drivers/net/ice/ice_switch_filter.h | 18 -
> 6 files changed, 525 insertions(+), 1591 deletions(-)
>
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 4e0645db1..647aca3ed 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -15,7 +15,7 @@
> #include "base/ice_dcb.h"
> #include "ice_ethdev.h"
> #include "ice_rxtx.h"
>-#include "ice_switch_filter.h"
>+#include "ice_generic_flow.h"
>
> /* devargs */
> #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
>@@ -1677,7 +1677,11 @@ ice_dev_init(struct rte_eth_dev *dev)
> /* get base queue pairs index in the device */
> ice_base_queue_get(pf);
>
>- TAILQ_INIT(&pf->flow_list);
>+ ret = ice_flow_init(ad);
>+ if (ret) {
>+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
>+ return ret;
>+ }
>
> return 0;
>
>@@ -1796,6 +1800,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>
> /* Since stop will make link down, then the link event will be
> * triggered, disable the irq firstly to avoid the port_infoe etc
>@@ -1806,6 +1812,8 @@ ice_dev_close(struct rte_eth_dev *dev)
>
> ice_dev_stop(dev);
>
>+ ice_flow_uninit(ad);
>+
> /* release all queue resource */
> ice_free_queues(dev);
>
>@@ -1822,8 +1830,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> {
> struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
>- struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>- struct rte_flow *p_flow;
>
> ice_dev_close(dev);
>
>@@ -1840,14 +1846,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> /* unregister callback func from eal lib */
> rte_intr_callback_unregister(intr_handle,
> ice_interrupt_handler, dev);
>-
>- /* Remove all flows */
>- while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>- TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>- ice_free_switch_filter_rule(p_flow->rule);
>- rte_free(p_flow);
>- }
>-
> return 0;
> }
>
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index 9bf5de08d..d1d07641d 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -241,16 +241,14 @@ struct ice_vsi {
> bool offset_loaded;
> };
>
>-extern const struct rte_flow_ops ice_flow_ops;
>-
>-/* Struct to store flow created. */
>-struct rte_flow {
>- TAILQ_ENTRY(rte_flow) node;
>- void *rule;
>-};
>
>+struct rte_flow;
> TAILQ_HEAD(ice_flow_list, rte_flow);
>
>+
>+struct ice_flow_parser;
>+TAILQ_HEAD(ice_parser_list, ice_flow_parser);
>+
> struct ice_pf {
> struct ice_adapter *adapter; /* The adapter this PF associate to */
> struct ice_vsi *main_vsi; /* pointer to main VSI structure */
>@@ -278,6 +276,9 @@ struct ice_pf {
> bool offset_loaded;
> bool adapter_stopped;
> struct ice_flow_list flow_list;
>+ struct ice_parser_list rss_parser_list;
>+ struct ice_parser_list perm_parser_list;
>+ struct ice_parser_list dist_parser_list;
> };
>
> /**
>diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
>index 1c0adc779..aa11d6170 100644
>--- a/drivers/net/ice/ice_generic_flow.c
>+++ b/drivers/net/ice/ice_generic_flow.c
>@@ -17,7 +17,22 @@
>
> #include "ice_ethdev.h"
> #include "ice_generic_flow.h"
>-#include "ice_switch_filter.h"
>+
>+/**
>+ * Non-pipeline mode, fdir and swith both used as distributor,
>+ * fdir used first, switch used as fdir's backup.
>+ */
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
>+/*Pipeline mode, switch used at permission stage*/
>+#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
>+/*Pipeline mode, fdir used at distributor stage*/
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
>+
>+static int ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
>+
>+static struct ice_engine_list engine_list =
>+ TAILQ_HEAD_INITIALIZER(engine_list);
>
> static int ice_flow_validate(struct rte_eth_dev *dev,
> const struct rte_flow_attr *attr,
>@@ -34,17 +49,153 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
> struct rte_flow_error *error);
> static int ice_flow_flush(struct rte_eth_dev *dev,
> struct rte_flow_error *error);
>+static int ice_flow_query_count(struct rte_eth_dev *dev,
>+ struct rte_flow *flow,
>+ const struct rte_flow_action *actions,
>+ void *data,
>+ struct rte_flow_error *error);
>
> const struct rte_flow_ops ice_flow_ops = {
> .validate = ice_flow_validate,
> .create = ice_flow_create,
> .destroy = ice_flow_destroy,
> .flush = ice_flow_flush,
>+ .query = ice_flow_query_count,
> };
>
>+
>+void
>+ice_register_flow_engine(struct ice_flow_engine *engine)
>+{
>+ TAILQ_INSERT_TAIL(&engine_list, engine, node);
>+}
>+
>+int
>+ice_flow_init(struct ice_adapter *ad)
>+{
>+ int ret = 0;
>+ struct ice_pf *pf = &ad->pf;
>+ void *temp;
>+ struct ice_flow_engine *engine = NULL;
>+
>+ TAILQ_INIT(&pf->flow_list);
>+ TAILQ_INIT(&pf->rss_parser_list);
>+ TAILQ_INIT(&pf->perm_parser_list);
>+ TAILQ_INIT(&pf->dist_parser_list);
>+
>+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+ if (engine->init == NULL)
What about providing some debug log info here? Adding an engine name member to
struct ice_flow_engine may help.
>+ return -EINVAL;
>+
>+ ret = engine->init(ad);
>+ if (ret)
>+ return ret;
>+ }
>+ return 0;
>+}
>+
>+void
>+ice_flow_uninit(struct ice_adapter *ad)
>+{
>+ struct ice_pf *pf = &ad->pf;
>+ struct ice_flow_engine *engine;
>+ struct rte_flow *p_flow;
>+ struct ice_flow_parser *p_parser;
>+ void *temp;
>+
>+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+ if (engine->uninit)
>+ engine->uninit(ad);
>+ }
>+
>+ /* Remove all flows */
>+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>+ if (p_flow->engine->free)
>+ p_flow->engine->free(p_flow);
>+ rte_free(p_flow);
>+ }
>+
>+ /* Cleanup parser list */
>+ while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
>+ TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
>+
>+ while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
>+ TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
>+
>+ while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
>+ TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
>+}
>+
>+int
>+ice_register_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad)
>+{
>+ struct ice_parser_list *list = NULL;
>+ struct ice_pf *pf = &ad->pf;
>+
>+ switch (parser->stage) {
>+ case ICE_FLOW_STAGE_RSS:
>+ list = &pf->rss_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_PERMISSION:
>+ list = &pf->perm_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_DISTRIBUTOR:
>+ list = &pf->dist_parser_list;
>+ break;
>+ default:
>+ return -EINVAL;
>+ }
>+
>+ if (ad->devargs.pipeline_mode_support)
>+ TAILQ_INSERT_TAIL(list, parser, node);
>+ else {
>+ if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
>+ || parser->engine->type == ICE_FLOW_ENGINE_HASH)
>+ TAILQ_INSERT_TAIL(list, parser, node);
>+ else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
>+ TAILQ_INSERT_HEAD(list, parser, node);
>+ else
>+ return -EINVAL;
>+ }
>+ return 0;
>+}
>+
>+void
>+ice_unregister_parser(struct ice_flow_parser *parser,
>+ struct ice_adapter *ad)
>+{
>+ struct ice_pf *pf = &ad->pf;
>+ struct ice_parser_list *list;
>+ struct ice_flow_parser *p_parser;
>+ void *temp;
>+
>+ switch (parser->stage) {
>+ case ICE_FLOW_STAGE_RSS:
>+ list = &pf->rss_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_PERMISSION:
>+ list = &pf->perm_parser_list;
>+ break;
>+ case ICE_FLOW_STAGE_DISTRIBUTOR:
>+ list = &pf->dist_parser_list;
>+ break;
>+ default:
>+ return;
>+ }
>+
>+ TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
>+ if (p_parser->engine->type == parser->engine->type)
>+ TAILQ_REMOVE(list, p_parser, node);
>+ }
>+
>+}
>+
> static int
>-ice_flow_valid_attr(const struct rte_flow_attr *attr,
>- struct rte_flow_error *error)
>+ice_flow_valid_attr(struct ice_adapter *ad,
>+ const struct rte_flow_attr *attr,
>+ struct rte_flow_error *error)
> {
> /* Must be input direction */
> if (!attr->ingress) {
>@@ -61,15 +212,25 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr,
> attr, "Not support egress.");
> return -rte_errno;
> }
>-
>- /* Not supported */
>- if (attr->priority) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
>- attr, "Not support priority.");
>- return -rte_errno;
>+ /* Check pipeline mode support to set classification stage */
>+ if (ad->devargs.pipeline_mode_support) {
>+ if (0 == attr->priority)
>+ ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
>+ else
>+ ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
>+ } else {
>+ ice_pipeline_stage =
>+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
Do we really need this assignment?
>+ /* Not supported */
>+ if (attr->priority) {
>+ rte_flow_error_set(error, EINVAL,
>+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
>+ attr, "Not support priority.");
>+ return -rte_errno;
>+ }
> }
>-
Unrelated change.
> /* Not supported */
> if (attr->group) {
> rte_flow_error_set(error, EINVAL,
>@@ -102,7 +263,7 @@ ice_find_first_item(const struct rte_flow_item *item, bool is_void)
> /* Skip all VOID items of the pattern */
> static void
> ice_pattern_skip_void_item(struct rte_flow_item *items,
>- const struct rte_flow_item *pattern)
>+ const struct rte_flow_item *pattern)
> {
> uint32_t cpy_count = 0;
> const struct rte_flow_item *pb = pattern, *pe = pattern;
>@@ -124,7 +285,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
> items += cpy_count;
>
> if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
>- pb = pe;
> break;
> }
>
>@@ -151,11 +311,15 @@ ice_match_pattern(enum rte_flow_item_type *item_array,
> item->type == RTE_FLOW_ITEM_TYPE_END);
> }
>
>-static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
>+struct ice_pattern_match_item *
>+ice_search_pattern_match_item(const struct rte_flow_item pattern[],
>+ struct ice_pattern_match_item *array,
>+ uint32_t array_len,
> struct rte_flow_error *error)
> {
> uint16_t i = 0;
>- uint64_t inset;
>+ struct ice_pattern_match_item *pattern_match_item;
>+ /* need free by each filter */
> struct rte_flow_item *items; /* used for pattern without VOID items */
> uint32_t item_num = 0; /* non-void item number */
>
>@@ -172,451 +336,149 @@ static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
> if (!items) {
> rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> NULL, "No memory for PMD internal items.");
>- return -ENOMEM;
>+ return NULL;
>+ }
>+ pattern_match_item = rte_zmalloc("ice_pattern_match_item",
>+ sizeof(struct ice_pattern_match_item), 0);
>+ if (!pattern_match_item) {
>+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
Use rte_flow_error_set to align with others.
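A sketch of that suggestion (ENOMEM and the message text here are assumptions, not part of the patch):

	pattern_match_item = rte_zmalloc("ice_pattern_match_item",
			sizeof(struct ice_pattern_match_item), 0);
	if (!pattern_match_item) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "No memory for pattern match item.");
		return NULL;
	}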
>+ return NULL;
> }
>-
> ice_pattern_skip_void_item(items, pattern);
>
>- for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
>- if (ice_match_pattern(ice_supported_patterns[i].items,
>+ for (i = 0; i < array_len; i++)
>+ if (ice_match_pattern(array[i].pattern_list,
> items)) {
>- inset = ice_supported_patterns[i].sw_fields;
>+ pattern_match_item->input_set_mask =
>+ array[i].input_set_mask;
>+ pattern_match_item->pattern_list =
>+ array[i].pattern_list;
>+ pattern_match_item->meta = array[i].meta;
> rte_free(items);
>- return inset;
>+ return pattern_match_item;
> }
> rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> pattern, "Unsupported pattern");
>
> rte_free(items);
>- return 0;
>-}
>-
>-static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
>- struct rte_flow_error *error)
>-{
>- const struct rte_flow_item *item = pattern;
>- const struct rte_flow_item_eth *eth_spec, *eth_mask;
>- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
>- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
>- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
>- const struct rte_flow_item_udp *udp_spec, *udp_mask;
>- const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
>- const struct rte_flow_item_icmp *icmp_mask;
>- const struct rte_flow_item_icmp6 *icmp6_mask;
>- const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
>- const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
>- enum rte_flow_item_type item_type;
>- uint8_t ipv6_addr_mask[16] = {
>- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
>- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
>- uint64_t input_set = ICE_INSET_NONE;
>- bool is_tunnel = false;
>-
>- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
>- if (item->last) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Not support range");
>- return 0;
>- }
>- item_type = item->type;
>- switch (item_type) {
>- case RTE_FLOW_ITEM_TYPE_ETH:
>- eth_spec = item->spec;
>- eth_mask = item->mask;
>-
>- if (eth_spec && eth_mask) {
>- if (rte_is_broadcast_ether_addr(ð_mask->src))
>- input_set |= ICE_INSET_SMAC;
>- if (rte_is_broadcast_ether_addr(ð_mask->dst))
>- input_set |= ICE_INSET_DMAC;
>- if (eth_mask->type == RTE_BE16(0xffff))
>- input_set |= ICE_INSET_ETHERTYPE;
>- }
>- break;
>- case RTE_FLOW_ITEM_TYPE_IPV4:
>- ipv4_spec = item->spec;
>- ipv4_mask = item->mask;
>-
>- if (!(ipv4_spec && ipv4_mask))
>- break;
>-
>- /* Check IPv4 mask and update input set */
>- if (ipv4_mask->hdr.version_ihl ||
>- ipv4_mask->hdr.total_length ||
>- ipv4_mask->hdr.packet_id ||
>- ipv4_mask->hdr.hdr_checksum) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid IPv4 mask.");
>- return 0;
>- }
>-
>- if (is_tunnel) {
>- if (ipv4_mask->hdr.src_addr == UINT32_MAX)
>- input_set |= ICE_INSET_TUN_IPV4_SRC;
>- if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
>- input_set |= ICE_INSET_TUN_IPV4_DST;
>- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
>- input_set |= ICE_INSET_TUN_IPV4_TTL;
>- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
>- input_set |= ICE_INSET_TUN_IPV4_PROTO;
>- } else {
>- if (ipv4_mask->hdr.src_addr == UINT32_MAX)
>- input_set |= ICE_INSET_IPV4_SRC;
>- if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
>- input_set |= ICE_INSET_IPV4_DST;
>- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
>- input_set |= ICE_INSET_IPV4_TTL;
>- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
>- input_set |= ICE_INSET_IPV4_PROTO;
>- if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
>- input_set |= ICE_INSET_IPV4_TOS;
>- }
>- break;
>- case RTE_FLOW_ITEM_TYPE_IPV6:
>- ipv6_spec = item->spec;
>- ipv6_mask = item->mask;
>-
>- if (!(ipv6_spec && ipv6_mask))
>- break;
>-
>- if (ipv6_mask->hdr.payload_len) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid IPv6 mask");
>- return 0;
>- }
>-
>- if (is_tunnel) {
>- if (!memcmp(ipv6_mask->hdr.src_addr,
>- ipv6_addr_mask,
>- RTE_DIM(ipv6_mask->hdr.src_addr)))
>- input_set |= ICE_INSET_TUN_IPV6_SRC;
>- if (!memcmp(ipv6_mask->hdr.dst_addr,
>- ipv6_addr_mask,
>- RTE_DIM(ipv6_mask->hdr.dst_addr)))
>- input_set |= ICE_INSET_TUN_IPV6_DST;
>- if (ipv6_mask->hdr.proto == UINT8_MAX)
>- input_set |= ICE_INSET_TUN_IPV6_PROTO;
>- if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
>- input_set |= ICE_INSET_TUN_IPV6_TTL;
>- } else {
>- if (!memcmp(ipv6_mask->hdr.src_addr,
>- ipv6_addr_mask,
>- RTE_DIM(ipv6_mask->hdr.src_addr)))
>- input_set |= ICE_INSET_IPV6_SRC;
>- if (!memcmp(ipv6_mask->hdr.dst_addr,
>- ipv6_addr_mask,
>- RTE_DIM(ipv6_mask->hdr.dst_addr)))
>- input_set |= ICE_INSET_IPV6_DST;
>- if (ipv6_mask->hdr.proto == UINT8_MAX)
>- input_set |= ICE_INSET_IPV6_PROTO;
>- if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
>- input_set |= ICE_INSET_IPV6_HOP_LIMIT;
>- if ((ipv6_mask->hdr.vtc_flow &
>- rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
>- == rte_cpu_to_be_32
>- (RTE_IPV6_HDR_TC_MASK))
>- input_set |= ICE_INSET_IPV6_TOS;
>- }
>-
>- break;
>- case RTE_FLOW_ITEM_TYPE_UDP:
>- udp_spec = item->spec;
>- udp_mask = item->mask;
>-
>- if (!(udp_spec && udp_mask))
>- break;
>-
>- /* Check UDP mask and update input set*/
>- if (udp_mask->hdr.dgram_len ||
>- udp_mask->hdr.dgram_cksum) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid UDP mask");
>- return 0;
>- }
>-
>- if (is_tunnel) {
>- if (udp_mask->hdr.src_port == UINT16_MAX)
>- input_set |= ICE_INSET_TUN_SRC_PORT;
>- if (udp_mask->hdr.dst_port == UINT16_MAX)
>- input_set |= ICE_INSET_TUN_DST_PORT;
>- } else {
>- if (udp_mask->hdr.src_port == UINT16_MAX)
>- input_set |= ICE_INSET_SRC_PORT;
>- if (udp_mask->hdr.dst_port == UINT16_MAX)
>- input_set |= ICE_INSET_DST_PORT;
>- }
>-
>- break;
>- case RTE_FLOW_ITEM_TYPE_TCP:
>- tcp_spec = item->spec;
>- tcp_mask = item->mask;
>-
>- if (!(tcp_spec && tcp_mask))
>- break;
>-
>- /* Check TCP mask and update input set */
>- if (tcp_mask->hdr.sent_seq ||
>- tcp_mask->hdr.recv_ack ||
>- tcp_mask->hdr.data_off ||
>- tcp_mask->hdr.tcp_flags ||
>- tcp_mask->hdr.rx_win ||
>- tcp_mask->hdr.cksum ||
>- tcp_mask->hdr.tcp_urp) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid TCP mask");
>- return 0;
>- }
>-
>- if (is_tunnel) {
>- if (tcp_mask->hdr.src_port == UINT16_MAX)
>- input_set |= ICE_INSET_TUN_SRC_PORT;
>- if (tcp_mask->hdr.dst_port == UINT16_MAX)
>- input_set |= ICE_INSET_TUN_DST_PORT;
>- } else {
>- if (tcp_mask->hdr.src_port == UINT16_MAX)
>- input_set |= ICE_INSET_SRC_PORT;
>- if (tcp_mask->hdr.dst_port == UINT16_MAX)
>- input_set |= ICE_INSET_DST_PORT;
>- }
>-
>- break;
>- case RTE_FLOW_ITEM_TYPE_SCTP:
>- sctp_spec = item->spec;
>- sctp_mask = item->mask;
>-
>- if (!(sctp_spec && sctp_mask))
>- break;
>-
>- /* Check SCTP mask and update input set */
>- if (sctp_mask->hdr.cksum) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid SCTP mask");
>- return 0;
>- }
>-
>- if (is_tunnel) {
>- if (sctp_mask->hdr.src_port == UINT16_MAX)
>- input_set |= ICE_INSET_TUN_SRC_PORT;
>- if (sctp_mask->hdr.dst_port == UINT16_MAX)
>- input_set |= ICE_INSET_TUN_DST_PORT;
>- } else {
>- if (sctp_mask->hdr.src_port == UINT16_MAX)
>- input_set |= ICE_INSET_SRC_PORT;
>- if (sctp_mask->hdr.dst_port == UINT16_MAX)
>- input_set |= ICE_INSET_DST_PORT;
>- }
>-
>- break;
>- case RTE_FLOW_ITEM_TYPE_ICMP:
>- icmp_mask = item->mask;
>- if (icmp_mask->hdr.icmp_code ||
>- icmp_mask->hdr.icmp_cksum ||
>- icmp_mask->hdr.icmp_ident ||
>- icmp_mask->hdr.icmp_seq_nb) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid ICMP mask");
>- return 0;
>- }
>-
>- if (icmp_mask->hdr.icmp_type == UINT8_MAX)
>- input_set |= ICE_INSET_ICMP;
>- break;
>- case RTE_FLOW_ITEM_TYPE_ICMP6:
>- icmp6_mask = item->mask;
>- if (icmp6_mask->code ||
>- icmp6_mask->checksum) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid ICMP6 mask");
>- return 0;
>- }
>-
>- if (icmp6_mask->type == UINT8_MAX)
>- input_set |= ICE_INSET_ICMP6;
>- break;
>- case RTE_FLOW_ITEM_TYPE_VXLAN:
>- vxlan_spec = item->spec;
>- vxlan_mask = item->mask;
>- /* Check if VXLAN item is used to describe protocol.
>- * If yes, both spec and mask should be NULL.
>- * If no, both spec and mask shouldn't be NULL.
>- */
>- if ((!vxlan_spec && vxlan_mask) ||
>- (vxlan_spec && !vxlan_mask)) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid VXLAN item");
>- return 0;
>- }
>- if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
>- vxlan_mask->vni[1] == UINT8_MAX &&
>- vxlan_mask->vni[2] == UINT8_MAX)
>- input_set |= ICE_INSET_TUN_ID;
>- is_tunnel = 1;
>-
>- break;
>- case RTE_FLOW_ITEM_TYPE_NVGRE:
>- nvgre_spec = item->spec;
>- nvgre_mask = item->mask;
>- /* Check if NVGRE item is used to describe protocol.
>- * If yes, both spec and mask should be NULL.
>- * If no, both spec and mask shouldn't be NULL.
>- */
>- if ((!nvgre_spec && nvgre_mask) ||
>- (nvgre_spec && !nvgre_mask)) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid NVGRE item");
>- return 0;
>- }
>- if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
>- nvgre_mask->tni[1] == UINT8_MAX &&
>- nvgre_mask->tni[2] == UINT8_MAX)
>- input_set |= ICE_INSET_TUN_ID;
>- is_tunnel = 1;
>-
>- break;
>- case RTE_FLOW_ITEM_TYPE_VOID:
>- break;
>- default:
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM,
>- item,
>- "Invalid pattern");
>- break;
>- }
>- }
>- return input_set;
>-}
>-
>-static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
>- uint64_t inset, struct rte_flow_error *error)
>-{
>- uint64_t fields;
>-
>- /* get valid field */
>- fields = ice_get_flow_field(pattern, error);
>- if (!fields || fields & (~inset)) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
>- pattern,
>- "Invalid input set");
>- return -rte_errno;
>- }
>-
>- return 0;
>+ rte_free(pattern_match_item);
>+ return NULL;
> }
>
>-static int ice_flow_valid_action(struct rte_eth_dev *dev,
>- const struct rte_flow_action *actions,
>- struct rte_flow_error *error)
>+static struct ice_flow_engine *
>+ice_parse_engine(struct ice_adapter *ad,
>+ struct ice_parser_list *parser_list,
>+ const struct rte_flow_item pattern[],
>+ const struct rte_flow_action actions[],
>+ void **meta,
>+ struct rte_flow_error *error)
> {
>- const struct rte_flow_action_queue *act_q;
>- uint16_t queue;
>- const struct rte_flow_action *action;
>- for (action = actions; action->type !=
>- RTE_FLOW_ACTION_TYPE_END; action++) {
>- switch (action->type) {
>- case RTE_FLOW_ACTION_TYPE_QUEUE:
>- act_q = action->conf;
>- queue = act_q->index;
>- if (queue >= dev->data->nb_rx_queues) {
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ACTION,
>- actions, "Invalid queue ID for"
>- " switch filter.");
>- return -rte_errno;
>- }
>- break;
>- case RTE_FLOW_ACTION_TYPE_DROP:
>- case RTE_FLOW_ACTION_TYPE_VOID:
>- break;
>- default:
>- rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ACTION, actions,
>- "Invalid action.");
>- return -rte_errno;
>- }
>+ struct ice_flow_engine *engine = NULL;
>+ struct ice_flow_parser *parser = NULL;
>+ void *temp;
>+ TAILQ_FOREACH_SAFE(parser, parser_list, node, temp) {
>+ if (parser->parse_pattern_action(ad, parser->array,
>+ parser->array_len, pattern, actions,
>+ meta, error) < 0)
>+ continue;
>+ engine = parser->engine;
>+ break;
> }
>- return 0;
>+ return engine;
> }
>
> static int
>-ice_flow_validate(struct rte_eth_dev *dev,
>- const struct rte_flow_attr *attr,
>- const struct rte_flow_item pattern[],
>- const struct rte_flow_action actions[],
>- struct rte_flow_error *error)
>+ice_flow_validate_filter(struct rte_eth_dev *dev,
>+ const struct rte_flow_attr *attr,
>+ const struct rte_flow_item pattern[],
>+ const struct rte_flow_action actions[],
>+ struct ice_flow_engine **engine,
>+ void **meta,
>+ struct rte_flow_error *error)
> {
>- uint64_t inset = 0;
> int ret = ICE_ERR_NOT_SUPPORTED;
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>
> if (!pattern) {
> rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
>- NULL, "NULL pattern.");
>+ NULL, "NULL pattern.");
> return -rte_errno;
> }
>
> if (!actions) {
> rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ACTION_NUM,
>- NULL, "NULL action.");
>+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
>+ NULL, "NULL action.");
> return -rte_errno;
> }
>-
> if (!attr) {
> rte_flow_error_set(error, EINVAL,
>- RTE_FLOW_ERROR_TYPE_ATTR,
>- NULL, "NULL attribute.");
>+ RTE_FLOW_ERROR_TYPE_ATTR,
>+ NULL, "NULL attribute.");
> return -rte_errno;
> }
>
>- ret = ice_flow_valid_attr(attr, error);
>+ ret = ice_flow_valid_attr(ad, attr, error);
> if (ret)
> return ret;
>
>- inset = ice_flow_valid_pattern(pattern, error);
>- if (!inset)
>- return -rte_errno;
>-
>- ret = ice_flow_valid_inset(pattern, inset, error);
>- if (ret)
>- return ret;
>+ *engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, actions,
>+ meta, error);
>+ if (*engine != NULL)
>+ return 0;
>+
>+ switch (ice_pipeline_stage) {
>+ case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
>+ case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
>+ *engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern,
>+ actions, meta, error);
>+ break;
>+ case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
>+ *engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern,
>+ actions, meta, error);
>+ break;
>+ default:
>+ return -EINVAL;
>+ }
>
>- ret = ice_flow_valid_action(dev, actions, error);
>- if (ret)
>- return ret;
>+ if (*engine == NULL)
>+ return -EINVAL;
>
> return 0;
> }
>
>+static int
>+ice_flow_validate(struct rte_eth_dev *dev,
>+ const struct rte_flow_attr *attr,
>+ const struct rte_flow_item pattern[],
>+ const struct rte_flow_action actions[],
>+ struct rte_flow_error *error)
>+{
>+ int ret = ICE_ERR_NOT_SUPPORTED;
>+ void *meta = NULL;
>+ struct ice_flow_engine *engine = NULL;
>+
>+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
>+ &engine, &meta, error);
>+ return ret;
>+}
>+
> static struct rte_flow *
> ice_flow_create(struct rte_eth_dev *dev,
>- const struct rte_flow_attr *attr,
>- const struct rte_flow_item pattern[],
>- const struct rte_flow_action actions[],
>- struct rte_flow_error *error)
>+ const struct rte_flow_attr *attr,
>+ const struct rte_flow_item pattern[],
>+ const struct rte_flow_action actions[],
>+ struct rte_flow_error *error)
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct rte_flow *flow = NULL;
>- int ret;
>+ int ret = 0;
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>+ struct ice_flow_engine *engine = NULL;
>+ void *meta = NULL;
>
> flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
> if (!flow) {
>@@ -626,65 +488,105 @@ ice_flow_create(struct rte_eth_dev *dev,
> return flow;
> }
>
>- ret = ice_flow_validate(dev, attr, pattern, actions, error);
>+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
>+ &engine, &meta, error);
> if (ret < 0)
> goto free_flow;
>
>- ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
>+ if (engine->create == NULL)
>+ goto free_flow;
>+
>+ ret = engine->create(ad, flow, meta, error);
> if (ret)
> goto free_flow;
>
>+ flow->engine = engine;
> TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> return flow;
>
> free_flow:
>- rte_flow_error_set(error, -ret,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "Failed to create flow.");
>+ PMD_DRV_LOG(ERR, "Failed to create flow");
Why is this change needed?
> rte_free(flow);
> return NULL;
> }
>
> static int
> ice_flow_destroy(struct rte_eth_dev *dev,
>- struct rte_flow *flow,
>- struct rte_flow_error *error)
>+ struct rte_flow *flow,
>+ struct rte_flow_error *error)
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>+ struct ice_adapter *ad =
>+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> int ret = 0;
>
>- ret = ice_destroy_switch_filter(pf, flow, error);
>-
>+ if (!flow || !flow->engine->destroy) {
>+ rte_flow_error_set(error, EINVAL,
>+ RTE_FLOW_ERROR_TYPE_HANDLE,
>+ NULL, "NULL flow or NULL destroy");
>+ return -rte_errno;
>+ }
>+ ret = flow->engine->destroy(ad, flow, error);
> if (!ret) {
> TAILQ_REMOVE(&pf->flow_list, flow, node);
> rte_free(flow);
>- } else {
>- rte_flow_error_set(error, -ret,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "Failed to destroy flow.");
>- }
>+ } else
>+ PMD_DRV_LOG(ERR, "Failed to destroy flow");
Ditto.
>
> return ret;
> }
>
> static int
> ice_flow_flush(struct rte_eth_dev *dev,
>- struct rte_flow_error *error)
>+ struct rte_flow_error *error)
> {
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>- struct rte_flow *p_flow;
>+ struct rte_flow *p_flow = NULL;
> void *temp;
> int ret = 0;
>
> TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
> ret = ice_flow_destroy(dev, p_flow, error);
> if (ret) {
>- rte_flow_error_set(error, -ret,
>- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>- "Failed to flush SW flows.");
>- return -rte_errno;
>+ PMD_DRV_LOG(ERR, "Failed to flush flows");
Ditto.
Thanks,
Xiaolong
Hi, Xiaolong
> -----Original Message-----
> From: Ye, Xiaolong
> Sent: Saturday, September 7, 2019 12:13 AM
> To: Wang, Ying A <ying.a.wang@intel.com>
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; dev@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: Re: [PATCH 2/4] net/ice: rework for generic flow enabling
>
> On 09/04, Ying Wang wrote:
> >The patch reworks the generic flow API (rte_flow) implementation.
> >It introduces an abstract layer which provides a unified interface for
> >low-level filter engine (switch, fdir, hash) to register supported
> >patterns and actions and implement flow validate/create/destroy/flush/
> >query activities.
> >
> >The patch also removes the existing switch filter implementation to
> >avoid compile error. Switch filter implementation for the new framework
> >will be added in the following patch.
> >
> >Signed-off-by: Ying Wang <ying.a.wang@intel.com>
> >---
> > drivers/net/ice/ice_ethdev.c | 22 +-
> > drivers/net/ice/ice_ethdev.h | 15 +-
> > drivers/net/ice/ice_generic_flow.c | 768 +++++++++++++++--------------------
> > drivers/net/ice/ice_generic_flow.h | 782 ++++++++----------------------------
> > drivers/net/ice/ice_switch_filter.c | 511 -----------------------
> > drivers/net/ice/ice_switch_filter.h | 18 -
> > 6 files changed, 525 insertions(+), 1591 deletions(-)
> >
> >diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
> >index 4e0645db1..647aca3ed 100644
> >--- a/drivers/net/ice/ice_ethdev.c
> >+++ b/drivers/net/ice/ice_ethdev.c
> >@@ -15,7 +15,7 @@
> > #include "base/ice_dcb.h"
> > #include "ice_ethdev.h"
> > #include "ice_rxtx.h"
> >-#include "ice_switch_filter.h"
> >+#include "ice_generic_flow.h"
> >
> > /* devargs */
> > #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
> >@@ -1677,7 +1677,11 @@ ice_dev_init(struct rte_eth_dev *dev)
> > /* get base queue pairs index in the device */
> > ice_base_queue_get(pf);
> >
> >- TAILQ_INIT(&pf->flow_list);
> >+ ret = ice_flow_init(ad);
> >+ if (ret) {
> >+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
> >+ return ret;
> >+ }
> >
> > return 0;
> >
> >@@ -1796,6 +1800,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> > {
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> >+ struct ice_adapter *ad =
> >+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> >
> > /* Since stop will make link down, then the link event will be
> > * triggered, disable the irq firstly to avoid the port_infoe etc
> >@@ -1806,6 +1812,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> >
> > ice_dev_stop(dev);
> >
> >+ ice_flow_uninit(ad);
> >+
> > /* release all queue resource */
> > ice_free_queues(dev);
> >
> >@@ -1822,8 +1830,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> > {
> > struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> > struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
> >- struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> >- struct rte_flow *p_flow;
> >
> > ice_dev_close(dev);
> >
> >@@ -1840,14 +1846,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> > /* unregister callback func from eal lib */
> > rte_intr_callback_unregister(intr_handle,
> > ice_interrupt_handler, dev);
> >-
> >- /* Remove all flows */
> >- while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
> >- TAILQ_REMOVE(&pf->flow_list, p_flow, node);
> >- ice_free_switch_filter_rule(p_flow->rule);
> >- rte_free(p_flow);
> >- }
> >-
> > return 0;
> > }
> >
> >diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> >index 9bf5de08d..d1d07641d 100644
> >--- a/drivers/net/ice/ice_ethdev.h
> >+++ b/drivers/net/ice/ice_ethdev.h
> >@@ -241,16 +241,14 @@ struct ice_vsi {
> > bool offset_loaded;
> > };
> >
> >-extern const struct rte_flow_ops ice_flow_ops;
> >-
> >-/* Struct to store flow created. */
> >-struct rte_flow {
> >- TAILQ_ENTRY(rte_flow) node;
> >- void *rule;
> >-};
> >
> >+struct rte_flow;
> > TAILQ_HEAD(ice_flow_list, rte_flow);
> >
> >+
> >+struct ice_flow_parser;
> >+TAILQ_HEAD(ice_parser_list, ice_flow_parser);
> >+
> > struct ice_pf {
> > struct ice_adapter *adapter; /* The adapter this PF associate to */
> > struct ice_vsi *main_vsi; /* pointer to main VSI structure */
> >@@ -278,6 +276,9 @@ struct ice_pf {
> > bool offset_loaded;
> > bool adapter_stopped;
> > struct ice_flow_list flow_list;
> >+ struct ice_parser_list rss_parser_list;
> >+ struct ice_parser_list perm_parser_list;
> >+ struct ice_parser_list dist_parser_list;
> > };
> >
> > /**
> >diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
> >index 1c0adc779..aa11d6170 100644
> >--- a/drivers/net/ice/ice_generic_flow.c
> >+++ b/drivers/net/ice/ice_generic_flow.c
> >@@ -17,7 +17,22 @@
> >
> > #include "ice_ethdev.h"
> > #include "ice_generic_flow.h"
> >-#include "ice_switch_filter.h"
> >+
> >+/**
> >+ * Non-pipeline mode, fdir and swith both used as distributor,
>
> s/swith/switch
OK, will fix it in v2.
>
> >+ * fdir used first, switch used as fdir's backup.
> >+ */
> >+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
> >+/*Pipeline mode, switch used at permission stage*/
> >+#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
> >+/*Pipeline mode, fdir used at distributor stage*/
> >+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
> >+
> >+static int ice_pipeline_stage =
> >+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
> >+
> >+static struct ice_engine_list engine_list =
> >+ TAILQ_HEAD_INITIALIZER(engine_list);
> >
> > static int ice_flow_validate(struct rte_eth_dev *dev,
> > const struct rte_flow_attr *attr,
> >@@ -34,17 +49,153 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
> > struct rte_flow_error *error);
> > static int ice_flow_flush(struct rte_eth_dev *dev,
> > struct rte_flow_error *error);
> >+static int ice_flow_query_count(struct rte_eth_dev *dev,
> >+ struct rte_flow *flow,
> >+ const struct rte_flow_action *actions,
> >+ void *data,
> >+ struct rte_flow_error *error);
> >
> > const struct rte_flow_ops ice_flow_ops = {
> > .validate = ice_flow_validate,
> > .create = ice_flow_create,
> > .destroy = ice_flow_destroy,
> > .flush = ice_flow_flush,
> >+ .query = ice_flow_query_count,
> > };
> >
> >+
> >+void
> >+ice_register_flow_engine(struct ice_flow_engine *engine)
> >+{
> >+ TAILQ_INSERT_TAIL(&engine_list, engine, node);
> >+}
> >+
> >+int
> >+ice_flow_init(struct ice_adapter *ad)
> >+{
> >+ int ret = 0;
> >+ struct ice_pf *pf = &ad->pf;
> >+ void *temp;
> >+ struct ice_flow_engine *engine = NULL;
> >+
> >+ TAILQ_INIT(&pf->flow_list);
> >+ TAILQ_INIT(&pf->rss_parser_list);
> >+ TAILQ_INIT(&pf->perm_parser_list);
> >+ TAILQ_INIT(&pf->dist_parser_list);
> >+
> >+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> >+ if (engine->init == NULL)
> >+ return -EINVAL;
> >+
> >+ ret = engine->init(ad);
> >+ if (ret)
> >+ return ret;
> >+ }
> >+ return 0;
> >+}
> >+
> >+void
> >+ice_flow_uninit(struct ice_adapter *ad)
> >+{
> >+ struct ice_pf *pf = &ad->pf;
> >+ struct ice_flow_engine *engine;
> >+ struct rte_flow *p_flow;
> >+ struct ice_flow_parser *p_parser;
> >+ void *temp;
> >+
> >+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> >+ if (engine->uninit)
> >+ engine->uninit(ad);
> >+ }
> >+
> >+ /* Remove all flows */
> >+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
> >+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
> >+ if (p_flow->engine->free)
> >+ p_flow->engine->free(p_flow);
> >+ rte_free(p_flow);
> >+ }
> >+
> >+ /* Cleanup parser list */
> >+ while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
> >+ TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
> >+
> >+ while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
> >+ TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
> >+
> >+ while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
> >+ TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
> >+}
> >+
> >+int
> >+ice_register_parser(struct ice_flow_parser *parser,
> >+ struct ice_adapter *ad)
> >+{
> >+ struct ice_parser_list *list = NULL;
> >+ struct ice_pf *pf = &ad->pf;
> >+
> >+ switch (parser->stage) {
> >+ case ICE_FLOW_STAGE_RSS:
> >+ list = &pf->rss_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_PERMISSION:
> >+ list = &pf->perm_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_DISTRIBUTOR:
> >+ list = &pf->dist_parser_list;
> >+ break;
> >+ default:
> >+ return -EINVAL;
> >+ }
> >+
> >+ if (ad->devargs.pipeline_mode_support)
> >+ TAILQ_INSERT_TAIL(list, parser, node);
> >+ else {
> >+ if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
> >+ || parser->engine->type == ICE_FLOW_ENGINE_HASH)
> >+ TAILQ_INSERT_TAIL(list, parser, node);
> >+ else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
> >+ TAILQ_INSERT_HEAD(list, parser, node);
> >+ else
> >+ return -EINVAL;
> >+ }
> >+ return 0;
> >+}
> >+
> >+void
> >+ice_unregister_parser(struct ice_flow_parser *parser,
> >+ struct ice_adapter *ad)
> >+{
> >+ struct ice_pf *pf = &ad->pf;
> >+ struct ice_parser_list *list;
> >+ struct ice_flow_parser *p_parser;
> >+ void *temp;
> >+
> >+ switch (parser->stage) {
> >+ case ICE_FLOW_STAGE_RSS:
> >+ list = &pf->rss_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_PERMISSION:
> >+ list = &pf->perm_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_DISTRIBUTOR:
> >+ list = &pf->dist_parser_list;
> >+ break;
> >+ default:
> >+ return;
> >+ }
>
> The switch blocks in the above functions are the same; it's better to use a common
> function to reduce the duplicated code.
The switch blocks in the above two functions differ slightly in their default behavior: one returns -EINVAL and the other simply returns, because the register/unregister functions have different return types. So, can I just keep this format?
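For reference, the common part could still be factored into a lookup helper that returns the list (NULL for an invalid stage), letting each caller keep its own default return. A minimal sketch, with the helper name assumed:

	static struct ice_parser_list *
	ice_get_parser_list(struct ice_flow_parser *parser,
			struct ice_adapter *ad)
	{
		struct ice_pf *pf = &ad->pf;

		switch (parser->stage) {
		case ICE_FLOW_STAGE_RSS:
			return &pf->rss_parser_list;
		case ICE_FLOW_STAGE_PERMISSION:
			return &pf->perm_parser_list;
		case ICE_FLOW_STAGE_DISTRIBUTOR:
			return &pf->dist_parser_list;
		default:
			return NULL;
		}
	}

ice_register_parser would then return -EINVAL when the helper returns NULL, while ice_unregister_parser would simply return.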
>
> Thanks,
> Xiaolong
Thanks,
Ying
Hi, Xiaolong
> -----Original Message-----
> From: Ye, Xiaolong
> Sent: Sunday, September 8, 2019 11:56 PM
> To: Wang, Ying A <ying.a.wang@intel.com>
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; dev@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: Re: [PATCH 2/4] net/ice: rework for generic flow enabling
>
> On 09/04, Ying Wang wrote:
> >The patch reworks the generic flow API (rte_flow) implementation.
> >It introduces an abstract layer which provides a unified interface for
> >low-level filter engine (switch, fdir, hash) to register supported
> >patterns and actions and implement flow validate/create/destroy/flush/
> >query activities.
> >
> >The patch also removes the existing switch filter implementation to
> >avoid compile error. Switch filter implementation for the new framework
> >will be added in the following patch.
> >
> >Signed-off-by: Ying Wang <ying.a.wang@intel.com>
> >---
> > drivers/net/ice/ice_ethdev.c | 22 +-
> > drivers/net/ice/ice_ethdev.h | 15 +-
> > drivers/net/ice/ice_generic_flow.c | 768 +++++++++++++++--------------------
> > drivers/net/ice/ice_generic_flow.h | 782 ++++++++----------------------------
> > drivers/net/ice/ice_switch_filter.c | 511 -----------------------
> > drivers/net/ice/ice_switch_filter.h | 18 -
> > 6 files changed, 525 insertions(+), 1591 deletions(-)
> >
> >diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
> >index 4e0645db1..647aca3ed 100644
> >--- a/drivers/net/ice/ice_ethdev.c
> >+++ b/drivers/net/ice/ice_ethdev.c
> >@@ -15,7 +15,7 @@
> > #include "base/ice_dcb.h"
> > #include "ice_ethdev.h"
> > #include "ice_rxtx.h"
> >-#include "ice_switch_filter.h"
> >+#include "ice_generic_flow.h"
> >
> > /* devargs */
> > #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
> >@@ -1677,7 +1677,11 @@ ice_dev_init(struct rte_eth_dev *dev)
> > /* get base queue pairs index in the device */
> > ice_base_queue_get(pf);
> >
> >- TAILQ_INIT(&pf->flow_list);
> >+ ret = ice_flow_init(ad);
> >+ if (ret) {
> >+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
> >+ return ret;
> >+ }
> >
> > return 0;
> >
> >@@ -1796,6 +1800,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> > {
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> >+ struct ice_adapter *ad =
> >+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> >
> > /* Since stop will make link down, then the link event will be
> > * triggered, disable the irq firstly to avoid the port_infoe etc
> >@@ -1806,6 +1812,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> >
> > ice_dev_stop(dev);
> >
> >+ ice_flow_uninit(ad);
> >+
> > /* release all queue resource */
> > ice_free_queues(dev);
> >
> >@@ -1822,8 +1830,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> > {
> > struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> > struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
> >- struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> >- struct rte_flow *p_flow;
> >
> > ice_dev_close(dev);
> >
> >@@ -1840,14 +1846,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> > /* unregister callback func from eal lib */
> > rte_intr_callback_unregister(intr_handle,
> > ice_interrupt_handler, dev);
> >-
> >- /* Remove all flows */
> >- while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
> >- TAILQ_REMOVE(&pf->flow_list, p_flow, node);
> >- ice_free_switch_filter_rule(p_flow->rule);
> >- rte_free(p_flow);
> >- }
> >-
> > return 0;
> > }
> >
> >diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> >index 9bf5de08d..d1d07641d 100644
> >--- a/drivers/net/ice/ice_ethdev.h
> >+++ b/drivers/net/ice/ice_ethdev.h
> >@@ -241,16 +241,14 @@ struct ice_vsi {
> > bool offset_loaded;
> > };
> >
> >-extern const struct rte_flow_ops ice_flow_ops;
> >-
> >-/* Struct to store flow created. */
> >-struct rte_flow {
> >- TAILQ_ENTRY(rte_flow) node;
> >- void *rule;
> >-};
> >
> >+struct rte_flow;
> > TAILQ_HEAD(ice_flow_list, rte_flow);
> >
> >+
> >+struct ice_flow_parser;
> >+TAILQ_HEAD(ice_parser_list, ice_flow_parser);
> >+
> > struct ice_pf {
> > struct ice_adapter *adapter; /* The adapter this PF associate to */
> > struct ice_vsi *main_vsi; /* pointer to main VSI structure */
> >@@ -278,6 +276,9 @@ struct ice_pf {
> > bool offset_loaded;
> > bool adapter_stopped;
> > struct ice_flow_list flow_list;
> >+ struct ice_parser_list rss_parser_list;
> >+ struct ice_parser_list perm_parser_list;
> >+ struct ice_parser_list dist_parser_list;
> > };
> >
> > /**
> >diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
> >index 1c0adc779..aa11d6170 100644
> >--- a/drivers/net/ice/ice_generic_flow.c
> >+++ b/drivers/net/ice/ice_generic_flow.c
> >@@ -17,7 +17,22 @@
> >
> > #include "ice_ethdev.h"
> > #include "ice_generic_flow.h"
> >-#include "ice_switch_filter.h"
> >+
> >+/**
> >+ * Non-pipeline mode, fdir and swith both used as distributor,
> >+ * fdir used first, switch used as fdir's backup.
> >+ */
> >+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
> >+/*Pipeline mode, switch used at permission stage*/
> >+#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
> >+/*Pipeline mode, fdir used at distributor stage*/
> >+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
> >+
> >+static int ice_pipeline_stage =
> >+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
> >+
> >+static struct ice_engine_list engine_list =
> >+ TAILQ_HEAD_INITIALIZER(engine_list);
> >
> > static int ice_flow_validate(struct rte_eth_dev *dev,
> > const struct rte_flow_attr *attr,
> >@@ -34,17 +49,153 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
> > struct rte_flow_error *error);
> > static int ice_flow_flush(struct rte_eth_dev *dev,
> > struct rte_flow_error *error);
> >+static int ice_flow_query_count(struct rte_eth_dev *dev,
> >+ struct rte_flow *flow,
> >+ const struct rte_flow_action *actions,
> >+ void *data,
> >+ struct rte_flow_error *error);
> >
> > const struct rte_flow_ops ice_flow_ops = {
> > .validate = ice_flow_validate,
> > .create = ice_flow_create,
> > .destroy = ice_flow_destroy,
> > .flush = ice_flow_flush,
> >+ .query = ice_flow_query_count,
> > };
> >
> >+
> >+void
> >+ice_register_flow_engine(struct ice_flow_engine *engine)
> >+{
> >+ TAILQ_INSERT_TAIL(&engine_list, engine, node);
> >+}
> >+
> >+int
> >+ice_flow_init(struct ice_adapter *ad)
> >+{
> >+ int ret = 0;
> >+ struct ice_pf *pf = &ad->pf;
> >+ void *temp;
> >+ struct ice_flow_engine *engine = NULL;
> >+
> >+ TAILQ_INIT(&pf->flow_list);
> >+ TAILQ_INIT(&pf->rss_parser_list);
> >+ TAILQ_INIT(&pf->perm_parser_list);
> >+ TAILQ_INIT(&pf->dist_parser_list);
> >+
> >+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> >+ if (engine->init == NULL)
>
> What about providing some debug log info here? Adding an engine name
> member to struct ice_flow_engine may help.
It's a good suggestion. struct ice_flow_engine already has an engine type member, so I will add debug log info based on it in v2.
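For illustration, the v2 check could look like this (the log wording is an assumption):

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->init == NULL) {
			PMD_DRV_LOG(ERR, "Invalid engine type (%d)",
					engine->type);
			return -EINVAL;
		}
		ret = engine->init(ad);
		if (ret)
			return ret;
	}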
>
> >+ return -EINVAL;
> >+
> >+ ret = engine->init(ad);
> >+ if (ret)
> >+ return ret;
> >+ }
> >+ return 0;
> >+}
> >+
> >+void
> >+ice_flow_uninit(struct ice_adapter *ad)
> >+{
> >+ struct ice_pf *pf = &ad->pf;
> >+ struct ice_flow_engine *engine;
> >+ struct rte_flow *p_flow;
> >+ struct ice_flow_parser *p_parser;
> >+ void *temp;
> >+
> >+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> >+ if (engine->uninit)
> >+ engine->uninit(ad);
> >+ }
> >+
> >+ /* Remove all flows */
> >+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
> >+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
> >+ if (p_flow->engine->free)
> >+ p_flow->engine->free(p_flow);
> >+ rte_free(p_flow);
> >+ }
> >+
> >+ /* Cleanup parser list */
> >+ while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
> >+ TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
> >+
> >+ while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
> >+ TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
> >+
> >+ while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
> >+ TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
> >+}
> >+
> >+int
> >+ice_register_parser(struct ice_flow_parser *parser,
> >+ struct ice_adapter *ad)
> >+{
> >+ struct ice_parser_list *list = NULL;
> >+ struct ice_pf *pf = &ad->pf;
> >+
> >+ switch (parser->stage) {
> >+ case ICE_FLOW_STAGE_RSS:
> >+ list = &pf->rss_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_PERMISSION:
> >+ list = &pf->perm_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_DISTRIBUTOR:
> >+ list = &pf->dist_parser_list;
> >+ break;
> >+ default:
> >+ return -EINVAL;
> >+ }
> >+
> >+ if (ad->devargs.pipeline_mode_support)
> >+ TAILQ_INSERT_TAIL(list, parser, node);
> >+ else {
> >+ if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
> >+ || parser->engine->type == ICE_FLOW_ENGINE_HASH)
> >+ TAILQ_INSERT_TAIL(list, parser, node);
> >+ else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
> >+ TAILQ_INSERT_HEAD(list, parser, node);
> >+ else
> >+ return -EINVAL;
> >+ }
> >+ return 0;
> >+}
> >+
> >+void
> >+ice_unregister_parser(struct ice_flow_parser *parser,
> >+ struct ice_adapter *ad)
> >+{
> >+ struct ice_pf *pf = &ad->pf;
> >+ struct ice_parser_list *list;
> >+ struct ice_flow_parser *p_parser;
> >+ void *temp;
> >+
> >+ switch (parser->stage) {
> >+ case ICE_FLOW_STAGE_RSS:
> >+ list = &pf->rss_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_PERMISSION:
> >+ list = &pf->perm_parser_list;
> >+ break;
> >+ case ICE_FLOW_STAGE_DISTRIBUTOR:
> >+ list = &pf->dist_parser_list;
> >+ break;
> >+ default:
> >+ return;
> >+ }
> >+
> >+ TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
> >+ if (p_parser->engine->type == parser->engine->type)
> >+ TAILQ_REMOVE(list, p_parser, node);
> >+ }
> >+
> >+}
> >+
> > static int
> >-ice_flow_valid_attr(const struct rte_flow_attr *attr,
> >- struct rte_flow_error *error)
> >+ice_flow_valid_attr(struct ice_adapter *ad,
> >+ const struct rte_flow_attr *attr,
> >+ struct rte_flow_error *error)
> > {
> > /* Must be input direction */
> > if (!attr->ingress) {
> >@@ -61,15 +212,25 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr,
> > attr, "Not support egress.");
> > return -rte_errno;
> > }
> >-
> >- /* Not supported */
> >- if (attr->priority) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> >- attr, "Not support priority.");
> >- return -rte_errno;
> >+ /* Check pipeline mode support to set classification stage */
> >+ if (ad->devargs.pipeline_mode_support) {
> >+ if (0 == attr->priority)
> >+ ice_pipeline_stage =
> >+ ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
> >+ else
> >+ ice_pipeline_stage =
> >+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
> >+ } else {
> >+ ice_pipeline_stage =
> >+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
>
> Do we really need this assignment?
Yes. We use devargs.pipeline_mode_support as a hint to decide which mode to use: 1 for pipeline mode, 0 for non-pipeline mode.
By default, non-pipeline mode is used, both switch and fdir act as distributors, and switch is fdir's backup.
In pipeline mode, attr->priority is enabled: 0 selects the permission stage and 1 the distributor stage.
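For illustration, under that scheme an application only varies the rule attributes; a sketch of the two pipeline-mode cases:

	/* pipeline mode: attr->priority selects the classification stage */
	struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = 0,	/* 0 = permission stage (switch) */
	};
	/* .priority = 1 would instead target the distributor stage (fdir) */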
>
> >+ /* Not supported */
> >+ if (attr->priority) {
> >+			rte_flow_error_set(error, EINVAL,
> >+				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> >+				attr, "Not support priority.");
> >+ return -rte_errno;
> >+ }
> > }
> >-
>
> Unrelated change.
>
> > /* Not supported */
> > if (attr->group) {
> > rte_flow_error_set(error, EINVAL,
> >@@ -102,7 +263,7 @@ ice_find_first_item(const struct rte_flow_item *item, bool is_void)
> > /* Skip all VOID items of the pattern */
> > static void
> > ice_pattern_skip_void_item(struct rte_flow_item *items,
> >- const struct rte_flow_item *pattern)
> >+ const struct rte_flow_item *pattern)
> > {
> > uint32_t cpy_count = 0;
> > 	const struct rte_flow_item *pb = pattern, *pe = pattern;
> >@@ -124,7 +285,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
> > items += cpy_count;
> >
> > if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
> >- pb = pe;
> > break;
> > }
> >
> >@@ -151,11 +311,15 @@ ice_match_pattern(enum rte_flow_item_type *item_array,
> > 		item->type == RTE_FLOW_ITEM_TYPE_END);
> > }
> >
> >-static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
> >+struct ice_pattern_match_item *
> >+ice_search_pattern_match_item(const struct rte_flow_item pattern[],
> >+ struct ice_pattern_match_item *array,
> >+ uint32_t array_len,
> > struct rte_flow_error *error)
> > {
> > uint16_t i = 0;
> >- uint64_t inset;
> >+ struct ice_pattern_match_item *pattern_match_item;
> >+	/* needs to be freed by each filter */
> > struct rte_flow_item *items; /* used for pattern without VOID items */
> > uint32_t item_num = 0; /* non-void item number */
> >
> >@@ -172,451 +336,149 @@ static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
> > if (!items) {
> > 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > NULL, "No memory for PMD internal items.");
> >- return -ENOMEM;
> >+ return NULL;
> >+ }
> >+ pattern_match_item = rte_zmalloc("ice_pattern_match_item",
> >+ sizeof(struct ice_pattern_match_item), 0);
> >+ if (!pattern_match_item) {
> >+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
>
> Use rte_flow_error_set to align with others.
OK, will fix it in v2.
>
> >+ return NULL;
> > }
> >-
> > ice_pattern_skip_void_item(items, pattern);
> >
> >- for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
> >- if (ice_match_pattern(ice_supported_patterns[i].items,
> >+ for (i = 0; i < array_len; i++)
> >+ if (ice_match_pattern(array[i].pattern_list,
> > items)) {
> >- inset = ice_supported_patterns[i].sw_fields;
> >+ pattern_match_item->input_set_mask =
> >+ array[i].input_set_mask;
> >+ pattern_match_item->pattern_list =
> >+ array[i].pattern_list;
> >+ pattern_match_item->meta = array[i].meta;
> > rte_free(items);
> >- return inset;
> >+ return pattern_match_item;
> > }
> > rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> > pattern, "Unsupported pattern");
> >
> > rte_free(items);
> >- return 0;
> >-}
> >-
> >-static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
> >- struct rte_flow_error *error)
> >-{
> >- const struct rte_flow_item *item = pattern;
> >- const struct rte_flow_item_eth *eth_spec, *eth_mask;
> >- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> >- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> >- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> >- const struct rte_flow_item_udp *udp_spec, *udp_mask;
> >- const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> >- const struct rte_flow_item_icmp *icmp_mask;
> >- const struct rte_flow_item_icmp6 *icmp6_mask;
> >- const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
> >- const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
> >- enum rte_flow_item_type item_type;
> >- uint8_t ipv6_addr_mask[16] = {
> >- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> >- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
> >- uint64_t input_set = ICE_INSET_NONE;
> >- bool is_tunnel = false;
> >-
> >- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> >- if (item->last) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Not support range");
> >- return 0;
> >- }
> >- item_type = item->type;
> >- switch (item_type) {
> >- case RTE_FLOW_ITEM_TYPE_ETH:
> >- eth_spec = item->spec;
> >- eth_mask = item->mask;
> >-
> >- if (eth_spec && eth_mask) {
> >-			if (rte_is_broadcast_ether_addr(&eth_mask->src))
> >-				input_set |= ICE_INSET_SMAC;
> >-			if (rte_is_broadcast_ether_addr(&eth_mask->dst))
> >-				input_set |= ICE_INSET_DMAC;
> >- if (eth_mask->type == RTE_BE16(0xffff))
> >- input_set |= ICE_INSET_ETHERTYPE;
> >- }
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_IPV4:
> >- ipv4_spec = item->spec;
> >- ipv4_mask = item->mask;
> >-
> >- if (!(ipv4_spec && ipv4_mask))
> >- break;
> >-
> >- /* Check IPv4 mask and update input set */
> >- if (ipv4_mask->hdr.version_ihl ||
> >- ipv4_mask->hdr.total_length ||
> >- ipv4_mask->hdr.packet_id ||
> >- ipv4_mask->hdr.hdr_checksum) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid IPv4 mask.");
> >- return 0;
> >- }
> >-
> >-			if (is_tunnel) {
> >-				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> >-					input_set |= ICE_INSET_TUN_IPV4_SRC;
> >-				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> >-					input_set |= ICE_INSET_TUN_IPV4_DST;
> >-				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> >-					input_set |= ICE_INSET_TUN_IPV4_TTL;
> >-				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
> >-					input_set |= ICE_INSET_TUN_IPV4_PROTO;
> >-			} else {
> >-				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> >-					input_set |= ICE_INSET_IPV4_SRC;
> >-				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> >-					input_set |= ICE_INSET_IPV4_DST;
> >-				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> >-					input_set |= ICE_INSET_IPV4_TTL;
> >-				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
> >-					input_set |= ICE_INSET_IPV4_PROTO;
> >-				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
> >-					input_set |= ICE_INSET_IPV4_TOS;
> >-			}
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_IPV6:
> >- ipv6_spec = item->spec;
> >- ipv6_mask = item->mask;
> >-
> >- if (!(ipv6_spec && ipv6_mask))
> >- break;
> >-
> >- if (ipv6_mask->hdr.payload_len) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid IPv6 mask");
> >- return 0;
> >- }
> >-
> >-			if (is_tunnel) {
> >-				if (!memcmp(ipv6_mask->hdr.src_addr,
> >-					ipv6_addr_mask,
> >-					RTE_DIM(ipv6_mask->hdr.src_addr)))
> >-					input_set |= ICE_INSET_TUN_IPV6_SRC;
> >-				if (!memcmp(ipv6_mask->hdr.dst_addr,
> >-					ipv6_addr_mask,
> >-					RTE_DIM(ipv6_mask->hdr.dst_addr)))
> >-					input_set |= ICE_INSET_TUN_IPV6_DST;
> >-				if (ipv6_mask->hdr.proto == UINT8_MAX)
> >-					input_set |= ICE_INSET_TUN_IPV6_PROTO;
> >-				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
> >-					input_set |= ICE_INSET_TUN_IPV6_TTL;
> >-			} else {
> >-				if (!memcmp(ipv6_mask->hdr.src_addr,
> >-					ipv6_addr_mask,
> >-					RTE_DIM(ipv6_mask->hdr.src_addr)))
> >-					input_set |= ICE_INSET_IPV6_SRC;
> >-				if (!memcmp(ipv6_mask->hdr.dst_addr,
> >-					ipv6_addr_mask,
> >-					RTE_DIM(ipv6_mask->hdr.dst_addr)))
> >-					input_set |= ICE_INSET_IPV6_DST;
> >-				if (ipv6_mask->hdr.proto == UINT8_MAX)
> >-					input_set |= ICE_INSET_IPV6_PROTO;
> >-				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
> >-					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
> >-				if ((ipv6_mask->hdr.vtc_flow &
> >-					rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
> >-					== rte_cpu_to_be_32
> >-					(RTE_IPV6_HDR_TC_MASK))
> >-					input_set |= ICE_INSET_IPV6_TOS;
> >-			}
> >-
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_UDP:
> >- udp_spec = item->spec;
> >- udp_mask = item->mask;
> >-
> >- if (!(udp_spec && udp_mask))
> >- break;
> >-
> >- /* Check UDP mask and update input set*/
> >- if (udp_mask->hdr.dgram_len ||
> >- udp_mask->hdr.dgram_cksum) {
> >-				rte_flow_error_set(error, EINVAL,
> >-					RTE_FLOW_ERROR_TYPE_ITEM,
> >-					item,
> >-					"Invalid UDP mask");
> >-				return 0;
> >-			}
> >-
> >-			if (is_tunnel) {
> >-				if (udp_mask->hdr.src_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_TUN_SRC_PORT;
> >-				if (udp_mask->hdr.dst_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_TUN_DST_PORT;
> >-			} else {
> >-				if (udp_mask->hdr.src_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_SRC_PORT;
> >-				if (udp_mask->hdr.dst_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_DST_PORT;
> >-			}
> >-
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_TCP:
> >- tcp_spec = item->spec;
> >- tcp_mask = item->mask;
> >-
> >- if (!(tcp_spec && tcp_mask))
> >- break;
> >-
> >- /* Check TCP mask and update input set */
> >- if (tcp_mask->hdr.sent_seq ||
> >- tcp_mask->hdr.recv_ack ||
> >- tcp_mask->hdr.data_off ||
> >- tcp_mask->hdr.tcp_flags ||
> >- tcp_mask->hdr.rx_win ||
> >- tcp_mask->hdr.cksum ||
> >- tcp_mask->hdr.tcp_urp) {
> >-				rte_flow_error_set(error, EINVAL,
> >-					RTE_FLOW_ERROR_TYPE_ITEM,
> >-					item,
> >-					"Invalid TCP mask");
> >-				return 0;
> >-			}
> >-
> >-			if (is_tunnel) {
> >-				if (tcp_mask->hdr.src_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_TUN_SRC_PORT;
> >-				if (tcp_mask->hdr.dst_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_TUN_DST_PORT;
> >-			} else {
> >-				if (tcp_mask->hdr.src_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_SRC_PORT;
> >-				if (tcp_mask->hdr.dst_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_DST_PORT;
> >-			}
> >-
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_SCTP:
> >- sctp_spec = item->spec;
> >- sctp_mask = item->mask;
> >-
> >- if (!(sctp_spec && sctp_mask))
> >- break;
> >-
> >- /* Check SCTP mask and update input set */
> >- if (sctp_mask->hdr.cksum) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid SCTP mask");
> >- return 0;
> >- }
> >-
> >-			if (is_tunnel) {
> >-				if (sctp_mask->hdr.src_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_TUN_SRC_PORT;
> >-				if (sctp_mask->hdr.dst_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_TUN_DST_PORT;
> >-			} else {
> >-				if (sctp_mask->hdr.src_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_SRC_PORT;
> >-				if (sctp_mask->hdr.dst_port == UINT16_MAX)
> >-					input_set |= ICE_INSET_DST_PORT;
> >-			}
> >-
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_ICMP:
> >- icmp_mask = item->mask;
> >- if (icmp_mask->hdr.icmp_code ||
> >- icmp_mask->hdr.icmp_cksum ||
> >- icmp_mask->hdr.icmp_ident ||
> >- icmp_mask->hdr.icmp_seq_nb) {
> >-				rte_flow_error_set(error, EINVAL,
> >-					RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid ICMP mask");
> >- return 0;
> >- }
> >-
> >- if (icmp_mask->hdr.icmp_type == UINT8_MAX)
> >- input_set |= ICE_INSET_ICMP;
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_ICMP6:
> >- icmp6_mask = item->mask;
> >- if (icmp6_mask->code ||
> >- icmp6_mask->checksum) {
> >-				rte_flow_error_set(error, EINVAL,
> >-					RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid ICMP6 mask");
> >- return 0;
> >- }
> >-
> >- if (icmp6_mask->type == UINT8_MAX)
> >- input_set |= ICE_INSET_ICMP6;
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_VXLAN:
> >- vxlan_spec = item->spec;
> >- vxlan_mask = item->mask;
> >- /* Check if VXLAN item is used to describe protocol.
> >- * If yes, both spec and mask should be NULL.
> >- * If no, both spec and mask shouldn't be NULL.
> >- */
> >- if ((!vxlan_spec && vxlan_mask) ||
> >- (vxlan_spec && !vxlan_mask)) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid VXLAN item");
> >- return 0;
> >- }
> >-			if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
> >-			    vxlan_mask->vni[1] == UINT8_MAX &&
> >- vxlan_mask->vni[2] == UINT8_MAX)
> >- input_set |= ICE_INSET_TUN_ID;
> >- is_tunnel = 1;
> >-
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_NVGRE:
> >- nvgre_spec = item->spec;
> >- nvgre_mask = item->mask;
> >- /* Check if NVGRE item is used to describe protocol.
> >- * If yes, both spec and mask should be NULL.
> >- * If no, both spec and mask shouldn't be NULL.
> >- */
> >- if ((!nvgre_spec && nvgre_mask) ||
> >- (nvgre_spec && !nvgre_mask)) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid NVGRE item");
> >- return 0;
> >- }
> >-			if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
> >-			    nvgre_mask->tni[1] == UINT8_MAX &&
> >- nvgre_mask->tni[2] == UINT8_MAX)
> >- input_set |= ICE_INSET_TUN_ID;
> >- is_tunnel = 1;
> >-
> >- break;
> >- case RTE_FLOW_ITEM_TYPE_VOID:
> >- break;
> >- default:
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM,
> >- item,
> >- "Invalid pattern");
> >- break;
> >- }
> >- }
> >- return input_set;
> >-}
> >-
> >-static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
> >- uint64_t inset, struct rte_flow_error *error)
> >-{
> >- uint64_t fields;
> >-
> >- /* get valid field */
> >- fields = ice_get_flow_field(pattern, error);
> >- if (!fields || fields & (~inset)) {
> >- rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
> >- pattern,
> >- "Invalid input set");
> >- return -rte_errno;
> >- }
> >-
> >- return 0;
> >+ rte_free(pattern_match_item);
> >+ return NULL;
> > }
> >
> >-static int ice_flow_valid_action(struct rte_eth_dev *dev,
> >- const struct rte_flow_action *actions,
> >- struct rte_flow_error *error)
> >+static struct ice_flow_engine *
> >+ice_parse_engine(struct ice_adapter *ad,
> >+ struct ice_parser_list *parser_list,
> >+ const struct rte_flow_item pattern[],
> >+ const struct rte_flow_action actions[],
> >+ void **meta,
> >+ struct rte_flow_error *error)
> > {
> >- const struct rte_flow_action_queue *act_q;
> >- uint16_t queue;
> >- const struct rte_flow_action *action;
> >- for (action = actions; action->type !=
> >- RTE_FLOW_ACTION_TYPE_END; action++) {
> >- switch (action->type) {
> >- case RTE_FLOW_ACTION_TYPE_QUEUE:
> >- act_q = action->conf;
> >- queue = act_q->index;
> >- if (queue >= dev->data->nb_rx_queues) {
> >- rte_flow_error_set(error, EINVAL,
> >-				rte_flow_error_set(error, EINVAL,
> >-					RTE_FLOW_ERROR_TYPE_ACTION,
> >-					actions, "Invalid queue ID for"
> >-					" switch filter.");
> >- }
> >- break;
> >- case RTE_FLOW_ACTION_TYPE_DROP:
> >- case RTE_FLOW_ACTION_TYPE_VOID:
> >- break;
> >- default:
> >-			rte_flow_error_set(error, EINVAL,
> >-				RTE_FLOW_ERROR_TYPE_ACTION, actions,
> >- "Invalid action.");
> >- return -rte_errno;
> >- }
> >+ struct ice_flow_engine *engine = NULL;
> >+ struct ice_flow_parser *parser = NULL;
> >+ void *temp;
> >+ TAILQ_FOREACH_SAFE(parser, parser_list, node, temp) {
> >+ if (parser->parse_pattern_action(ad, parser->array,
> >+ parser->array_len, pattern, actions,
> >+ meta, error) < 0)
> >+ continue;
> >+ engine = parser->engine;
> >+ break;
> > }
> >- return 0;
> >+ return engine;
> > }
> >
> > static int
> >-ice_flow_validate(struct rte_eth_dev *dev,
> >- const struct rte_flow_attr *attr,
> >- const struct rte_flow_item pattern[],
> >- const struct rte_flow_action actions[],
> >- struct rte_flow_error *error)
> >+ice_flow_validate_filter(struct rte_eth_dev *dev,
> >+ const struct rte_flow_attr *attr,
> >+ const struct rte_flow_item pattern[],
> >+ const struct rte_flow_action actions[],
> >+ struct ice_flow_engine **engine,
> >+ void **meta,
> >+ struct rte_flow_error *error)
> > {
> >- uint64_t inset = 0;
> > int ret = ICE_ERR_NOT_SUPPORTED;
> >+ struct ice_adapter *ad =
> >+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> >+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> >
> > if (!pattern) {
> > 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> >- NULL, "NULL pattern.");
> >+ NULL, "NULL pattern.");
> > return -rte_errno;
> > }
> >
> > if (!actions) {
> > rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> >- NULL, "NULL action.");
> >+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> >+ NULL, "NULL action.");
> > return -rte_errno;
> > }
> >-
> > if (!attr) {
> > rte_flow_error_set(error, EINVAL,
> >- RTE_FLOW_ERROR_TYPE_ATTR,
> >- NULL, "NULL attribute.");
> >+ RTE_FLOW_ERROR_TYPE_ATTR,
> >+ NULL, "NULL attribute.");
> > return -rte_errno;
> > }
> >
> >- ret = ice_flow_valid_attr(attr, error);
> >+ ret = ice_flow_valid_attr(ad, attr, error);
> > if (ret)
> > return ret;
> >
> >- inset = ice_flow_valid_pattern(pattern, error);
> >- if (!inset)
> >- return -rte_errno;
> >-
> >- ret = ice_flow_valid_inset(pattern, inset, error);
> >- if (ret)
> >- return ret;
> >+ *engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, actions,
> >+ meta, error);
> >+ if (*engine != NULL)
> >+ return 0;
> >+
> >+ switch (ice_pipeline_stage) {
> >+ case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
> >+ case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
> >+ *engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern,
> >+ actions, meta, error);
> >+ break;
> >+ case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
> >+ *engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern,
> >+ actions, meta, error);
> >+ break;
> >+ default:
> >+ return -EINVAL;
> >+ }
> >
> >- ret = ice_flow_valid_action(dev, actions, error);
> >- if (ret)
> >- return ret;
> >+ if (*engine == NULL)
> >+ return -EINVAL;
> >
> > return 0;
> > }
> >
> >+static int
> >+ice_flow_validate(struct rte_eth_dev *dev,
> >+ const struct rte_flow_attr *attr,
> >+ const struct rte_flow_item pattern[],
> >+ const struct rte_flow_action actions[],
> >+ struct rte_flow_error *error)
> >+{
> >+ int ret = ICE_ERR_NOT_SUPPORTED;
> >+ void *meta = NULL;
> >+ struct ice_flow_engine *engine = NULL;
> >+
> >+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
> >+ &engine, &meta, error);
> >+ return ret;
> >+}
> >+
> > static struct rte_flow *
> > ice_flow_create(struct rte_eth_dev *dev,
> >- const struct rte_flow_attr *attr,
> >- const struct rte_flow_item pattern[],
> >- const struct rte_flow_action actions[],
> >- struct rte_flow_error *error)
> >+ const struct rte_flow_attr *attr,
> >+ const struct rte_flow_item pattern[],
> >+ const struct rte_flow_action actions[],
> >+ struct rte_flow_error *error)
> > {
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > struct rte_flow *flow = NULL;
> >- int ret;
> >+ int ret = 0;
> >+ struct ice_adapter *ad =
> >+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> >+ struct ice_flow_engine *engine = NULL;
> >+ void *meta = NULL;
> >
> > flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
> > if (!flow) {
> >@@ -626,65 +488,105 @@ ice_flow_create(struct rte_eth_dev *dev,
> > return flow;
> > }
> >
> >- ret = ice_flow_validate(dev, attr, pattern, actions, error);
> >+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
> >+ &engine, &meta, error);
> > if (ret < 0)
> > goto free_flow;
> >
> >- ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
> >+ if (engine->create == NULL)
> >+ goto free_flow;
> >+
> >+ ret = engine->create(ad, flow, meta, error);
> > if (ret)
> > goto free_flow;
> >
> >+ flow->engine = engine;
> > TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> > return flow;
> >
> > free_flow:
> >- rte_flow_error_set(error, -ret,
> >- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >- "Failed to create flow.");
> >+ PMD_DRV_LOG(ERR, "Failed to create flow");
>
> Why is this change?
Since the framework passes the "error" pointer down to each filter, rte_flow_error_set() is called within each filter (switch/fdir/rss).
If rte_flow_error_set() were called here as well, it would overwrite the error already set by the filter, so PMD_DRV_LOG is used here instead.
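To make that ownership concrete, a filter engine callback would set the precise cause itself, e.g. (a hypothetical engine and helper, shown only as a sketch):

	static int
	ice_dummy_engine_create(struct ice_adapter *ad, struct rte_flow *flow,
			void *meta, struct rte_flow_error *error)
	{
		/* ice_dummy_program_rule() is a hypothetical helper */
		if (ice_dummy_program_rule(ad, flow, meta))
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					"failed to program hardware rule");
		return 0;
	}

The framework then only logs at the PMD level and leaves the caller's error struct untouched.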
>
> > rte_free(flow);
> > return NULL;
> > }
> >
> > static int
> > ice_flow_destroy(struct rte_eth_dev *dev,
> >- struct rte_flow *flow,
> >- struct rte_flow_error *error)
> >+ struct rte_flow *flow,
> >+ struct rte_flow_error *error)
> > {
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> >+ struct ice_adapter *ad =
> >+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> > int ret = 0;
> >
> >- ret = ice_destroy_switch_filter(pf, flow, error);
> >-
> >+ if (!flow || !flow->engine->destroy) {
> >+ rte_flow_error_set(error, EINVAL,
> >+ RTE_FLOW_ERROR_TYPE_HANDLE,
> >+ NULL, "NULL flow or NULL destroy");
> >+ return -rte_errno;
> >+ }
> >+ ret = flow->engine->destroy(ad, flow, error);
> > if (!ret) {
> > TAILQ_REMOVE(&pf->flow_list, flow, node);
> > rte_free(flow);
> >- } else {
> >- rte_flow_error_set(error, -ret,
> >- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >- "Failed to destroy flow.");
> >- }
> >+ } else
> >+ PMD_DRV_LOG(ERR, "Failed to destroy flow");
>
> Ditto.
Ditto.
>
> >
> > return ret;
> > }
> >
> > static int
> > ice_flow_flush(struct rte_eth_dev *dev,
> >- struct rte_flow_error *error)
> >+ struct rte_flow_error *error)
> > {
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> >- struct rte_flow *p_flow;
> >+ struct rte_flow *p_flow = NULL;
> > void *temp;
> > int ret = 0;
> >
> > TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
> > ret = ice_flow_destroy(dev, p_flow, error);
> > if (ret) {
> >-			rte_flow_error_set(error, -ret,
> >-				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> >-				"Failed to flush SW flows.");
> >- return -rte_errno;
> >+ PMD_DRV_LOG(ERR, "Failed to flush flows");
>
> Ditto.
Ditto.
>
>
> Thanks,
> Xiaolong
Thanks
Ying
On 09/09, Wang, Ying A wrote:
[snip]
>> >+ if (ad->devargs.pipeline_mode_support) {
>> >+ if (0 == attr->priority)
>> >+ ice_pipeline_stage =
>> >+ ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
>> >+ else
>> >+ ice_pipeline_stage =
>> >+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
>> >+ } else {
>> >+ ice_pipeline_stage =
>> >+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
>>
>> Do we really need this assignment?
>
>Yes. We use devargs.pipeline_mode_support as a hint to decide which mode to use: 1 for pipeline mode, 0 for non-pipeline mode.
>By default, non-pipeline mode is used, both switch and fdir act as distributors, and switch is fdir's backup.
>In pipeline mode, attr->priority is enabled: 0 selects the permission stage and 1 the distributor stage.
>
I saw that ice_pipeline_stage is already set to ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY
at initialization; do we need to reassign it every time here? The pipeline
mode won't change at runtime, right?
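For illustration, one way to drop the redundant assignment (a sketch, assuming the value set at initialization stays authoritative for non-pipeline mode):

	if (ad->devargs.pipeline_mode_support) {
		ice_pipeline_stage = attr->priority ?
			ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR :
			ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
	} else if (attr->priority) {
		/* Not supported in non-pipeline mode */
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}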
>>
>> >+ /* Not supported */
>> >+ if (attr->priority) {
[snip]
>> > free_flow:
>> >- rte_flow_error_set(error, -ret,
>> >- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>> >- "Failed to create flow.");
>> >+ PMD_DRV_LOG(ERR, "Failed to create flow");
>>
>> Why is this change?
>
>Since the framework passes the "error" pointer down to each filter, rte_flow_error_set() is called within each filter (switch/fdir/rss).
>If rte_flow_error_set() were called here as well, it would overwrite the error already set by the filter, so PMD_DRV_LOG is used here instead.
>
I think it makes sense, thanks for the explanation.
Thanks,
Xiaolong
On 09/09, Wang, Ying A wrote:
>> >+ice_unregister_parser(struct ice_flow_parser *parser,
>> >+ struct ice_adapter *ad)
>> >+{
>> >+ struct ice_pf *pf = &ad->pf;
>> >+ struct ice_parser_list *list;
>> >+ struct ice_flow_parser *p_parser;
>> >+ void *temp;
>> >+
>> >+ switch (parser->stage) {
>> >+ case ICE_FLOW_STAGE_RSS:
>> >+ list = &pf->rss_parser_list;
>> >+ break;
>> >+ case ICE_FLOW_STAGE_PERMISSION:
>> >+ list = &pf->perm_parser_list;
>> >+ break;
>> >+ case ICE_FLOW_STAGE_DISTRIBUTOR:
>> >+ list = &pf->dist_parser_list;
>> >+ break;
>> >+ default:
>> >+ return;
>> >+ }
>>
>> The switch blocks in the above functions are the same; it's better to use a common
>> function to reduce the duplicated code.
>
>The switch blocks in the above two functions differ slightly in their default behavior: one returns -EINVAL, the other just returns, because the register/unregister funcs have different return types. So, can I just keep this format?
>
Duplication is bad, and I think it should be easy to deal with the return type
difference:
struct ice_parser_list *
ice_get_parser_list(struct ice_flow_parser *parser,
struct ice_adapter *ad)
{
struct ice_parser_list *list = NULL;
struct ice_pf *pf = &ad->pf;
switch (parser->stage) {
case ICE_FLOW_STAGE_RSS:
list = &pf->rss_parser_list;
break;
case ICE_FLOW_STAGE_PERMISSION:
list = &pf->perm_parser_list;
break;
case ICE_FLOW_STAGE_DISTRIBUTOR:
list = &pf->dist_parser_list;
break;
default:
break;
}
return list;
}
Then you just need to check its return value: if it's NULL, simply return
-EINVAL on register and directly return on unregister.
Thanks,
Xiaolong
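With that helper in place, the register path could then reduce to something like the following sketch (the unregister path would do the same lookup and simply return when the list is NULL):

	int
	ice_register_parser(struct ice_flow_parser *parser,
			struct ice_adapter *ad)
	{
		struct ice_parser_list *list;

		list = ice_get_parser_list(parser, ad);
		if (list == NULL)
			return -EINVAL;

		if (ad->devargs.pipeline_mode_support) {
			TAILQ_INSERT_TAIL(list, parser, node);
		} else {
			if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH ||
			    parser->engine->type == ICE_FLOW_ENGINE_HASH)
				TAILQ_INSERT_TAIL(list, parser, node);
			else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
				TAILQ_INSERT_HEAD(list, parser, node);
			else
				return -EINVAL;
		}
		return 0;
	}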
> -----Original Message-----
> From: Ye, Xiaolong
> Sent: Monday, September 9, 2019 5:54 PM
> To: Wang, Ying A <ying.a.wang@intel.com>
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; dev@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: Re: [PATCH 2/4] net/ice: rework for generic flow enabling
>
> On 09/09, Wang, Ying A wrote:
> >> >+ice_unregister_parser(struct ice_flow_parser *parser,
> >> >+ struct ice_adapter *ad)
> >> >+{
> >> >+ struct ice_pf *pf = &ad->pf;
> >> >+ struct ice_parser_list *list;
> >> >+ struct ice_flow_parser *p_parser;
> >> >+ void *temp;
> >> >+
> >> >+ switch (parser->stage) {
> >> >+ case ICE_FLOW_STAGE_RSS:
> >> >+ list = &pf->rss_parser_list;
> >> >+ break;
> >> >+ case ICE_FLOW_STAGE_PERMISSION:
> >> >+ list = &pf->perm_parser_list;
> >> >+ break;
> >> >+ case ICE_FLOW_STAGE_DISTRIBUTOR:
> >> >+ list = &pf->dist_parser_list;
> >> >+ break;
> >> >+ default:
> >> >+ return;
> >> >+ }
> >>
> >> The switch blocks in the above functions are the same; it's better to use
> >> a common function to reduce the duplicated code.
> >
> >The switch blocks in the above two functions differ slightly in their default
> >behavior: one returns -EINVAL, the other just returns, because the
> >register/unregister funcs have different return types. So, can I just keep
> >this format?
> >
>
> Duplication is bad, and I think it should be easy to deal with the return type
> difference:
>
> struct ice_parser_list *
> ice_get_parser_list(struct ice_flow_parser *parser,
> struct ice_adapter *ad)
> {
> struct ice_parser_list *list = NULL;
> struct ice_pf *pf = &ad->pf;
>
> switch (parser->stage) {
> case ICE_FLOW_STAGE_RSS:
> list = &pf->rss_parser_list;
> break;
> case ICE_FLOW_STAGE_PERMISSION:
> list = &pf->perm_parser_list;
> break;
> case ICE_FLOW_STAGE_DISTRIBUTOR:
> list = &pf->dist_parser_list;
> break;
> default:
> break;
> }
>
> return list;
> }
>
> Then you just need to check its return value: if it's NULL, simply return -EINVAL
> on register and directly return on unregister.
OK, thanks for your guidance. I will fix it in v2.
>
> Thanks,
> Xiaolong
@@ -15,7 +15,7 @@
#include "base/ice_dcb.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
-#include "ice_switch_filter.h"
+#include "ice_generic_flow.h"
/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
@@ -1677,7 +1677,11 @@ ice_dev_init(struct rte_eth_dev *dev)
/* get base queue pairs index in the device */
ice_base_queue_get(pf);
- TAILQ_INIT(&pf->flow_list);
+ ret = ice_flow_init(ad);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
+ return ret;
+ }
return 0;
@@ -1796,6 +1800,8 @@ ice_dev_close(struct rte_eth_dev *dev)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Since stop will make link down, then the link event will be
* triggered, disable the irq firstly to avoid the port_infoe etc
@@ -1806,6 +1812,8 @@ ice_dev_close(struct rte_eth_dev *dev)
ice_dev_stop(dev);
+ ice_flow_uninit(ad);
+
/* release all queue resource */
ice_free_queues(dev);
@@ -1822,8 +1830,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct rte_flow *p_flow;
ice_dev_close(dev);
@@ -1840,14 +1846,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
/* unregister callback func from eal lib */
rte_intr_callback_unregister(intr_handle,
ice_interrupt_handler, dev);
-
- /* Remove all flows */
- while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
- TAILQ_REMOVE(&pf->flow_list, p_flow, node);
- ice_free_switch_filter_rule(p_flow->rule);
- rte_free(p_flow);
- }
-
return 0;
}
@@ -241,16 +241,14 @@ struct ice_vsi {
bool offset_loaded;
};
-extern const struct rte_flow_ops ice_flow_ops;
-
-/* Struct to store flow created. */
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) node;
- void *rule;
-};
+struct rte_flow;
TAILQ_HEAD(ice_flow_list, rte_flow);
+
+struct ice_flow_parser;
+TAILQ_HEAD(ice_parser_list, ice_flow_parser);
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -278,6 +276,9 @@ struct ice_pf {
bool offset_loaded;
bool adapter_stopped;
struct ice_flow_list flow_list;
+ struct ice_parser_list rss_parser_list;
+ struct ice_parser_list perm_parser_list;
+ struct ice_parser_list dist_parser_list;
};
/**
@@ -17,7 +17,22 @@
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
-#include "ice_switch_filter.h"
+
+/**
+ * Non-pipeline mode, fdir and switch both used as distributor,
+ * fdir used first, switch used as fdir's backup.
+ */
+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
+/*Pipeline mode, switch used at permission stage*/
+#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
+/*Pipeline mode, fdir used at distributor stage*/
+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
+
+static int ice_pipeline_stage =
+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
+
+static struct ice_engine_list engine_list =
+ TAILQ_HEAD_INITIALIZER(engine_list);
static int ice_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -34,17 +49,153 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error);
+static int ice_flow_query_count(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error);
const struct rte_flow_ops ice_flow_ops = {
.validate = ice_flow_validate,
.create = ice_flow_create,
.destroy = ice_flow_destroy,
.flush = ice_flow_flush,
+ .query = ice_flow_query_count,
};
+
+void
+ice_register_flow_engine(struct ice_flow_engine *engine)
+{
+ TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+int
+ice_flow_init(struct ice_adapter *ad)
+{
+ int ret = 0;
+ struct ice_pf *pf = &ad->pf;
+ void *temp;
+ struct ice_flow_engine *engine = NULL;
+
+ TAILQ_INIT(&pf->flow_list);
+ TAILQ_INIT(&pf->rss_parser_list);
+ TAILQ_INIT(&pf->perm_parser_list);
+ TAILQ_INIT(&pf->dist_parser_list);
+
+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (engine->init == NULL)
+ return -EINVAL;
+
+ ret = engine->init(ad);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+void
+ice_flow_uninit(struct ice_adapter *ad)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_flow_engine *engine;
+ struct rte_flow *p_flow;
+ struct ice_flow_parser *p_parser;
+ void *temp;
+
+ TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+ if (engine->uninit)
+ engine->uninit(ad);
+ }
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ if (p_flow->engine->free)
+ p_flow->engine->free(p_flow);
+ rte_free(p_flow);
+ }
+
+ /* Cleanup parser list */
+ while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
+ TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
+
+ while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
+ TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
+
+ while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
+ TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
+}
+
+int
+ice_register_parser(struct ice_flow_parser *parser,
+ struct ice_adapter *ad)
+{
+ struct ice_parser_list *list = NULL;
+ struct ice_pf *pf = &ad->pf;
+
+ switch (parser->stage) {
+ case ICE_FLOW_STAGE_RSS:
+ list = &pf->rss_parser_list;
+ break;
+ case ICE_FLOW_STAGE_PERMISSION:
+ list = &pf->perm_parser_list;
+ break;
+ case ICE_FLOW_STAGE_DISTRIBUTOR:
+ list = &pf->dist_parser_list;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ad->devargs.pipeline_mode_support)
+ TAILQ_INSERT_TAIL(list, parser, node);
+ else {
+ if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
+ || parser->engine->type == ICE_FLOW_ENGINE_HASH)
+ TAILQ_INSERT_TAIL(list, parser, node);
+ else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
+ TAILQ_INSERT_HEAD(list, parser, node);
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void
+ice_unregister_parser(struct ice_flow_parser *parser,
+ struct ice_adapter *ad)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_parser_list *list;
+ struct ice_flow_parser *p_parser;
+ void *temp;
+
+ switch (parser->stage) {
+ case ICE_FLOW_STAGE_RSS:
+ list = &pf->rss_parser_list;
+ break;
+ case ICE_FLOW_STAGE_PERMISSION:
+ list = &pf->perm_parser_list;
+ break;
+ case ICE_FLOW_STAGE_DISTRIBUTOR:
+ list = &pf->dist_parser_list;
+ break;
+ default:
+ return;
+ }
+
+ TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
+ if (p_parser->engine->type == parser->engine->type)
+ TAILQ_REMOVE(list, p_parser, node);
+ }
+
+}
+
static int
-ice_flow_valid_attr(const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+ice_flow_valid_attr(struct ice_adapter *ad,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
/* Must be input direction */
if (!attr->ingress) {
@@ -61,15 +212,25 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr,
attr, "Not support egress.");
return -rte_errno;
}
-
- /* Not supported */
- if (attr->priority) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- attr, "Not support priority.");
- return -rte_errno;
+ /* Check pipeline mode support to set classification stage */
+ if (ad->devargs.pipeline_mode_support) {
+ if (0 == attr->priority)
+ ice_pipeline_stage =
+ ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
+ else
+ ice_pipeline_stage =
+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
+ } else {
+ ice_pipeline_stage =
+ ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
}
-
/* Not supported */
if (attr->group) {
rte_flow_error_set(error, EINVAL,
@@ -102,7 +263,7 @@ ice_find_first_item(const struct rte_flow_item *item, bool is_void)
/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
- const struct rte_flow_item *pattern)
+ const struct rte_flow_item *pattern)
{
uint32_t cpy_count = 0;
const struct rte_flow_item *pb = pattern, *pe = pattern;
@@ -124,7 +285,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
items += cpy_count;
if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
- pb = pe;
break;
}
@@ -151,11 +311,15 @@ ice_match_pattern(enum rte_flow_item_type *item_array,
item->type == RTE_FLOW_ITEM_TYPE_END);
}
-static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+struct ice_pattern_match_item *
+ice_search_pattern_match_item(const struct rte_flow_item pattern[],
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
struct rte_flow_error *error)
{
uint16_t i = 0;
- uint64_t inset;
+ struct ice_pattern_match_item *pattern_match_item;
+	/* needs to be freed by each filter */
struct rte_flow_item *items; /* used for pattern without VOID items */
uint32_t item_num = 0; /* non-void item number */
@@ -172,451 +336,149 @@ static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
if (!items) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL, "No memory for PMD internal items.");
- return -ENOMEM;
+ return NULL;
+ }
+ pattern_match_item = rte_zmalloc("ice_pattern_match_item",
+ sizeof(struct ice_pattern_match_item), 0);
+ if (!pattern_match_item) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+ return NULL;
}
-
ice_pattern_skip_void_item(items, pattern);
- for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
- if (ice_match_pattern(ice_supported_patterns[i].items,
+ for (i = 0; i < array_len; i++)
+ if (ice_match_pattern(array[i].pattern_list,
items)) {
- inset = ice_supported_patterns[i].sw_fields;
+ pattern_match_item->input_set_mask =
+ array[i].input_set_mask;
+ pattern_match_item->pattern_list =
+ array[i].pattern_list;
+ pattern_match_item->meta = array[i].meta;
rte_free(items);
- return inset;
+ return pattern_match_item;
}
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
pattern, "Unsupported pattern");
rte_free(items);
- return 0;
-}
-
-static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
- struct rte_flow_error *error)
-{
- const struct rte_flow_item *item = pattern;
- const struct rte_flow_item_eth *eth_spec, *eth_mask;
- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
- const struct rte_flow_item_udp *udp_spec, *udp_mask;
- const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
- const struct rte_flow_item_icmp *icmp_mask;
- const struct rte_flow_item_icmp6 *icmp6_mask;
- const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
- const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
- enum rte_flow_item_type item_type;
- uint8_t ipv6_addr_mask[16] = {
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- uint64_t input_set = ICE_INSET_NONE;
- bool is_tunnel = false;
-
- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->last) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Not support range");
- return 0;
- }
- item_type = item->type;
- switch (item_type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = item->spec;
- eth_mask = item->mask;
-
- if (eth_spec && eth_mask) {
-			if (rte_is_broadcast_ether_addr(&eth_mask->src))
-				input_set |= ICE_INSET_SMAC;
-			if (rte_is_broadcast_ether_addr(&eth_mask->dst))
- input_set |= ICE_INSET_DMAC;
- if (eth_mask->type == RTE_BE16(0xffff))
- input_set |= ICE_INSET_ETHERTYPE;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- ipv4_spec = item->spec;
- ipv4_mask = item->mask;
-
- if (!(ipv4_spec && ipv4_mask))
- break;
-
- /* Check IPv4 mask and update input set */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv4 mask.");
- return 0;
- }
-
- if (is_tunnel) {
- if (ipv4_mask->hdr.src_addr == UINT32_MAX)
- input_set |= ICE_INSET_TUN_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
- input_set |= ICE_INSET_TUN_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
- input_set |= ICE_INSET_TUN_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
- input_set |= ICE_INSET_TUN_IPV4_PROTO;
- } else {
- if (ipv4_mask->hdr.src_addr == UINT32_MAX)
- input_set |= ICE_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
- input_set |= ICE_INSET_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
- input_set |= ICE_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
- input_set |= ICE_INSET_IPV4_PROTO;
- if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
- input_set |= ICE_INSET_IPV4_TOS;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ipv6_spec = item->spec;
- ipv6_mask = item->mask;
-
- if (!(ipv6_spec && ipv6_mask))
- break;
-
- if (ipv6_mask->hdr.payload_len) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv6 mask");
- return 0;
- }
-
- if (is_tunnel) {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |= ICE_INSET_TUN_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
- input_set |= ICE_INSET_TUN_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |= ICE_INSET_TUN_IPV6_PROTO;
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
- input_set |= ICE_INSET_TUN_IPV6_TTL;
- } else {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |= ICE_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
- input_set |= ICE_INSET_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |= ICE_INSET_IPV6_PROTO;
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
- input_set |= ICE_INSET_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
- rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- input_set |= ICE_INSET_IPV6_TOS;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- udp_spec = item->spec;
- udp_mask = item->mask;
-
- if (!(udp_spec && udp_mask))
- break;
-
- /* Check UDP mask and update input set*/
- if (udp_mask->hdr.dgram_len ||
- udp_mask->hdr.dgram_cksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
- return 0;
- }
-
- if (is_tunnel) {
- if (udp_mask->hdr.src_port == UINT16_MAX)
- input_set |= ICE_INSET_TUN_SRC_PORT;
- if (udp_mask->hdr.dst_port == UINT16_MAX)
- input_set |= ICE_INSET_TUN_DST_PORT;
- } else {
- if (udp_mask->hdr.src_port == UINT16_MAX)
- input_set |= ICE_INSET_SRC_PORT;
- if (udp_mask->hdr.dst_port == UINT16_MAX)
- input_set |= ICE_INSET_DST_PORT;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- tcp_spec = item->spec;
- tcp_mask = item->mask;
-
- if (!(tcp_spec && tcp_mask))
- break;
-
- /* Check TCP mask and update input set */
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TCP mask");
- return 0;
- }
-
- if (is_tunnel) {
- if (tcp_mask->hdr.src_port == UINT16_MAX)
- input_set |= ICE_INSET_TUN_SRC_PORT;
- if (tcp_mask->hdr.dst_port == UINT16_MAX)
- input_set |= ICE_INSET_TUN_DST_PORT;
- } else {
- if (tcp_mask->hdr.src_port == UINT16_MAX)
- input_set |= ICE_INSET_SRC_PORT;
- if (tcp_mask->hdr.dst_port == UINT16_MAX)
- input_set |= ICE_INSET_DST_PORT;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_SCTP:
- sctp_spec = item->spec;
- sctp_mask = item->mask;
-
- if (!(sctp_spec && sctp_mask))
- break;
-
- /* Check SCTP mask and update input set */
- if (sctp_mask->hdr.cksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid SCTP mask");
- return 0;
- }
-
- if (is_tunnel) {
- if (sctp_mask->hdr.src_port == UINT16_MAX)
- input_set |= ICE_INSET_TUN_SRC_PORT;
- if (sctp_mask->hdr.dst_port == UINT16_MAX)
- input_set |= ICE_INSET_TUN_DST_PORT;
- } else {
- if (sctp_mask->hdr.src_port == UINT16_MAX)
- input_set |= ICE_INSET_SRC_PORT;
- if (sctp_mask->hdr.dst_port == UINT16_MAX)
- input_set |= ICE_INSET_DST_PORT;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_ICMP:
- icmp_mask = item->mask;
- if (icmp_mask->hdr.icmp_code ||
- icmp_mask->hdr.icmp_cksum ||
- icmp_mask->hdr.icmp_ident ||
- icmp_mask->hdr.icmp_seq_nb) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid ICMP mask");
- return 0;
- }
-
- if (icmp_mask->hdr.icmp_type == UINT8_MAX)
- input_set |= ICE_INSET_ICMP;
- break;
- case RTE_FLOW_ITEM_TYPE_ICMP6:
- icmp6_mask = item->mask;
- if (icmp6_mask->code ||
- icmp6_mask->checksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid ICMP6 mask");
- return 0;
- }
-
- if (icmp6_mask->type == UINT8_MAX)
- input_set |= ICE_INSET_ICMP6;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan_spec = item->spec;
- vxlan_mask = item->mask;
- /* Check if VXLAN item is used to describe protocol.
- * If yes, both spec and mask should be NULL.
- * If no, both spec and mask shouldn't be NULL.
- */
- if ((!vxlan_spec && vxlan_mask) ||
- (vxlan_spec && !vxlan_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid VXLAN item");
- return 0;
- }
- if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
- vxlan_mask->vni[1] == UINT8_MAX &&
- vxlan_mask->vni[2] == UINT8_MAX)
- input_set |= ICE_INSET_TUN_ID;
- is_tunnel = 1;
-
- break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- nvgre_spec = item->spec;
- nvgre_mask = item->mask;
- /* Check if NVGRE item is used to describe protocol.
- * If yes, both spec and mask should be NULL.
- * If no, both spec and mask shouldn't be NULL.
- */
- if ((!nvgre_spec && nvgre_mask) ||
- (nvgre_spec && !nvgre_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid NVGRE item");
- return 0;
- }
- if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
- nvgre_mask->tni[1] == UINT8_MAX &&
- nvgre_mask->tni[2] == UINT8_MAX)
- input_set |= ICE_INSET_TUN_ID;
- is_tunnel = 1;
-
- break;
- case RTE_FLOW_ITEM_TYPE_VOID:
- break;
- default:
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid pattern");
- break;
- }
- }
- return input_set;
-}
-
-static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
- uint64_t inset, struct rte_flow_error *error)
-{
- uint64_t fields;
-
- /* get valid field */
- fields = ice_get_flow_field(pattern, error);
- if (!fields || fields & (~inset)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
- pattern,
- "Invalid input set");
- return -rte_errno;
- }
-
- return 0;
+ rte_free(pattern_match_item);
+ return NULL;
}
-static int ice_flow_valid_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *actions,
- struct rte_flow_error *error)
+static struct ice_flow_engine *
+ice_parse_engine(struct ice_adapter *ad,
+ struct ice_parser_list *parser_list,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
{
- const struct rte_flow_action_queue *act_q;
- uint16_t queue;
- const struct rte_flow_action *action;
- for (action = actions; action->type !=
- RTE_FLOW_ACTION_TYPE_END; action++) {
- switch (action->type) {
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- act_q = action->conf;
- queue = act_q->index;
- if (queue >= dev->data->nb_rx_queues) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "Invalid queue ID for"
- " switch filter.");
- return -rte_errno;
- }
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- default:
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, actions,
- "Invalid action.");
- return -rte_errno;
- }
+ struct ice_flow_engine *engine = NULL;
+ struct ice_flow_parser *parser = NULL;
+ void *temp;
+ TAILQ_FOREACH_SAFE(parser, parser_list, node, temp) {
+ if (parser->parse_pattern_action(ad, parser->array,
+ parser->array_len, pattern, actions,
+ meta, error) < 0)
+ continue;
+ engine = parser->engine;
+ break;
}
- return 0;
+ return engine;
}
static int
-ice_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+ice_flow_validate_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ice_flow_engine **engine,
+ void **meta,
+ struct rte_flow_error *error)
{
- uint64_t inset = 0;
int ret = ICE_ERR_NOT_SUPPORTED;
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
if (!pattern) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
- NULL, "NULL pattern.");
+ NULL, "NULL pattern.");
return -rte_errno;
}
if (!actions) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_NUM,
- NULL, "NULL action.");
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
return -rte_errno;
}
-
if (!attr) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR,
- NULL, "NULL attribute.");
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
return -rte_errno;
}
- ret = ice_flow_valid_attr(attr, error);
+ ret = ice_flow_valid_attr(ad, attr, error);
if (ret)
return ret;
- inset = ice_flow_valid_pattern(pattern, error);
- if (!inset)
- return -rte_errno;
-
- ret = ice_flow_valid_inset(pattern, inset, error);
- if (ret)
- return ret;
+ *engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, actions,
+ meta, error);
+ if (*engine != NULL)
+ return 0;
+
+ switch (ice_pipeline_stage) {
+ case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
+ case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
+ *engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern,
+ actions, meta, error);
+ break;
+ case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
+ *engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern,
+ actions, meta, error);
+ break;
+ default:
+ return -EINVAL;
+ }
- ret = ice_flow_valid_action(dev, actions, error);
- if (ret)
- return ret;
+ if (*engine == NULL)
+ return -EINVAL;
return 0;
}
+static int
+ice_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret = ICE_ERR_NOT_SUPPORTED;
+ void *meta = NULL;
+ struct ice_flow_engine *engine = NULL;
+
+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
+ &engine, &meta, error);
+ return ret;
+}
+
static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_flow *flow = NULL;
- int ret;
+ int ret = 0;
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct ice_flow_engine *engine = NULL;
+ void *meta = NULL;
flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
if (!flow) {
@@ -626,65 +488,105 @@ ice_flow_create(struct rte_eth_dev *dev,
return flow;
}
- ret = ice_flow_validate(dev, attr, pattern, actions, error);
+ ret = ice_flow_validate_filter(dev, attr, pattern, actions,
+ &engine, &meta, error);
if (ret < 0)
goto free_flow;
- ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+ if (engine->create == NULL)
+ goto free_flow;
+
+ ret = engine->create(ad, flow, meta, error);
if (ret)
goto free_flow;
+ flow->engine = engine;
TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
return flow;
free_flow:
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to create flow.");
+ PMD_DRV_LOG(ERR, "Failed to create flow");
rte_free(flow);
return NULL;
}
static int
ice_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
int ret = 0;
- ret = ice_destroy_switch_filter(pf, flow, error);
-
+ if (!flow || !flow->engine->destroy) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "NULL flow or NULL destroy");
+ return -rte_errno;
+ }
+ ret = flow->engine->destroy(ad, flow, error);
if (!ret) {
TAILQ_REMOVE(&pf->flow_list, flow, node);
rte_free(flow);
- } else {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to destroy flow.");
- }
+ } else
+ PMD_DRV_LOG(ERR, "Failed to destroy flow");
return ret;
}
static int
ice_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
+ struct rte_flow_error *error)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct rte_flow *p_flow;
+ struct rte_flow *p_flow = NULL;
void *temp;
int ret = 0;
TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
ret = ice_flow_destroy(dev, p_flow, error);
if (ret) {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to flush SW flows.");
- return -rte_errno;
+ PMD_DRV_LOG(ERR, "Failed to flush flows");
+ return -EINVAL;
}
}
return ret;
}
+
+static int
+ice_flow_query_count(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret = -EINVAL;
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (!flow || !flow->engine->query) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "NULL flow or NULL query");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow->engine->query(ad, flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ return ret;
+}
@@ -7,615 +7,177 @@
#include <rte_flow_driver.h>
-struct ice_flow_pattern {
- enum rte_flow_item_type *items;
- uint64_t sw_fields;
-};
-
-#define ICE_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define ICE_INSET_SMAC 0x0000000000000001ULL
-#define ICE_INSET_DMAC 0x0000000000000002ULL
-#define ICE_INSET_ETHERTYPE 0x0000000000000020ULL
-
-/* bit 8 ~ bit 15 */
-#define ICE_INSET_IPV4_SRC 0x0000000000000100ULL
-#define ICE_INSET_IPV4_DST 0x0000000000000200ULL
-#define ICE_INSET_IPV6_SRC 0x0000000000000400ULL
-#define ICE_INSET_IPV6_DST 0x0000000000000800ULL
-#define ICE_INSET_SRC_PORT 0x0000000000001000ULL
-#define ICE_INSET_DST_PORT 0x0000000000002000ULL
-#define ICE_INSET_ARP 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define ICE_INSET_IPV4_TOS 0x0000000000010000ULL
-#define ICE_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define ICE_INSET_IPV4_TTL 0x0000000000040000ULL
-#define ICE_INSET_IPV6_TOS 0x0000000000100000ULL
-#define ICE_INSET_IPV6_PROTO 0x0000000000200000ULL
-#define ICE_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define ICE_INSET_ICMP 0x0000000001000000ULL
-#define ICE_INSET_ICMP6 0x0000000002000000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define ICE_INSET_TUN_SMAC 0x0000000100000000ULL
-#define ICE_INSET_TUN_DMAC 0x0000000200000000ULL
-#define ICE_INSET_TUN_IPV4_SRC 0x0000000400000000ULL
-#define ICE_INSET_TUN_IPV4_DST 0x0000000800000000ULL
-#define ICE_INSET_TUN_IPV4_TTL 0x0000001000000000ULL
-#define ICE_INSET_TUN_IPV4_PROTO 0x0000002000000000ULL
-#define ICE_INSET_TUN_IPV6_SRC 0x0000004000000000ULL
-#define ICE_INSET_TUN_IPV6_DST 0x0000008000000000ULL
-#define ICE_INSET_TUN_IPV6_TTL 0x0000010000000000ULL
-#define ICE_INSET_TUN_IPV6_PROTO 0x0000020000000000ULL
-#define ICE_INSET_TUN_SRC_PORT 0x0000040000000000ULL
-#define ICE_INSET_TUN_DST_PORT 0x0000080000000000ULL
-#define ICE_INSET_TUN_ID 0x0000100000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-#define ICE_FLAG_VLAN_INNER 0x00000001ULL
-#define ICE_FLAG_VLAN_OUTER 0x00000002ULL
-
-#define INSET_ETHER ( \
- ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
-#define INSET_MAC_IPV4 ( \
- ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
- ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
-#define INSET_MAC_IPV4_L4 ( \
- ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
- ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
- ICE_INSET_SRC_PORT)
-#define INSET_MAC_IPV4_ICMP ( \
- ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
- ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
-#define INSET_MAC_IPV6 ( \
- ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
- ICE_INSET_IPV6_TOS | ICE_INSET_IPV6_HOP_LIMIT)
-#define INSET_MAC_IPV6_L4 ( \
- ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
- ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | \
- ICE_INSET_DST_PORT | ICE_INSET_SRC_PORT)
-#define INSET_MAC_IPV6_ICMP ( \
- ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
- ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | ICE_INSET_ICMP6)
-#define INSET_TUNNEL_IPV4_TYPE1 ( \
- ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
- ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
- ICE_INSET_TUN_ID)
-#define INSET_TUNNEL_IPV4_TYPE2 ( \
- ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
- ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
- ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \
- ICE_INSET_TUN_ID)
-#define INSET_TUNNEL_IPV4_TYPE3 ( \
- ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
- ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP | \
- ICE_INSET_TUN_ID)
-#define INSET_TUNNEL_IPV6_TYPE1 ( \
- ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
- ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
- ICE_INSET_TUN_ID)
-#define INSET_TUNNEL_IPV6_TYPE2 ( \
- ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
- ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
- ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \
- ICE_INSET_TUN_ID)
-#define INSET_TUNNEL_IPV6_TYPE3 ( \
- ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
- ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6 | \
- ICE_INSET_TUN_ID)
-
-/* L2 */
-static enum rte_flow_item_type pattern_ethertype[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* non-tunnel IPv4 */
-static enum rte_flow_item_type pattern_ipv4[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_icmp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_ICMP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* non-tunnel IPv6 */
-static enum rte_flow_item_type pattern_ipv6[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv6_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv6_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv6_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_ICMP6,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* IPv4 VXLAN IPv4 */
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_ICMP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* IPv4 VXLAN MAC IPv4 */
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_ICMP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* IPv4 VXLAN IPv6 */
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_ICMP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* IPv4 VXLAN MAC IPv6 */
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_ICMP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* IPv4 NVGRE IPv4 */
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_ICMP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* IPv4 NVGRE MAC IPv4 */
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_ICMP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* IPv4 NVGRE IPv6 */
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-
-/* IPv4 NVGRE MAC IPv6 */
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_NVGRE,
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static struct ice_flow_pattern ice_supported_patterns[] = {
- {pattern_ethertype, INSET_ETHER},
- {pattern_ipv4, INSET_MAC_IPV4},
- {pattern_ipv4_udp, INSET_MAC_IPV4_L4},
- {pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
- {pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
- {pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
- {pattern_ipv6, INSET_MAC_IPV6},
- {pattern_ipv6_udp, INSET_MAC_IPV6_L4},
- {pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
- {pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
- {pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
- {pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
- {pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
- {pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
- {pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
- {pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
- {pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
- {pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
- {pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
- {pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
- {pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
- {pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
- {pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
- {pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
- {pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
- {pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
- {pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
- {pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
-};
+#define ICE_INSET_NONE 0ULL
+
+/* bit 0 ~ bit 11 */
+#define ICE_INSET_SMAC (1ULL << 0)
+#define ICE_INSET_DMAC (1ULL << 1)
+#define ICE_INSET_VLAN_INNER (1ULL << 2)
+#define ICE_INSET_VLAN_OUTER (1ULL << 3)
+#define ICE_INSET_ETHERTYPE (1ULL << 4)
+#define ICE_INSET_ARP_SHA (1ULL << 5)
+#define ICE_INSET_ARP_SPA (1ULL << 6)
+#define ICE_INSET_ARP_THA (1ULL << 7)
+#define ICE_INSET_ARP_TPA (1ULL << 8)
+#define ICE_INSET_ARP_OP (1ULL << 9)
+
+/* bit 12 ~ bit 23 */
+#define ICE_INSET_IPV4_SRC (1ULL << 12)
+#define ICE_INSET_IPV4_DST (1ULL << 13)
+#define ICE_INSET_IPV4_TOS (1ULL << 14)
+#define ICE_INSET_IPV4_PROTO (1ULL << 15)
+#define ICE_INSET_IPV4_TTL (1ULL << 16)
+#define ICE_INSET_IPV6_SRC (1ULL << 17)
+#define ICE_INSET_IPV6_DST (1ULL << 18)
+#define ICE_INSET_IPV6_NEXT_HDR (1ULL << 19)
+#define ICE_INSET_IPV6_HOP_LIMIT (1ULL << 20)
+#define ICE_INSET_IPV6_TC (1ULL << 21)
+#define ICE_INSET_TCP_FLAGS (1ULL << 22)
+
+/* bit 24 ~ bit 35 */
+#define ICE_INSET_ICMP_TYPE (1ULL << 24)
+#define ICE_INSET_ICMP_CODE (1ULL << 25)
+#define ICE_INSET_ICMP6_TYPE (1ULL << 26)
+#define ICE_INSET_ICMP6_CODE (1ULL << 27)
+#define ICE_INSET_TCP_SRC_PORT (1ULL << 28)
+#define ICE_INSET_TCP_DST_PORT (1ULL << 29)
+#define ICE_INSET_UDP_SRC_PORT (1ULL << 30)
+#define ICE_INSET_UDP_DST_PORT (1ULL << 31)
+#define ICE_INSET_SCTP_SRC_PORT (1ULL << 32)
+#define ICE_INSET_SCTP_DST_PORT (1ULL << 33)
+#define ICE_INSET_ICMP_SRC_PORT (1ULL << 34)
+#define ICE_INSET_ICMP_DST_PORT (1ULL << 35)
+
+/* bit 36 ~ bit 59, tunnel fields */
+#define ICE_INSET_TUN_SMAC (1ULL << 36)
+#define ICE_INSET_TUN_DMAC (1ULL << 37)
+#define ICE_INSET_TUN_IPV4_SRC (1ULL << 38)
+#define ICE_INSET_TUN_IPV4_DST (1ULL << 39)
+#define ICE_INSET_TUN_IPV4_TTL (1ULL << 40)
+#define ICE_INSET_TUN_IPV4_PROTO (1ULL << 41)
+#define ICE_INSET_TUN_IPV4_TOS (1ULL << 42)
+#define ICE_INSET_TUN_IPV6_SRC (1ULL << 43)
+#define ICE_INSET_TUN_IPV6_DST (1ULL << 44)
+#define ICE_INSET_TUN_IPV6_HOP_LIMIT (1ULL << 45)
+#define ICE_INSET_TUN_IPV6_NEXT_HDR (1ULL << 46)
+#define ICE_INSET_TUN_IPV6_TC (1ULL << 47)
+#define ICE_INSET_TUN_SRC_PORT (1ULL << 48)
+#define ICE_INSET_TUN_DST_PORT (1ULL << 49)
+#define ICE_INSET_TUN_ICMP_TYPE (1ULL << 50)
+#define ICE_INSET_TUN_ICMP_CODE (1ULL << 51)
+#define ICE_INSET_TUN_ICMP6_TYPE (1ULL << 52)
+#define ICE_INSET_TUN_ICMP6_CODE (1ULL << 53)
+#define ICE_INSET_TUN_ID (1ULL << 54)
+#define ICE_INSET_TUN_TYPE (1ULL << 55)
+#define ICE_INSET_GTPU_TEID (1ULL << 56)
+#define ICE_INSET_GTPU_QFI (1ULL << 57)
+#define ICE_INSET_GTP_EH_PDU (1ULL << 58)
+#define ICE_INSET_TUN_TCP_FLAGS (1ULL << 59)
+
+/* bit 60 ~ bit 63 */
+#define ICE_INSET_LAST_ETHER_TYPE (1ULL << 60)
+
+struct ice_adapter;
+
+extern const struct rte_flow_ops ice_flow_ops;
+
+/* Engine types. */
+enum ice_flow_engine_type {
+ ICE_FLOW_ENGINE_NONE = 0,
+ ICE_FLOW_ENGINE_FDIR,
+ ICE_FLOW_ENGINE_SWITCH,
+ ICE_FLOW_ENGINE_HASH,
+ ICE_FLOW_ENGINE_ACL,
+ ICE_FLOW_ENGINE_MAX,
+};
+
+/**
+ * Classification stages.
+ * In non-pipeline mode there are two classification stages:
+ * Distributor/RSS.
+ * In pipeline mode there are three classification stages:
+ * Permission/Distributor/RSS.
+ */
+enum ice_flow_classification_stage {
+ ICE_FLOW_STAGE_NONE = 0,
+ ICE_FLOW_STAGE_RSS,
+ ICE_FLOW_STAGE_PERMISSION,
+ ICE_FLOW_STAGE_DISTRIBUTOR,
+ ICE_FLOW_STAGE_MAX,
+};
+
+/* Pattern match item: a supported pattern and its input set. */
+struct ice_pattern_match_item {
+ enum rte_flow_item_type *pattern_list;
+ /* pattern_list must end with RTE_FLOW_ITEM_TYPE_END */
+ uint64_t input_set_mask;
+ uint64_t meta;
+};
+
+typedef int (*engine_init_t)(struct ice_adapter *ad);
+typedef void (*engine_uninit_t)(struct ice_adapter *ad);
+typedef int (*engine_create_t)(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ void *data,
+ struct rte_flow_error *error);
+typedef void (*engine_free_t)(struct rte_flow *flow);
+typedef int (*parse_pattern_action_t)(struct ice_adapter *ad,
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error);
+
+/* Struct to store a registered flow engine. */
+struct ice_flow_engine {
+ TAILQ_ENTRY(ice_flow_engine) node;
+ engine_init_t init;
+ engine_uninit_t uninit;
+ engine_create_t create;
+ engine_destroy_t destroy;
+ engine_query_t query;
+ engine_free_t free;
+ enum ice_flow_engine_type type;
+};
+TAILQ_HEAD(ice_engine_list, ice_flow_engine);
+
+/* Struct to store a created flow. */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ struct ice_flow_engine *engine;
+ void *rule;
+};
+
+/* Struct to store a registered flow parser. */
+struct ice_flow_parser {
+ TAILQ_ENTRY(ice_flow_parser) node;
+ struct ice_flow_engine *engine;
+ struct ice_pattern_match_item *array;
+ uint32_t array_len;
+ parse_pattern_action_t parse_pattern_action;
+ enum ice_flow_classification_stage stage;
+};
+
+void ice_register_flow_engine(struct ice_flow_engine *engine);
+int ice_flow_init(struct ice_adapter *ad);
+void ice_flow_uninit(struct ice_adapter *ad);
+int ice_register_parser(struct ice_flow_parser *parser,
+ struct ice_adapter *ad);
+void ice_unregister_parser(struct ice_flow_parser *parser,
+ struct ice_adapter *ad);
+struct ice_pattern_match_item *
+ice_search_pattern_match_item(
+ const struct rte_flow_item pattern[],
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ struct rte_flow_error *error);
#endif
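To show how the header above is consumed, a parser pairs each pattern_list with an input_set_mask built from the ICE_INSET_* bits and registers for one classification stage. A hedged sketch; dummy_parse and dummy_engine are placeholders, not code from this series:

/* Illustrative only: a one-entry pattern table for this engine. */
static enum rte_flow_item_type dummy_pattern_eth_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static struct ice_pattern_match_item dummy_array[] = {
	/* pattern_list, input_set_mask, meta */
	{dummy_pattern_eth_ipv4,
	 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_PROTO,
	 ICE_INSET_NONE},
};

static struct ice_flow_parser dummy_parser = {
	.engine = &dummy_engine, /* hypothetical engine */
	.array = dummy_array,
	.array_len = RTE_DIM(dummy_array),
	.parse_pattern_action = dummy_parse, /* hypothetical callback */
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

/* Typically registered from the engine's init callback: */
static int
dummy_engine_init(struct ice_adapter *ad)
{
	return ice_register_parser(&dummy_parser, ad);
}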
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -2,515 +2,4 @@
* Copyright(c) 2019 Intel Corporation
*/
-#include <sys/queue.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <rte_debug.h>
-#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_log.h>
-#include <rte_malloc.h>
-#include <rte_eth_ctrl.h>
-#include <rte_tailq.h>
-#include <rte_flow_driver.h>
-
-#include "ice_logs.h"
-#include "base/ice_type.h"
-#include "ice_switch_filter.h"
-
-static int
-ice_parse_switch_filter(const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct ice_adv_lkup_elem *list,
- uint16_t *lkups_num,
- enum ice_sw_tunnel_type tun_type)
-{
- const struct rte_flow_item *item = pattern;
- enum rte_flow_item_type item_type;
- const struct rte_flow_item_eth *eth_spec, *eth_mask;
- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
- const struct rte_flow_item_udp *udp_spec, *udp_mask;
- const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
- const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
- const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
- uint16_t j, t = 0;
- uint16_t tunnel_valid = 0;
-
- for (item = pattern; item->type !=
- RTE_FLOW_ITEM_TYPE_END; item++) {
- item_type = item->type;
-
- switch (item_type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = item->spec;
- eth_mask = item->mask;
- if (eth_spec && eth_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_MAC_OFOS : ICE_MAC_IL;
- struct ice_ether_hdr *h;
- struct ice_ether_hdr *m;
- uint16_t i = 0;
- h = &list[t].h_u.eth_hdr;
- m = &list[t].m_u.eth_hdr;
- for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ==
- UINT8_MAX) {
- h->src_addr[j] =
- eth_spec->src.addr_bytes[j];
- m->src_addr[j] =
- eth_mask->src.addr_bytes[j];
- i = 1;
- }
- if (eth_mask->dst.addr_bytes[j] ==
- UINT8_MAX) {
- h->dst_addr[j] =
- eth_spec->dst.addr_bytes[j];
- m->dst_addr[j] =
- eth_mask->dst.addr_bytes[j];
- i = 1;
- }
- }
- if (i)
- t++;
- if (eth_mask->type == UINT16_MAX) {
- list[t].type = ICE_ETYPE_OL;
- list[t].h_u.ethertype.ethtype_id =
- eth_spec->type;
- list[t].m_u.ethertype.ethtype_id =
- UINT16_MAX;
- t++;
- }
- } else if (!eth_spec && !eth_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_MAC_OFOS : ICE_MAC_IL;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_IPV4:
- ipv4_spec = item->spec;
- ipv4_mask = item->mask;
- if (ipv4_spec && ipv4_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
- list[t].h_u.ipv4_hdr.src_addr =
- ipv4_spec->hdr.src_addr;
- list[t].m_u.ipv4_hdr.src_addr =
- UINT32_MAX;
- }
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
- list[t].h_u.ipv4_hdr.dst_addr =
- ipv4_spec->hdr.dst_addr;
- list[t].m_u.ipv4_hdr.dst_addr =
- UINT32_MAX;
- }
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
- list[t].h_u.ipv4_hdr.time_to_live =
- ipv4_spec->hdr.time_to_live;
- list[t].m_u.ipv4_hdr.time_to_live =
- UINT8_MAX;
- }
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
- list[t].h_u.ipv4_hdr.protocol =
- ipv4_spec->hdr.next_proto_id;
- list[t].m_u.ipv4_hdr.protocol =
- UINT8_MAX;
- }
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX) {
- list[t].h_u.ipv4_hdr.tos =
- ipv4_spec->hdr.type_of_service;
- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
- }
- t++;
- } else if (!ipv4_spec && !ipv4_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ipv6_spec = item->spec;
- ipv6_mask = item->mask;
- if (ipv6_spec && ipv6_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV6_OFOS : ICE_IPV6_IL;
- struct ice_ipv6_hdr *f;
- struct ice_ipv6_hdr *s;
- f = &list[t].h_u.ipv6_hdr;
- s = &list[t].m_u.ipv6_hdr;
- for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j] ==
- UINT8_MAX) {
- f->src_addr[j] =
- ipv6_spec->hdr.src_addr[j];
- s->src_addr[j] =
- ipv6_mask->hdr.src_addr[j];
- }
- if (ipv6_mask->hdr.dst_addr[j] ==
- UINT8_MAX) {
- f->dst_addr[j] =
- ipv6_spec->hdr.dst_addr[j];
- s->dst_addr[j] =
- ipv6_mask->hdr.dst_addr[j];
- }
- }
- if (ipv6_mask->hdr.proto == UINT8_MAX) {
- f->next_hdr =
- ipv6_spec->hdr.proto;
- s->next_hdr = UINT8_MAX;
- }
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
- f->hop_limit =
- ipv6_spec->hdr.hop_limits;
- s->hop_limit = UINT8_MAX;
- }
- t++;
- } else if (!ipv6_spec && !ipv6_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_UDP:
- udp_spec = item->spec;
- udp_mask = item->mask;
- if (udp_spec && udp_mask) {
- if (tun_type == ICE_SW_TUN_VXLAN &&
- tunnel_valid == 0)
- list[t].type = ICE_UDP_OF;
- else
- list[t].type = ICE_UDP_ILOS;
- if (udp_mask->hdr.src_port == UINT16_MAX) {
- list[t].h_u.l4_hdr.src_port =
- udp_spec->hdr.src_port;
- list[t].m_u.l4_hdr.src_port =
- udp_mask->hdr.src_port;
- }
- if (udp_mask->hdr.dst_port == UINT16_MAX) {
- list[t].h_u.l4_hdr.dst_port =
- udp_spec->hdr.dst_port;
- list[t].m_u.l4_hdr.dst_port =
- udp_mask->hdr.dst_port;
- }
- t++;
- } else if (!udp_spec && !udp_mask) {
- list[t].type = ICE_UDP_ILOS;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_TCP:
- tcp_spec = item->spec;
- tcp_mask = item->mask;
- if (tcp_spec && tcp_mask) {
- list[t].type = ICE_TCP_IL;
- if (tcp_mask->hdr.src_port == UINT16_MAX) {
- list[t].h_u.l4_hdr.src_port =
- tcp_spec->hdr.src_port;
- list[t].m_u.l4_hdr.src_port =
- tcp_mask->hdr.src_port;
- }
- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
- list[t].h_u.l4_hdr.dst_port =
- tcp_spec->hdr.dst_port;
- list[t].m_u.l4_hdr.dst_port =
- tcp_mask->hdr.dst_port;
- }
- t++;
- } else if (!tcp_spec && !tcp_mask) {
- list[t].type = ICE_TCP_IL;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_SCTP:
- sctp_spec = item->spec;
- sctp_mask = item->mask;
- if (sctp_spec && sctp_mask) {
- list[t].type = ICE_SCTP_IL;
- if (sctp_mask->hdr.src_port == UINT16_MAX) {
- list[t].h_u.sctp_hdr.src_port =
- sctp_spec->hdr.src_port;
- list[t].m_u.sctp_hdr.src_port =
- sctp_mask->hdr.src_port;
- }
- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
- list[t].h_u.sctp_hdr.dst_port =
- sctp_spec->hdr.dst_port;
- list[t].m_u.sctp_hdr.dst_port =
- sctp_mask->hdr.dst_port;
- }
- t++;
- } else if (!sctp_spec && !sctp_mask) {
- list[t].type = ICE_SCTP_IL;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan_spec = item->spec;
- vxlan_mask = item->mask;
- tunnel_valid = 1;
- if (vxlan_spec && vxlan_mask) {
- list[t].type = ICE_VXLAN;
- if (vxlan_mask->vni[0] == UINT8_MAX &&
- vxlan_mask->vni[1] == UINT8_MAX &&
- vxlan_mask->vni[2] == UINT8_MAX) {
- list[t].h_u.tnl_hdr.vni =
- (vxlan_spec->vni[2] << 16) |
- (vxlan_spec->vni[1] << 8) |
- vxlan_spec->vni[0];
- list[t].m_u.tnl_hdr.vni =
- UINT32_MAX;
- }
- t++;
- } else if (!vxlan_spec && !vxlan_mask) {
- list[t].type = ICE_VXLAN;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- nvgre_spec = item->spec;
- nvgre_mask = item->mask;
- tunnel_valid = 1;
- if (nvgre_spec && nvgre_mask) {
- list[t].type = ICE_NVGRE;
- if (nvgre_mask->tni[0] == UINT8_MAX &&
- nvgre_mask->tni[1] == UINT8_MAX &&
- nvgre_mask->tni[2] == UINT8_MAX) {
- list[t].h_u.nvgre_hdr.tni_flow =
- (nvgre_spec->tni[2] << 16) |
- (nvgre_spec->tni[1] << 8) |
- nvgre_spec->tni[0];
- list[t].m_u.nvgre_hdr.tni_flow =
- UINT32_MAX;
- }
- t++;
- } else if (!nvgre_spec && !nvgre_mask) {
- list[t].type = ICE_NVGRE;
- }
- break;
-
- case RTE_FLOW_ITEM_TYPE_VOID:
- case RTE_FLOW_ITEM_TYPE_END:
- break;
-
- default:
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, actions,
- "Invalid pattern item.");
- goto out;
- }
- }
-
- *lkups_num = t;
-
- return 0;
-out:
- return -rte_errno;
-}
-
-/* By now ice switch filter action code implement only
- * supports QUEUE or DROP.
- */
-static int
-ice_parse_switch_action(struct ice_pf *pf,
- const struct rte_flow_action *actions,
- struct rte_flow_error *error,
- struct ice_adv_rule_info *rule_info)
-{
- struct ice_vsi *vsi = pf->main_vsi;
- const struct rte_flow_action_queue *act_q;
- uint16_t base_queue;
- const struct rte_flow_action *action;
- enum rte_flow_action_type action_type;
-
- base_queue = pf->base_queue;
- for (action = actions; action->type !=
- RTE_FLOW_ACTION_TYPE_END; action++) {
- action_type = action->type;
- switch (action_type) {
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- act_q = action->conf;
- rule_info->sw_act.fltr_act =
- ICE_FWD_TO_Q;
- rule_info->sw_act.fwd_id.q_id =
- base_queue + act_q->index;
- break;
-
- case RTE_FLOW_ACTION_TYPE_DROP:
- rule_info->sw_act.fltr_act =
- ICE_DROP_PACKET;
- break;
-
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
-
- default:
- rte_flow_error_set(error,
- EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- actions,
- "Invalid action type");
- return -rte_errno;
- }
- }
-
- rule_info->sw_act.vsi_handle = vsi->idx;
- rule_info->rx = 1;
- rule_info->sw_act.src = vsi->idx;
- rule_info->priority = 5;
-
- return 0;
-}
-
-static int
-ice_switch_rule_set(struct ice_pf *pf,
- struct ice_adv_lkup_elem *list,
- uint16_t lkups_cnt,
- struct ice_adv_rule_info *rule_info,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- struct ice_hw *hw = ICE_PF_TO_HW(pf);
- int ret;
- struct ice_rule_query_data rule_added = {0};
- struct ice_rule_query_data *filter_ptr;
-
- if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "item number too large for rule");
- return -rte_errno;
- }
- if (!list) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "lookup list should not be NULL");
- return -rte_errno;
- }
-
- ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
-
- if (!ret) {
- filter_ptr = rte_zmalloc("ice_switch_filter",
- sizeof(struct ice_rule_query_data), 0);
- if (!filter_ptr) {
- PMD_DRV_LOG(ERR, "failed to allocate memory");
- return -EINVAL;
- }
- flow->rule = filter_ptr;
- rte_memcpy(filter_ptr,
- &rule_added,
- sizeof(struct ice_rule_query_data));
- }
-
- return ret;
-}
-
-int
-ice_create_switch_filter(struct ice_pf *pf,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- int ret = 0;
- struct ice_adv_rule_info rule_info = {0};
- struct ice_adv_lkup_elem *list = NULL;
- uint16_t lkups_num = 0;
- const struct rte_flow_item *item = pattern;
- uint16_t item_num = 0;
- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
-
- for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- item_num++;
- if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
- tun_type = ICE_SW_TUN_VXLAN;
- if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
- tun_type = ICE_SW_TUN_NVGRE;
- /* reserve one more memory slot for ETH which may
- * consume 2 lookup items.
- */
- if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
- item_num++;
- }
- rule_info.tun_type = tun_type;
-
- list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
- if (!list) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "No memory for PMD internal items");
- return -rte_errno;
- }
-
- ret = ice_parse_switch_filter(pattern, actions, error,
- list, &lkups_num, tun_type);
- if (ret)
- goto error;
-
- ret = ice_parse_switch_action(pf, actions, error, &rule_info);
- if (ret)
- goto error;
-
- ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
- if (ret)
- goto error;
-
- rte_free(list);
- return 0;
-
-error:
- rte_free(list);
-
- return -rte_errno;
-}
-
-int
-ice_destroy_switch_filter(struct ice_pf *pf,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- struct ice_hw *hw = ICE_PF_TO_HW(pf);
- int ret;
- struct ice_rule_query_data *filter_ptr;
-
- filter_ptr = (struct ice_rule_query_data *)
- flow->rule;
-
- if (!filter_ptr) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "no such flow"
- " create by switch filter");
- return -rte_errno;
- }
-
- ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
- if (ret) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "fail to destroy switch filter rule");
- return -rte_errno;
- }
-
- rte_free(filter_ptr);
- return ret;
-}
-
-void
-ice_free_switch_filter_rule(void *rule)
-{
- struct ice_rule_query_data *filter_ptr;
-
- filter_ptr = (struct ice_rule_query_data *)rule;
-
- rte_free(filter_ptr);
-}
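The destroy path removed here moves behind the engine_destroy_t callback; the follow-up switch-filter patch is expected to provide something of this shape (a sketch only, not the actual re-implementation):

static int
dummy_sw_engine_destroy(struct ice_adapter *ad __rte_unused,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	if (!flow->rule) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"no rule attached to this flow");
		return -rte_errno;
	}
	/* Remove the hardware rule here, then free the PMD bookkeeping. */
	rte_free(flow->rule);
	flow->rule = NULL;
	return 0;
}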
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
--- a/drivers/net/ice/ice_switch_filter.h
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -2,23 +2,5 @@
* Copyright(c) 2019 Intel Corporation
*/
-#ifndef _ICE_SWITCH_FILTER_H_
-#define _ICE_SWITCH_FILTER_H_
-#include "base/ice_switch.h"
-#include "base/ice_type.h"
-#include "ice_ethdev.h"
-int
-ice_create_switch_filter(struct ice_pf *pf,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow *flow,
- struct rte_flow_error *error);
-int
-ice_destroy_switch_filter(struct ice_pf *pf,
- struct rte_flow *flow,
- struct rte_flow_error *error);
-void
-ice_free_switch_filter_rule(void *rule);
-#endif /* _ICE_SWITCH_FILTER_H_ */
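ice_free_switch_filter_rule() has no direct replacement in the header; its role is covered by the per-engine free callback, which ice_flow_uninit() invokes while draining pf->flow_list. A sketch of the expected shape:

static void
dummy_sw_engine_free(struct rte_flow *flow)
{
	/* Only the owning engine knows the layout of flow->rule. */
	rte_free(flow->rule);
	flow->rule = NULL;
}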