From patchwork Tue Sep 29 07:48:34 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Chenxu Di
X-Patchwork-Id: 79148
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Chenxu Di
To: dev@dpdk.org
Cc: junyux.jiang@intel.com, shougangx.wang@intel.com, Jeff Guo,
 Haiyue Wang, Chenxu Di
Date: Tue, 29 Sep 2020 07:48:34 +0000
Message-Id: <20200929074835.39854-2-chenxux.di@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200929074835.39854-1-chenxux.di@intel.com>
References: <20200929074835.39854-1-chenxux.di@intel.com>
Subject: [dpdk-dev] [RFC 3/5] net/e1000: decouple dependency from superseded structures

The legacy filter API will be removed, along with the associated
rte_eth_ctrl.h header. This patch replaces the superseded structures
defined there with PMD-internal structures. The macros
RTE_ETH_FILTER_GENERIC and RTE_ETH_FILTER_GET are not replaced; they
must remain until the corresponding change lands in the ethdev library.

Signed-off-by: Chenxu Di
---
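[Editor's note: the decoupling pattern is easiest to see on the smallest
structure. The sketch below is illustrative only and is not part of the
diff; the real definitions follow in e1000_ethdev.h. It shows how a
PMD-internal mirror of a structure from the deprecated rte_eth_ctrl.h
removes the header dependency while leaving the driver logic untouched.

    /* Before: the driver API is declared in terms of a structure owned
     * by the deprecated rte_eth_ctrl.h header.
     */
    #include <rte_eth_ctrl.h>

    int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
                               struct rte_eth_syn_filter *filter, bool add);

    /* After: the PMD defines an equivalent structure in its own private
     * header (e1000_ethdev.h) and uses it throughout, so rte_eth_ctrl.h
     * can later be deleted without breaking the driver.
     */
    struct igb_flow_syn_filter {
            uint8_t hig_pri;  /* 1 - match before other filters, 0 - after. */
            uint16_t queue;   /* Rx queue assigned when the filter matches. */
    };

    int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
                               struct igb_flow_syn_filter *filter, bool add);
]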
 drivers/net/e1000/e1000_ethdev.h | 113 ++++++++++++++++--
 drivers/net/e1000/igb_ethdev.c   |  80 ++++++-------
 drivers/net/e1000/igb_flow.c     | 199 ++++++++++++++++---------------
 3 files changed, 245 insertions(+), 147 deletions(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 1e41ae9de..3c30b9ebe 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -237,6 +237,88 @@ struct e1000_2tuple_filter {
 	uint16_t queue;       /* rx queue assigned to */
 };
 
+/* Define all structures for ntuple Filter type. */
+
+#define IGB_NTUPLE_FLAGS_DST_IP	0x0001 /* If set, dst_ip is part of ntuple */
+#define IGB_NTUPLE_FLAGS_SRC_IP	0x0002 /* If set, src_ip is part of ntuple */
+#define IGB_NTUPLE_FLAGS_DST_PORT	0x0004 /* If set, dstport is part of ntuple */
+#define IGB_NTUPLE_FLAGS_SRC_PORT	0x0008 /* If set, srcport is part of ntuple */
+#define IGB_NTUPLE_FLAGS_PROTO	0x0010 /* If set, proto is part of ntuple */
+#define IGB_NTUPLE_FLAGS_TCP_FLAG	0x0020 /* If set, tcp flag is involved */
+
+#define IGB_5TUPLE_FLAGS ( \
+		IGB_NTUPLE_FLAGS_DST_IP | \
+		IGB_NTUPLE_FLAGS_SRC_IP | \
+		IGB_NTUPLE_FLAGS_DST_PORT | \
+		IGB_NTUPLE_FLAGS_SRC_PORT | \
+		IGB_NTUPLE_FLAGS_PROTO)
+
+#define IGB_2TUPLE_FLAGS ( \
+		IGB_NTUPLE_FLAGS_DST_PORT | \
+		IGB_NTUPLE_FLAGS_PROTO)
+
+#define IGB_NTUPLE_TCP_FLAGS_MASK	0x3F /* TCP flags filter can match. */
+
+struct igb_flow_ntuple_filter {
+	uint16_t flags;          /* Flags from IGB_NTUPLE_FLAGS_* */
+	uint32_t dst_ip;         /* Destination IP address in big endian. */
+	uint32_t dst_ip_mask;    /* Mask of destination IP address. */
+	uint32_t src_ip;         /* Source IP address in big endian. */
+	uint32_t src_ip_mask;    /* Mask of source IP address. */
+	uint16_t dst_port;       /* Destination port in big endian. */
+	uint16_t dst_port_mask;  /* Mask of destination port. */
+	uint16_t src_port;       /* Source Port in big endian. */
+	uint16_t src_port_mask;  /* Mask of source port. */
+	uint8_t proto;           /* L4 protocol. */
+	uint8_t proto_mask;      /* Mask of L4 protocol. */
+	/* tcp_flags is only meaningful when proto is TCP.
+	 * A packet that matches the above ntuple fields and contains
+	 * any set bit in tcp_flags will hit this filter.
+	 */
+	uint8_t tcp_flags;
+	/* seven levels (001b-111b), 111b is highest,
+	 * used when more than one filter matches.
+	 */
+	uint16_t priority;
+	uint16_t queue;          /* Queue assigned to when match. */
+};
+
+/* bytes to use in flex filter. */
+#define IGB_FLEX_FILTER_MAXLEN	128
+/* mask bytes in flex filter. */
+#define IGB_FLEX_FILTER_MASK_SIZE \
+	(RTE_ALIGN(IGB_FLEX_FILTER_MAXLEN, CHAR_BIT) / CHAR_BIT)
+
+struct igb_flow_flex_filter {
+	uint16_t len;
+	uint8_t bytes[IGB_FLEX_FILTER_MAXLEN]; /* flex bytes in big endian. */
+	/* if mask bit is 1b, do not compare corresponding byte. */
+	uint8_t mask[IGB_FLEX_FILTER_MASK_SIZE];
+	uint8_t priority;
+	uint16_t queue;          /* Queue assigned to when match. */
+};
+
+struct igb_flow_syn_filter {
+	/* 1 - higher priority than other filters, 0 - lower priority. */
+	uint8_t hig_pri;
+	/* Queue assigned to when match. */
+	uint16_t queue;
+};
+
+/**
+ * Define all structures for Ethertype Filter type.
+ */
+
+#define IGB_ETHTYPE_FLAGS_MAC	0x0001 /* If set, compare mac */
+#define IGB_ETHTYPE_FLAGS_DROP	0x0002 /* If set, drop packet when match */
+
+struct igb_flow_ethertype_filter {
+	struct rte_ether_addr mac_addr;   /* Mac address to match. */
+	uint16_t ether_type;              /* Ether type to match */
+	uint16_t flags;                   /* Flags from IGB_ETHTYPE_FLAGS_* */
+	uint16_t queue;                   /* Queue assigned to when match */
+};
+
 /* ethertype filter structure */
 struct igb_ethertype_filter {
 	uint16_t ethertype;
@@ -308,33 +390,46 @@ struct e1000_adapter {
 #define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
 	(&((struct e1000_adapter *)adapter)->filter)
 
+/**
+ * Feature filter types
+ */
+enum igb_filter_type {
+	IGB_FILTER_NONE = 0,
+	IGB_FILTER_ETHERTYPE,
+	IGB_FILTER_FLEXIBLE,
+	IGB_FILTER_SYN,
+	IGB_FILTER_NTUPLE,
+	IGB_FILTER_HASH,
+	IGB_FILTER_MAX
+};
+
 struct rte_flow {
-	enum rte_filter_type filter_type;
+	enum igb_filter_type filter_type;
 	void *rule;
 };
 
 /* ntuple filter list structure */
 struct igb_ntuple_filter_ele {
 	TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
-	struct rte_eth_ntuple_filter filter_info;
+	struct igb_flow_ntuple_filter filter_info;
 };
 
 /* ethertype filter list structure */
 struct igb_ethertype_filter_ele {
 	TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
-	struct rte_eth_ethertype_filter filter_info;
+	struct igb_flow_ethertype_filter filter_info;
 };
 
 /* syn filter list structure */
 struct igb_eth_syn_filter_ele {
 	TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
-	struct rte_eth_syn_filter filter_info;
+	struct igb_flow_syn_filter filter_info;
 };
 
 /* flex filter list structure */
 struct igb_flex_filter_ele {
 	TAILQ_ENTRY(igb_flex_filter_ele) entries;
-	struct rte_eth_flex_filter filter_info;
+	struct igb_flow_flex_filter filter_info;
 };
 
 /* rss filter list structure */
@@ -507,15 +602,15 @@ void igb_remove_flex_filter(struct rte_eth_dev *dev,
 int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
 	uint8_t idx);
 int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
-		struct rte_eth_ntuple_filter *ntuple_filter, bool add);
+		struct igb_flow_ntuple_filter *ntuple_filter, bool add);
 int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter,
+			struct igb_flow_ethertype_filter *filter,
 			bool add);
 int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter,
+			struct igb_flow_syn_filter *filter,
 			bool add);
 int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
-			struct rte_eth_flex_filter *filter,
+			struct igb_flow_flex_filter *filter,
 			bool add);
 int igb_rss_conf_init(struct rte_eth_dev *dev,
 		      struct igb_rte_flow_rss_conf *out,
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 5ab74840a..588fdea11 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -192,20 +192,20 @@ static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op,
 			void *arg);
 static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter);
+			struct igb_flow_ntuple_filter *ntuple_filter);
 static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter);
+			struct igb_flow_ntuple_filter *ntuple_filter);
 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
-			struct rte_eth_flex_filter *filter);
+			struct igb_flow_flex_filter *filter);
 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op,
 			void *arg);
 static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter);
+			struct igb_flow_ntuple_filter *ntuple_filter);
 static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter);
+			struct igb_flow_ntuple_filter *ntuple_filter);
 static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *filter);
+			struct igb_flow_ntuple_filter *filter);
 static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
 				enum rte_filter_op filter_op,
 				void *arg);
@@ -3637,7 +3637,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 
 int
 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter,
+			struct igb_flow_syn_filter *filter,
 			bool add)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3717,12 +3717,12 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
 	switch (filter_op) {
 	case RTE_ETH_FILTER_ADD:
 		ret = eth_igb_syn_filter_set(dev,
-				(struct rte_eth_syn_filter *)arg,
+				(struct igb_flow_syn_filter *)arg,
 				TRUE);
 		break;
 	case RTE_ETH_FILTER_DELETE:
 		ret = eth_igb_syn_filter_set(dev,
-				(struct rte_eth_syn_filter *)arg,
+				(struct igb_flow_syn_filter *)arg,
 				FALSE);
 		break;
 	case RTE_ETH_FILTER_GET:
@@ -3740,14 +3740,14 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
 
 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
 static inline int
-ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
+ntuple_filter_to_2tuple(struct igb_flow_ntuple_filter *filter,
 			struct e1000_2tuple_filter_info *filter_info)
 {
 	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
 		return -EINVAL;
 	if (filter->priority > E1000_2TUPLE_MAX_PRI)
 		return -EINVAL;  /* filter index is out of range. */
-	if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
+	if (filter->tcp_flags > IGB_NTUPLE_TCP_FLAGS_MASK)
 		return -EINVAL;  /* flags is invalid. */
 
 	switch (filter->dst_port_mask) {
@@ -3777,7 +3777,7 @@ ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
 	}
 
 	filter_info->priority = (uint8_t)filter->priority;
-	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+	if (filter->flags & IGB_NTUPLE_FLAGS_TCP_FLAG)
 		filter_info->tcp_flags = filter->tcp_flags;
 	else
 		filter_info->tcp_flags = 0;
@@ -3827,7 +3827,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev,
 	ttqf &= ~E1000_TTQF_MASK_ENABLE;
 
 	/* tcp flags bits setting. */
-	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
+	if (filter->filter_info.tcp_flags & IGB_NTUPLE_TCP_FLAGS_MASK) {
 		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_URG;
 		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
@@ -3861,7 +3861,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev,
  */
 static int
 igb_add_2tuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
+			struct igb_flow_ntuple_filter *ntuple_filter)
 {
 	struct e1000_filter_info *filter_info =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -3942,7 +3942,7 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev,
  */
 static int
 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
+			struct igb_flow_ntuple_filter *ntuple_filter)
 {
 	struct e1000_filter_info *filter_info =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -4056,7 +4056,7 @@ igb_remove_flex_filter(struct rte_eth_dev *dev,
 
 int
 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
-			struct rte_eth_flex_filter *filter,
+			struct igb_flow_flex_filter *filter,
 			bool add)
 {
 	struct e1000_filter_info *filter_info =
@@ -4130,7 +4130,7 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
 
 static int
 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
-			struct rte_eth_flex_filter *filter)
+			struct igb_flow_flex_filter *filter)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct e1000_filter_info *filter_info =
@@ -4180,7 +4180,7 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
 		void *arg)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_flex_filter *filter;
+	struct igb_flow_flex_filter *filter;
 	int ret = 0;
 
 	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
@@ -4194,7 +4194,7 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	filter = (struct rte_eth_flex_filter *)arg;
+	filter = (struct igb_flow_flex_filter *)arg;
 	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
 	    filter->len % sizeof(uint64_t) != 0) {
 		PMD_DRV_LOG(ERR, "filter's length is out of range");
@@ -4226,14 +4226,14 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
 
 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/
 static inline int
-ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
+ntuple_filter_to_5tuple_82576(struct igb_flow_ntuple_filter *filter,
 			struct e1000_5tuple_filter_info *filter_info)
 {
 	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
 		return -EINVAL;
 	if (filter->priority > E1000_2TUPLE_MAX_PRI)
 		return -EINVAL;  /* filter index is out of range. */
-	if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
+	if (filter->tcp_flags > IGB_NTUPLE_TCP_FLAGS_MASK)
 		return -EINVAL;  /* flags is invalid. */
 
 	switch (filter->dst_ip_mask) {
@@ -4302,7 +4302,7 @@ ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
 	}
 
 	filter_info->priority = (uint8_t)filter->priority;
-	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+	if (filter->flags & IGB_NTUPLE_FLAGS_TCP_FLAG)
 		filter_info->tcp_flags = filter->tcp_flags;
 	else
 		filter_info->tcp_flags = 0;
@@ -4363,7 +4363,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
 	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
 
 	/* tcp flags bits setting. */
-	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
+	if (filter->filter_info.tcp_flags & IGB_NTUPLE_TCP_FLAGS_MASK) {
 		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
 			imir_ext |= E1000_IMIREXT_CTRL_URG;
 		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
@@ -4396,7 +4396,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
  */
 static int
 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
+			struct igb_flow_ntuple_filter *ntuple_filter)
 {
 	struct e1000_filter_info *filter_info =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -4483,7 +4483,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
  */
 static int
 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
+			struct igb_flow_ntuple_filter *ntuple_filter)
 {
 	struct e1000_filter_info *filter_info =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -4568,7 +4568,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
  *
  * @param
  * dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * ntuple_filter: Pointer to struct igb_flow_ntuple_filter
 * add: if true, add filter, if false, remove filter
  *
  * @return
@@ -4577,15 +4577,15 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
  */
 int
 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter,
+			struct igb_flow_ntuple_filter *ntuple_filter,
 			bool add)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
 	switch (ntuple_filter->flags) {
-	case RTE_5TUPLE_FLAGS:
-	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+	case IGB_5TUPLE_FLAGS:
+	case (IGB_5TUPLE_FLAGS | IGB_NTUPLE_FLAGS_TCP_FLAG):
 		if (hw->mac.type != e1000_82576)
 			return -ENOTSUP;
 		if (add)
@@ -4595,8 +4595,8 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
 			ret = igb_remove_5tuple_filter_82576(dev,
 					ntuple_filter);
 		break;
-	case RTE_2TUPLE_FLAGS:
-	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+	case IGB_2TUPLE_FLAGS:
+	case (IGB_2TUPLE_FLAGS | IGB_NTUPLE_FLAGS_TCP_FLAG):
 		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
 			hw->mac.type != e1000_i210 &&
 			hw->mac.type != e1000_i211)
@@ -4627,7 +4627,7 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
  */
 static int
 igb_get_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
+			struct igb_flow_ntuple_filter *ntuple_filter)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct e1000_filter_info *filter_info =
@@ -4714,17 +4714,17 @@ igb_ntuple_filter_handle(struct rte_eth_dev *dev,
 	switch (filter_op) {
 	case RTE_ETH_FILTER_ADD:
 		ret = igb_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
+			(struct igb_flow_ntuple_filter *)arg,
 			TRUE);
 		break;
 	case RTE_ETH_FILTER_DELETE:
 		ret = igb_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
+			(struct igb_flow_ntuple_filter *)arg,
 			FALSE);
 		break;
 	case RTE_ETH_FILTER_GET:
 		ret = igb_get_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg);
+			(struct igb_flow_ntuple_filter *)arg);
 		break;
 	default:
 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
@@ -4780,7 +4780,7 @@ igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
 
 int
 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter,
+			struct igb_flow_ethertype_filter *filter,
 			bool add)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4796,11 +4796,11 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+	if (filter->flags & IGB_ETHTYPE_FLAGS_MAC) {
 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
 		return -EINVAL;
 	}
-	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+	if (filter->flags & IGB_ETHTYPE_FLAGS_DROP) {
 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
 		return -EINVAL;
 	}
@@ -4895,12 +4895,12 @@ igb_ethertype_filter_handle(struct rte_eth_dev *dev,
 	switch (filter_op) {
 	case RTE_ETH_FILTER_ADD:
 		ret = igb_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
+			(struct igb_flow_ethertype_filter *)arg,
 			TRUE);
 		break;
 	case RTE_ETH_FILTER_DELETE:
 		ret = igb_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
+			(struct igb_flow_ethertype_filter *)arg,
 			FALSE);
 		break;
 	case RTE_ETH_FILTER_GET:
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index 43fef889b..eec7ae3db 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -91,7 +91,7 @@ static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			 const struct rte_flow_item pattern[],
 			 const struct rte_flow_action actions[],
-			 struct rte_eth_ntuple_filter *filter,
+			 struct igb_flow_ntuple_filter *filter,
 			 struct rte_flow_error *error)
 {
 	const struct rte_flow_item *item;
@@ -216,7 +216,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			item, "Not supported by ntuple filter");
@@ -225,7 +225,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 	/* Not supported last point for range */
 	if (item->last) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 			item, "Not supported last point for range");
@@ -248,7 +248,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		    tcp_mask->hdr.cksum ||
 		    tcp_mask->hdr.tcp_urp) {
 			memset(filter, 0,
-				sizeof(struct rte_eth_ntuple_filter));
+				sizeof(struct igb_flow_ntuple_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
@@ -258,12 +258,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
 		filter->src_port_mask  = tcp_mask->hdr.src_port;
 		if (tcp_mask->hdr.tcp_flags == 0xFF) {
-			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+			filter->flags |= IGB_NTUPLE_FLAGS_TCP_FLAG;
 		} else if (!tcp_mask->hdr.tcp_flags) {
-			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+			filter->flags &= ~IGB_NTUPLE_FLAGS_TCP_FLAG;
 		} else {
 			memset(filter, 0,
-				sizeof(struct rte_eth_ntuple_filter));
+				sizeof(struct igb_flow_ntuple_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
@@ -286,7 +286,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		if (udp_mask->hdr.dgram_len ||
 		    udp_mask->hdr.dgram_cksum) {
 			memset(filter, 0,
-				sizeof(struct rte_eth_ntuple_filter));
+				sizeof(struct igb_flow_ntuple_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
@@ -311,7 +311,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		if (sctp_mask->hdr.tag ||
 		    sctp_mask->hdr.cksum) {
 			memset(filter, 0,
-				sizeof(struct rte_eth_ntuple_filter));
+				sizeof(struct igb_flow_ntuple_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
@@ -331,7 +331,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	index++;
 	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			item, "Not supported by ntuple filter");
@@ -347,7 +347,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	 */
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION,
 			item, "Not supported action.");
@@ -360,7 +360,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	index++;
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION,
 			act, "Not supported action.");
@@ -370,7 +370,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	/* parse attr */
 	/* must be input direction */
 	if (!attr->ingress) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
 			attr, "Only support ingress.");
@@ -379,7 +379,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 	/* not supported */
 	if (attr->egress) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 			attr, "Not support egress.");
@@ -388,7 +388,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 	/* not supported */
 	if (attr->transfer) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 			attr, "No support for transfer.");
@@ -396,7 +396,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 
 	if (attr->priority > 0xFFFF) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 			attr, "Error priority.");
@@ -413,7 +413,7 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
-			struct rte_eth_ntuple_filter *filter,
+			struct igb_flow_ntuple_filter *filter,
 			struct rte_flow_error *error)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -428,7 +428,7 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
 
 	/* Igb doesn't support many priorities. */
 	if (filter->priority > E1000_2TUPLE_MAX_PRI) {
-		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			NULL, "Priority not supported by ntuple filter");
@@ -437,18 +437,20 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
 
 	if (hw->mac.type == e1000_82576) {
 		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
-			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			memset(filter, 0,
+				sizeof(struct igb_flow_ntuple_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				NULL, "queue number not "
 				"supported by ntuple filter");
 			return -rte_errno;
 		}
-		filter->flags |= RTE_5TUPLE_FLAGS;
+		filter->flags |= IGB_5TUPLE_FLAGS;
 	} else {
 		if (filter->src_ip_mask || filter->dst_ip_mask ||
 			filter->src_port_mask) {
-			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			memset(filter, 0,
+				sizeof(struct igb_flow_ntuple_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				NULL, "only two tuple are "
@@ -456,14 +458,15 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
 			return -rte_errno;
 		}
 		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
-			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			memset(filter, 0,
+				sizeof(struct igb_flow_ntuple_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				NULL, "queue number not "
 				"supported by ntuple filter");
 			return -rte_errno;
 		}
-		filter->flags |= RTE_2TUPLE_FLAGS;
+		filter->flags |= IGB_2TUPLE_FLAGS;
 	}
 
 	return 0;
@@ -489,7 +492,7 @@ static int
 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 			const struct rte_flow_item *pattern,
 			const struct rte_flow_action *actions,
-			struct rte_eth_ethertype_filter *filter,
+			struct igb_flow_ethertype_filter *filter,
 			struct rte_flow_error *error)
 {
 	const struct rte_flow_item *item;
@@ -572,13 +575,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 	}
 
 	/* If mask bits of destination MAC address
-	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+	 * are full of 1, set IGB_ETHTYPE_FLAGS_MAC.
 	 */
 	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
 		filter->mac_addr = eth_spec->dst;
-		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+		filter->flags |= IGB_ETHTYPE_FLAGS_MAC;
 	} else {
-		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+		filter->flags &= ~IGB_ETHTYPE_FLAGS_MAC;
 	}
 
 	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
@@ -609,7 +612,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 		act_q = (const struct rte_flow_action_queue *)act->conf;
 		filter->queue = act_q->index;
 	} else {
-		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+		filter->flags |= IGB_ETHTYPE_FLAGS_DROP;
 	}
 
 	/* Check if the next non-void item is END */
@@ -671,7 +674,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
-			struct rte_eth_ethertype_filter *filter,
+			struct igb_flow_ethertype_filter *filter,
 			struct rte_flow_error *error)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -688,7 +691,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
 	if (hw->mac.type == e1000_82576) {
 		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
 			memset(filter, 0, sizeof(
-					struct rte_eth_ethertype_filter));
+					struct igb_flow_ethertype_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				NULL, "queue number not supported "
@@ -698,7 +701,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
 	} else {
 		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
 			memset(filter, 0, sizeof(
-					struct rte_eth_ethertype_filter));
+					struct igb_flow_ethertype_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				NULL, "queue number not supported "
@@ -709,23 +712,23 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
 
 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
-		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+		memset(filter, 0, sizeof(struct igb_flow_ethertype_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			NULL, "IPv4/IPv6 not supported by ethertype filter");
 		return -rte_errno;
 	}
 
-	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
-		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	if (filter->flags & IGB_ETHTYPE_FLAGS_MAC) {
+		memset(filter, 0, sizeof(struct igb_flow_ethertype_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			NULL, "mac compare is unsupported");
 		return -rte_errno;
 	}
 
-	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
-		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	if (filter->flags & IGB_ETHTYPE_FLAGS_DROP) {
+		memset(filter, 0, sizeof(struct igb_flow_ethertype_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			NULL, "drop option is unsupported");
@@ -759,7 +762,7 @@ static int
 cons_parse_syn_filter(const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
-			struct rte_eth_syn_filter *filter,
+			struct igb_flow_syn_filter *filter,
 			struct rte_flow_error *error)
 {
 	const struct rte_flow_item *item;
@@ -883,7 +886,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		    tcp_mask->hdr.rx_win ||
 		    tcp_mask->hdr.cksum ||
 		    tcp_mask->hdr.tcp_urp) {
-			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+			memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by syn filter");
@@ -894,7 +897,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 	index++;
 	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
-		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			item, "Not supported by syn filter");
@@ -907,7 +910,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 	/* check if the first not void action is QUEUE. */
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
-		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION,
 			act, "Not supported action.");
@@ -921,7 +924,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 	index++;
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION,
 			act, "Not supported action.");
@@ -931,7 +934,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 	/* parse attr */
 	/* must be input direction */
 	if (!attr->ingress) {
-		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
 			attr, "Only support ingress.");
@@ -940,7 +943,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 
 	/* not supported */
 	if (attr->egress) {
-		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 			attr, "Not support egress.");
@@ -949,7 +952,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 
 	/* not supported */
 	if (attr->transfer) {
-		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 			attr, "No support for transfer.");
@@ -962,7 +965,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 	} else if (attr->priority == (uint32_t)~0U) {
 		filter->hig_pri = 1;
 	} else {
-		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 			attr, "Not support priority.");
@@ -977,7 +980,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
-			struct rte_eth_syn_filter *filter,
+			struct igb_flow_syn_filter *filter,
 			struct rte_flow_error *error)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -990,7 +993,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev,
 
 	if (hw->mac.type == e1000_82576) {
 		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
-			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+			memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				NULL, "queue number not "
@@ -999,7 +1002,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev,
 		}
 	} else {
 		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
-			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+			memset(filter, 0, sizeof(struct igb_flow_syn_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				NULL, "queue number not "
@@ -1041,7 +1044,7 @@ static int
 cons_parse_flex_filter(const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
-			struct rte_eth_flex_filter *filter,
+			struct igb_flow_flex_filter *filter,
 			struct rte_flow_error *error)
 {
 	const struct rte_flow_item *item;
@@ -1102,7 +1105,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 	if (!raw_mask->length ||
 	    !raw_mask->relative) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			item, "Not supported by flex filter");
@@ -1116,7 +1119,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 
 	for (j = 0; j < raw_spec->length; j++) {
 		if (raw_mask->pattern[j] != 0xFF) {
-			memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+			memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by flex filter");
@@ -1140,8 +1143,8 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 	}
 
 	if ((raw_spec->length + offset + total_offset) >
-			RTE_FLEX_FILTER_MAXLEN) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+			IGB_FLEX_FILTER_MAXLEN) {
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			item, "Not supported by flex filter");
@@ -1204,7 +1207,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 	/* check if the first not void action is QUEUE. */
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION,
 			act, "Not supported action.");
@@ -1218,7 +1221,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 	index++;
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION,
 			act, "Not supported action.");
@@ -1228,7 +1231,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 	/* parse attr */
 	/* must be input direction */
 	if (!attr->ingress) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
 			attr, "Only support ingress.");
@@ -1237,7 +1240,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 
 	/* not supported */
 	if (attr->egress) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 			attr, "Not support egress.");
@@ -1246,7 +1249,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 
 	/* not supported */
 	if (attr->transfer) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 			attr, "No support for transfer.");
@@ -1254,7 +1257,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
 	}
 
 	if (attr->priority > 0xFFFF) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 			attr, "Error priority.");
@@ -1271,7 +1274,7 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
-			struct rte_eth_flex_filter *filter,
+			struct igb_flow_flex_filter *filter,
 			struct rte_flow_error *error)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1283,7 +1286,7 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
 				actions, filter, error);
 
 	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
-		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		memset(filter, 0, sizeof(struct igb_flow_flex_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
 			NULL, "queue number not supported by flex filter");
@@ -1441,10 +1444,10 @@ igb_flow_create(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
 	int ret;
-	struct rte_eth_ntuple_filter ntuple_filter;
-	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_syn_filter syn_filter;
-	struct rte_eth_flex_filter flex_filter;
+	struct igb_flow_ntuple_filter ntuple_filter;
+	struct igb_flow_ethertype_filter ethertype_filter;
+	struct igb_flow_syn_filter syn_filter;
+	struct igb_flow_flex_filter flex_filter;
 	struct igb_rte_flow_rss_conf rss_conf;
 	struct rte_flow *flow = NULL;
 	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
@@ -1471,7 +1474,7 @@ igb_flow_create(struct rte_eth_dev *dev,
 	TAILQ_INSERT_TAIL(&igb_flow_list,
 				igb_flow_mem_ptr, entries);
 
-	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	memset(&ntuple_filter, 0, sizeof(struct igb_flow_ntuple_filter));
 	ret = igb_parse_ntuple_filter(dev, attr, pattern,
 			actions, &ntuple_filter, error);
 	if (!ret) {
@@ -1486,17 +1489,17 @@ igb_flow_create(struct rte_eth_dev *dev,
 
 			rte_memcpy(&ntuple_filter_ptr->filter_info,
 				&ntuple_filter,
-				sizeof(struct rte_eth_ntuple_filter));
+				sizeof(struct igb_flow_ntuple_filter));
 			TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
 				ntuple_filter_ptr, entries);
 			flow->rule = ntuple_filter_ptr;
-			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+			flow->filter_type = IGB_FILTER_NTUPLE;
 			return flow;
 		}
 		goto out;
 	}
 
-	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	memset(&ethertype_filter, 0, sizeof(struct igb_flow_ethertype_filter));
 	ret = igb_parse_ethertype_filter(dev, attr, pattern,
 				actions, &ethertype_filter, error);
 	if (!ret) {
@@ -1513,17 +1516,17 @@ igb_flow_create(struct rte_eth_dev *dev,
 
 			rte_memcpy(&ethertype_filter_ptr->filter_info,
 				&ethertype_filter,
-				sizeof(struct rte_eth_ethertype_filter));
+				sizeof(struct igb_flow_ethertype_filter));
 			TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
 				ethertype_filter_ptr, entries);
 			flow->rule = ethertype_filter_ptr;
-			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+			flow->filter_type = IGB_FILTER_ETHERTYPE;
 			return flow;
 		}
 		goto out;
 	}
 
-	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	memset(&syn_filter, 0, sizeof(struct igb_flow_syn_filter));
 	ret = igb_parse_syn_filter(dev, attr, pattern,
 				actions, &syn_filter, error);
 	if (!ret) {
@@ -1538,18 +1541,18 @@ igb_flow_create(struct rte_eth_dev *dev,
 
 			rte_memcpy(&syn_filter_ptr->filter_info,
 				&syn_filter,
-				sizeof(struct rte_eth_syn_filter));
+				sizeof(struct igb_flow_syn_filter));
 			TAILQ_INSERT_TAIL(&igb_filter_syn_list,
 				syn_filter_ptr,
 				entries);
 			flow->rule = syn_filter_ptr;
-			flow->filter_type = RTE_ETH_FILTER_SYN;
+			flow->filter_type = IGB_FILTER_SYN;
 			return flow;
 		}
 		goto out;
 	}
 
-	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+	memset(&flex_filter, 0, sizeof(struct igb_flow_flex_filter));
 	ret = igb_parse_flex_filter(dev, attr, pattern, actions,
 				&flex_filter, error);
 	if (!ret) {
@@ -1564,11 +1567,11 @@ igb_flow_create(struct rte_eth_dev *dev,
 
 			rte_memcpy(&flex_filter_ptr->filter_info,
 				&flex_filter,
-				sizeof(struct rte_eth_flex_filter));
+				sizeof(struct igb_flow_flex_filter));
 			TAILQ_INSERT_TAIL(&igb_filter_flex_list,
 				flex_filter_ptr, entries);
 			flow->rule = flex_filter_ptr;
-			flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
+			flow->filter_type = IGB_FILTER_FLEXIBLE;
 			return flow;
 		}
 	}
@@ -1590,7 +1593,7 @@ igb_flow_create(struct rte_eth_dev *dev,
 			TAILQ_INSERT_TAIL(&igb_filter_rss_list,
 				rss_filter_ptr, entries);
 			flow->rule = rss_filter_ptr;
-			flow->filter_type = RTE_ETH_FILTER_HASH;
+			flow->filter_type = IGB_FILTER_HASH;
 			return flow;
 		}
 	}
@@ -1618,32 +1621,32 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
 {
-	struct rte_eth_ntuple_filter ntuple_filter;
-	struct rte_eth_ethertype_filter ethertype_filter;
-	struct rte_eth_syn_filter syn_filter;
-	struct rte_eth_flex_filter flex_filter;
+	struct igb_flow_ntuple_filter ntuple_filter;
+	struct igb_flow_ethertype_filter ethertype_filter;
+	struct igb_flow_syn_filter syn_filter;
+	struct igb_flow_flex_filter flex_filter;
 	struct igb_rte_flow_rss_conf rss_conf;
 	int ret;
 
-	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	memset(&ntuple_filter, 0, sizeof(struct igb_flow_ntuple_filter));
 	ret = igb_parse_ntuple_filter(dev, attr, pattern,
 				actions, &ntuple_filter, error);
 	if (!ret)
 		return 0;
 
-	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	memset(&ethertype_filter, 0, sizeof(struct igb_flow_ethertype_filter));
 	ret = igb_parse_ethertype_filter(dev, attr, pattern,
 				actions, &ethertype_filter, error);
 	if (!ret)
 		return 0;
 
-	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	memset(&syn_filter, 0, sizeof(struct igb_flow_syn_filter));
 	ret = igb_parse_syn_filter(dev, attr, pattern,
 				actions, &syn_filter, error);
 	if (!ret)
 		return 0;
 
-	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+	memset(&flex_filter, 0, sizeof(struct igb_flow_flex_filter));
 	ret = igb_parse_flex_filter(dev, attr, pattern,
 				actions, &flex_filter, error);
 	if (!ret)
@@ -1664,7 +1667,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
 {
 	int ret;
 	struct rte_flow *pmd_flow = flow;
-	enum rte_filter_type filter_type = pmd_flow->filter_type;
+	enum igb_filter_type filter_type = pmd_flow->filter_type;
 	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
 	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
 	struct igb_eth_syn_filter_ele *syn_filter_ptr;
@@ -1673,7 +1676,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
 	struct igb_rss_conf_ele *rss_filter_ptr;
 
 	switch (filter_type) {
-	case RTE_ETH_FILTER_NTUPLE:
+	case IGB_FILTER_NTUPLE:
 		ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
 					pmd_flow->rule;
 		ret = igb_add_del_ntuple_filter(dev,
@@ -1684,7 +1687,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
 			rte_free(ntuple_filter_ptr);
 		}
 		break;
-	case RTE_ETH_FILTER_ETHERTYPE:
+	case IGB_FILTER_ETHERTYPE:
 		ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
 					pmd_flow->rule;
 		ret = igb_add_del_ethertype_filter(dev,
@@ -1695,7 +1698,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
 			rte_free(ethertype_filter_ptr);
 		}
 		break;
-	case RTE_ETH_FILTER_SYN:
+	case IGB_FILTER_SYN:
 		syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
 				pmd_flow->rule;
 		ret = eth_igb_syn_filter_set(dev,
@@ -1706,7 +1709,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
 			rte_free(syn_filter_ptr);
 		}
 		break;
-	case RTE_ETH_FILTER_FLEXIBLE:
+	case IGB_FILTER_FLEXIBLE:
 		flex_filter_ptr = (struct igb_flex_filter_ele *)
 				pmd_flow->rule;
 		ret = eth_igb_add_del_flex_filter(dev,
@@ -1717,7 +1720,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
 			rte_free(flex_filter_ptr);
 		}
 		break;
-	case RTE_ETH_FILTER_HASH:
+	case IGB_FILTER_HASH:
 		rss_filter_ptr = (struct igb_rss_conf_ele *)
 				pmd_flow->rule;
 		ret = igb_config_rss_filter(dev,
@@ -1836,7 +1839,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 	struct igb_flex_filter_ele *flex_filter_ptr;
 	struct igb_rss_conf_ele *rss_filter_ptr;
 	struct igb_flow_mem *igb_flow_mem_ptr;
-	enum rte_filter_type filter_type;
+	enum igb_filter_type filter_type;
 	struct rte_flow *pmd_flow;
 
 	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
@@ -1845,7 +1848,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 		filter_type = pmd_flow->filter_type;
 
 		switch (filter_type) {
-		case RTE_ETH_FILTER_NTUPLE:
+		case IGB_FILTER_NTUPLE:
 			ntuple_filter_ptr =
 				(struct igb_ntuple_filter_ele *)
 					pmd_flow->rule;
@@ -1853,7 +1856,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 				ntuple_filter_ptr, entries);
 			rte_free(ntuple_filter_ptr);
 			break;
-		case RTE_ETH_FILTER_ETHERTYPE:
+		case IGB_FILTER_ETHERTYPE:
 			ethertype_filter_ptr =
 				(struct igb_ethertype_filter_ele *)
 					pmd_flow->rule;
@@ -1861,7 +1864,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 				ethertype_filter_ptr, entries);
 			rte_free(ethertype_filter_ptr);
 			break;
-		case RTE_ETH_FILTER_SYN:
+		case IGB_FILTER_SYN:
 			syn_filter_ptr =
 				(struct igb_eth_syn_filter_ele *)
 					pmd_flow->rule;
@@ -1869,7 +1872,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 				syn_filter_ptr, entries);
 			rte_free(syn_filter_ptr);
 			break;
-		case RTE_ETH_FILTER_FLEXIBLE:
+		case IGB_FILTER_FLEXIBLE:
 			flex_filter_ptr =
 				(struct igb_flex_filter_ele *)
 					pmd_flow->rule;
@@ -1877,7 +1880,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
 				flex_filter_ptr, entries);
 			rte_free(flex_filter_ptr);
 			break;
-		case RTE_ETH_FILTER_HASH:
+		case IGB_FILTER_HASH:
 			rss_filter_ptr =
 				(struct igb_rss_conf_ele *)
 					pmd_flow->rule;