From patchwork Tue Sep 29 07:49:08 2020
X-Patchwork-Submitter: Chenxu Di
X-Patchwork-Id: 79150
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Chenxu Di
To: dev@dpdk.org
Cc: junyux.jiang@intel.com, shougangx.wang@intel.com, Jeff Guo, Beilei Xing
Date: Tue, 29 Sep 2020 07:49:08 +0000
Message-Id: <20200929074908.39915-1-chenxux.di@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [dpdk-dev] [RFC 1/5] net/i40e: decouple dependency from superseded structures

From: Junyu Jiang

The legacy filter API, along with the associated rte_eth_ctrl.h, will be
removed. This patch replaces the superseded structures with PMD-internal
structures. The macros RTE_ETH_FILTER_GENERIC and RTE_ETH_FILTER_GET are
not replaced; they are kept until the corresponding librte changes land.
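For illustration, a minimal sketch of how PMD code would build an ethertype
filter with the new internal structure (the struct, the I40E_ETHTYPE_FLAGS_*
macros, and i40e_ethertype_filter_set() are taken from this patch; the matched
EtherType, the MAC value, and the pf context are hypothetical):

    #include <rte_ether.h>

    /* Drop ARP frames from one source MAC (illustrative values only). */
    struct i40e_eth_ethertype_filter f = {
        .ether_type = RTE_ETHER_TYPE_ARP,      /* EtherType to match */
        .flags = I40E_ETHTYPE_FLAGS_MAC |      /* also compare MAC address */
                 I40E_ETHTYPE_FLAGS_DROP,      /* drop packet on match */
        .queue = 0,                            /* ignored when dropping */
    };
    struct rte_ether_addr mac = {{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}};
    rte_ether_addr_copy(&mac, &f.mac_addr);
    /* ret = i40e_ethertype_filter_set(pf, &f, true); -- pf: driver context */
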
Signed-off-by: Junyu Jiang --- drivers/net/i40e/i40e_ethdev.c | 312 ++++++++++++++-------------- drivers/net/i40e/i40e_ethdev.h | 357 +++++++++++++++++++++++++++++--- drivers/net/i40e/i40e_fdir.c | 8 +- drivers/net/i40e/i40e_flow.c | 111 +++++----- drivers/net/i40e/i40e_pf.c | 2 +- drivers/net/i40e/rte_pmd_i40e.c | 30 +-- 6 files changed, 559 insertions(+), 261 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 6439baf2f..8b95e70da 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -383,7 +383,7 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static int i40e_ethertype_filter_convert( - const struct rte_eth_ethertype_filter *input, + const struct i40e_eth_ethertype_filter *input, struct i40e_ethertype_filter *filter); static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); @@ -2737,7 +2737,7 @@ i40e_dev_close(struct rte_eth_dev *dev) while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { TAILQ_REMOVE(&pf->flow_list, p_flow, node); /* Do not free FDIR flows since they are static allocated */ - if (p_flow->filter_type != RTE_ETH_FILTER_FDIR) + if (p_flow->filter_type != I40E_ETH_FILTER_FDIR) rte_free(p_flow); } @@ -4331,9 +4331,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev, rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN); if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) - mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; else - mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; + mac_filter.filter_type = I40E_MAC_PERFECT_MATCH; if (pool == 0) vsi = pf->main_vsi; @@ -5652,7 +5652,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) mac = &f->mac_info.mac_addr; rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); - f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; + f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); vsi->mac_num++; @@ -5660,7 +5660,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) } rte_memcpy(&filter.mac_addr, (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); - filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; return i40e_vsi_add_mac(vsi, &filter); } @@ -6126,7 +6126,7 @@ i40e_vsi_setup(struct i40e_pf *pf, /* MAC/VLAN configuration */ rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); - filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); if (ret != I40E_SUCCESS) { @@ -6154,15 +6154,15 @@ i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on) struct i40e_mac_filter *f; void *temp; struct i40e_mac_filter_info *mac_filter; - enum rte_mac_filter_type desired_filter; + enum i40e_mac_filter_type desired_filter; int ret = I40E_SUCCESS; if (on) { /* Filter to match MAC and VLAN */ - desired_filter = RTE_MACVLAN_PERFECT_MATCH; + desired_filter = I40E_MACVLAN_PERFECT_MATCH; } else { /* Filter to match only MAC */ - desired_filter = RTE_MAC_PERFECT_MATCH; + desired_filter = I40E_MAC_PERFECT_MATCH; } num = vsi->mac_num; @@ -7105,18 +7105,18 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, rte_cpu_to_le_16(filter[num + i].vlan_id); switch (filter[num + i].filter_type) { - case RTE_MAC_PERFECT_MATCH: + case I40E_MAC_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH | 
I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; break; - case RTE_MACVLAN_PERFECT_MATCH: + case I40E_MACVLAN_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; break; - case RTE_MAC_HASH_MATCH: + case I40E_MAC_HASH_MATCH: flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH | I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; break; - case RTE_MACVLAN_HASH_MATCH: + case I40E_MACVLAN_HASH_MATCH: flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; break; default: @@ -7180,18 +7180,18 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, rte_cpu_to_le_16(filter[num + i].vlan_id); switch (filter[num + i].filter_type) { - case RTE_MAC_PERFECT_MATCH: + case I40E_MAC_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; break; - case RTE_MACVLAN_PERFECT_MATCH: + case I40E_MACVLAN_PERFECT_MATCH: flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; break; - case RTE_MAC_HASH_MATCH: + case I40E_MAC_HASH_MATCH: flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; break; - case RTE_MACVLAN_HASH_MATCH: + case I40E_MACVLAN_HASH_MATCH: flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; break; default: @@ -7536,8 +7536,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr); if (f != NULL) return I40E_SUCCESS; - if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) || - (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) { + if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) { /** * If vlan_num is 0, that's the first time to add mac, @@ -7548,8 +7548,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) vsi->vlan_num = 1; } vlan_num = vsi->vlan_num; - } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) || - (mac_filter->filter_type == RTE_MAC_HASH_MATCH)) + } else if ((mac_filter->filter_type == I40E_MAC_PERFECT_MATCH) || + (mac_filter->filter_type == I40E_MAC_HASH_MATCH)) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -7564,8 +7564,8 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) ETH_ADDR_LEN); } - if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH || - mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) { + if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, &mac_filter->mac_addr); if (ret != I40E_SUCCESS) @@ -7602,7 +7602,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; int i, vlan_num; - enum rte_mac_filter_type filter_type; + enum i40e_mac_filter_type filter_type; int ret = I40E_SUCCESS; /* Can't find it, return an error */ @@ -7612,14 +7612,14 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) vlan_num = vsi->vlan_num; filter_type = f->mac_info.filter_type; - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } - } else if (filter_type == RTE_MAC_PERFECT_MATCH || - filter_type == RTE_MAC_HASH_MATCH) + } else if (filter_type == I40E_MAC_PERFECT_MATCH || + filter_type == I40E_MAC_HASH_MATCH) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -7633,8 +7633,8 @@ 
i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr); if (ret != I40E_SUCCESS) goto DONE; @@ -7862,25 +7862,25 @@ static int i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) { switch (filter_type) { - case RTE_TUNNEL_FILTER_IMAC_IVLAN: + case I40E_TUNNEL_FILTER_IMAC_IVLAN: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN; break; - case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID: + case I40E_TUNNEL_FILTER_IMAC_IVLAN_TENID: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID; break; - case RTE_TUNNEL_FILTER_IMAC_TENID: + case I40E_TUNNEL_FILTER_IMAC_TENID: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID; break; - case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC: + case I40E_TUNNEL_FILTER_OMAC_TENID_IMAC: *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC; break; - case ETH_TUNNEL_FILTER_IMAC: + case I40E_TUNNEL_FILTER_IMAC: *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC; break; - case ETH_TUNNEL_FILTER_OIP: + case I40E_TUNNEL_FILTER_OIP: *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP; break; - case ETH_TUNNEL_FILTER_IIP: + case I40E_TUNNEL_FILTER_IIP: *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP; break; default: @@ -9316,7 +9316,7 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) */ static int i40e_get_hash_filter_global_config(struct i40e_hw *hw, - struct rte_eth_hash_global_conf *g_cfg) + struct i40e_hash_global_conf *g_cfg) { struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; uint32_t reg; @@ -9362,7 +9362,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, static int i40e_hash_global_config_check(const struct i40e_adapter *adapter, - const struct rte_eth_hash_global_conf *g_cfg) + const struct i40e_hash_global_conf *g_cfg) { uint32_t i; uint64_t mask0, i40e_mask = adapter->flow_types_mask; @@ -9406,7 +9406,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter, */ static int i40e_set_hash_filter_global_config(struct i40e_hw *hw, - struct rte_eth_hash_global_conf *g_cfg) + struct i40e_hash_global_conf *g_cfg) { struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; @@ -9478,7 +9478,7 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, */ static uint64_t i40e_get_valid_input_set(enum i40e_filter_pctype pctype, - enum rte_filter_type filter) + enum i40e_eth_filter_type filter) { uint64_t valid; @@ -9726,7 +9726,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) return 0; - if (filter == RTE_ETH_FILTER_HASH) + if (filter == I40E_ETH_FILTER_HASH) valid = valid_hash_inset_table[pctype]; else valid = valid_fdir_inset_table[pctype]; @@ -9739,7 +9739,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, */ int i40e_validate_input_set(enum i40e_filter_pctype pctype, - enum rte_filter_type filter, uint64_t inset) + enum i40e_eth_filter_type filter, uint64_t inset) { uint64_t valid; @@ -9817,76 +9817,76 @@ i40e_get_default_input_set(uint16_t pctype) static int i40e_parse_input_set(uint64_t *inset, enum i40e_filter_pctype pctype, - enum rte_eth_input_set_field *field, + enum i40e_input_set_field *field, uint16_t size) { uint16_t i, j; int ret = -EINVAL; static const struct { - enum rte_eth_input_set_field 
field; + enum i40e_input_set_field field; uint64_t inset; } inset_convert_table[] = { - {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE}, - {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC}, - {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC}, - {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER}, - {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER}, - {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE}, - {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC}, - {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST}, - {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS}, - {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO}, - {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL}, - {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC}, - {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST}, - {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC}, - {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER, + {I40E_INPUT_SET_NONE, I40E_INSET_NONE}, + {I40E_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC}, + {I40E_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC}, + {I40E_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER}, + {I40E_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER}, + {I40E_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE}, + {I40E_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC}, + {I40E_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST}, + {I40E_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS}, + {I40E_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO}, + {I40E_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL}, + {I40E_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC}, + {I40E_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST}, + {I40E_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC}, + {I40E_INPUT_SET_L3_IP6_NEXT_HEADER, I40E_INSET_IPV6_NEXT_HDR}, - {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS, + {I40E_INPUT_SET_L3_IP6_HOP_LIMITS, I40E_INSET_IPV6_HOP_LIMIT}, - {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT}, - {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT}, - {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG, + {I40E_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT}, + {I40E_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT}, + {I40E_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT}, + {I40E_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT}, + {I40E_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT}, + {I40E_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT}, + {I40E_INPUT_SET_L4_SCTP_VERIFICATION_TAG, I40E_INSET_SCTP_VT}, - {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC, + {I40E_INPUT_SET_TUNNEL_L2_INNER_DST_MAC, I40E_INSET_TUNNEL_DMAC}, - {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN, + {I40E_INPUT_SET_TUNNEL_L2_INNER_VLAN, I40E_INSET_VLAN_TUNNEL}, - {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY, + {I40E_INPUT_SET_TUNNEL_L4_UDP_KEY, I40E_INSET_TUNNEL_ID}, - {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD, + {I40E_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID}, + {I40E_INPUT_SET_FLEX_PAYLOAD_1ST_WORD, I40E_INSET_FLEX_PAYLOAD_W1}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD, + {I40E_INPUT_SET_FLEX_PAYLOAD_2ND_WORD, I40E_INSET_FLEX_PAYLOAD_W2}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD, + {I40E_INPUT_SET_FLEX_PAYLOAD_3RD_WORD, I40E_INSET_FLEX_PAYLOAD_W3}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD, + {I40E_INPUT_SET_FLEX_PAYLOAD_4TH_WORD, I40E_INSET_FLEX_PAYLOAD_W4}, - 
{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD, + {I40E_INPUT_SET_FLEX_PAYLOAD_5TH_WORD, I40E_INSET_FLEX_PAYLOAD_W5}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD, + {I40E_INPUT_SET_FLEX_PAYLOAD_6TH_WORD, I40E_INSET_FLEX_PAYLOAD_W6}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD, + {I40E_INPUT_SET_FLEX_PAYLOAD_7TH_WORD, I40E_INSET_FLEX_PAYLOAD_W7}, - {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD, + {I40E_INPUT_SET_FLEX_PAYLOAD_8TH_WORD, I40E_INSET_FLEX_PAYLOAD_W8}, }; - if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX) + if (!inset || !field || size > I40E_INSET_SIZE_MAX) return ret; /* Only one item allowed for default or all */ if (size == 1) { - if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) { + if (field[0] == I40E_INPUT_SET_DEFAULT) { *inset = i40e_get_default_input_set(pctype); return 0; - } else if (field[0] == RTE_ETH_INPUT_SET_NONE) { + } else if (field[0] == I40E_INPUT_SET_NONE) { *inset = I40E_INSET_NONE; return 0; } @@ -10149,7 +10149,7 @@ i40e_filter_input_set_init(struct i40e_pf *pf) int i40e_hash_filter_inset_select(struct i40e_hw *hw, - struct rte_eth_input_set_conf *conf) + struct i40e_input_set_conf *conf) { struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; enum i40e_filter_pctype pctype; @@ -10161,8 +10161,8 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, PMD_DRV_LOG(ERR, "Invalid pointer"); return -EFAULT; } - if (conf->op != RTE_ETH_INPUT_SET_SELECT && - conf->op != RTE_ETH_INPUT_SET_ADD) { + if (conf->op != I40E_INPUT_SET_SELECT && + conf->op != I40E_INPUT_SET_ADD) { PMD_DRV_LOG(ERR, "Unsupported input set operation"); return -EINVAL; } @@ -10191,7 +10191,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, return -EINVAL; } - if (conf->op == RTE_ETH_INPUT_SET_ADD) { + if (conf->op == I40E_INPUT_SET_ADD) { /* get inset value in register */ inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); inset_reg <<= I40E_32_BIT_WIDTH; @@ -10226,7 +10226,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, int i40e_fdir_filter_inset_select(struct i40e_pf *pf, - struct rte_eth_input_set_conf *conf) + struct i40e_input_set_conf *conf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_filter_pctype pctype; @@ -10238,8 +10238,8 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "Invalid pointer"); return -EFAULT; } - if (conf->op != RTE_ETH_INPUT_SET_SELECT && - conf->op != RTE_ETH_INPUT_SET_ADD) { + if (conf->op != I40E_INPUT_SET_SELECT && + conf->op != I40E_INPUT_SET_ADD) { PMD_DRV_LOG(ERR, "Unsupported input set operation"); return -EINVAL; } @@ -10267,7 +10267,7 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, * it is done by writing I40E_PRTQF_FD_FLXINSET * in i40e_set_flex_mask_on_pctype. 
*/ - if (conf->op == RTE_ETH_INPUT_SET_SELECT) + if (conf->op == I40E_INPUT_SET_SELECT) inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS; else input_set |= pf->fdir.input_set[pctype]; @@ -10308,7 +10308,7 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, } static int -i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) +i40e_hash_filter_get(struct i40e_hw *hw, struct i40e_hash_filter_info *info) { int ret = 0; @@ -10318,11 +10318,11 @@ i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) } switch (info->info_type) { - case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + case I40E_HASH_FILTER_SYM_HASH_ENA_PER_PORT: i40e_get_symmetric_hash_enable_per_port(hw, &(info->info.enable)); break; - case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + case I40E_HASH_FILTER_GLOBAL_CONFIG: ret = i40e_get_hash_filter_global_config(hw, &(info->info.global_conf)); break; @@ -10337,7 +10337,7 @@ i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) } static int -i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) +i40e_hash_filter_set(struct i40e_hw *hw, struct i40e_hash_filter_info *info) { int ret = 0; @@ -10347,14 +10347,14 @@ i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) } switch (info->info_type) { - case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + case I40E_HASH_FILTER_SYM_HASH_ENA_PER_PORT: i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable); break; - case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + case I40E_HASH_FILTER_GLOBAL_CONFIG: ret = i40e_set_hash_filter_global_config(hw, &(info->info.global_conf)); break; - case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: + case I40E_HASH_FILTER_INPUT_SET_SELECT: ret = i40e_hash_filter_inset_select(hw, &(info->info.input_set_conf)); break; @@ -10383,11 +10383,11 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev, break; case RTE_ETH_FILTER_GET: ret = i40e_hash_filter_get(hw, - (struct rte_eth_hash_filter_info *)arg); + (struct i40e_hash_filter_info *)arg); break; case RTE_ETH_FILTER_SET: ret = i40e_hash_filter_set(hw, - (struct rte_eth_hash_filter_info *)arg); + (struct i40e_hash_filter_info *)arg); break; default: PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported", @@ -10401,7 +10401,7 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev, /* Convert ethertype filter structure */ static int -i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, +i40e_ethertype_filter_convert(const struct i40e_eth_ethertype_filter *input, struct i40e_ethertype_filter *filter) { rte_memcpy(&filter->input.mac_addr, &input->mac_addr, @@ -10482,7 +10482,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf, */ int i40e_ethertype_filter_set(struct i40e_pf *pf, - struct rte_eth_ethertype_filter *filter, + struct i40e_eth_ethertype_filter *filter, bool add) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); @@ -10523,9 +10523,9 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, return -EINVAL; } - if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC)) + if (!(filter->flags & I40E_ETHTYPE_FLAGS_MAC)) flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; - if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) + if (filter->flags & I40E_ETHTYPE_FLAGS_DROP) flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; @@ -10587,12 +10587,12 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev, switch (filter_op) { case RTE_ETH_FILTER_ADD: ret = i40e_ethertype_filter_set(pf, - (struct rte_eth_ethertype_filter *)arg, + 
(struct i40e_eth_ethertype_filter *)arg, TRUE); break; case RTE_ETH_FILTER_DELETE: ret = i40e_ethertype_filter_set(pf, - (struct rte_eth_ethertype_filter *)arg, + (struct i40e_eth_ethertype_filter *)arg, FALSE); break; default: @@ -12550,9 +12550,9 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf) TAILQ_FOREACH(f, ethertype_list, rules) { flags = 0; - if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC)) + if (!(f->flags & I40E_ETHTYPE_FLAGS_MAC)) flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; - if (f->flags & RTE_ETHTYPE_FLAGS_DROP) + if (f->flags & I40E_ETHTYPE_FLAGS_DROP) flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; @@ -13271,108 +13271,108 @@ static int i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - struct rte_eth_input_set_conf conf; + struct i40e_input_set_conf conf; uint64_t mask0; int ret = 0; uint32_t j; int i; static const struct { uint64_t type; - enum rte_eth_input_set_field field; + enum i40e_input_set_field field; } inset_match_table[] = { {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, + I40E_INPUT_SET_L3_SRC_IP4}, {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, + I40E_INPUT_SET_L3_DST_IP4}, {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, + I40E_INPUT_SET_L3_SRC_IP4}, {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, + I40E_INPUT_SET_L3_DST_IP4}, {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, + I40E_INPUT_SET_L4_TCP_SRC_PORT}, {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, + I40E_INPUT_SET_L4_TCP_DST_PORT}, {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, + I40E_INPUT_SET_L3_SRC_IP4}, {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, + I40E_INPUT_SET_L3_DST_IP4}, {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, + I40E_INPUT_SET_L4_UDP_SRC_PORT}, {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, + I40E_INPUT_SET_L4_UDP_DST_PORT}, {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, + I40E_INPUT_SET_L3_SRC_IP4}, {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, + I40E_INPUT_SET_L3_DST_IP4}, {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, + I40E_INPUT_SET_L4_SCTP_SRC_PORT}, {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, + I40E_INPUT_SET_L4_SCTP_DST_PORT}, {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP4}, + I40E_INPUT_SET_L3_SRC_IP4}, {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP4}, + I40E_INPUT_SET_L3_DST_IP4}, {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, + I40E_INPUT_SET_L3_SRC_IP6}, {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, + I40E_INPUT_SET_L3_DST_IP6}, 
{ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, + I40E_INPUT_SET_L3_SRC_IP6}, {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, + I40E_INPUT_SET_L3_DST_IP6}, {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, + I40E_INPUT_SET_L4_TCP_SRC_PORT}, {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, + I40E_INPUT_SET_L4_TCP_DST_PORT}, {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, + I40E_INPUT_SET_L3_SRC_IP6}, {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, + I40E_INPUT_SET_L3_DST_IP6}, {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, + I40E_INPUT_SET_L4_UDP_SRC_PORT}, {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, + I40E_INPUT_SET_L4_UDP_DST_PORT}, {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, + I40E_INPUT_SET_L3_SRC_IP6}, {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, + I40E_INPUT_SET_L3_DST_IP6}, {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, + I40E_INPUT_SET_L4_SCTP_SRC_PORT}, {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, + I40E_INPUT_SET_L4_SCTP_DST_PORT}, {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY, - RTE_ETH_INPUT_SET_L3_SRC_IP6}, + I40E_INPUT_SET_L3_SRC_IP6}, {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY, - RTE_ETH_INPUT_SET_L3_DST_IP6}, + I40E_INPUT_SET_L3_DST_IP6}, {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY, - RTE_ETH_INPUT_SET_UNKNOWN}, + I40E_INPUT_SET_UNKNOWN}, }; mask0 = types & pf->adapter->flow_types_mask; - conf.op = RTE_ETH_INPUT_SET_SELECT; + conf.op = I40E_INPUT_SET_SELECT; conf.inset_size = 0; for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) { if (mask0 & (1ULL << i)) { @@ -13385,7 +13385,7 @@ i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types) if ((types & inset_match_table[j].type) == inset_match_table[j].type) { if (inset_match_table[j].field == - RTE_ETH_INPUT_SET_UNKNOWN) + I40E_INPUT_SET_UNKNOWN) return -EINVAL; conf.field[conf.inset_size] = @@ -13645,12 +13645,12 @@ i40e_rss_disable_hash(struct i40e_pf *pf, continue; /* Configure default input set */ - struct rte_eth_input_set_conf input_conf = { - .op = RTE_ETH_INPUT_SET_SELECT, + struct i40e_input_set_conf input_conf = { + .op = I40E_INPUT_SET_SELECT, .flow_type = i, .inset_size = 1, }; - input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT; + input_conf.field[0] = I40E_INPUT_SET_DEFAULT; i40e_hash_filter_inset_select(hw, &input_conf); } diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 1466998aa..65395ef5b 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -266,12 +266,311 @@ enum i40e_flxpld_layer_idx { #define I40E_FDIR_PRG_PKT_CNT 128 +/** + * filter types + */ +enum i40e_eth_filter_type { + I40E_ETH_FILTER_NONE = 0, + I40E_ETH_FILTER_ETHERTYPE, + I40E_ETH_FILTER_TUNNEL, + I40E_ETH_FILTER_FDIR, + I40E_ETH_FILTER_HASH, + I40E_ETH_FILTER_MAX +}; + +/** + * MAC filter type + */ +enum 
i40e_mac_filter_type { + I40E_MAC_PERFECT_MATCH = 1, /**< exact match of MAC addr. */ + I40E_MACVLAN_PERFECT_MATCH, /**< exact match of MAC addr and VLAN ID. */ + I40E_MAC_HASH_MATCH, /**< hash match of MAC addr. */ + /** hash match of MAC addr and exact match of VLAN ID. */ + I40E_MACVLAN_HASH_MATCH, +}; + +/** + * A structure used to define the ethertype filter entry. + */ +struct i40e_eth_ethertype_filter { + struct rte_ether_addr mac_addr; /**< MAC address to match. */ + uint16_t ether_type; /**< Ether type to match. */ + uint16_t flags; /**< Flags from I40E_ETHTYPE_FLAGS_* */ + uint16_t queue; /**< Queue assigned to on match. */ +}; + +/** + * Define all structures for Ethertype Filter type. + */ + +#define I40E_ETHTYPE_FLAGS_MAC 0x0001 /**< If set, compare MAC address. */ +#define I40E_ETHTYPE_FLAGS_DROP 0x0002 /**< If set, drop packet on match. */ +#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t)) +#define I40E_FLOW_MASK_ARRAY_SIZE \ + (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT) + +/** + * filter types of tunneling packets + */ +#define I40E_TUNNEL_FILTER_OMAC 0x01 /**< filter by outer MAC addr */ +#define I40E_TUNNEL_FILTER_OIP 0x02 /**< filter by outer IP addr */ +#define I40E_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */ +#define I40E_TUNNEL_FILTER_IMAC 0x08 /**< filter by inner MAC addr */ +#define I40E_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */ +#define I40E_TUNNEL_FILTER_IIP 0x20 /**< filter by inner IP addr */ + +#define I40E_TUNNEL_FILTER_IMAC_IVLAN (I40E_TUNNEL_FILTER_IMAC | \ + I40E_TUNNEL_FILTER_IVLAN) +#define I40E_TUNNEL_FILTER_IMAC_IVLAN_TENID (I40E_TUNNEL_FILTER_IMAC | \ + I40E_TUNNEL_FILTER_IVLAN | \ + I40E_TUNNEL_FILTER_TENID) +#define I40E_TUNNEL_FILTER_IMAC_TENID (I40E_TUNNEL_FILTER_IMAC | \ + I40E_TUNNEL_FILTER_TENID) +#define I40E_TUNNEL_FILTER_OMAC_TENID_IMAC (I40E_TUNNEL_FILTER_OMAC | \ + I40E_TUNNEL_FILTER_TENID | \ + I40E_TUNNEL_FILTER_IMAC) + +#define I40E_INSET_SIZE_MAX 128 /**< Max length of input set.
*/ + +/** + * Input set fields for Flow Director and Hash filters + */ +enum i40e_input_set_field { + I40E_INPUT_SET_UNKNOWN = 0, + + /* L2 */ + I40E_INPUT_SET_L2_SRC_MAC = 1, + I40E_INPUT_SET_L2_DST_MAC, + I40E_INPUT_SET_L2_OUTER_VLAN, + I40E_INPUT_SET_L2_INNER_VLAN, + I40E_INPUT_SET_L2_ETHERTYPE, + + /* L3 */ + I40E_INPUT_SET_L3_SRC_IP4 = 129, + I40E_INPUT_SET_L3_DST_IP4, + I40E_INPUT_SET_L3_SRC_IP6, + I40E_INPUT_SET_L3_DST_IP6, + I40E_INPUT_SET_L3_IP4_TOS, + I40E_INPUT_SET_L3_IP4_PROTO, + I40E_INPUT_SET_L3_IP6_TC, + I40E_INPUT_SET_L3_IP6_NEXT_HEADER, + I40E_INPUT_SET_L3_IP4_TTL, + I40E_INPUT_SET_L3_IP6_HOP_LIMITS, + + /* L4 */ + I40E_INPUT_SET_L4_UDP_SRC_PORT = 257, + I40E_INPUT_SET_L4_UDP_DST_PORT, + I40E_INPUT_SET_L4_TCP_SRC_PORT, + I40E_INPUT_SET_L4_TCP_DST_PORT, + I40E_INPUT_SET_L4_SCTP_SRC_PORT, + I40E_INPUT_SET_L4_SCTP_DST_PORT, + I40E_INPUT_SET_L4_SCTP_VERIFICATION_TAG, + + /* Tunnel */ + I40E_INPUT_SET_TUNNEL_L2_INNER_DST_MAC = 385, + I40E_INPUT_SET_TUNNEL_L2_INNER_SRC_MAC, + I40E_INPUT_SET_TUNNEL_L2_INNER_VLAN, + I40E_INPUT_SET_TUNNEL_L4_UDP_KEY, + I40E_INPUT_SET_TUNNEL_GRE_KEY, + + /* Flexible Payload */ + I40E_INPUT_SET_FLEX_PAYLOAD_1ST_WORD = 641, + I40E_INPUT_SET_FLEX_PAYLOAD_2ND_WORD, + I40E_INPUT_SET_FLEX_PAYLOAD_3RD_WORD, + I40E_INPUT_SET_FLEX_PAYLOAD_4TH_WORD, + I40E_INPUT_SET_FLEX_PAYLOAD_5TH_WORD, + I40E_INPUT_SET_FLEX_PAYLOAD_6TH_WORD, + I40E_INPUT_SET_FLEX_PAYLOAD_7TH_WORD, + I40E_INPUT_SET_FLEX_PAYLOAD_8TH_WORD, + + I40E_INPUT_SET_DEFAULT = 65533, + I40E_INPUT_SET_NONE = 65534, + I40E_INPUT_SET_MAX = 65535, +}; + +/** + * Filter input set operations + */ +enum i40e_filter_input_set_op { + I40E_INPUT_SET_OP_UNKNOWN, + I40E_INPUT_SET_SELECT, /**< select input set */ + I40E_INPUT_SET_ADD, /**< add input set entry */ + I40E_INPUT_SET_OP_MAX +}; + +/** + * A structure used to define the input set configuration for + * flow director and hash filters + */ +struct i40e_input_set_conf { + uint16_t flow_type; + uint16_t inset_size; + enum i40e_input_set_field field[I40E_INSET_SIZE_MAX]; + enum i40e_filter_input_set_op op; +}; + +/** + * Flow Director filter information types. + */ +enum i40e_fdir_filter_info_type { + I40E_FDIR_FILTER_INFO_TYPE_UNKNOWN = 0, + /** Flow Director filter input set configuration */ + I40E_FDIR_FILTER_INPUT_SET_SELECT, + I40E_FDIR_FILTER_INFO_TYPE_MAX, +}; + +/** + * A structure used to set FDIR filter information, to support the + * I40E_FDIR_FILTER_INPUT_SET_SELECT operation of filter type + * 'RTE_ETH_FILTER_FDIR'. + */ +struct i40e_fdir_filter_info { + enum i40e_fdir_filter_info_type info_type; /**< Information type */ + /** Details of fdir filter information */ + union { + /** Flow Director input set configuration per port */ + struct i40e_input_set_conf input_set_conf; + } info; +}; + +/** + * Hash filter information types. + * - I40E_HASH_FILTER_SYM_HASH_ENA_PER_PORT is for getting/setting the + * information/configuration of 'symmetric hash enable' per port. + * - I40E_HASH_FILTER_GLOBAL_CONFIG is for getting/setting the global + * configurations of hash filters. Those global configurations are valid + * for all ports of the same NIC.
+ * - I40E_HASH_FILTER_INPUT_SET_SELECT is for setting the global + * hash input set fields + */ +enum i40e_hash_filter_info_type { + I40E_HASH_FILTER_INFO_TYPE_UNKNOWN = 0, + /** Symmetric hash enable per port */ + I40E_HASH_FILTER_SYM_HASH_ENA_PER_PORT, + /** Configure globally for hash filter */ + I40E_HASH_FILTER_GLOBAL_CONFIG, + /** Global Hash filter input set configuration */ + I40E_HASH_FILTER_INPUT_SET_SELECT, + I40E_HASH_FILTER_INFO_TYPE_MAX, +}; + +/** + * A structure used to set or get global hash function configurations which + * include symmetric hash enable per flow type and hash function type. + * Each bit in sym_hash_enable_mask[] indicates if the symmetric hash of the + * corresponding flow type is enabled or not. + * Each bit in valid_bit_mask[] indicates if the corresponding bit in + * sym_hash_enable_mask[] is valid or not. For configurations read back, it + * also indicates whether the flow type is supported by hardware. + */ +struct i40e_hash_global_conf { + enum rte_eth_hash_function hash_func; /**< Hash function type */ + /** Bit mask for symmetric hash enable per flow type */ + uint64_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE]; + /** Bit mask indicates if the corresponding bit is valid */ + uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE]; +}; + +/** + * A structure used to set or get hash filter information, to support filter + * type of 'RTE_ETH_FILTER_HASH' and its operations. + */ +struct i40e_hash_filter_info { + enum i40e_hash_filter_info_type info_type; /**< Information type */ + /** Details of hash filter information */ + union { + /** For I40E_HASH_FILTER_SYM_HASH_ENA_PER_PORT */ + uint8_t enable; + /** Global configurations of hash filter */ + struct i40e_hash_global_conf global_conf; + /** Global configurations of hash filter input set */ + struct i40e_input_set_conf input_set_conf; + } info; +}; + +/** + * A structure used to define the input for IPV4 flow + */ +struct i40e_ipv4_flow { + uint32_t src_ip; /**< IPv4 source address in big endian. */ + uint32_t dst_ip; /**< IPv4 destination address in big endian. */ + uint8_t tos; /**< Type of service to match. */ + uint8_t ttl; /**< Time to live to match. */ + uint8_t proto; /**< Protocol, next header in big endian. */ +}; + +/** + * A structure used to define the input for IPV4 UDP flow + */ +struct i40e_udpv4_flow { + struct i40e_ipv4_flow ip; /**< IPv4 fields to match. */ + uint16_t src_port; /**< UDP source port in big endian. */ + uint16_t dst_port; /**< UDP destination port in big endian. */ +}; + +/** + * A structure used to define the input for IPV4 TCP flow + */ +struct i40e_tcpv4_flow { + struct i40e_ipv4_flow ip; /**< IPv4 fields to match. */ + uint16_t src_port; /**< TCP source port in big endian. */ + uint16_t dst_port; /**< TCP destination port in big endian. */ +}; + +/** + * A structure used to define the input for IPV4 SCTP flow + */ +struct i40e_sctpv4_flow { + struct i40e_ipv4_flow ip; /**< IPv4 fields to match. */ + uint16_t src_port; /**< SCTP source port in big endian. */ + uint16_t dst_port; /**< SCTP destination port in big endian. */ + uint32_t verify_tag; /**< Verify tag in big endian. */ +}; + +/** + * A structure used to define the input for IPV6 flow + */ +struct i40e_ipv6_flow { + uint32_t src_ip[4]; /**< IPv6 source address in big endian. */ + uint32_t dst_ip[4]; /**< IPv6 destination address in big endian. */ + uint8_t tc; /**< Traffic class to match. */ + uint8_t proto; /**< Protocol, next header to match. */ + uint8_t hop_limits; /**< Hop limits to match.
*/ +}; + +/** + * A structure used to define the input for IPV6 UDP flow + */ +struct i40e_udpv6_flow { + struct i40e_ipv6_flow ip; /**< IPv6 fields to match. */ + uint16_t src_port; /**< UDP source port in big endian. */ + uint16_t dst_port; /**< UDP destination port in big endian. */ +}; + +/** + * A structure used to define the input for IPV6 TCP flow + */ +struct i40e_tcpv6_flow { + struct i40e_ipv6_flow ip; /**< IPv6 fields to match. */ + uint16_t src_port; /**< TCP source port in big endian. */ + uint16_t dst_port; /**< TCP destination port in big endian. */ +}; + +/** + * A structure used to define the input for IPV6 SCTP flow + */ +struct i40e_sctpv6_flow { + struct i40e_ipv6_flow ip; /**< IPv6 fields to match. */ + uint16_t src_port; /**< SCTP source port in big endian. */ + uint16_t dst_port; /**< SCTP destination port in big endian. */ + uint32_t verify_tag; /**< Verify tag in big endian. */ +}; + /* * Struct to store flow created. */ struct rte_flow { TAILQ_ENTRY(rte_flow) node; - enum rte_filter_type filter_type; + enum i40e_eth_filter_type filter_type; void *rule; }; @@ -292,7 +591,7 @@ struct rte_pci_driver; * MAC filter structure */ struct i40e_mac_filter_info { - enum rte_mac_filter_type filter_type; + enum i40e_mac_filter_type filter_type; struct rte_ether_addr mac_addr; }; @@ -347,7 +646,7 @@ struct i40e_veb { /* i40e MACVLAN filter structure */ struct i40e_macvlan_filter { struct rte_ether_addr macaddr; - enum rte_mac_filter_type filter_type; + enum i40e_mac_filter_type filter_type; uint16_t vlan_id; }; @@ -502,7 +801,7 @@ struct i40e_vmdq_info { /* A structure used to define the input for GTP flow */ struct i40e_gtp_flow { - struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */ + struct i40e_udpv4_flow udp; /* IPv4 UDP fields to match. */ uint8_t msg_type; /* Message type. */ uint32_t teid; /* TEID in big endian. */ }; @@ -510,35 +809,35 @@ struct i40e_gtp_flow { /* A structure used to define the input for GTP IPV4 flow */ struct i40e_gtp_ipv4_flow { struct i40e_gtp_flow gtp; - struct rte_eth_ipv4_flow ip4; + struct i40e_ipv4_flow ip4; }; /* A structure used to define the input for GTP IPV6 flow */ struct i40e_gtp_ipv6_flow { struct i40e_gtp_flow gtp; - struct rte_eth_ipv6_flow ip6; + struct i40e_ipv6_flow ip6; }; /* A structure used to define the input for ESP IPV4 flow */ struct i40e_esp_ipv4_flow { - struct rte_eth_ipv4_flow ipv4; + struct i40e_ipv4_flow ipv4; uint32_t spi; /* SPI in big endian. */ }; /* A structure used to define the input for ESP IPV6 flow */ struct i40e_esp_ipv6_flow { - struct rte_eth_ipv6_flow ipv6; + struct i40e_ipv6_flow ipv6; uint32_t spi; /* SPI in big endian. */ }; /* A structure used to define the input for ESP IPV4 UDP flow */ struct i40e_esp_ipv4_udp_flow { - struct rte_eth_udpv4_flow udp; + struct i40e_udpv4_flow udp; uint32_t spi; /* SPI in big endian. */ }; /* A structure used to define the input for ESP IPV6 UDP flow */ struct i40e_esp_ipv6_udp_flow { - struct rte_eth_udpv6_flow udp; + struct i40e_udpv6_flow udp; uint32_t spi; /* SPI in big endian. */ }; @@ -551,13 +850,13 @@ struct i40e_raw_flow { /* A structure used to define the input for L2TPv3 over IPv4 flow */ struct i40e_ipv4_l2tpv3oip_flow { - struct rte_eth_ipv4_flow ip4; + struct i40e_ipv4_flow ip4; uint32_t session_id; /* Session ID in big endian.
*/ }; /* A structure used to define the input for L2TPv3 over IPv6 flow */ struct i40e_ipv6_l2tpv3oip_flow { - struct rte_eth_ipv6_flow ip6; + struct i40e_ipv6_flow ip6; uint32_t session_id; /* Session ID in big endian. */ }; @@ -574,14 +873,14 @@ struct i40e_l2_flow { */ union i40e_fdir_flow { struct i40e_l2_flow l2_flow; - struct rte_eth_udpv4_flow udp4_flow; - struct rte_eth_tcpv4_flow tcp4_flow; - struct rte_eth_sctpv4_flow sctp4_flow; - struct rte_eth_ipv4_flow ip4_flow; - struct rte_eth_udpv6_flow udp6_flow; - struct rte_eth_tcpv6_flow tcp6_flow; - struct rte_eth_sctpv6_flow sctp6_flow; - struct rte_eth_ipv6_flow ipv6_flow; + struct i40e_udpv4_flow udp4_flow; + struct i40e_tcpv4_flow tcp4_flow; + struct i40e_sctpv4_flow sctp4_flow; + struct i40e_ipv4_flow ip4_flow; + struct i40e_udpv6_flow udp6_flow; + struct i40e_tcpv6_flow tcp6_flow; + struct i40e_sctpv6_flow sctp6_flow; + struct i40e_ipv6_flow ipv6_flow; struct i40e_gtp_flow gtp_flow; struct i40e_gtp_ipv4_flow gtp_ipv4_flow; struct i40e_gtp_ipv6_flow gtp_ipv6_flow; @@ -602,7 +901,7 @@ enum i40e_fdir_ip_type { /* A structure used to contain extend input of flow */ struct i40e_fdir_flow_ext { uint16_t vlan_tci; - uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN]; + uint8_t flexbytes[I40E_FDIR_MAX_FLEXLEN]; /* It is filled by the flexible payload to match. */ uint8_t is_vf; /* 1 for VF, 0 for port dev */ uint16_t dst_id; /* VF ID, available when is_vf is 1*/ @@ -922,15 +1221,15 @@ struct i40e_tunnel_filter_conf { uint32_t outer_vlan; /**< Outer VLAN to match */ enum i40e_tunnel_iptype ip_type; /**< IP address type. */ /** - * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP + * Outer destination IP address to match if I40E_TUNNEL_FILTER_OIP * is set in filter_type, or inner destination IP address to match - * if ETH_TUNNEL_FILTER_IIP is set in filter_type. + * if I40E_TUNNEL_FILTER_IIP is set in filter_type. */ union { uint32_t ipv4_addr; /**< IPv4 address in big endian. */ uint32_t ipv6_addr[4]; /**< IPv6 address in big endian. */ } ip_addr; - /** Flags from ETH_TUNNEL_FILTER_XX - see above. */ + /** Flags from I40E_TUNNEL_FILTER_XX - see above. */ uint16_t filter_type; enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */ enum i40e_l4_port_type l4_port_type; /**< L4 Port Type. 
*/ @@ -1294,7 +1593,7 @@ struct i40e_vf_representor { extern const struct rte_flow_ops i40e_flow_ops; union i40e_filter_t { - struct rte_eth_ethertype_filter ethertype_filter; + struct i40e_eth_ethertype_filter ethertype_filter; struct i40e_fdir_filter_conf fdir_filter; struct rte_eth_tunnel_filter_conf tunnel_filter; struct i40e_tunnel_filter_conf consistent_tunnel_filter; @@ -1365,9 +1664,9 @@ int i40e_select_filter_input_set(struct i40e_hw *hw, enum rte_filter_type filter); void i40e_fdir_filter_restore(struct i40e_pf *pf); int i40e_hash_filter_inset_select(struct i40e_hw *hw, - struct rte_eth_input_set_conf *conf); + struct i40e_input_set_conf *conf); int i40e_fdir_filter_inset_select(struct i40e_pf *pf, - struct rte_eth_input_set_conf *conf); + struct i40e_input_set_conf *conf); int i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode, uint32_t retval, uint8_t *msg, uint16_t msglen); @@ -1393,7 +1692,7 @@ int i40e_sw_tunnel_filter_del(struct i40e_pf *pf, struct i40e_tunnel_filter_input *input); uint64_t i40e_get_default_input_set(uint16_t pctype); int i40e_ethertype_filter_set(struct i40e_pf *pf, - struct rte_eth_ethertype_filter *filter, + struct i40e_eth_ethertype_filter *filter, bool add); int i40e_add_del_fdir_filter(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *filter, @@ -1427,7 +1726,7 @@ bool is_i40e_supported(struct rte_eth_dev *dev); bool is_i40evf_supported(struct rte_eth_dev *dev); int i40e_validate_input_set(enum i40e_filter_pctype pctype, - enum rte_filter_type filter, uint64_t inset); + enum i40e_eth_filter_type filter, uint64_t inset); int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem); uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input); diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index aa8e72949..8bcb8699c 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -2360,7 +2360,7 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir) (uint32_t)hw->func_caps.fd_filters_best_effort; fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN; fdir->flow_types_mask[0] = I40E_FDIR_FLOWS; - for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++) + for (i = 1; i < I40E_FLOW_MASK_ARRAY_SIZE; i++) fdir->flow_types_mask[i] = 0ULL; fdir->flex_payload_unit = sizeof(uint16_t); fdir->flex_bitmask_unit = sizeof(uint16_t); @@ -2403,7 +2403,7 @@ i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat) static int i40e_fdir_filter_set(struct rte_eth_dev *dev, - struct rte_eth_fdir_filter_info *info) + struct i40e_fdir_filter_info *info) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); int ret = 0; @@ -2414,7 +2414,7 @@ i40e_fdir_filter_set(struct rte_eth_dev *dev, } switch (info->info_type) { - case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT: + case I40E_FDIR_FILTER_INPUT_SET_SELECT: ret = i40e_fdir_filter_inset_select(pf, &(info->info.input_set_conf)); break; @@ -2469,7 +2469,7 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev, break; case RTE_ETH_FILTER_SET: ret = i40e_fdir_filter_set(dev, - (struct rte_eth_fdir_filter_info *)arg); + (struct i40e_fdir_filter_info *)arg); break; case RTE_ETH_FILTER_STATS: i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg); diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index adc5da1c5..08423f00b 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -52,11 +52,11 @@ static int i40e_flow_parse_ethertype_pattern(struct 
rte_eth_dev *dev, const struct rte_flow_item *pattern, struct rte_flow_error *error, - struct rte_eth_ethertype_filter *filter); + struct i40e_eth_ethertype_filter *filter); static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions, struct rte_flow_error *error, - struct rte_eth_ethertype_filter *filter); + struct i40e_eth_ethertype_filter *filter); static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, @@ -144,7 +144,7 @@ const struct rte_flow_ops i40e_flow_ops = { }; static union i40e_filter_t cons_filter; -static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE; +static enum i40e_eth_filter_type cons_filter_type = I40E_ETH_FILTER_NONE; /* internal pattern w/o VOID items */ struct rte_flow_item g_items[32]; @@ -2041,7 +2041,7 @@ static int i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *pattern, struct rte_flow_error *error, - struct rte_eth_ethertype_filter *filter) + struct i40e_eth_ethertype_filter *filter) { const struct rte_flow_item *item = pattern; const struct rte_flow_item_eth *eth_spec; @@ -2093,13 +2093,13 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, } /* If mask bits of destination MAC address - * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + * are full of 1, set I40E_ETHTYPE_FLAGS_MAC. */ if (rte_is_broadcast_ether_addr(ð_mask->dst)) { filter->mac_addr = eth_spec->dst; - filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + filter->flags |= I40E_ETHTYPE_FLAGS_MAC; } else { - filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + filter->flags &= ~I40E_ETHTYPE_FLAGS_MAC; } filter->ether_type = rte_be_to_cpu_16(eth_spec->type); @@ -2128,7 +2128,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions, struct rte_flow_error *error, - struct rte_eth_ethertype_filter *filter) + struct i40e_eth_ethertype_filter *filter) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); const struct rte_flow_action *act; @@ -2155,7 +2155,7 @@ i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, return -rte_errno; } } else { - filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + filter->flags |= I40E_ETHTYPE_FLAGS_DROP; } /* Check if the next non-void item is END */ @@ -2178,7 +2178,7 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev, struct rte_flow_error *error, union i40e_filter_t *filter) { - struct rte_eth_ethertype_filter *ethertype_filter = + struct i40e_eth_ethertype_filter *ethertype_filter = &filter->ethertype_filter; int ret; @@ -2196,7 +2196,7 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_ETHERTYPE; + cons_filter_type = I40E_ETH_FILTER_ETHERTYPE; return ret; } @@ -2397,7 +2397,7 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf, int i, num; /* Check if the input set is valid */ - if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, + if (i40e_validate_input_set(pctype, I40E_ETH_FILTER_FDIR, input_set) != 0) { PMD_DRV_LOG(ERR, "Invalid input set"); return -EINVAL; @@ -3458,7 +3458,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_FDIR; + cons_filter_type = I40E_ETH_FILTER_FDIR; if (pf->fdir.fdir_vsi == NULL) { /* Enable fdir when fdir flow is added at first time. 
*/ @@ -3769,19 +3769,19 @@ i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_TUNNEL; + cons_filter_type = I40E_ETH_FILTER_TUNNEL; return ret; } static uint16_t i40e_supported_tunnel_filter_types[] = { - ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID | - ETH_TUNNEL_FILTER_IVLAN, - ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN, - ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID, - ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID | - ETH_TUNNEL_FILTER_IMAC, - ETH_TUNNEL_FILTER_IMAC, + I40E_TUNNEL_FILTER_IMAC | I40E_TUNNEL_FILTER_TENID | + I40E_TUNNEL_FILTER_IVLAN, + I40E_TUNNEL_FILTER_IMAC | I40E_TUNNEL_FILTER_IVLAN, + I40E_TUNNEL_FILTER_IMAC | I40E_TUNNEL_FILTER_TENID, + I40E_TUNNEL_FILTER_OMAC | I40E_TUNNEL_FILTER_TENID | + I40E_TUNNEL_FILTER_IMAC, + I40E_TUNNEL_FILTER_IMAC, }; static int @@ -3871,12 +3871,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, rte_memcpy(&filter->outer_mac, ð_spec->dst, RTE_ETHER_ADDR_LEN); - filter_type |= ETH_TUNNEL_FILTER_OMAC; + filter_type |= I40E_TUNNEL_FILTER_OMAC; } else { rte_memcpy(&filter->inner_mac, ð_spec->dst, RTE_ETHER_ADDR_LEN); - filter_type |= ETH_TUNNEL_FILTER_IMAC; + filter_type |= I40E_TUNNEL_FILTER_IMAC; } } break; @@ -3898,7 +3898,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci) & I40E_TCI_MASK; - filter_type |= ETH_TUNNEL_FILTER_IVLAN; + filter_type |= I40E_TUNNEL_FILTER_IVLAN; } break; case RTE_FLOW_ITEM_TYPE_IPV4: @@ -3972,7 +3972,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, vxlan_spec->vni, 3); filter->tenant_id = rte_be_to_cpu_32(tenant_id_be); - filter_type |= ETH_TUNNEL_FILTER_TENID; + filter_type |= I40E_TUNNEL_FILTER_TENID; } vxlan_flag = 1; @@ -4022,7 +4022,7 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_TUNNEL; + cons_filter_type = I40E_ETH_FILTER_TUNNEL; return ret; } @@ -4101,12 +4101,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, rte_memcpy(&filter->outer_mac, ð_spec->dst, RTE_ETHER_ADDR_LEN); - filter_type |= ETH_TUNNEL_FILTER_OMAC; + filter_type |= I40E_TUNNEL_FILTER_OMAC; } else { rte_memcpy(&filter->inner_mac, ð_spec->dst, RTE_ETHER_ADDR_LEN); - filter_type |= ETH_TUNNEL_FILTER_IMAC; + filter_type |= I40E_TUNNEL_FILTER_IMAC; } } @@ -4129,7 +4129,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci) & I40E_TCI_MASK; - filter_type |= ETH_TUNNEL_FILTER_IVLAN; + filter_type |= I40E_TUNNEL_FILTER_IVLAN; } break; case RTE_FLOW_ITEM_TYPE_IPV4: @@ -4224,7 +4224,7 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, nvgre_spec->tni, 3); filter->tenant_id = rte_be_to_cpu_32(tenant_id_be); - filter_type |= ETH_TUNNEL_FILTER_TENID; + filter_type |= I40E_TUNNEL_FILTER_TENID; } nvgre_flag = 1; @@ -4274,7 +4274,7 @@ i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_TUNNEL; + cons_filter_type = I40E_ETH_FILTER_TUNNEL; return ret; } @@ -4431,7 +4431,7 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_TUNNEL; + cons_filter_type = I40E_ETH_FILTER_TUNNEL; return ret; } @@ -4571,7 +4571,7 @@ i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_TUNNEL; + cons_filter_type = 
I40E_ETH_FILTER_TUNNEL; return ret; } @@ -4694,7 +4694,7 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_TUNNEL; + cons_filter_type = I40E_ETH_FILTER_TUNNEL; return ret; } @@ -5195,7 +5195,7 @@ i40e_parse_rss_filter(struct rte_eth_dev *dev, if (ret) return ret; - cons_filter_type = RTE_ETH_FILTER_HASH; + cons_filter_type = I40E_ETH_FILTER_HASH; return 0; } @@ -5371,7 +5371,7 @@ i40e_flow_create(struct rte_eth_dev *dev, if (ret < 0) return NULL; - if (cons_filter_type == RTE_ETH_FILTER_FDIR) { + if (cons_filter_type == I40E_ETH_FILTER_FDIR) { flow = i40e_fdir_entry_pool_get(fdir_info); if (flow == NULL) { rte_flow_error_set(error, ENOBUFS, @@ -5391,7 +5391,7 @@ i40e_flow_create(struct rte_eth_dev *dev, } switch (cons_filter_type) { - case RTE_ETH_FILTER_ETHERTYPE: + case I40E_ETH_FILTER_ETHERTYPE: ret = i40e_ethertype_filter_set(pf, &cons_filter.ethertype_filter, 1); if (ret) @@ -5399,7 +5399,7 @@ i40e_flow_create(struct rte_eth_dev *dev, flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list, i40e_ethertype_filter_list); break; - case RTE_ETH_FILTER_FDIR: + case I40E_ETH_FILTER_FDIR: ret = i40e_flow_add_del_fdir_filter(dev, &cons_filter.fdir_filter, 1); if (ret) @@ -5407,7 +5407,7 @@ i40e_flow_create(struct rte_eth_dev *dev, flow->rule = TAILQ_LAST(&pf->fdir.fdir_list, i40e_fdir_filter_list); break; - case RTE_ETH_FILTER_TUNNEL: + case I40E_ETH_FILTER_TUNNEL: ret = i40e_dev_consistent_tunnel_filter_set(pf, &cons_filter.consistent_tunnel_filter, 1); if (ret) @@ -5415,7 +5415,7 @@ i40e_flow_create(struct rte_eth_dev *dev, flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list, i40e_tunnel_filter_list); break; - case RTE_ETH_FILTER_HASH: + case I40E_ETH_FILTER_HASH: ret = i40e_config_rss_filter_set(dev, &cons_filter.rss_conf); if (ret) @@ -5436,7 +5436,7 @@ i40e_flow_create(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to create flow."); - if (cons_filter_type != RTE_ETH_FILTER_FDIR) + if (cons_filter_type != I40E_ETH_FILTER_FDIR) rte_free(flow); else i40e_fdir_entry_pool_put(fdir_info, flow); @@ -5450,20 +5450,20 @@ i40e_flow_destroy(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - enum rte_filter_type filter_type = flow->filter_type; + enum i40e_eth_filter_type filter_type = flow->filter_type; struct i40e_fdir_info *fdir_info = &pf->fdir; int ret = 0; switch (filter_type) { - case RTE_ETH_FILTER_ETHERTYPE: + case I40E_ETH_FILTER_ETHERTYPE: ret = i40e_flow_destroy_ethertype_filter(pf, (struct i40e_ethertype_filter *)flow->rule); break; - case RTE_ETH_FILTER_TUNNEL: + case I40E_ETH_FILTER_TUNNEL: ret = i40e_flow_destroy_tunnel_filter(pf, (struct i40e_tunnel_filter *)flow->rule); break; - case RTE_ETH_FILTER_FDIR: + case I40E_ETH_FILTER_FDIR: ret = i40e_flow_add_del_fdir_filter(dev, &((struct i40e_fdir_filter *)flow->rule)->fdir, 0); @@ -5473,7 +5473,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev, i40e_fdir_rx_proc_enable(dev, 0); } break; - case RTE_ETH_FILTER_HASH: + case I40E_ETH_FILTER_HASH: ret = i40e_config_rss_filter_del(dev, &((struct i40e_rss_filter *)flow->rule)->rss_filter_info); break; @@ -5486,7 +5486,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev, if (!ret) { TAILQ_REMOVE(&pf->flow_list, flow, node); - if (filter_type == RTE_ETH_FILTER_FDIR) + if (filter_type == I40E_ETH_FILTER_FDIR) i40e_fdir_entry_pool_put(fdir_info, flow); else rte_free(flow); @@ -5510,9 +5510,9 @@ i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf, 
uint16_t flags = 0; int ret = 0; - if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC)) + if (!(filter->flags & I40E_ETHTYPE_FLAGS_MAC)) flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; - if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) + if (filter->flags & I40E_ETHTYPE_FLAGS_DROP) flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; @@ -5658,9 +5658,8 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf) /* Delete FDIR flows in flow list. */ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { - if (flow->filter_type == RTE_ETH_FILTER_FDIR) { + if (flow->filter_type == I40E_ETH_FILTER_FDIR) TAILQ_REMOVE(&pf->flow_list, flow, node); - } } /* reset bitmap */ @@ -5710,7 +5709,7 @@ i40e_flow_flush_ethertype_filter(struct i40e_pf *pf) /* Delete ethertype flows in flow list. */ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { - if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) { + if (flow->filter_type == I40E_ETH_FILTER_ETHERTYPE) { TAILQ_REMOVE(&pf->flow_list, flow, node); rte_free(flow); } @@ -5738,7 +5737,7 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf) /* Delete tunnel flows in flow list. */ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { - if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) { + if (flow->filter_type == I40E_ETH_FILTER_TUNNEL) { TAILQ_REMOVE(&pf->flow_list, flow, node); rte_free(flow); } @@ -5761,7 +5760,7 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) /* Delete RSS flows in flow list. */ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { - if (flow->filter_type != RTE_ETH_FILTER_HASH) + if (flow->filter_type != I40E_ETH_FILTER_HASH) continue; if (flow->rule) { @@ -5784,7 +5783,7 @@ i40e_flow_query(struct rte_eth_dev *dev __rte_unused, void *data, struct rte_flow_error *error) { struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule; - enum rte_filter_type filter_type = flow->filter_type; + enum i40e_eth_filter_type filter_type = flow->filter_type; struct rte_flow_action_rss *rss_conf = data; if (!rss_rule) { @@ -5799,7 +5798,7 @@ i40e_flow_query(struct rte_eth_dev *dev __rte_unused, case RTE_FLOW_ACTION_TYPE_VOID: break; case RTE_FLOW_ACTION_TYPE_RSS: - if (filter_type != RTE_ETH_FILTER_HASH) { + if (filter_type != I40E_ETH_FILTER_HASH) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c index 03c2070c3..65d649b62 100644 --- a/drivers/net/i40e/i40e_pf.c +++ b/drivers/net/i40e/i40e_pf.c @@ -844,7 +844,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf, for (i = 0; i < addr_list->num_elements; i++) { mac = (struct rte_ether_addr *)(addr_list->list[i].addr); rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN); - filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; if (rte_is_zero_ether_addr(mac) || i40e_vsi_add_mac(vf->vsi, &filter)) { ret = I40E_ERR_INVALID_MAC_ADDR; diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c index 17938e7d3..790d04200 100644 --- a/drivers/net/i40e/rte_pmd_i40e.c +++ b/drivers/net/i40e/rte_pmd_i40e.c @@ -211,7 +211,7 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi) struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; int i, vlan_num; - enum rte_mac_filter_type filter_type; + enum i40e_mac_filter_type filter_type; int ret = I40E_SUCCESS; void *temp; @@ -219,14 +219,14 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi) TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { vlan_num 
= vsi->vlan_num; filter_type = f->mac_info.filter_type; - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } - } else if (filter_type == RTE_MAC_PERFECT_MATCH || - filter_type == RTE_MAC_HASH_MATCH) + } else if (filter_type == I40E_MAC_PERFECT_MATCH || + filter_type == I40E_MAC_HASH_MATCH) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -241,8 +241,8 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi) &f->mac_info.mac_addr, ETH_ADDR_LEN); } - if (filter_type == RTE_MACVLAN_PERFECT_MATCH || - filter_type == RTE_MACVLAN_HASH_MATCH) { + if (filter_type == I40E_MACVLAN_PERFECT_MATCH || + filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, &f->mac_info.mac_addr); if (ret != I40E_SUCCESS) { @@ -275,8 +275,8 @@ i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi) /* restore all the MACs */ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { - if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) || - (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) { + if (f->mac_info.filter_type == I40E_MACVLAN_PERFECT_MATCH || + f->mac_info.filter_type == I40E_MACVLAN_HASH_MATCH) { /** * If vlan_num is 0, that's the first time to add mac, * set mask for vlan_id 0. @@ -286,8 +286,8 @@ i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi) vsi->vlan_num = 1; } vlan_num = vsi->vlan_num; - } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) || - (f->mac_info.filter_type == RTE_MAC_HASH_MATCH)) + } else if (f->mac_info.filter_type == I40E_MAC_PERFECT_MATCH || + f->mac_info.filter_type == I40E_MAC_HASH_MATCH) vlan_num = 1; mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); @@ -303,8 +303,8 @@ i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi) ETH_ADDR_LEN); } - if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH || - f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) { + if (f->mac_info.filter_type == I40E_MACVLAN_PERFECT_MATCH || + f->mac_info.filter_type == I40E_MACVLAN_HASH_MATCH) { ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, &f->mac_info.mac_addr); if (ret != I40E_SUCCESS) { @@ -768,7 +768,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id, if (on) { rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); - filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); } else { ret = i40e_vsi_delete_mac(vsi, &broadcast); @@ -2388,7 +2388,7 @@ rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id, return -EINVAL; } - mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; rte_ether_addr_copy(mac_addr, &mac_filter.mac_addr); ret = i40e_vsi_add_mac(vsi, &mac_filter); if (ret != I40E_SUCCESS) { From patchwork Tue Sep 29 07:48:33 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Chenxu Di X-Patchwork-Id: 79147 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id EED66A04C0; Tue, 29 Sep 2020 10:09:42 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) 
with ESMTP id 33FE51D72D; Tue, 29 Sep 2020 10:09:40 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id 39A301D5C9 for ; Tue, 29 Sep 2020 10:09:37 +0200 (CEST) IronPort-SDR: C2aMZmjmAZ+lPwUWfmOUdrmSc71mWf/VYwUVRuKw0cju7SPugK3ML1R19+y8fVre4N6gg7tPu4 +a0YJkHSh4gA== X-IronPort-AV: E=McAfee;i="6000,8403,9758"; a="163018494" X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="163018494" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 29 Sep 2020 01:09:34 -0700 IronPort-SDR: ZfX/cxnBAuWKqKFSH94ou1oEDFscPm4HY1fnaHbowqL3VFCw18Fx5XWqkpoaKd/CfO8ls9Zjsw OFrTph2N5/bw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="415289168" Received: from unknown (HELO localhost.localdomain) ([10.239.255.61]) by fmsmga001.fm.intel.com with ESMTP; 29 Sep 2020 01:09:31 -0700 From: Chenxu Di To: dev@dpdk.org Cc: junyux.jiang@intel.com, shougangx.wang@intel.com, Jeff Guo , Haiyue Wang Date: Tue, 29 Sep 2020 07:48:33 +0000 Message-Id: <20200929074835.39854-1-chenxux.di@intel.com> X-Mailer: git-send-email 2.17.1 Subject: [dpdk-dev] [RFC 2/5] net/igc: decouple dependency from superseded structures X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Junyu Jiang The legacy filter API will be removed, the associated rte_eth_ctrl.h will also be removed. This patch replaces these superseded structures by the PMD internal structures. Signed-off-by: Junyu Jiang --- drivers/net/igc/igc_filter.c | 2 +- drivers/net/igc/igc_filter.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/igc/igc_filter.c b/drivers/net/igc/igc_filter.c index 836621d4c..7b6f52a4c 100644 --- a/drivers/net/igc/igc_filter.c +++ b/drivers/net/igc/igc_filter.c @@ -216,7 +216,7 @@ igc_enable_tuple_filter(struct rte_eth_dev *dev, ttqf &= ~IGC_TTQF_MASK_ENABLE; /* TCP flags bits setting. */ - if (info->tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { + if (info->tcp_flags & IGC_NTUPLE_TCP_FLAGS_MASK) { if (info->tcp_flags & RTE_TCP_URG_FLAG) imir_ext |= IGC_IMIREXT_CTRL_URG; if (info->tcp_flags & RTE_TCP_ACK_FLAG) diff --git a/drivers/net/igc/igc_filter.h b/drivers/net/igc/igc_filter.h index 79951504f..34bc0a7e3 100644 --- a/drivers/net/igc/igc_filter.h +++ b/drivers/net/igc/igc_filter.h @@ -16,6 +16,8 @@ extern "C" { #endif +#define IGC_NTUPLE_TCP_FLAGS_MASK 0x3F /**< TCP flags filter can match. 
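Covers the six FIN/SYN/RST/PSH/ACK/URG bits and keeps the value of the removed RTE_NTUPLE_TCP_FLAGS_MASK.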
*/ + int igc_add_ethertype_filter(struct rte_eth_dev *dev, const struct igc_ethertype_filter *filter); int igc_del_ethertype_filter(struct rte_eth_dev *dev, From patchwork Tue Sep 29 07:48:34 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Chenxu Di X-Patchwork-Id: 79148 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2F3DAA04C0; Tue, 29 Sep 2020 10:10:00 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 312FA1D8E3; Tue, 29 Sep 2020 10:09:44 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id C8B421D5C9 for ; Tue, 29 Sep 2020 10:09:39 +0200 (CEST) IronPort-SDR: RYx9aaSWHY05ANnp5tAzqqdCB/8AUEGFDgZjw+UZyN0HmVl6LlNh33MvmBt7eLYl/9ve2ASIlm oLIKHpm1tnEA== X-IronPort-AV: E=McAfee;i="6000,8403,9758"; a="163018499" X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="163018499" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 29 Sep 2020 01:09:36 -0700 IronPort-SDR: LdzalDWU2ffNbsjd9PaIy9BRIFJ+IK88MoPuFuVgmnH2mkr7Cj1Mqq4oazMnuvMEfYeeVbKdTz M+6MxvPKQE+w== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="415289203" Received: from unknown (HELO localhost.localdomain) ([10.239.255.61]) by fmsmga001.fm.intel.com with ESMTP; 29 Sep 2020 01:09:33 -0700 From: Chenxu Di To: dev@dpdk.org Cc: junyux.jiang@intel.com, shougangx.wang@intel.com, Jeff Guo , Haiyue Wang , Chenxu Di Date: Tue, 29 Sep 2020 07:48:34 +0000 Message-Id: <20200929074835.39854-2-chenxux.di@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20200929074835.39854-1-chenxux.di@intel.com> References: <20200929074835.39854-1-chenxux.di@intel.com> Subject: [dpdk-dev] [RFC 3/5] net/e1000: decouple dependency from superseded structures X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The legacy filter API will be removed, and the associated rte_eth_ctrl.h will be removed with it. This patch replaces these superseded structures with the PMD internal structures. The macros RTE_ETH_FILTER_GENERIC and RTE_ETH_FILTER_GET are not replaced; they must be kept until the corresponding librte change lands. Signed-off-by: Chenxu Di --- drivers/net/e1000/e1000_ethdev.h | 113 ++++++++++++++-- drivers/net/e1000/igb_ethdev.c | 80 ++++++------- drivers/net/e1000/igb_flow.c | 199 ++++++++++++++++--------- 3 files changed, 245 insertions(+), 147 deletions(-) diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h index 1e41ae9de..3c30b9ebe 100644 --- a/drivers/net/e1000/e1000_ethdev.h +++ b/drivers/net/e1000/e1000_ethdev.h @@ -237,6 +237,88 @@ struct e1000_2tuple_filter { uint16_t queue; /* rx queue assigned to */ }; +/* Define all structures for ntuple Filter type.
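These mirror the ntuple definitions being removed from rte_eth_ctrl.h, so the flow parsing code below keeps its behaviour.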
*/ + +#define IGB_NTUPLE_FLAGS_DST_IP 0x0001 /* If set, dst_ip is part of ntuple */ +#define IGB_NTUPLE_FLAGS_SRC_IP 0x0002 /* If set, src_ip is part of ntuple */ +#define IGB_NTUPLE_FLAGS_DST_PORT 0x0004 /* If set, dstport is part of ntuple */ +#define IGB_NTUPLE_FLAGS_SRC_PORT 0x0008 /* If set, srcport is part of ntuple */ +#define IGB_NTUPLE_FLAGS_PROTO 0x0010 /* If set, proto is part of ntuple */ +#define IGB_NTUPLE_FLAGS_TCP_FLAG 0x0020 /* If set, tcp flag is involved */ + +#define IGB_5TUPLE_FLAGS ( \ + IGB_NTUPLE_FLAGS_DST_IP | \ + IGB_NTUPLE_FLAGS_SRC_IP | \ + IGB_NTUPLE_FLAGS_DST_PORT | \ + IGB_NTUPLE_FLAGS_SRC_PORT | \ + IGB_NTUPLE_FLAGS_PROTO) + +#define IGB_2TUPLE_FLAGS ( \ + IGB_NTUPLE_FLAGS_DST_PORT | \ + IGB_NTUPLE_FLAGS_PROTO) + +#define IGB_NTUPLE_TCP_FLAGS_MASK 0x3F /* TCP flags filter can match. */ + +struct igb_flow_ntuple_filter { + uint16_t flags; /* Flags from IGB_NTUPLE_FLAGS_* */ + uint32_t dst_ip; /* Destination IP address in big endian. */ + uint32_t dst_ip_mask; /* Mask of destination IP address. */ + uint32_t src_ip; /* Source IP address in big endian. */ + uint32_t src_ip_mask; /* Mask of source IP address. */ + uint16_t dst_port; /* Destination port in big endian. */ + uint16_t dst_port_mask; /* Mask of destination port. */ + uint16_t src_port; /* Source port in big endian. */ + uint16_t src_port_mask; /* Mask of source port. */ + uint8_t proto; /* L4 protocol. */ + uint8_t proto_mask; /* Mask of L4 protocol. */ + /* tcp_flags is only meaningful when proto is TCP. + * A packet that matches the ntuple fields above and + * contains any bit set in tcp_flags hits this filter. + */ + uint8_t tcp_flags; + /* seven levels (001b-111b), 111b is highest; + * used when more than one filter matches. + */ + uint16_t priority; + uint16_t queue; /* Queue assigned to when matched. */ +}; + +/* bytes to use in flex filter. */ +#define IGB_FLEX_FILTER_MAXLEN 128 +/* mask bytes in flex filter. */ +#define IGB_FLEX_FILTER_MASK_SIZE \ + (RTE_ALIGN(IGB_FLEX_FILTER_MAXLEN, CHAR_BIT) / CHAR_BIT) + +struct igb_flow_flex_filter { + uint16_t len; + uint8_t bytes[IGB_FLEX_FILTER_MAXLEN]; /* flex bytes in big endian. */ + /* if mask bit is 1b, do not compare corresponding byte. */ + uint8_t mask[IGB_FLEX_FILTER_MASK_SIZE]; + uint8_t priority; + uint16_t queue; /* Queue assigned to when matched. */ +}; + +struct igb_flow_syn_filter { + /* 1 - higher priority than other filters, 0 - lower priority. */ + uint8_t hig_pri; + /* Queue assigned to when matched. */ + uint16_t queue; +}; + +/** + * Define all structures for Ethertype Filter type. + */ + +#define IGB_ETHTYPE_FLAGS_MAC 0x0001 /* If set, compare MAC */ +#define IGB_ETHTYPE_FLAGS_DROP 0x0002 /* If set, drop packet on match */ + +struct igb_flow_ethertype_filter { + struct rte_ether_addr mac_addr; /* MAC address to match.
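Only compared when IGB_ETHTYPE_FLAGS_MAC is set in flags.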
*/ + uint16_t ether_type; /* Ether type to match */ + uint16_t flags; /* Flags from IGB_ETHTYPE_FLAGS_* */ + uint16_t queue; /* Queue assigned to when match */ +}; + /* ethertype filter structure */ struct igb_ethertype_filter { uint16_t ethertype; @@ -308,33 +390,46 @@ struct e1000_adapter { #define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \ (&((struct e1000_adapter *)adapter)->filter) +/** + * Feature filter types + */ +enum igb_filter_type { + IGB_FILTER_NONE = 0, + IGB_FILTER_ETHERTYPE, + IGB_FILTER_FLEXIBLE, + IGB_FILTER_SYN, + IGB_FILTER_NTUPLE, + IGB_FILTER_HASH, + IGB_FILTER_MAX +}; + struct rte_flow { - enum rte_filter_type filter_type; + enum igb_filter_type filter_type; void *rule; }; /* ntuple filter list structure */ struct igb_ntuple_filter_ele { TAILQ_ENTRY(igb_ntuple_filter_ele) entries; - struct rte_eth_ntuple_filter filter_info; + struct igb_flow_ntuple_filter filter_info; }; /* ethertype filter list structure */ struct igb_ethertype_filter_ele { TAILQ_ENTRY(igb_ethertype_filter_ele) entries; - struct rte_eth_ethertype_filter filter_info; + struct igb_flow_ethertype_filter filter_info; }; /* syn filter list structure */ struct igb_eth_syn_filter_ele { TAILQ_ENTRY(igb_eth_syn_filter_ele) entries; - struct rte_eth_syn_filter filter_info; + struct igb_flow_syn_filter filter_info; }; /* flex filter list structure */ struct igb_flex_filter_ele { TAILQ_ENTRY(igb_flex_filter_ele) entries; - struct rte_eth_flex_filter filter_info; + struct igb_flow_flex_filter filter_info; }; /* rss filter list structure */ @@ -507,15 +602,15 @@ void igb_remove_flex_filter(struct rte_eth_dev *dev, int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, uint8_t idx); int igb_add_del_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter, bool add); + struct igb_flow_ntuple_filter *ntuple_filter, bool add); int igb_add_del_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter, + struct igb_flow_ethertype_filter *filter, bool add); int eth_igb_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, + struct igb_flow_syn_filter *filter, bool add); int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter, + struct igb_flow_flex_filter *filter, bool add); int igb_rss_conf_init(struct rte_eth_dev *dev, struct igb_rte_flow_rss_conf *out, diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index 5ab74840a..588fdea11 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -192,20 +192,20 @@ static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); static int igb_add_2tuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter); + struct igb_flow_ntuple_filter *ntuple_filter); static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter); + struct igb_flow_ntuple_filter *ntuple_filter); static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter); + struct igb_flow_flex_filter *filter); static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter); + struct igb_flow_ntuple_filter *ntuple_filter); static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter); + struct 
igb_flow_ntuple_filter *ntuple_filter); static int igb_get_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *filter); + struct igb_flow_ntuple_filter *filter); static int igb_ntuple_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); @@ -3637,7 +3637,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev, int eth_igb_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, + struct igb_flow_syn_filter *filter, bool add) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3717,12 +3717,12 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev, switch (filter_op) { case RTE_ETH_FILTER_ADD: ret = eth_igb_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, + (struct igb_flow_syn_filter *)arg, TRUE); break; case RTE_ETH_FILTER_DELETE: ret = eth_igb_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, + (struct igb_flow_syn_filter *)arg, FALSE); break; case RTE_ETH_FILTER_GET: @@ -3740,14 +3740,14 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev, /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ static inline int -ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, +ntuple_filter_to_2tuple(struct igb_flow_ntuple_filter *filter, struct e1000_2tuple_filter_info *filter_info) { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) return -EINVAL; if (filter->priority > E1000_2TUPLE_MAX_PRI) return -EINVAL; /* filter index is out of range. */ - if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) + if (filter->tcp_flags > IGB_NTUPLE_TCP_FLAGS_MASK) return -EINVAL; /* flags is invalid. */ switch (filter->dst_port_mask) { @@ -3777,7 +3777,7 @@ ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, } filter_info->priority = (uint8_t)filter->priority; - if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + if (filter->flags & IGB_NTUPLE_FLAGS_TCP_FLAG) filter_info->tcp_flags = filter->tcp_flags; else filter_info->tcp_flags = 0; @@ -3827,7 +3827,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev, ttqf &= ~E1000_TTQF_MASK_ENABLE; /* tcp flags bits setting. 
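Each flag requested in tcp_flags sets the matching E1000_IMIREXT_CTRL_* bit below.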
*/ - if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { + if (filter->filter_info.tcp_flags & IGB_NTUPLE_TCP_FLAGS_MASK) { if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) imir_ext |= E1000_IMIREXT_CTRL_URG; if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) @@ -3861,7 +3861,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev, */ static int igb_add_2tuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter) + struct igb_flow_ntuple_filter *ntuple_filter) { struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); @@ -3942,7 +3942,7 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev, */ static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter) + struct igb_flow_ntuple_filter *ntuple_filter) { struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); @@ -4056,7 +4056,7 @@ igb_remove_flex_filter(struct rte_eth_dev *dev, int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter, + struct igb_flow_flex_filter *filter, bool add) { struct e1000_filter_info *filter_info = @@ -4130,7 +4130,7 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, - struct rte_eth_flex_filter *filter) + struct igb_flow_flex_filter *filter) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_filter_info *filter_info = @@ -4180,7 +4180,7 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev, void *arg) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_flex_filter *filter; + struct igb_flow_flex_filter *filter; int ret = 0; MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); @@ -4194,7 +4194,7 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev, return -EINVAL; } - filter = (struct rte_eth_flex_filter *)arg; + filter = (struct igb_flow_flex_filter *)arg; if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN || filter->len % sizeof(uint64_t) != 0) { PMD_DRV_LOG(ERR, "filter's length is out of range"); @@ -4226,14 +4226,14 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev, /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ static inline int -ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, +ntuple_filter_to_5tuple_82576(struct igb_flow_ntuple_filter *filter, struct e1000_5tuple_filter_info *filter_info) { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) return -EINVAL; if (filter->priority > E1000_2TUPLE_MAX_PRI) return -EINVAL; /* filter index is out of range. */ - if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) + if (filter->tcp_flags > IGB_NTUPLE_TCP_FLAGS_MASK) return -EINVAL; /* flags is invalid. */ switch (filter->dst_ip_mask) { @@ -4302,7 +4302,7 @@ ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, } filter_info->priority = (uint8_t)filter->priority; - if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + if (filter->flags & IGB_NTUPLE_FLAGS_TCP_FLAG) filter_info->tcp_flags = filter->tcp_flags; else filter_info->tcp_flags = 0; @@ -4363,7 +4363,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; /* tcp flags bits setting. 
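Same E1000_IMIREXT_CTRL_* encoding as the 2-tuple path above.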
*/ - if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { + if (filter->filter_info.tcp_flags & IGB_NTUPLE_TCP_FLAGS_MASK) { if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) imir_ext |= E1000_IMIREXT_CTRL_URG; if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) @@ -4396,7 +4396,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, */ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter) + struct igb_flow_ntuple_filter *ntuple_filter) { struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); @@ -4483,7 +4483,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, */ static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter) + struct igb_flow_ntuple_filter *ntuple_filter) { struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); @@ -4568,7 +4568,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * * @param * dev: Pointer to struct rte_eth_dev. - * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * ntuple_filter: Pointer to struct igb_flow_ntuple_filter * add: if true, add filter, if false, remove filter * * @return @@ -4577,15 +4577,15 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) */ int igb_add_del_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter, + struct igb_flow_ntuple_filter *ntuple_filter, bool add) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret; switch (ntuple_filter->flags) { - case RTE_5TUPLE_FLAGS: - case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + case IGB_5TUPLE_FLAGS: + case (IGB_5TUPLE_FLAGS | IGB_NTUPLE_FLAGS_TCP_FLAG): if (hw->mac.type != e1000_82576) return -ENOTSUP; if (add) @@ -4595,8 +4595,8 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev, ret = igb_remove_5tuple_filter_82576(dev, ntuple_filter); break; - case RTE_2TUPLE_FLAGS: - case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + case IGB_2TUPLE_FLAGS: + case (IGB_2TUPLE_FLAGS | IGB_NTUPLE_FLAGS_TCP_FLAG): if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 && hw->mac.type != e1000_i210 && hw->mac.type != e1000_i211) @@ -4627,7 +4627,7 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev, */ static int igb_get_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter) + struct igb_flow_ntuple_filter *ntuple_filter) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_filter_info *filter_info = @@ -4714,17 +4714,17 @@ igb_ntuple_filter_handle(struct rte_eth_dev *dev, switch (filter_op) { case RTE_ETH_FILTER_ADD: ret = igb_add_del_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg, + (struct igb_flow_ntuple_filter *)arg, TRUE); break; case RTE_ETH_FILTER_DELETE: ret = igb_add_del_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg, + (struct igb_flow_ntuple_filter *)arg, FALSE); break; case RTE_ETH_FILTER_GET: ret = igb_get_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg); + (struct igb_flow_ntuple_filter *)arg); break; default: PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); @@ -4780,7 +4780,7 @@ igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, int igb_add_del_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter, + struct igb_flow_ethertype_filter *filter, bool add) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ 
-4796,11 +4796,11 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev, return -EINVAL; } - if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + if (filter->flags & IGB_ETHTYPE_FLAGS_MAC) { PMD_DRV_LOG(ERR, "mac compare is unsupported."); return -EINVAL; } - if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + if (filter->flags & IGB_ETHTYPE_FLAGS_DROP) { PMD_DRV_LOG(ERR, "drop option is unsupported."); return -EINVAL; } @@ -4895,12 +4895,12 @@ igb_ethertype_filter_handle(struct rte_eth_dev *dev, switch (filter_op) { case RTE_ETH_FILTER_ADD: ret = igb_add_del_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg, + (struct igb_flow_ethertype_filter *)arg, TRUE); break; case RTE_ETH_FILTER_DELETE: ret = igb_add_del_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg, + (struct igb_flow_ethertype_filter *)arg, FALSE); break; case RTE_ETH_FILTER_GET: diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c index 43fef889b..eec7ae3db 100644 --- a/drivers/net/e1000/igb_flow.c +++ b/drivers/net/e1000/igb_flow.c @@ -91,7 +91,7 @@ static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_ntuple_filter *filter, + struct igb_flow_ntuple_filter *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -216,7 +216,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -225,7 +225,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* Not supported last point for range */ if (item->last) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item, "Not supported last point for range"); @@ -248,7 +248,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) { memset(filter, 0, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -258,12 +258,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = tcp_mask->hdr.dst_port; filter->src_port_mask = tcp_mask->hdr.src_port; if (tcp_mask->hdr.tcp_flags == 0xFF) { - filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + filter->flags |= IGB_NTUPLE_FLAGS_TCP_FLAG; } else if (!tcp_mask->hdr.tcp_flags) { - filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG; + filter->flags &= ~IGB_NTUPLE_FLAGS_TCP_FLAG; } else { memset(filter, 0, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -286,7 +286,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { memset(filter, 0, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -311,7 +311,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr 
*attr, if (sctp_mask->hdr.tag || sctp_mask->hdr.cksum) { memset(filter, 0, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -331,7 +331,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, index++; NEXT_ITEM_OF_PATTERN(item, pattern, index); if (item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -347,7 +347,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, */ NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, item, "Not supported action."); @@ -360,7 +360,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, index++; NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type != RTE_FLOW_ACTION_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -370,7 +370,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* parse attr */ /* must be input direction */ if (!attr->ingress) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, "Only support ingress."); @@ -379,7 +379,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->egress) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, "Not support egress."); @@ -388,7 +388,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->transfer) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, "No support for transfer."); @@ -396,7 +396,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } if (attr->priority > 0xFFFF) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, "Error priority."); @@ -413,7 +413,7 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_ntuple_filter *filter, + struct igb_flow_ntuple_filter *filter, struct rte_flow_error *error) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -428,7 +428,7 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev, /* Igb doesn't support many priorities. 
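Values above E1000_2TUPLE_MAX_PRI are rejected.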
*/ if (filter->priority > E1000_2TUPLE_MAX_PRI) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Priority not supported by ntuple filter"); @@ -437,18 +437,20 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev, if (hw->mac.type == e1000_82576) { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, + sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not " "supported by ntuple filter"); return -rte_errno; } - filter->flags |= RTE_5TUPLE_FLAGS; + filter->flags |= IGB_5TUPLE_FLAGS; } else { if (filter->src_ip_mask || filter->dst_ip_mask || filter->src_port_mask) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, + sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "only two tuple are " @@ -456,14 +458,15 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev, return -rte_errno; } if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, + sizeof(struct igb_flow_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not " "supported by ntuple filter"); return -rte_errno; } - filter->flags |= RTE_2TUPLE_FLAGS; + filter->flags |= IGB_2TUPLE_FLAGS; } return 0; @@ -489,7 +492,7 @@ static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, const struct rte_flow_action *actions, - struct rte_eth_ethertype_filter *filter, + struct igb_flow_ethertype_filter *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -572,13 +575,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, } /* If mask bits of destination MAC address - * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + * are full of 1, set IGB_ETHTYPE_FLAGS_MAC. 
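+ * Otherwise the destination MAC address is not compared.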
*/ if (rte_is_broadcast_ether_addr(ð_mask->dst)) { filter->mac_addr = eth_spec->dst; - filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + filter->flags |= IGB_ETHTYPE_FLAGS_MAC; } else { - filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + filter->flags &= ~IGB_ETHTYPE_FLAGS_MAC; } filter->ether_type = rte_be_to_cpu_16(eth_spec->type); @@ -609,7 +612,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, act_q = (const struct rte_flow_action_queue *)act->conf; filter->queue = act_q->index; } else { - filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + filter->flags |= IGB_ETHTYPE_FLAGS_DROP; } /* Check if the next non-void item is END */ @@ -671,7 +674,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_ethertype_filter *filter, + struct igb_flow_ethertype_filter *filter, struct rte_flow_error *error) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -688,7 +691,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, if (hw->mac.type == e1000_82576) { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { memset(filter, 0, sizeof( - struct rte_eth_ethertype_filter)); + struct igb_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not supported " @@ -698,7 +701,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, } else { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { memset(filter, 0, sizeof( - struct rte_eth_ethertype_filter)); + struct igb_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not supported " @@ -709,23 +712,23 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || filter->ether_type == RTE_ETHER_TYPE_IPV6) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + memset(filter, 0, sizeof(struct igb_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "IPv4/IPv6 not supported by ethertype filter"); return -rte_errno; } - if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + if (filter->flags & IGB_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct igb_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "mac compare is unsupported"); return -rte_errno; } - if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + if (filter->flags & IGB_ETHTYPE_FLAGS_DROP) { + memset(filter, 0, sizeof(struct igb_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "drop option is unsupported"); @@ -759,7 +762,7 @@ static int cons_parse_syn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_syn_filter *filter, + struct igb_flow_syn_filter *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -883,7 +886,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by syn filter"); @@ -894,7 +897,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, index++; NEXT_ITEM_OF_PATTERN(item, pattern, 
index); if (item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by syn filter"); @@ -907,7 +910,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* check if the first not void action is QUEUE. */ NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -921,7 +924,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, index++; NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type != RTE_FLOW_ACTION_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -931,7 +934,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* parse attr */ /* must be input direction */ if (!attr->ingress) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, "Only support ingress."); @@ -940,7 +943,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->egress) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, "Not support egress."); @@ -949,7 +952,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->transfer) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, "No support for transfer."); @@ -962,7 +965,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } else if (attr->priority == (uint32_t)~0U) { filter->hig_pri = 1; } else { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, "Not support priority."); @@ -977,7 +980,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_syn_filter *filter, + struct igb_flow_syn_filter *filter, struct rte_flow_error *error) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -990,7 +993,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev, if (hw->mac.type == e1000_82576) { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not " @@ -999,7 +1002,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev, } } else { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct igb_flow_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not " @@ -1041,7 +1044,7 @@ static int cons_parse_flex_filter(const struct 
rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_flex_filter *filter, + struct igb_flow_flex_filter *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -1102,7 +1105,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, if (!raw_mask->length || !raw_mask->relative) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by flex filter"); @@ -1116,7 +1119,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, for (j = 0; j < raw_spec->length; j++) { if (raw_mask->pattern[j] != 0xFF) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by flex filter"); @@ -1140,8 +1143,8 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, } if ((raw_spec->length + offset + total_offset) > - RTE_FLEX_FILTER_MAXLEN) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + IGB_FLEX_FILTER_MAXLEN) { + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by flex filter"); @@ -1204,7 +1207,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, /* check if the first not void action is QUEUE. */ NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -1218,7 +1221,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, index++; NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type != RTE_FLOW_ACTION_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -1228,7 +1231,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, /* parse attr */ /* must be input direction */ if (!attr->ingress) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, "Only support ingress."); @@ -1237,7 +1240,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->egress) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, "Not support egress."); @@ -1246,7 +1249,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->transfer) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, "No support for transfer."); @@ -1254,7 +1257,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr, } if (attr->priority > 0xFFFF) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, "Error priority."); @@ -1271,7 +1274,7 @@ 
igb_parse_flex_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_flex_filter *filter, + struct igb_flow_flex_filter *filter, struct rte_flow_error *error) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1283,7 +1286,7 @@ igb_parse_flex_filter(struct rte_eth_dev *dev, actions, filter, error); if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { - memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(filter, 0, sizeof(struct igb_flow_flex_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not supported by flex filter"); @@ -1441,10 +1444,10 @@ igb_flow_create(struct rte_eth_dev *dev, struct rte_flow_error *error) { int ret; - struct rte_eth_ntuple_filter ntuple_filter; - struct rte_eth_ethertype_filter ethertype_filter; - struct rte_eth_syn_filter syn_filter; - struct rte_eth_flex_filter flex_filter; + struct igb_flow_ntuple_filter ntuple_filter; + struct igb_flow_ethertype_filter ethertype_filter; + struct igb_flow_syn_filter syn_filter; + struct igb_flow_flex_filter flex_filter; struct igb_rte_flow_rss_conf rss_conf; struct rte_flow *flow = NULL; struct igb_ntuple_filter_ele *ntuple_filter_ptr; @@ -1471,7 +1474,7 @@ igb_flow_create(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&igb_flow_list, igb_flow_mem_ptr, entries); - memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(&ntuple_filter, 0, sizeof(struct igb_flow_ntuple_filter)); ret = igb_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); if (!ret) { @@ -1486,17 +1489,17 @@ igb_flow_create(struct rte_eth_dev *dev, rte_memcpy(&ntuple_filter_ptr->filter_info, &ntuple_filter, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct igb_flow_ntuple_filter)); TAILQ_INSERT_TAIL(&igb_filter_ntuple_list, ntuple_filter_ptr, entries); flow->rule = ntuple_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_NTUPLE; + flow->filter_type = IGB_FILTER_NTUPLE; return flow; } goto out; } - memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + memset(ðertype_filter, 0, sizeof(struct igb_flow_ethertype_filter)); ret = igb_parse_ethertype_filter(dev, attr, pattern, actions, ðertype_filter, error); if (!ret) { @@ -1513,17 +1516,17 @@ igb_flow_create(struct rte_eth_dev *dev, rte_memcpy(ðertype_filter_ptr->filter_info, ðertype_filter, - sizeof(struct rte_eth_ethertype_filter)); + sizeof(struct igb_flow_ethertype_filter)); TAILQ_INSERT_TAIL(&igb_filter_ethertype_list, ethertype_filter_ptr, entries); flow->rule = ethertype_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_ETHERTYPE; + flow->filter_type = IGB_FILTER_ETHERTYPE; return flow; } goto out; } - memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(&syn_filter, 0, sizeof(struct igb_flow_syn_filter)); ret = igb_parse_syn_filter(dev, attr, pattern, actions, &syn_filter, error); if (!ret) { @@ -1538,18 +1541,18 @@ igb_flow_create(struct rte_eth_dev *dev, rte_memcpy(&syn_filter_ptr->filter_info, &syn_filter, - sizeof(struct rte_eth_syn_filter)); + sizeof(struct igb_flow_syn_filter)); TAILQ_INSERT_TAIL(&igb_filter_syn_list, syn_filter_ptr, entries); flow->rule = syn_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_SYN; + flow->filter_type = IGB_FILTER_SYN; return flow; } goto out; } - memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(&flex_filter, 0, sizeof(struct igb_flow_flex_filter)); ret = igb_parse_flex_filter(dev, attr, pattern, actions, 
&flex_filter, error); if (!ret) { @@ -1564,11 +1567,11 @@ igb_flow_create(struct rte_eth_dev *dev, rte_memcpy(&flex_filter_ptr->filter_info, &flex_filter, - sizeof(struct rte_eth_flex_filter)); + sizeof(struct igb_flow_flex_filter)); TAILQ_INSERT_TAIL(&igb_filter_flex_list, flex_filter_ptr, entries); flow->rule = flex_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_FLEXIBLE; + flow->filter_type = IGB_FILTER_FLEXIBLE; return flow; } } @@ -1590,7 +1593,7 @@ igb_flow_create(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&igb_filter_rss_list, rss_filter_ptr, entries); flow->rule = rss_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_HASH; + flow->filter_type = IGB_FILTER_HASH; return flow; } } @@ -1618,32 +1621,32 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct rte_eth_ntuple_filter ntuple_filter; - struct rte_eth_ethertype_filter ethertype_filter; - struct rte_eth_syn_filter syn_filter; - struct rte_eth_flex_filter flex_filter; + struct igb_flow_ntuple_filter ntuple_filter; + struct igb_flow_ethertype_filter ethertype_filter; + struct igb_flow_syn_filter syn_filter; + struct igb_flow_flex_filter flex_filter; struct igb_rte_flow_rss_conf rss_conf; int ret; - memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(&ntuple_filter, 0, sizeof(struct igb_flow_ntuple_filter)); ret = igb_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); if (!ret) return 0; - memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + memset(ðertype_filter, 0, sizeof(struct igb_flow_ethertype_filter)); ret = igb_parse_ethertype_filter(dev, attr, pattern, actions, ðertype_filter, error); if (!ret) return 0; - memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(&syn_filter, 0, sizeof(struct igb_flow_syn_filter)); ret = igb_parse_syn_filter(dev, attr, pattern, actions, &syn_filter, error); if (!ret) return 0; - memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter)); + memset(&flex_filter, 0, sizeof(struct igb_flow_flex_filter)); ret = igb_parse_flex_filter(dev, attr, pattern, actions, &flex_filter, error); if (!ret) @@ -1664,7 +1667,7 @@ igb_flow_destroy(struct rte_eth_dev *dev, { int ret; struct rte_flow *pmd_flow = flow; - enum rte_filter_type filter_type = pmd_flow->filter_type; + enum igb_filter_type filter_type = pmd_flow->filter_type; struct igb_ntuple_filter_ele *ntuple_filter_ptr; struct igb_ethertype_filter_ele *ethertype_filter_ptr; struct igb_eth_syn_filter_ele *syn_filter_ptr; @@ -1673,7 +1676,7 @@ igb_flow_destroy(struct rte_eth_dev *dev, struct igb_rss_conf_ele *rss_filter_ptr; switch (filter_type) { - case RTE_ETH_FILTER_NTUPLE: + case IGB_FILTER_NTUPLE: ntuple_filter_ptr = (struct igb_ntuple_filter_ele *) pmd_flow->rule; ret = igb_add_del_ntuple_filter(dev, @@ -1684,7 +1687,7 @@ igb_flow_destroy(struct rte_eth_dev *dev, rte_free(ntuple_filter_ptr); } break; - case RTE_ETH_FILTER_ETHERTYPE: + case IGB_FILTER_ETHERTYPE: ethertype_filter_ptr = (struct igb_ethertype_filter_ele *) pmd_flow->rule; ret = igb_add_del_ethertype_filter(dev, @@ -1695,7 +1698,7 @@ igb_flow_destroy(struct rte_eth_dev *dev, rte_free(ethertype_filter_ptr); } break; - case RTE_ETH_FILTER_SYN: + case IGB_FILTER_SYN: syn_filter_ptr = (struct igb_eth_syn_filter_ele *) pmd_flow->rule; ret = eth_igb_syn_filter_set(dev, @@ -1706,7 +1709,7 @@ igb_flow_destroy(struct rte_eth_dev *dev, rte_free(syn_filter_ptr); } break; - case RTE_ETH_FILTER_FLEXIBLE: + case IGB_FILTER_FLEXIBLE: 
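/* the flex rule owns its list element; free it once removed from HW */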
flex_filter_ptr = (struct igb_flex_filter_ele *) pmd_flow->rule; ret = eth_igb_add_del_flex_filter(dev, @@ -1717,7 +1720,7 @@ igb_flow_destroy(struct rte_eth_dev *dev, rte_free(flex_filter_ptr); } break; - case RTE_ETH_FILTER_HASH: + case IGB_FILTER_HASH: rss_filter_ptr = (struct igb_rss_conf_ele *) pmd_flow->rule; ret = igb_config_rss_filter(dev, @@ -1836,7 +1839,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev) struct igb_flex_filter_ele *flex_filter_ptr; struct igb_rss_conf_ele *rss_filter_ptr; struct igb_flow_mem *igb_flow_mem_ptr; - enum rte_filter_type filter_type; + enum igb_filter_type filter_type; struct rte_flow *pmd_flow; TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) { @@ -1845,7 +1848,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev) filter_type = pmd_flow->filter_type; switch (filter_type) { - case RTE_ETH_FILTER_NTUPLE: + case IGB_FILTER_NTUPLE: ntuple_filter_ptr = (struct igb_ntuple_filter_ele *) pmd_flow->rule; @@ -1853,7 +1856,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev) ntuple_filter_ptr, entries); rte_free(ntuple_filter_ptr); break; - case RTE_ETH_FILTER_ETHERTYPE: + case IGB_FILTER_ETHERTYPE: ethertype_filter_ptr = (struct igb_ethertype_filter_ele *) pmd_flow->rule; @@ -1861,7 +1864,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev) ethertype_filter_ptr, entries); rte_free(ethertype_filter_ptr); break; - case RTE_ETH_FILTER_SYN: + case IGB_FILTER_SYN: syn_filter_ptr = (struct igb_eth_syn_filter_ele *) pmd_flow->rule; @@ -1869,7 +1872,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev) syn_filter_ptr, entries); rte_free(syn_filter_ptr); break; - case RTE_ETH_FILTER_FLEXIBLE: + case IGB_FILTER_FLEXIBLE: flex_filter_ptr = (struct igb_flex_filter_ele *) pmd_flow->rule; @@ -1877,7 +1880,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev) flex_filter_ptr, entries); rte_free(flex_filter_ptr); break; - case RTE_ETH_FILTER_HASH: + case IGB_FILTER_HASH: rss_filter_ptr = (struct igb_rss_conf_ele *) pmd_flow->rule; From patchwork Tue Sep 29 07:48:35 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Chenxu Di X-Patchwork-Id: 79149 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 215C2A04C0; Tue, 29 Sep 2020 10:10:22 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C08AD1D90B; Tue, 29 Sep 2020 10:09:45 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id F188A1D5C9 for ; Tue, 29 Sep 2020 10:09:40 +0200 (CEST) IronPort-SDR: FpGHWUy9GVJETTB+ESMkv+ymizOe/ZFYxvAP4Zng9Ag3OZslVzrnBWEKxXcj6LP1OU8GphLzGr EJvTiulEafNw== X-IronPort-AV: E=McAfee;i="6000,8403,9758"; a="163018508" X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="163018508" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 29 Sep 2020 01:09:39 -0700 IronPort-SDR: 8k8qeGdPCbhNMn6ZHvuMYHZ3ssXQetzZ+57gZsTKQ3D2gdSx0pB9rOtSJR9kkmoDyD0cvkuI/O mYAr9KrZhCsw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="415289256" Received: from unknown (HELO localhost.localdomain) ([10.239.255.61]) by fmsmga001.fm.intel.com with ESMTP; 29 Sep 2020 01:09:36 -0700 From: Chenxu Di To: 
From patchwork Tue Sep 29 07:48:35 2020
X-Patchwork-Submitter: Chenxu Di
X-Patchwork-Id: 79149
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Chenxu Di
To: dev@dpdk.org
Cc: junyux.jiang@intel.com, shougangx.wang@intel.com, Jeff Guo, Haiyue Wang, Chenxu Di
Date: Tue, 29 Sep 2020 07:48:35 +0000
Message-Id: <20200929074835.39854-3-chenxux.di@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200929074835.39854-1-chenxux.di@intel.com>
References: <20200929074835.39854-1-chenxux.di@intel.com>
Subject: [dpdk-dev] [RFC 4/5] net/ixgbe: decouple dependency from superseded structures

The legacy filter API, together with the associated rte_eth_ctrl.h header, will be removed. This patch replaces the superseded structures with PMD-internal structures. The macros RTE_ETH_FILTER_GENERIC and RTE_ETH_FILTER_GET are not replaced; they must stay as they are until the corresponding librte change lands.

The "rte_eth_conf.fdir_conf" field will also be removed, but IXGBE generic flow still depends on this configuration, so this patch also defines a private API to pass the user's configuration through to the PMD.

Signed-off-by: Chenxu Di
---
drivers/net/ixgbe/ixgbe_ethdev.c | 79 +++----
drivers/net/ixgbe/ixgbe_ethdev.h | 217 +++++++++++++++++-
drivers/net/ixgbe/ixgbe_fdir.c | 143 ++++++------
drivers/net/ixgbe/ixgbe_flow.c | 235 ++++++++++----------
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 4 +-
drivers/net/ixgbe/rte_pmd_ixgbe.c | 72 ++++++
drivers/net/ixgbe/rte_pmd_ixgbe.h | 172 ++++++++++++++
drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 1 +
8 files changed, 692 insertions(+), 231 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 0f065bbc0..977900c8f 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -301,7 +301,7 @@ static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter); + struct ixgbe_syn_filter *filter); static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); @@ -313,12 +313,12 @@ static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *filter); + struct ixgbe_ntuple_filter *filter); static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter); + struct ixgbe_flow_ethertype_filter *filter); static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, enum rte_filter_op filter_op, @@ -2571,6 +2571,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_adapter *adapter = dev->data->dev_private; uint32_t intr_vector = 0; int err; bool link_up = false, negotiate = 0; @@ -2665,7 +2666,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* Configure DCB hw */ ixgbe_configure_dcb(dev); - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + if (adapter->fdir_conf.mode != IXGBE_FDIR_MODE_NONE) { err =
ixgbe_fdir_configure(dev); if (err) goto error; @@ -6368,7 +6369,7 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, int ixgbe_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, + struct ixgbe_syn_filter *filter, bool add) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -6407,7 +6408,7 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev, static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter) + struct ixgbe_syn_filter *filter) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); @@ -6442,17 +6443,17 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev, switch (filter_op) { case RTE_ETH_FILTER_ADD: ret = ixgbe_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, + (struct ixgbe_syn_filter *)arg, TRUE); break; case RTE_ETH_FILTER_DELETE: ret = ixgbe_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, + (struct ixgbe_syn_filter *)arg, FALSE); break; case RTE_ETH_FILTER_GET: ret = ixgbe_syn_filter_get(dev, - (struct rte_eth_syn_filter *)arg); + (struct ixgbe_syn_filter *)arg); break; default: PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); @@ -6652,9 +6653,11 @@ ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, return NULL; } -/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ +/* translate elements in struct ixgbe_ntuple_filter to + * struct ixgbe_5tuple_filter_info + */ static inline int -ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, +ntuple_filter_to_5tuple(struct ixgbe_ntuple_filter *filter, struct ixgbe_5tuple_filter_info *filter_info) { if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || @@ -6737,7 +6740,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, * * @param * dev: Pointer to struct rte_eth_dev. 
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * ntuple_filter: Pointer to struct ixgbe_ntuple_filter * add: if true, add filter, if false, remove filter * * @return @@ -6746,7 +6749,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, */ int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter, + struct ixgbe_ntuple_filter *ntuple_filter, bool add) { struct ixgbe_filter_info *filter_info = @@ -6755,7 +6758,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, struct ixgbe_5tuple_filter *filter; int ret; - if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + if (ntuple_filter->flags != IXGBE_5TUPLE_FLAGS) { PMD_DRV_LOG(ERR, "only 5tuple is supported."); return -EINVAL; } @@ -6809,7 +6812,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, */ static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter) + struct ixgbe_ntuple_filter *ntuple_filter) { struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); @@ -6869,17 +6872,17 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, switch (filter_op) { case RTE_ETH_FILTER_ADD: ret = ixgbe_add_del_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg, + (struct ixgbe_ntuple_filter *)arg, TRUE); break; case RTE_ETH_FILTER_DELETE: ret = ixgbe_add_del_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg, + (struct ixgbe_ntuple_filter *)arg, FALSE); break; case RTE_ETH_FILTER_GET: ret = ixgbe_get_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg); + (struct ixgbe_ntuple_filter *)arg); break; default: PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); @@ -6891,7 +6894,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter, + struct ixgbe_flow_ethertype_filter *filter, bool add) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -6912,11 +6915,11 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, return -EINVAL; } - if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + if (filter->flags & IXGBE_ETHTYPE_FLAGS_MAC) { PMD_DRV_LOG(ERR, "mac compare is unsupported."); return -EINVAL; } - if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + if (filter->flags & IXGBE_ETHTYPE_FLAGS_DROP) { PMD_DRV_LOG(ERR, "drop option is unsupported."); return -EINVAL; } @@ -6965,7 +6968,7 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter) + struct ixgbe_flow_ethertype_filter *filter) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_filter_info *filter_info = @@ -7020,17 +7023,17 @@ ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, switch (filter_op) { case RTE_ETH_FILTER_ADD: ret = ixgbe_add_del_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg, + (struct ixgbe_flow_ethertype_filter *)arg, TRUE); break; case RTE_ETH_FILTER_DELETE: ret = ixgbe_add_del_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg, + (struct ixgbe_flow_ethertype_filter *)arg, FALSE); break; case RTE_ETH_FILTER_GET: ret = ixgbe_get_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg); + (struct ixgbe_flow_ethertype_filter *)arg); break; default: PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); @@ -7912,7 +7915,7 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, static int 
ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) + struct ixgbe_l2_tunnel_cfg *l2_tunnel) { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -7948,7 +7951,7 @@ ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, static int ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) + struct ixgbe_l2_tunnel_cfg *l2_tunnel) { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -8052,7 +8055,7 @@ ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, /* Add l2 tunnel filter */ int ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel, + struct ixgbe_l2_tunnel_cfg *l2_tunnel, bool restore) { int ret; @@ -8109,7 +8112,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, /* Delete l2 tunnel filter */ int ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) + struct ixgbe_l2_tunnel_cfg *l2_tunnel) { int ret; struct ixgbe_l2_tn_info *l2_tn_info = @@ -8161,13 +8164,13 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, case RTE_ETH_FILTER_ADD: ret = ixgbe_dev_l2_tunnel_filter_add (dev, - (struct rte_eth_l2_tunnel_conf *)arg, + (struct ixgbe_l2_tunnel_cfg *)arg, FALSE); break; case RTE_ETH_FILTER_DELETE: ret = ixgbe_dev_l2_tunnel_filter_del (dev, - (struct rte_eth_l2_tunnel_conf *)arg); + (struct ixgbe_l2_tunnel_cfg *)arg); break; default: PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); @@ -8249,7 +8252,7 @@ ixgbe_dev_l2_tunnel_forwarding_disable static int ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel, + struct ixgbe_l2_tunnel_cfg *l2_tunnel, bool en) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); @@ -8290,7 +8293,7 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, /* Enable l2 tunnel tag insertion */ static int ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) + struct ixgbe_l2_tunnel_cfg *l2_tunnel) { int ret = 0; @@ -8311,7 +8314,7 @@ ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, static int ixgbe_dev_l2_tunnel_insertion_disable (struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) + struct ixgbe_l2_tunnel_cfg *l2_tunnel) { int ret = 0; @@ -8425,11 +8428,11 @@ ixgbe_dev_l2_tunnel_offload_set if (en) ret = ixgbe_dev_l2_tunnel_insertion_enable( dev, - l2_tunnel); + (struct ixgbe_l2_tunnel_cfg *)l2_tunnel); else ret = ixgbe_dev_l2_tunnel_insertion_disable( dev, - l2_tunnel); + (struct ixgbe_l2_tunnel_cfg *)l2_tunnel); } if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { @@ -8831,7 +8834,7 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) struct ixgbe_l2_tn_info *l2_tn_info = IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); struct ixgbe_l2_tn_filter *node; - struct rte_eth_l2_tunnel_conf l2_tn_conf; + struct ixgbe_l2_tunnel_cfg l2_tn_conf; TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; @@ -8938,7 +8941,7 @@ ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) struct ixgbe_l2_tn_info *l2_tn_info = IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); struct ixgbe_l2_tn_filter *l2_tn_filter; - struct rte_eth_l2_tunnel_conf l2_tn_conf; + struct ixgbe_l2_tunnel_cfg l2_tn_conf; int ret = 0; while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h 
b/drivers/net/ixgbe/ixgbe_ethdev.h index 9bdef87fb..aebcf0fff 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -169,6 +169,126 @@ struct ixgbe_hw_fdir_mask { uint8_t tunnel_type_mask; }; +/** + * Flow Director setting modes: none, signature or perfect. + */ +enum ixgbe_fdir_mode { + /* Disable FDIR support. */ + IXGBE_FDIR_MODE_NONE = 0, + /* Enable FDIR signature filter mode. */ + IXGBE_FDIR_MODE_SIGNATURE, + /* Enable FDIR perfect filter mode. */ + IXGBE_FDIR_MODE_PERFECT, + /* Enable FDIR filter mode - MAC VLAN. */ + IXGBE_FDIR_MODE_PERFECT_MAC_VLAN, + /* Enable FDIR filter mode - tunnel. */ + IXGBE_FDIR_MODE_PERFECT_TUNNEL, +}; + +/* Select report mode of FDIR hash information in RX descriptors. */ +enum ixgbe_fdir_status_mode { + IXGBE_FDIR_NO_REPORT_STATUS = 0, /* Never report FDIR hash. */ + IXGBE_FDIR_REPORT_STATUS, /* Only report FDIR hash for matching pkts. */ + IXGBE_FDIR_REPORT_STATUS_ALWAYS, /* Always report FDIR hash. */ +}; + +/* A structure used to define the input for IPV4 flow */ +struct ixgbe_ipv4_flow { + uint32_t src_ip; /* IPv4 source address in big endian. */ + uint32_t dst_ip; /* IPv4 destination address in big endian. */ + uint8_t tos; /* Type of service to match. */ + uint8_t ttl; /* Time to live to match. */ + uint8_t proto; /* Protocol, next header in big endian. */ +}; + +/* A structure used to define the input for IPV6 flow */ +struct ixgbe_ipv6_flow { + uint32_t src_ip[4]; /* IPv6 source address in big endian. */ + uint32_t dst_ip[4]; /* IPv6 destination address in big endian. */ + uint8_t tc; /* Traffic class to match. */ + uint8_t proto; /* Protocol, next header to match. */ + uint8_t hop_limits; /* Hop limits to match. */ +}; + +/* A structure used to configure FDIR masks that are used by the device + * to match the various fields of RX packet headers. + */ +struct ixgbe_fdir_masks { + /* Bit mask for vlan_tci in big endian */ + uint16_t vlan_tci_mask; + /* Bit mask for ipv4 flow in big endian. */ + struct ixgbe_ipv4_flow ipv4_mask; + /* Bit mask for ipv6 flow in big endian. */ + struct ixgbe_ipv6_flow ipv6_mask; + /* Bit mask for L4 source port in big endian. */ + uint16_t src_port_mask; + /* Bit mask for L4 destination port in big endian. */ + uint16_t dst_port_mask; + /* 6 bit mask for proper 6 bytes of Mac address, bit 0 matches the + * first byte on the wire + */ + uint8_t mac_addr_byte_mask; + /* Bit mask for tunnel ID in big endian. */ + uint32_t tunnel_id_mask; + /* 1 - Match tunnel type, 0 - Ignore tunnel type. */ + uint8_t tunnel_type_mask; +}; + +#define IXGBE_FDIR_MAX_FLEXLEN 16 /* Max length of flexbytes. */ + +/* Payload type */ +enum ixgbe_payload_type { + IXGBE_PAYLOAD_UNKNOWN = 0, + IXGBE_RAW_PAYLOAD, + IXGBE_L2_PAYLOAD, + IXGBE_L3_PAYLOAD, + IXGBE_L4_PAYLOAD, + IXGBE_PAYLOAD_MAX = 8, +}; + +/* A structure used to select bytes extracted from the protocol layers to + * flexible payload for filter + */ +struct ixgbe_flex_payload_cfg { + enum ixgbe_payload_type type; /* Payload type */ + uint16_t src_offset[IXGBE_FDIR_MAX_FLEXLEN]; + /* Offset in bytes from the beginning of packet's payload + * src_offset[i] indicates the flexbyte i's offset in original + * packet payload. 
+ */ +}; + +/* A structure used to define FDIR masks for flexible payload + * for each flow type + */ +struct ixgbe_fdir_flex_mask { + uint16_t flow_type; + uint8_t mask[IXGBE_FDIR_MAX_FLEXLEN]; + /* Mask for the whole flexible payload */ +}; + +/* A structure used to define all flexible payload related setting + * include flex payload and flex mask + */ +struct ixgbe_fdir_flex_conf { + uint16_t nb_payloads; /* The number of following payload cfg */ + uint16_t nb_flexmasks; /* The number of following mask */ + struct ixgbe_flex_payload_cfg flex_set[IXGBE_PAYLOAD_MAX]; + /* Flex payload configuration for each payload type */ + struct ixgbe_fdir_flex_mask flex_mask[RTE_ETH_FLOW_MAX]; + /* Flex mask configuration for each flow type */ +}; + +struct ixgbe_fdir_conf { + enum ixgbe_fdir_mode mode; /* Flow Director mode. */ + enum ixgbe_fdir_pballoc_type pballoc; /* Space for FDIR filters. */ + enum ixgbe_fdir_status_mode status; /* How to report FDIR hash. */ + /* RX queue of packets matching a "drop" filter in perfect mode. */ + uint8_t drop_queue; + struct ixgbe_fdir_masks mask; + struct ixgbe_fdir_flex_conf flex_conf; /* Flex payload configuration. */ +}; + struct ixgbe_fdir_filter { TAILQ_ENTRY(ixgbe_fdir_filter) entries; union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/ @@ -185,7 +305,7 @@ struct ixgbe_fdir_rule { union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/ bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */ bool b_mask; /* If TRUE, mask has meaning. */ - enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */ + enum ixgbe_fdir_mode mode; /* IP, MAC VLAN, Tunnel */ uint32_t fdirflags; /* drop or forward */ uint32_t soft_id; /* an unique value for this rule */ uint8_t queue; /* assigned rx queue */ @@ -361,8 +481,90 @@ struct ixgbe_l2_tn_info { uint16_t e_tag_ether_type; /* ether type for e-tag */ }; +struct ixgbe_syn_filter { + /* 1 - higher priority than other filters, 0 - lower priority. */ + uint8_t hig_pri; + /* Queue assigned to when match */ + uint16_t queue; +}; + +/* Define all structures for ntuple Filter type. */ + +/* If set, dst_ip is part of ntuple */ +#define IXGBE_NTUPLE_FLAGS_DST_IP 0x0001 +/* If set, src_ip is part of ntuple */ +#define IXGBE_NTUPLE_FLAGS_SRC_IP 0x0002 +/* If set, dst_port is part of ntuple */ +#define IXGBE_NTUPLE_FLAGS_DST_PORT 0x0004 +/* If set, src_port is part of ntuple */ +#define IXGBE_NTUPLE_FLAGS_SRC_PORT 0x0008 +/* If set, protocol is part of ntuple */ +#define IXGBE_NTUPLE_FLAGS_PROTO 0x0010 +/* If set, tcp flag is involved */ +#define IXGBE_NTUPLE_FLAGS_TCP_FLAG 0x0020 + +#define IXGBE_5TUPLE_FLAGS ( \ + IXGBE_NTUPLE_FLAGS_DST_IP | \ + IXGBE_NTUPLE_FLAGS_SRC_IP | \ + IXGBE_NTUPLE_FLAGS_DST_PORT | \ + IXGBE_NTUPLE_FLAGS_SRC_PORT | \ + IXGBE_NTUPLE_FLAGS_PROTO) + +struct ixgbe_ntuple_filter { + uint16_t flags; /* Flags from IXGBE_NTUPLE_FLAGS_* */ + uint32_t dst_ip; /* Destination IP address in big endian. */ + uint32_t dst_ip_mask; /* Mask of destination IP address. */ + uint32_t src_ip; /* Source IP address in big endian. */ + uint32_t src_ip_mask; /* Mask of destination IP address. */ + uint16_t dst_port; /* Destination port in big endian. */ + uint16_t dst_port_mask; /* Mask of destination port. */ + uint16_t src_port; /* Source Port in big endian. */ + uint16_t src_port_mask; /* Mask of source port. */ + uint8_t proto; /* L4 protocol. */ + uint8_t proto_mask; /* Mask of L4 protocol. */ + /* tcp_flags only meaningful when the proto is TCP. 
+ * The packet matched above ntuple fields and contain + * any set bit in tcp_flags will hit this filter. + */ + uint8_t tcp_flags; + /* seven levels (001b-111b), 111b is highest, + * used when more than one filter matches. + */ + uint16_t priority; + uint16_t queue; /* Queue assigned to when match*/ +}; + +struct ixgbe_l2_tunnel_cfg { + enum rte_eth_tunnel_type l2_tunnel_type; + uint16_t ether_type; /* ether type in l2 header */ + uint32_t tunnel_id; /* port tag id for e-tag */ + uint16_t vf_id; /* VF id for tag insertion */ + uint32_t pool; /* destination pool for tag based forwarding */ +}; + +#define IXGBE_ETHTYPE_FLAGS_MAC 0x0001 /* If set, compare mac */ +#define IXGBE_ETHTYPE_FLAGS_DROP 0x0002 /* If set, drop packet when match */ + +struct ixgbe_flow_ethertype_filter { + struct rte_ether_addr mac_addr; /* Mac address to match. */ + uint16_t ether_type; /* Ether type to match */ + uint16_t flags; /* Flags from IXGBE_ETHTYPE_FLAGS_* */ + uint16_t queue; /* Queue assigned to when match*/ +}; + +enum ixgbe_filter_type { + IXGBE_FILTER_NONE = 0, + IXGBE_FILTER_ETHERTYPE, + IXGBE_FILTER_SYN, + IXGBE_FILTER_NTUPLE, + IXGBE_FILTER_FDIR, + IXGBE_FILTER_HASH, + IXGBE_FILTER_L2_TUNNEL, + IXGBE_FILTER_MAX +}; + struct rte_flow { - enum rte_filter_type filter_type; + enum ixgbe_filter_type filter_type; void *rule; }; @@ -514,6 +716,7 @@ struct ixgbe_adapter { uint8_t mac_ctrl_frame_fwd; rte_atomic32_t link_thread_running; pthread_t link_thread_tid; + struct ixgbe_fdir_conf fdir_conf; }; struct ixgbe_vf_representor { @@ -670,21 +873,21 @@ uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i); bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type); int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *filter, + struct ixgbe_ntuple_filter *filter, bool add); int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter, + struct ixgbe_flow_ethertype_filter *filter, bool add); int ixgbe_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, + struct ixgbe_syn_filter *filter, bool add); int ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel, + struct ixgbe_l2_tunnel_cfg *l2_tunnel, bool restore); int ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel); + struct ixgbe_l2_tunnel_cfg *l2_tunnel); void ixgbe_filterlist_init(void); void ixgbe_filterlist_flush(void); /* diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c index 6faaa8f06..76fb47d49 100644 --- a/drivers/net/ixgbe/ixgbe_fdir.c +++ b/drivers/net/ixgbe/ixgbe_fdir.c @@ -81,11 +81,11 @@ static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash); static int fdir_set_input_mask(struct rte_eth_dev *dev, - const struct rte_eth_fdir_masks *input_mask); + const struct ixgbe_fdir_masks *input_mask); static int fdir_set_input_mask_82599(struct rte_eth_dev *dev); static int fdir_set_input_mask_x550(struct rte_eth_dev *dev); static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, - const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl); + const struct ixgbe_fdir_flex_conf *conf, uint32_t *fdirctrl); static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl); static int ixgbe_fdir_filter_to_atr_input( const struct rte_eth_fdir_filter *fdir_filter, @@ -94,13 +94,13 @@ static int ixgbe_fdir_filter_to_atr_input( static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, 
uint32_t key); static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input, - enum rte_fdir_pballoc_type pballoc); + enum ixgbe_fdir_pballoc_type pballoc); static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - enum rte_fdir_pballoc_type pballoc); + enum ixgbe_fdir_pballoc_type pballoc); static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, uint8_t queue, uint32_t fdircmd, uint32_t fdirhash, - enum rte_fdir_mode mode); + enum ixgbe_fdir_mode mode); static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd, uint32_t fdirhash); @@ -171,20 +171,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl) * flexbytes matching field, and drop queue (only for perfect matching mode). */ static inline int -configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) +configure_fdir_flags(const struct ixgbe_fdir_conf *conf, uint32_t *fdirctrl) { *fdirctrl = 0; switch (conf->pballoc) { - case RTE_FDIR_PBALLOC_64K: + case IXGBE_FDIR_PBALLOC_64K: /* 8k - 1 signature filters */ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; break; - case RTE_FDIR_PBALLOC_128K: + case IXGBE_FDIR_PBALLOC_128K: /* 16k - 1 signature filters */ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; break; - case RTE_FDIR_PBALLOC_256K: + case IXGBE_FDIR_PBALLOC_256K: /* 32k - 1 signature filters */ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; break; @@ -196,14 +196,14 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) /* status flags: write hash & swindex in the rx descriptor */ switch (conf->status) { - case RTE_FDIR_NO_REPORT_STATUS: + case IXGBE_FDIR_NO_REPORT_STATUS: /* do nothing, default mode */ break; - case RTE_FDIR_REPORT_STATUS: + case IXGBE_FDIR_REPORT_STATUS: /* report status when the packet matches a fdir rule */ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; break; - case RTE_FDIR_REPORT_STATUS_ALWAYS: + case IXGBE_FDIR_REPORT_STATUS_ALWAYS: /* always report status */ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS; break; @@ -216,14 +216,14 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) << IXGBE_FDIRCTRL_FLEX_SHIFT; - if (conf->mode >= RTE_FDIR_MODE_PERFECT && - conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { + if (conf->mode >= IXGBE_FDIR_MODE_PERFECT && + conf->mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) { *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); - if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + if (conf->mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN) *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); - else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + else if (conf->mode == IXGBE_FDIR_MODE_PERFECT_TUNNEL) *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); } @@ -264,6 +264,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_fdir_info *info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; /* * mask VM pool and DIPv6 since there are currently not supported * mask FLEX byte, it will be set in flex_conf @@ -325,7 +326,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev) reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M); *reg = ~(info->mask.dst_ipv4_mask); - if 
(dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) { + if (adapter->fdir_conf.mode == IXGBE_FDIR_MODE_SIGNATURE) { /* * Store source and destination IPv6 masks (bit reversed) */ @@ -348,19 +349,20 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_fdir_info *info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; /* mask VM pool and DIPv6 since there are currently not supported * mask FLEX byte, it will be set in flex_conf */ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX; uint32_t fdiripv6m; - enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode; uint16_t mac_mask; PMD_INIT_FUNC_TRACE(); /* set the default UDP port for VxLAN */ - if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + if (mode == IXGBE_FDIR_MODE_PERFECT_TUNNEL) IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, RTE_VXLAN_DEFAULT_PORT); /* some bits must be set for mac vlan or tunnel mode */ @@ -384,11 +386,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev) fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; - if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + if (mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN) fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE | IXGBE_FDIRIP6M_TNI_VNI; - if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + if (mode == IXGBE_FDIR_MODE_PERFECT_TUNNEL) { fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC; mac_mask = info->mask.mac_addr_byte_mask & (IXGBE_FDIRIP6M_INNER_MAC >> @@ -436,7 +438,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev) static int ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev, - const struct rte_eth_fdir_masks *input_mask) + const struct ixgbe_fdir_masks *input_mask) { struct ixgbe_hw_fdir_info *info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); @@ -459,7 +461,7 @@ ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev, static int ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev, - const struct rte_eth_fdir_masks *input_mask) + const struct ixgbe_fdir_masks *input_mask) { struct ixgbe_hw_fdir_info *info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); @@ -475,15 +477,16 @@ ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev, static int ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev, - const struct rte_eth_fdir_masks *input_mask) + const struct ixgbe_fdir_masks *input_mask) { - enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + struct ixgbe_adapter *adapter = dev->data->dev_private; + enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode; - if (mode >= RTE_FDIR_MODE_SIGNATURE && - mode <= RTE_FDIR_MODE_PERFECT) + if (mode >= IXGBE_FDIR_MODE_SIGNATURE && + mode <= IXGBE_FDIR_MODE_PERFECT) return ixgbe_fdir_store_input_mask_82599(dev, input_mask); - else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && - mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + else if (mode >= IXGBE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) return ixgbe_fdir_store_input_mask_x550(dev, input_mask); PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode); @@ -493,13 +496,14 @@ ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev, int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev) { - enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + struct ixgbe_adapter *adapter = dev->data->dev_private; + enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode; - if (mode >= 
RTE_FDIR_MODE_SIGNATURE && - mode <= RTE_FDIR_MODE_PERFECT) + if (mode >= IXGBE_FDIR_MODE_SIGNATURE && + mode <= IXGBE_FDIR_MODE_PERFECT) return fdir_set_input_mask_82599(dev); - else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && - mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + else if (mode >= IXGBE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) return fdir_set_input_mask_x550(dev); PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode); @@ -533,7 +537,7 @@ ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, static int fdir_set_input_mask(struct rte_eth_dev *dev, - const struct rte_eth_fdir_masks *input_mask) + const struct ixgbe_fdir_masks *input_mask) { int ret; @@ -550,13 +554,13 @@ fdir_set_input_mask(struct rte_eth_dev *dev, */ static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, - const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl) + const struct ixgbe_fdir_flex_conf *conf, uint32_t *fdirctrl) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_fdir_info *info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); - const struct rte_eth_flex_payload_cfg *flex_cfg; - const struct rte_eth_fdir_flex_mask *flex_mask; + const struct ixgbe_flex_payload_cfg *flex_cfg; + const struct ixgbe_fdir_flex_mask *flex_mask; uint32_t fdirm; uint16_t flexbytes = 0; uint16_t i; @@ -570,7 +574,7 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, for (i = 0; i < conf->nb_payloads; i++) { flex_cfg = &conf->flex_set[i]; - if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) { + if (flex_cfg->type != IXGBE_RAW_PAYLOAD) { PMD_DRV_LOG(ERR, "unsupported payload type."); return -EINVAL; } @@ -615,10 +619,11 @@ int ixgbe_fdir_configure(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; int err; uint32_t fdirctrl, pbsize; int i; - enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode; PMD_INIT_FUNC_TRACE(); @@ -633,11 +638,11 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev) if (hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X550EM_a && - mode != RTE_FDIR_MODE_SIGNATURE && - mode != RTE_FDIR_MODE_PERFECT) + mode != IXGBE_FDIR_MODE_SIGNATURE && + mode != IXGBE_FDIR_MODE_PERFECT) return -ENOSYS; - err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl); + err = configure_fdir_flags(&adapter->fdir_conf, &fdirctrl); if (err) return err; @@ -659,13 +664,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev) for (i = 1; i < 8; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); - err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask); + err = fdir_set_input_mask(dev, &adapter->fdir_conf.mask); if (err < 0) { PMD_INIT_LOG(ERR, " Error on setting FD mask"); return err; } err = ixgbe_set_fdir_flex_conf(dev, - &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl); + &adapter->fdir_conf.flex_conf, &fdirctrl); if (err < 0) { PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments."); return err; @@ -894,13 +899,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - enum rte_fdir_pballoc_type pballoc) + enum ixgbe_fdir_pballoc_type pballoc) { - if (pballoc == RTE_FDIR_PBALLOC_256K) + if (pballoc == IXGBE_FDIR_PBALLOC_256K) return ixgbe_atr_compute_hash_82599(input, IXGBE_ATR_BUCKET_HASH_KEY) & PERFECT_BUCKET_256KB_HASH_MASK; 
- else if (pballoc == RTE_FDIR_PBALLOC_128K) + else if (pballoc == IXGBE_FDIR_PBALLOC_128K) return ixgbe_atr_compute_hash_82599(input, IXGBE_ATR_BUCKET_HASH_KEY) & PERFECT_BUCKET_128KB_HASH_MASK; @@ -937,15 +942,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd) */ static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input, - enum rte_fdir_pballoc_type pballoc) + enum ixgbe_fdir_pballoc_type pballoc) { uint32_t bucket_hash, sig_hash; - if (pballoc == RTE_FDIR_PBALLOC_256K) + if (pballoc == IXGBE_FDIR_PBALLOC_256K) bucket_hash = ixgbe_atr_compute_hash_82599(input, IXGBE_ATR_BUCKET_HASH_KEY) & SIG_BUCKET_256KB_HASH_MASK; - else if (pballoc == RTE_FDIR_PBALLOC_128K) + else if (pballoc == IXGBE_FDIR_PBALLOC_128K) bucket_hash = ixgbe_atr_compute_hash_82599(input, IXGBE_ATR_BUCKET_HASH_KEY) & SIG_BUCKET_128KB_HASH_MASK; @@ -970,7 +975,7 @@ static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, uint8_t queue, uint32_t fdircmd, uint32_t fdirhash, - enum rte_fdir_mode mode) + enum ixgbe_fdir_mode mode) { uint32_t fdirport, fdirvlan; u32 addr_low, addr_high; @@ -978,7 +983,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, int err = 0; volatile uint32_t *reg; - if (mode == RTE_FDIR_MODE_PERFECT) { + if (mode == IXGBE_FDIR_MODE_PERFECT) { /* record the IPv4 address (big-endian) * can not use IXGBE_WRITE_REG. */ @@ -992,8 +997,8 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; fdirport |= IXGBE_NTOHS(input->formatted.src_port); IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); - } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && - mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { + } else if (mode >= IXGBE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) { /* for mac vlan and tunnel modes */ addr_low = ((u32)input->formatted.inner_mac[0] | ((u32)input->formatted.inner_mac[1] << 8) | @@ -1002,7 +1007,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, addr_high = ((u32)input->formatted.inner_mac[4] | ((u32)input->formatted.inner_mac[5] << 8)); - if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + if (mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN) { IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high); IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0); @@ -1226,6 +1231,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev, bool update) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; uint32_t fdircmd_flags; uint32_t fdirhash; uint8_t queue; @@ -1233,11 +1239,11 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev, int err; struct ixgbe_hw_fdir_info *info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); - enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + enum ixgbe_fdir_mode fdir_mode = adapter->fdir_conf.mode; struct ixgbe_fdir_filter *node; bool add_node = FALSE; - if (fdir_mode == RTE_FDIR_MODE_NONE || + if (fdir_mode == IXGBE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; @@ -1257,16 +1263,16 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev, IXGBE_ATR_FLOW_TYPE_IPV6) && (info->mask.src_port_mask != 0 || info->mask.dst_port_mask != 0) && - (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN && - rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) { + (rule->mode != IXGBE_FDIR_MODE_PERFECT_MAC_VLAN && + rule->mode != IXGBE_FDIR_MODE_PERFECT_TUNNEL)) { PMD_DRV_LOG(ERR, "By this device," " IPv4 is not 
supported without" " L4 protocol and ports masked!"); return -ENOTSUP; } - if (fdir_mode >= RTE_FDIR_MODE_PERFECT && - fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + if (fdir_mode >= IXGBE_FDIR_MODE_PERFECT && + fdir_mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) is_perfect = TRUE; if (is_perfect) { @@ -1277,12 +1283,12 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev, return -ENOTSUP; } fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir, - dev->data->dev_conf.fdir_conf.pballoc); + adapter->fdir_conf.pballoc); fdirhash |= rule->soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; } else fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir, - dev->data->dev_conf.fdir_conf.pballoc); + adapter->fdir_conf.pballoc); if (del) { err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir); @@ -1300,7 +1306,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev, fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0; if (rule->fdirflags & IXGBE_FDIRCMD_DROP) { if (is_perfect) { - queue = dev->data->dev_conf.fdir_conf.drop_queue; + queue = adapter->fdir_conf.drop_queue; fdircmd_flags |= IXGBE_FDIRCMD_DROP; } else { PMD_DRV_LOG(ERR, "Drop option is not supported in" @@ -1587,12 +1593,13 @@ ixgbe_fdir_filter_restore(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_fdir_info *fdir_info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; + enum ixgbe_fdir_mode fdir_mode = adapter->fdir_conf.mode; struct ixgbe_fdir_filter *node; bool is_perfect = FALSE; - enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; - if (fdir_mode >= RTE_FDIR_MODE_PERFECT && - fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + if (fdir_mode >= IXGBE_FDIR_MODE_PERFECT && + fdir_mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) is_perfect = TRUE; if (is_perfect) { diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index b2a2bfc02..61bdca7e6 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -52,17 +52,17 @@ /* ntuple filter list structure */ struct ixgbe_ntuple_filter_ele { TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries; - struct rte_eth_ntuple_filter filter_info; + struct ixgbe_ntuple_filter filter_info; }; /* ethertype filter list structure */ struct ixgbe_ethertype_filter_ele { TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries; - struct rte_eth_ethertype_filter filter_info; + struct ixgbe_flow_ethertype_filter filter_info; }; /* syn filter list structure */ struct ixgbe_eth_syn_filter_ele { TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries; - struct rte_eth_syn_filter filter_info; + struct ixgbe_syn_filter filter_info; }; /* fdir filter list structure */ struct ixgbe_fdir_rule_ele { @@ -72,7 +72,7 @@ struct ixgbe_fdir_rule_ele { /* l2_tunnel filter list structure */ struct ixgbe_eth_l2_tunnel_conf_ele { TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries; - struct rte_eth_l2_tunnel_conf filter_info; + struct ixgbe_l2_tunnel_cfg filter_info; }; /* rss filter list structure */ struct ixgbe_rss_conf_ele { @@ -172,7 +172,7 @@ static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_ntuple_filter *filter, + struct ixgbe_ntuple_filter *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -225,7 +225,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* check if the next not void item is END */ act = 
next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -391,7 +391,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP && item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -406,7 +406,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* get the TCP/UDP/SCTP info */ if (item->type != RTE_FLOW_ITEM_TYPE_END && (!item->spec || !item->mask)) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid ntuple mask"); @@ -415,7 +415,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /*Not supported last point for range*/ if (item->last) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item, "Not supported last point for range"); @@ -437,7 +437,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) { memset(filter, 0, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -456,11 +456,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = tcp_mask->hdr.dst_port; filter->src_port_mask = tcp_mask->hdr.src_port; if (tcp_mask->hdr.tcp_flags == 0xFF) { - filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + filter->flags |= IXGBE_NTUPLE_FLAGS_TCP_FLAG; } else if (!tcp_mask->hdr.tcp_flags) { - filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG; + filter->flags &= ~IXGBE_NTUPLE_FLAGS_TCP_FLAG; } else { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -481,7 +481,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { memset(filter, 0, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -513,7 +513,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, if (sctp_mask->hdr.tag || sctp_mask->hdr.cksum) { memset(filter, 0, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); @@ -533,7 +533,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* check if the next not void item is END */ item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by ntuple filter"); 
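The hunks above and below convert the whole ntuple parse path to the PMD-internal structure, so it helps to see what the parser is ultimately producing. Below is a minimal sketch, not part of the patch: the wrapper function and all values are made up, and it assumes it is built inside the driver so ixgbe_ethdev.h is visible. It builds a fully specified 5-tuple filter using the definitions added earlier in this patch:

/* Illustrative only: build and program a 5-tuple match by hand, the
 * way cons_parse_ntuple_filter() fills the struct from rte_flow items.
 */
#include <stdbool.h>
#include <stdint.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include "ixgbe_ethdev.h"

static int
example_add_5tuple_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_ntuple_filter filter = { 0 };

	filter.flags = IXGBE_5TUPLE_FLAGS;   /* IPs, ports and proto all valid */
	filter.dst_ip = rte_cpu_to_be_32(0xc0a80001); /* 192.168.0.1 */
	filter.dst_ip_mask = UINT32_MAX;     /* exact match */
	filter.src_ip_mask = 0;              /* wildcard source address */
	filter.dst_port = rte_cpu_to_be_16(80);
	filter.dst_port_mask = UINT16_MAX;
	filter.proto = IPPROTO_TCP;
	filter.proto_mask = UINT8_MAX;
	filter.priority = 7;                 /* 001b-111b, 111b is highest */
	filter.queue = 4;                    /* RX queue used on a hit */

	/* last argument: true = add the filter, false = delete it */
	return ixgbe_add_del_ntuple_filter(dev, &filter, true);
}

Note that in the existing driver code the masks are effectively boolean: ntuple_filter_to_5tuple() accepts only all-ones (compare) or all-zero (ignore) mask values and rejects anything else before the hardware registers are touched.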
@@ -548,7 +548,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, */ act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, item, "Not supported action."); @@ -560,7 +560,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* check if the next not void item is END */ act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -570,7 +570,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* parse attr */ /* must be input direction */ if (!attr->ingress) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, "Only support ingress."); @@ -579,7 +579,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->egress) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, "Not support egress."); @@ -588,7 +588,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->transfer) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, "No support for transfer."); @@ -596,7 +596,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } if (attr->priority > 0xFFFF) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, "Error priority."); @@ -616,7 +616,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_ntuple_filter *filter, + struct ixgbe_ntuple_filter *filter, struct rte_flow_error *error) { int ret; @@ -636,8 +636,8 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, #endif /* Ixgbe doesn't support tcp flags. */ - if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + if (filter->flags & IXGBE_NTUPLE_FLAGS_TCP_FLAG) { + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Not supported by ntuple filter"); @@ -647,7 +647,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, /* Ixgbe doesn't support many priorities. 
*/ if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO || filter->priority > IXGBE_MAX_N_TUPLE_PRIO) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof(struct ixgbe_ntuple_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Priority not supported by ntuple filter"); @@ -658,7 +658,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, return -rte_errno; /* fixed value for ixgbe */ - filter->flags = RTE_5TUPLE_FLAGS; + filter->flags = IXGBE_5TUPLE_FLAGS; return 0; } @@ -682,7 +682,7 @@ static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, const struct rte_flow_action *actions, - struct rte_eth_ethertype_filter *filter, + struct ixgbe_flow_ethertype_filter *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -761,13 +761,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, } /* If mask bits of destination MAC address - * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + * are full of 1, set IXGBE_ETHTYPE_FLAGS_MAC. */ if (rte_is_broadcast_ether_addr(&eth_mask->dst)) { filter->mac_addr = eth_spec->dst; - filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + filter->flags |= IXGBE_ETHTYPE_FLAGS_MAC; } else { - filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + filter->flags &= ~IXGBE_ETHTYPE_FLAGS_MAC; } filter->ether_type = rte_be_to_cpu_16(eth_spec->type); @@ -795,7 +795,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, act_q = (const struct rte_flow_action_queue *)act->conf; filter->queue = act_q->index; } else { - filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + filter->flags |= IXGBE_ETHTYPE_FLAGS_DROP; } /* Check if the next non-void item is END */ @@ -856,7 +856,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_ethertype_filter *filter, + struct ixgbe_flow_ethertype_filter *filter, struct rte_flow_error *error) { int ret; @@ -871,8 +871,8 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, return ret; /* Ixgbe doesn't support MAC address.
*/ - if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + if (filter->flags & IXGBE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Not supported by ethertype filter"); @@ -880,7 +880,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, } if (filter->queue >= dev->data->nb_rx_queues) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue index much too big"); @@ -889,23 +889,23 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || filter->ether_type == RTE_ETHER_TYPE_IPV6) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "IPv4/IPv6 not supported by ethertype filter"); return -rte_errno; } - if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + if (filter->flags & IXGBE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "mac compare is unsupported"); return -rte_errno; } - if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { - memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + if (filter->flags & IXGBE_ETHTYPE_FLAGS_DROP) { + memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "drop option is unsupported"); @@ -939,7 +939,7 @@ static int cons_parse_syn_filter(const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_syn_filter *filter, + struct ixgbe_syn_filter *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -1058,7 +1058,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by syn filter"); @@ -1068,7 +1068,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* check if the next not void item is END */ item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by syn filter"); @@ -1078,7 +1078,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* check if the first not void action is QUEUE. 
*/ act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -1088,7 +1088,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, act_q = (const struct rte_flow_action_queue *)act->conf; filter->queue = act_q->index; if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -1098,7 +1098,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* check if the next not void item is END */ act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -1108,7 +1108,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* parse attr */ /* must be input direction */ if (!attr->ingress) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, "Only support ingress."); @@ -1117,7 +1117,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->egress) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, "Not support egress."); @@ -1126,7 +1126,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, /* not supported */ if (attr->transfer) { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, "No support for transfer."); @@ -1139,7 +1139,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, } else if (attr->priority == (uint32_t)~0U) { filter->hig_pri = 1; } else { - memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(filter, 0, sizeof(struct ixgbe_syn_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, "Not support priority."); @@ -1154,7 +1154,7 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_syn_filter *filter, + struct ixgbe_syn_filter *filter, struct rte_flow_error *error) { int ret; @@ -1197,7 +1197,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_l2_tunnel_conf *filter, + struct ixgbe_l2_tunnel_cfg *filter, struct rte_flow_error *error) { const struct rte_flow_item *item; @@ -1231,7 +1231,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, /* The first not void item should be e-tag. 
*/ item = next_no_void_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by L2 tunnel filter"); @@ -1239,7 +1239,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, } if (!item->spec || !item->mask) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by L2 tunnel filter"); return -rte_errno; @@ -1261,7 +1261,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, e_tag_mask->in_ecid_e || e_tag_mask->ecid_e || e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by L2 tunnel filter"); @@ -1278,7 +1278,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, /* check if the next not void item is END */ item = next_no_void_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Not supported by L2 tunnel filter"); @@ -1288,7 +1288,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, /* parse attr */ /* must be input direction */ if (!attr->ingress) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, "Only support ingress."); @@ -1297,7 +1297,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, /* not supported */ if (attr->egress) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, "Not support egress."); @@ -1306,7 +1306,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, /* not supported */ if (attr->transfer) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, "No support for transfer."); @@ -1315,7 +1315,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, /* not supported */ if (attr->priority) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, "Not support priority."); @@ -1326,7 +1326,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, act = next_no_void_action(actions, NULL); if (act->type != RTE_FLOW_ACTION_TYPE_VF && act->type != RTE_FLOW_ACTION_TYPE_PF) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -1343,7 +1343,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, /* check if the next not void item is END */ act = next_no_void_action(actions, act); if (act->type != RTE_FLOW_ACTION_TYPE_END) { - memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(filter, 0, sizeof(struct 
ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, "Not supported action."); @@ -1358,7 +1358,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_eth_l2_tunnel_conf *l2_tn_filter, + struct ixgbe_l2_tunnel_cfg *l2_tn_filter, struct rte_flow_error *error) { int ret = 0; @@ -1372,7 +1372,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, if (hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X550EM_a) { - memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Not supported by L2 tunnel filter"); @@ -1451,7 +1451,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, rule->queue = act_q->index; } else { /* drop */ /* signature mode does not support drop action. */ - if (rule->mode == RTE_FDIR_MODE_SIGNATURE) { + if (rule->mode == IXGBE_FDIR_MODE_SIGNATURE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -1674,9 +1674,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, } if (signature_match(pattern)) - rule->mode = RTE_FDIR_MODE_SIGNATURE; + rule->mode = IXGBE_FDIR_MODE_SIGNATURE; else - rule->mode = RTE_FDIR_MODE_PERFECT; + rule->mode = IXGBE_FDIR_MODE_PERFECT; /*Not supported last point for range*/ if (item->last) { @@ -1719,7 +1719,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, /* Ether type should be masked. */ if (eth_mask->type || - rule->mode == RTE_FDIR_MODE_SIGNATURE) { + rule->mode == IXGBE_FDIR_MODE_SIGNATURE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1728,7 +1728,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, } /* If ethernet has meaning, it means MAC VLAN mode. */ - rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + rule->mode = IXGBE_FDIR_MODE_PERFECT_MAC_VLAN; /** * src MAC address must be masked, @@ -1759,7 +1759,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, * IPv6 is not supported. */ item = next_no_fuzzy_pattern(pattern, item); - if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + if (rule->mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN) { if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -1902,7 +1902,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, * 2. not support last * 3. mask must not null */ - if (rule->mode != RTE_FDIR_MODE_SIGNATURE || + if (rule->mode != IXGBE_FDIR_MODE_SIGNATURE || item->last || !item->mask) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2372,7 +2372,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, return -rte_errno; } - rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL; + rule->mode = IXGBE_FDIR_MODE_PERFECT_TUNNEL; /* Skip MAC. 
*/ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { @@ -2767,7 +2767,8 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, { int ret; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + struct ixgbe_adapter *adapter = dev->data->dev_private; + enum ixgbe_fdir_mode fdir_mode = adapter->fdir_conf.mode; if (hw->mac.type != ixgbe_mac_82599EB && hw->mac.type != ixgbe_mac_X540 && @@ -2796,7 +2797,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, rule->ixgbe_fdir.formatted.dst_port != 0)) return -ENOTSUP; - if (fdir_mode == RTE_FDIR_MODE_NONE || + if (fdir_mode == IXGBE_FDIR_MODE_NONE || fdir_mode != rule->mode) return -ENOTSUP; @@ -3019,11 +3020,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow_error *error) { int ret; - struct rte_eth_ntuple_filter ntuple_filter; - struct rte_eth_ethertype_filter ethertype_filter; - struct rte_eth_syn_filter syn_filter; + struct ixgbe_ntuple_filter ntuple_filter; + struct ixgbe_flow_ethertype_filter ethertype_filter; + struct ixgbe_syn_filter syn_filter; struct ixgbe_fdir_rule fdir_rule; - struct rte_eth_l2_tunnel_conf l2_tn_filter; + struct ixgbe_l2_tunnel_cfg l2_tn_filter; struct ixgbe_hw_fdir_info *fdir_info = IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); struct ixgbe_rte_flow_rss_conf rss_conf; @@ -3053,7 +3054,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&ixgbe_flow_list, ixgbe_flow_mem_ptr, entries); - memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(&ntuple_filter, 0, sizeof(struct ixgbe_ntuple_filter)); ret = ixgbe_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); @@ -3074,17 +3075,18 @@ ixgbe_flow_create(struct rte_eth_dev *dev, } rte_memcpy(&ntuple_filter_ptr->filter_info, &ntuple_filter, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct ixgbe_ntuple_filter)); TAILQ_INSERT_TAIL(&filter_ntuple_list, ntuple_filter_ptr, entries); flow->rule = ntuple_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_NTUPLE; + flow->filter_type = IXGBE_FILTER_NTUPLE; return flow; } goto out; } - memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + memset(ðertype_filter, 0, + sizeof(struct ixgbe_flow_ethertype_filter)); ret = ixgbe_parse_ethertype_filter(dev, attr, pattern, actions, ðertype_filter, error); if (!ret) { @@ -3100,17 +3102,17 @@ ixgbe_flow_create(struct rte_eth_dev *dev, } rte_memcpy(ðertype_filter_ptr->filter_info, ðertype_filter, - sizeof(struct rte_eth_ethertype_filter)); + sizeof(struct ixgbe_flow_ethertype_filter)); TAILQ_INSERT_TAIL(&filter_ethertype_list, ethertype_filter_ptr, entries); flow->rule = ethertype_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_ETHERTYPE; + flow->filter_type = IXGBE_FILTER_ETHERTYPE; return flow; } goto out; } - memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(&syn_filter, 0, sizeof(struct ixgbe_syn_filter)); ret = ixgbe_parse_syn_filter(dev, attr, pattern, actions, &syn_filter, error); if (!ret) { @@ -3124,12 +3126,12 @@ ixgbe_flow_create(struct rte_eth_dev *dev, } rte_memcpy(&syn_filter_ptr->filter_info, &syn_filter, - sizeof(struct rte_eth_syn_filter)); + sizeof(struct ixgbe_syn_filter)); TAILQ_INSERT_TAIL(&filter_syn_list, syn_filter_ptr, entries); flow->rule = syn_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_SYN; + flow->filter_type = IXGBE_FILTER_SYN; return flow; } goto out; @@ -3192,7 +3194,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&filter_fdir_list, fdir_rule_ptr, 
entries); flow->rule = fdir_rule_ptr; - flow->filter_type = RTE_ETH_FILTER_FDIR; + flow->filter_type = IXGBE_FILTER_FDIR; return flow; } @@ -3211,7 +3213,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, goto out; } - memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern, actions, &l2_tn_filter, error); if (!ret) { @@ -3225,11 +3227,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev, } rte_memcpy(&l2_tn_filter_ptr->filter_info, &l2_tn_filter, - sizeof(struct rte_eth_l2_tunnel_conf)); + sizeof(struct ixgbe_l2_tunnel_cfg)); TAILQ_INSERT_TAIL(&filter_l2_tunnel_list, l2_tn_filter_ptr, entries); flow->rule = l2_tn_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL; + flow->filter_type = IXGBE_FILTER_L2_TUNNEL; return flow; } } @@ -3251,7 +3253,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&filter_rss_list, rss_filter_ptr, entries); flow->rule = rss_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_HASH; + flow->filter_type = IXGBE_FILTER_HASH; return flow; } } @@ -3279,27 +3281,28 @@ ixgbe_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct rte_eth_ntuple_filter ntuple_filter; - struct rte_eth_ethertype_filter ethertype_filter; - struct rte_eth_syn_filter syn_filter; - struct rte_eth_l2_tunnel_conf l2_tn_filter; + struct ixgbe_ntuple_filter ntuple_filter; + struct ixgbe_flow_ethertype_filter ethertype_filter; + struct ixgbe_syn_filter syn_filter; + struct ixgbe_l2_tunnel_cfg l2_tn_filter; struct ixgbe_fdir_rule fdir_rule; struct ixgbe_rte_flow_rss_conf rss_conf; int ret; - memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(&ntuple_filter, 0, sizeof(struct ixgbe_ntuple_filter)); ret = ixgbe_parse_ntuple_filter(dev, attr, pattern, actions, &ntuple_filter, error); if (!ret) return 0; - memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + memset(ðertype_filter, 0, + sizeof(struct ixgbe_flow_ethertype_filter)); ret = ixgbe_parse_ethertype_filter(dev, attr, pattern, actions, ðertype_filter, error); if (!ret) return 0; - memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + memset(&syn_filter, 0, sizeof(struct ixgbe_syn_filter)); ret = ixgbe_parse_syn_filter(dev, attr, pattern, actions, &syn_filter, error); if (!ret) @@ -3311,7 +3314,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev, if (!ret) return 0; - memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg)); ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern, actions, &l2_tn_filter, error); if (!ret) @@ -3332,12 +3335,12 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, { int ret; struct rte_flow *pmd_flow = flow; - enum rte_filter_type filter_type = pmd_flow->filter_type; - struct rte_eth_ntuple_filter ntuple_filter; - struct rte_eth_ethertype_filter ethertype_filter; - struct rte_eth_syn_filter syn_filter; + enum ixgbe_filter_type filter_type = pmd_flow->filter_type; + struct ixgbe_ntuple_filter ntuple_filter; + struct ixgbe_flow_ethertype_filter ethertype_filter; + struct ixgbe_syn_filter syn_filter; struct ixgbe_fdir_rule fdir_rule; - struct rte_eth_l2_tunnel_conf l2_tn_filter; + struct ixgbe_l2_tunnel_cfg l2_tn_filter; struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr; struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr; struct ixgbe_eth_syn_filter_ele *syn_filter_ptr; @@ -3349,12 +3352,12 @@ 
ixgbe_flow_destroy(struct rte_eth_dev *dev, struct ixgbe_rss_conf_ele *rss_filter_ptr; switch (filter_type) { - case RTE_ETH_FILTER_NTUPLE: + case IXGBE_FILTER_NTUPLE: ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *) pmd_flow->rule; rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info, - sizeof(struct rte_eth_ntuple_filter)); + sizeof(struct ixgbe_ntuple_filter)); ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE); if (!ret) { TAILQ_REMOVE(&filter_ntuple_list, @@ -3362,12 +3365,12 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, rte_free(ntuple_filter_ptr); } break; - case RTE_ETH_FILTER_ETHERTYPE: + case IXGBE_FILTER_ETHERTYPE: ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *) pmd_flow->rule; rte_memcpy(ðertype_filter, ðertype_filter_ptr->filter_info, - sizeof(struct rte_eth_ethertype_filter)); + sizeof(struct ixgbe_flow_ethertype_filter)); ret = ixgbe_add_del_ethertype_filter(dev, ðertype_filter, FALSE); if (!ret) { @@ -3376,12 +3379,12 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, rte_free(ethertype_filter_ptr); } break; - case RTE_ETH_FILTER_SYN: + case IXGBE_FILTER_SYN: syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *) pmd_flow->rule; rte_memcpy(&syn_filter, &syn_filter_ptr->filter_info, - sizeof(struct rte_eth_syn_filter)); + sizeof(struct ixgbe_syn_filter)); ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE); if (!ret) { TAILQ_REMOVE(&filter_syn_list, @@ -3389,7 +3392,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, rte_free(syn_filter_ptr); } break; - case RTE_ETH_FILTER_FDIR: + case IXGBE_FILTER_FDIR: fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule; rte_memcpy(&fdir_rule, &fdir_rule_ptr->filter_info, @@ -3403,11 +3406,11 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, fdir_info->mask_added = false; } break; - case RTE_ETH_FILTER_L2_TUNNEL: + case IXGBE_FILTER_L2_TUNNEL: l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *) pmd_flow->rule; rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, - sizeof(struct rte_eth_l2_tunnel_conf)); + sizeof(struct ixgbe_l2_tunnel_cfg)); ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter); if (!ret) { TAILQ_REMOVE(&filter_l2_tunnel_list, @@ -3415,7 +3418,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev, rte_free(l2_tn_filter_ptr); } break; - case RTE_ETH_FILTER_HASH: + case IXGBE_FILTER_HASH: rss_filter_ptr = (struct ixgbe_rss_conf_ele *) pmd_flow->rule; ret = ixgbe_config_rss_filter(dev, diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h index a97c27189..e6713a728 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h @@ -278,10 +278,10 @@ static inline int ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) { #ifndef RTE_LIBRTE_IEEE1588 - struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + struct ixgbe_adapter *adapter = dev->data->dev_private; /* no fdir support */ - if (fconf->mode != RTE_FDIR_MODE_NONE) + if (adapter->fdir_conf.mode != IXGBE_FDIR_MODE_NONE) return -1; return 0; diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c index d2f708242..c83ab7eaa 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.c +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c @@ -1172,3 +1172,75 @@ rte_pmd_ixgbe_get_fdir_stats(uint16_t port, return 0; } + +int +rte_pmd_ixgbe_update_fdir_conf(uint16_t port, + struct rte_pmd_ixgbe_fdir_conf conf) +{ + struct ixgbe_adapter *ad; + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = 
&rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + ad = dev->data->dev_private; + + switch (conf.mode) { + case RTE_PMD_IXGBE_FDIR_MODE_NONE: + ad->fdir_conf.mode = IXGBE_FDIR_MODE_NONE; + break; + case RTE_PMD_IXGBE_FDIR_MODE_SIGNATURE: + ad->fdir_conf.mode = IXGBE_FDIR_MODE_SIGNATURE; + break; + case RTE_PMD_IXGBE_FDIR_MODE_PERFECT: + ad->fdir_conf.mode = IXGBE_FDIR_MODE_PERFECT; + break; + case RTE_PMD_IXGBE_FDIR_MODE_PERFECT_MAC_VLAN: + ad->fdir_conf.mode = IXGBE_FDIR_MODE_PERFECT_MAC_VLAN; + break; + case RTE_PMD_IXGBE_FDIR_MODE_PERFECT_TUNNEL: + ad->fdir_conf.mode = IXGBE_FDIR_MODE_PERFECT_TUNNEL; + break; + default: + return -EINVAL; + } + + switch (conf.status) { + case RTE_PMD_IXGBE_FDIR_NO_REPORT_STATUS: + ad->fdir_conf.status = IXGBE_FDIR_NO_REPORT_STATUS; + break; + case RTE_PMD_IXGBE_FDIR_REPORT_STATUS: + ad->fdir_conf.status = IXGBE_FDIR_REPORT_STATUS; + break; + case RTE_PMD_IXGBE_FDIR_REPORT_STATUS_ALWAYS: + ad->fdir_conf.status = IXGBE_FDIR_REPORT_STATUS_ALWAYS; + break; + default: + return -EINVAL; + } + + switch (conf.pballoc) { + case RTE_PMD_IXGBE_FDIR_PBALLOC_64K: + ad->fdir_conf.pballoc = IXGBE_FDIR_PBALLOC_64K; + break; + case RTE_PMD_IXGBE_FDIR_PBALLOC_128K: + ad->fdir_conf.pballoc = IXGBE_FDIR_PBALLOC_128K; + break; + case RTE_PMD_IXGBE_FDIR_PBALLOC_256K: + ad->fdir_conf.pballoc = IXGBE_FDIR_PBALLOC_256K; + break; + default: + return -EINVAL; + } + + ad->fdir_conf.drop_queue = conf.drop_queue; + rte_memcpy(&ad->fdir_conf.mask, &conf.mask, + sizeof(struct ixgbe_fdir_masks)); + rte_memcpy(&ad->fdir_conf.flex_conf, &conf.flex_conf, + sizeof(struct ixgbe_fdir_flex_conf)); + + return 0; +} diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h index 90fc8160b..79bd8c8da 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.h +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h @@ -729,6 +729,138 @@ __rte_experimental int rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable); +enum rte_pmd_ixgbe_fdir_mode { + /* Disable FDIR support. */ + RTE_PMD_IXGBE_FDIR_MODE_NONE = 0, + /* Enable FDIR signature filter mode. */ + RTE_PMD_IXGBE_FDIR_MODE_SIGNATURE, + /* Enable FDIR perfect filter mode. */ + RTE_PMD_IXGBE_FDIR_MODE_PERFECT, + /* Enable FDIR filter mode - MAC VLAN. */ + RTE_PMD_IXGBE_FDIR_MODE_PERFECT_MAC_VLAN, + /* Enable FDIR filter mode - tunnel. */ + RTE_PMD_IXGBE_FDIR_MODE_PERFECT_TUNNEL, +}; + +/* A structure used to define the input for IPV4 flow */ +struct rte_pmd_ixgbe_ipv4_flow { + uint32_t src_ip; /* IPv4 source address in big endian. */ + uint32_t dst_ip; /* IPv4 destination address in big endian. */ + uint8_t tos; /* Type of service to match. */ + uint8_t ttl; /* Time to live to match. */ + uint8_t proto; /* Protocol, next header in big endian. */ +}; + +/* A structure used to define the input for IPV6 flow */ +struct rte_pmd_ixgbe_ipv6_flow { + uint32_t src_ip[4]; /* IPv6 source address in big endian. */ + uint32_t dst_ip[4]; /* IPv6 destination address in big endian. */ + uint8_t tc; /* Traffic class to match. */ + uint8_t proto; /* Protocol, next header to match. */ + uint8_t hop_limits; /* Hop limits to match. */ +}; +struct rte_pmd_ixgbe_fdir_masks { + /* Bit mask for vlan_tci in big endian */ + uint16_t vlan_tci_mask; + /* Bit mask for ipv4 flow in big endian. */ + struct rte_pmd_ixgbe_ipv4_flow ipv4_mask; + /* Bit mask for ipv6 flow in big endian. */ + struct rte_pmd_ixgbe_ipv6_flow ipv6_mask; + /* Bit mask for L4 source port in big endian. 
*/ + uint16_t src_port_mask; + /* Bit mask for L4 destination port in big endian. */ + uint16_t dst_port_mask; + /* Per-byte mask for the 6 bytes of the MAC address; bit 0 matches + * the first byte on the wire. + */ + uint8_t mac_addr_byte_mask; + /* Bit mask for tunnel ID in big endian. */ + uint32_t tunnel_id_mask; + /* 1 - Match tunnel type, 0 - Ignore tunnel type. */ + uint8_t tunnel_type_mask; +}; + +#define RTE_PMD_IXGBE_FDIR_MAX_FLEXLEN 16 /* Max length of flexbytes. */ + +/* Payload type */ +enum rte_pmd_ixgbe_payload_type { + RTE_PMD_IXGBE_PAYLOAD_UNKNOWN = 0, + RTE_PMD_IXGBE_RAW_PAYLOAD, + RTE_PMD_IXGBE_L2_PAYLOAD, + RTE_PMD_IXGBE_L3_PAYLOAD, + RTE_PMD_IXGBE_L4_PAYLOAD, + RTE_PMD_IXGBE_PAYLOAD_MAX = 8, +}; + +/* A structure used to select the bytes extracted from the protocol layers + * into the flexible payload for filtering + */ +struct rte_pmd_ixgbe_flex_payload_cfg { + enum rte_pmd_ixgbe_payload_type type; /* Payload type */ + uint16_t src_offset[RTE_PMD_IXGBE_FDIR_MAX_FLEXLEN]; + /* Offset in bytes from the beginning of the packet's payload; + * src_offset[i] indicates flexbyte i's offset in the original + * packet payload. + */ +}; + +/* A structure used to define FDIR masks for flexible payload + * for each flow type + */ +struct rte_pmd_ixgbe_fdir_flex_mask { + uint16_t flow_type; + uint8_t mask[RTE_PMD_IXGBE_FDIR_MAX_FLEXLEN]; + /* Mask for the whole flexible payload */ +}; + +/* A structure used to define all flexible payload related settings, + * including the flex payload and the flex masks + */ +struct rte_pmd_ixgbe_fdir_flex_conf { + uint16_t nb_payloads; /* The number of following payload cfg */ + uint16_t nb_flexmasks; /* The number of following mask */ + struct rte_pmd_ixgbe_flex_payload_cfg flex_set[RTE_PMD_IXGBE_PAYLOAD_MAX]; + /* Flex payload configuration for each payload type */ + struct rte_pmd_ixgbe_fdir_flex_mask flex_mask[RTE_ETH_FLOW_MAX]; + /* Flex mask configuration for each flow type */ +}; + +#define RTE_PMD_IXGBE_UINT64_BIT (CHAR_BIT * sizeof(uint64_t)) +#define RTE_PMD_IXGBE_FLOW_MASK_ARRAY_SIZE \ + (RTE_ALIGN(RTE_ETH_FLOW_MAX, RTE_PMD_IXGBE_UINT64_BIT) \ + / RTE_PMD_IXGBE_UINT64_BIT) + +struct rte_pmd_ixgbe_fdir_info { + enum rte_pmd_ixgbe_fdir_mode mode; /* Flow Director mode */ + struct rte_pmd_ixgbe_fdir_masks mask; + /* Flex payload configuration information */ + struct rte_pmd_ixgbe_fdir_flex_conf flex_conf; + uint32_t guarant_spc; /* Guaranteed spaces. */ + uint32_t best_spc; /* Best effort spaces. */ + /* Bit mask for every supported flow type. */ + uint64_t flow_types_mask[RTE_PMD_IXGBE_FLOW_MASK_ARRAY_SIZE]; + uint32_t max_flexpayload; /* Total flex payload in bytes. */ + /* Flexible payload unit in bytes. Sizes and alignments of all flex + * payload segments should be multiples of this value. + */ + uint32_t flex_payload_unit; + /* Max number of flexible payload continuous segments. + * Each segment should be a multiple of flex_payload_unit. + */ + uint32_t max_flex_payload_segment_num; + /* Maximum src_offset in bytes allowed. It indicates that + * src_offset[i] in struct rte_pmd_ixgbe_flex_payload_cfg should be + * less than this value. + */ + uint16_t flex_payload_limit; + /* Flex bitmask unit in bytes. The size of flex bitmasks should be a + * multiple of this value.
+ */ + uint32_t flex_bitmask_unit; + /* Max supported size of flex bitmasks in flex_bitmask_unit */ + uint32_t max_flex_bitmask_num; +}; + /** * Get port fdir info * @@ -761,4 +893,48 @@ __rte_experimental int rte_pmd_ixgbe_get_fdir_stats(uint16_t port, struct rte_eth_fdir_stats *fdir_stats); + + +enum rte_pmd_ixgbe_fdir_status_mode { + RTE_PMD_IXGBE_FDIR_NO_REPORT_STATUS = 0, /* Never report FDIR hash. */ + /* Only report FDIR hash for matching pkts. */ + RTE_PMD_IXGBE_FDIR_REPORT_STATUS, + RTE_PMD_IXGBE_FDIR_REPORT_STATUS_ALWAYS, /* Always report FDIR hash. */ +}; + +enum rte_pmd_ixgbe_fdir_pballoc_type { + RTE_PMD_IXGBE_FDIR_PBALLOC_NONE = 0, + RTE_PMD_IXGBE_FDIR_PBALLOC_64K = 1, + RTE_PMD_IXGBE_FDIR_PBALLOC_128K = 2, + RTE_PMD_IXGBE_FDIR_PBALLOC_256K = 3, +}; + +struct rte_pmd_ixgbe_fdir_conf { + enum rte_pmd_ixgbe_fdir_mode mode; /* Flow Director mode. */ + /* Space for FDIR filters. */ + enum rte_pmd_ixgbe_fdir_pballoc_type pballoc; + /* How to report FDIR hash. */ + enum rte_pmd_ixgbe_fdir_status_mode status; + /* RX queue of packets matching a "drop" filter in perfect mode. */ + uint8_t drop_queue; + struct rte_pmd_ixgbe_fdir_masks mask; + /* Flex payload configuration. */ + struct rte_pmd_ixgbe_fdir_flex_conf flex_conf; +}; + +/** + * Update the Flow Director configuration of an ixgbe port. + * + * @param port + * The port identifier of the Ethernet device. + * @param conf + * Flow Director configuration. + * @return + * - (0) if successful. + * - (-EINVAL) if bad parameter. + */ +__rte_experimental +int +rte_pmd_ixgbe_update_fdir_conf(uint16_t port, + struct rte_pmd_ixgbe_fdir_conf conf); #endif /* _PMD_IXGBE_H_ */ diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map index 9402802b0..80bd47121 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map +++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map @@ -45,4 +45,5 @@ EXPERIMENTAL { rte_pmd_ixgbe_mdio_unlocked_read; rte_pmd_ixgbe_mdio_unlocked_write; rte_pmd_ixgbe_upd_fctrl_sbp; + rte_pmd_ixgbe_update_fdir_conf; }; From patchwork Tue Sep 29 07:53:30 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Chenxu Di X-Patchwork-Id: 79151 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id ECD69A04C0; Tue, 29 Sep 2020 10:11:19 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id CCC5D1D945; Tue, 29 Sep 2020 10:10:30 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 1C4011D938 for ; Tue, 29 Sep 2020 10:10:26 +0200 (CEST) IronPort-SDR: YjtQHu5NfaxPPTYFqVn+acCFSqargwmJZoWna4iy+k0OwEoV9Dj3tbrQ3npdnUpxLQKyCNsyRO MMecj5DTMqfA== X-IronPort-AV: E=McAfee;i="6000,8403,9758"; a="159516221" X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="159516221" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 29 Sep 2020 01:10:23 -0700 IronPort-SDR: NsU0IVqiPyvg2IceZ9MvH5UVCpqyGYKbIvNnh3yf+FmY/v0QTkfs0MLA1If+2Mc1iBg00xMAZz /4rmuT8zaBhQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,317,1596524400"; d="scan'208";a="345186740" Received: from unknown (HELO localhost.localdomain) ([10.239.255.61]) by fmsmga002.fm.intel.com with ESMTP; 29 Sep 2020 01:10:20 -0700 From: Chenxu Di To: dev@dpdk.org Cc: junyux.jiang@intel.com,
shougangx.wang@intel.com, Wenzhuo Lu , Beilei Xing , Chenxu Di Date: Tue, 29 Sep 2020 07:53:30 +0000 Message-Id: <20200929075330.40201-1-chenxux.di@intel.com> X-Mailer: git-send-email 2.17.1 Subject: [dpdk-dev] [RFC 5/5] app/testpmd: add support for ixgbe FDIR configuration X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" As announced in deprecation.rst, the "rte_eth_conf.fdir_conf" field will be removed, but the IXGBE generic flow code still depends on this configuration. This patch makes testpmd set the FDIR configuration for the IXGBE PMD by calling the new private API. Signed-off-by: Chenxu Di --- app/test-pmd/parameters.c | 63 ++++++++++++++++++++++++++++++++++++++- app/test-pmd/testpmd.c | 28 +++++++++++++++++ app/test-pmd/testpmd.h | 3 ++ 3 files changed, 93 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 8c2aa13dd..f9547a4d2 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -42,6 +42,9 @@ #ifdef RTE_LIBRTE_PMD_BOND #include #endif +#ifdef RTE_LIBRTE_IXGBE_PMD +#include <rte_pmd_ixgbe.h> +#endif #include #include "testpmd.h" @@ -935,6 +938,29 @@ launch_args_parse(int argc, char** argv) "none, signature, perfect, perfect-mac-vlan" " or perfect-tunnel\n", optarg); +#ifdef RTE_LIBRTE_IXGBE_PMD + if (!strcmp(optarg, "signature")) + ixgbe_fdir_conf.mode = + RTE_PMD_IXGBE_FDIR_MODE_SIGNATURE; + else if (!strcmp(optarg, "perfect")) + ixgbe_fdir_conf.mode = + RTE_PMD_IXGBE_FDIR_MODE_PERFECT; + else if (!strcmp(optarg, "perfect-mac-vlan")) + ixgbe_fdir_conf.mode = + RTE_PMD_IXGBE_FDIR_MODE_PERFECT_MAC_VLAN; + else if (!strcmp(optarg, "perfect-tunnel")) + ixgbe_fdir_conf.mode = + RTE_PMD_IXGBE_FDIR_MODE_PERFECT_TUNNEL; + else if (!strcmp(optarg, "none")) + ixgbe_fdir_conf.mode = + RTE_PMD_IXGBE_FDIR_MODE_NONE; + else + rte_exit(EXIT_FAILURE, + "pkt-filter-mode %s invalid - must be: " + "none, signature, perfect, perfect-mac-vlan" + " or perfect-tunnel\n", + optarg); +#endif } if (!strcmp(lgopts[opt_idx].name, "pkt-filter-report-hash")) { @@ -952,6 +978,22 @@ launch_args_parse(int argc, char** argv) "pkt-filter-report-hash %s invalid " "- must be: none or match or always\n", optarg); +#ifdef RTE_LIBRTE_IXGBE_PMD + if (!strcmp(optarg, "none")) + ixgbe_fdir_conf.status = + RTE_PMD_IXGBE_FDIR_NO_REPORT_STATUS; + else if (!strcmp(optarg, "match")) + ixgbe_fdir_conf.status = + RTE_PMD_IXGBE_FDIR_REPORT_STATUS; + else if (!strcmp(optarg, "always")) + ixgbe_fdir_conf.status = + RTE_PMD_IXGBE_FDIR_REPORT_STATUS_ALWAYS; + else + rte_exit(EXIT_FAILURE, + "pkt-filter-report-hash %s invalid " + "- must be: none or match or always\n", + optarg); +#endif } if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) { if (!strcmp(optarg, "64K")) @@ -967,12 +1009,31 @@ launch_args_parse(int argc, char** argv) rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -" " must be: 64K or 128K or 256K\n", optarg); +#ifdef RTE_LIBRTE_IXGBE_PMD + if (!strcmp(optarg, "64K")) + ixgbe_fdir_conf.pballoc = + RTE_PMD_IXGBE_FDIR_PBALLOC_64K; + else if (!strcmp(optarg, "128K")) + ixgbe_fdir_conf.pballoc = + RTE_PMD_IXGBE_FDIR_PBALLOC_128K; + else if (!strcmp(optarg, "256K")) + ixgbe_fdir_conf.pballoc = + RTE_PMD_IXGBE_FDIR_PBALLOC_256K; + else + rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -" + " must be: 64K or 128K or 256K\n", + optarg); +#endif } if (!strcmp(lgopts[opt_idx].name, "pkt-filter-drop-queue")) { n =
atoi(optarg); - if (n >= 0) + if (n >= 0) { fdir_conf.drop_queue = (uint8_t) n; +#ifdef RTE_LIBRTE_IXGBE_PMD + ixgbe_fdir_conf.drop_queue = (uint8_t) n; +#endif + } else rte_exit(EXIT_FAILURE, "drop queue %d invalid - must" diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index ee3dd27d8..155063dbc 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -459,6 +459,31 @@ struct rte_fdir_conf fdir_conf = { .drop_queue = 127, }; +#ifdef RTE_LIBRTE_IXGBE_PMD +struct rte_pmd_ixgbe_fdir_conf ixgbe_fdir_conf = { + .mode = RTE_PMD_IXGBE_FDIR_MODE_NONE, + .pballoc = RTE_PMD_IXGBE_FDIR_PBALLOC_64K, + .status = RTE_PMD_IXGBE_FDIR_REPORT_STATUS, + .mask = { + .vlan_tci_mask = 0xFFEF, + .ipv4_mask = { + .src_ip = 0xFFFFFFFF, + .dst_ip = 0xFFFFFFFF, + }, + .ipv6_mask = { + .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + }, + .src_port_mask = 0xFFFF, + .dst_port_mask = 0xFFFF, + .mac_addr_byte_mask = 0xFF, + .tunnel_type_mask = 1, + .tunnel_id_mask = 0xFFFFFFFF, + }, + .drop_queue = 127, +}; +#endif + volatile int test_done = 1; /* stop packet forwarding when set to 1. */ struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; @@ -3342,6 +3367,9 @@ init_port_config(void) RTE_ETH_FOREACH_DEV(pid) { port = &ports[pid]; port->dev_conf.fdir_conf = fdir_conf; +#ifdef RTE_LIBRTE_IXGBE_PMD + rte_pmd_ixgbe_update_fdir_conf(pid, ixgbe_fdir_conf); +#endif ret = eth_dev_info_get_print_err(pid, &port->dev_info); if (ret != 0) diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index f139fe7a0..8adb31b4f 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -409,6 +409,9 @@ extern uint8_t bitrate_enabled; #endif extern struct rte_fdir_conf fdir_conf; +#ifdef RTE_LIBRTE_IXGBE_PMD +extern struct rte_pmd_ixgbe_fdir_conf ixgbe_fdir_conf; +#endif /* * Configuration of packet segments used by the "txonly" processing engine.
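
The testpmd changes above simply mirror the old rte_eth fdir_conf command-line handling into the new ixgbe private configuration. As a quick reference, below is a minimal sketch of how an application other than testpmd could drive the same private API; the helper name setup_ixgbe_fdir and the port id are illustrative (not part of this series), the mask values copy the testpmd defaults above, and only struct rte_pmd_ixgbe_fdir_conf and rte_pmd_ixgbe_update_fdir_conf() come from these patches.

#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

/*
 * Minimal sketch: program the ixgbe FDIR setup through the private API
 * instead of the deprecated rte_eth_conf.fdir_conf. The values mirror
 * the testpmd defaults above and are illustrative only.
 */
static int
setup_ixgbe_fdir(uint16_t port_id)
{
	struct rte_pmd_ixgbe_fdir_conf conf = {
		.mode = RTE_PMD_IXGBE_FDIR_MODE_PERFECT,
		.pballoc = RTE_PMD_IXGBE_FDIR_PBALLOC_64K,
		.status = RTE_PMD_IXGBE_FDIR_REPORT_STATUS,
		.drop_queue = 127,
		.mask = {
			.vlan_tci_mask = 0xFFEF,
			.ipv4_mask = {
				.src_ip = 0xFFFFFFFF,
				.dst_ip = 0xFFFFFFFF,
			},
			.src_port_mask = 0xFFFF,
			.dst_port_mask = 0xFFFF,
			.mac_addr_byte_mask = 0xFF,
			.tunnel_type_mask = 1,
			.tunnel_id_mask = 0xFFFFFFFF,
		},
	};

	/*
	 * Like testpmd's init_port_config(), call this before the port is
	 * configured and started; non-ixgbe ports return -ENOTSUP.
	 */
	return rte_pmd_ixgbe_update_fdir_conf(port_id, conf);
}

In testpmd this call sits in init_port_config() next to the old dev_conf.fdir_conf assignment, so during the transition both the deprecated path and the private API stay in sync.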