From patchwork Fri Sep 25 06:05:25 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wenzhuo Lu X-Patchwork-Id: 7177 Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id 21EED8E97; Fri, 25 Sep 2015 08:05:49 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by dpdk.org (Postfix) with ESMTP id 8CD44234 for ; Fri, 25 Sep 2015 08:05:47 +0200 (CEST) Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga101.fm.intel.com with ESMTP; 24 Sep 2015 23:05:46 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.17,585,1437462000"; d="scan'208";a="796983321" Received: from shvmail01.sh.intel.com ([10.239.29.42]) by fmsmga001.fm.intel.com with ESMTP; 24 Sep 2015 23:05:46 -0700 Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id t8P65h73008908; Fri, 25 Sep 2015 14:05:43 +0800 Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1]) by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id t8P65eEt001139; Fri, 25 Sep 2015 14:05:42 +0800 Received: (from wenzhuol@localhost) by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id t8P65efu001135; Fri, 25 Sep 2015 14:05:40 +0800 From: Wenzhuo Lu To: dev@dpdk.org Date: Fri, 25 Sep 2015 14:05:25 +0800 Message-Id: <1443161125-1035-7-git-send-email-wenzhuo.lu@intel.com> X-Mailer: git-send-email 1.7.4.1 In-Reply-To: <1443161125-1035-1-git-send-email-wenzhuo.lu@intel.com> References: <1443161125-1035-1-git-send-email-wenzhuo.lu@intel.com> Subject: [dpdk-dev] [PATCH 6/6] ixgbe: implementation for fdir new modes' config X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org 
Sender: "dev" Implement the new CLIs for fdir mac vlan and cloud modes, including flow_director_filter and flow_director_mask. Set the mask of fdir. Add, delete or update the entities of filter. Signed-off-by: Wenzhuo Lu --- drivers/net/ixgbe/ixgbe_ethdev.h | 3 + drivers/net/ixgbe/ixgbe_fdir.c | 241 ++++++++++++++++++++++++++++++++------- 2 files changed, 202 insertions(+), 42 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index c3d4f4f..008b64d 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -133,6 +133,9 @@ struct ixgbe_hw_fdir_mask { uint16_t src_port_mask; uint16_t dst_port_mask; uint16_t flex_bytes_mask; + uint8_t mac_addr_mask; + uint32_t tni_vni_mask; + uint8_t tunnel_type_mask; }; struct ixgbe_hw_fdir_info { diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c index 5c8b833..a07a7b7 100644 --- a/drivers/net/ixgbe/ixgbe_fdir.c +++ b/drivers/net/ixgbe/ixgbe_fdir.c @@ -105,6 +105,8 @@ rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\ } while (0) +#define DEFAULT_VXLAN_PORT 4789 + static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash); static int fdir_set_input_mask_82599(struct rte_eth_dev *dev, const struct rte_eth_fdir_masks *input_mask); @@ -113,7 +115,8 @@ static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl); static int ixgbe_fdir_filter_to_atr_input( const struct rte_eth_fdir_filter *fdir_filter, - union ixgbe_atr_input *input); + union ixgbe_atr_input *input, + enum rte_fdir_mode mode); static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, uint32_t key); static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input, @@ -122,7 +125,8 @@ static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, enum rte_fdir_pballoc_type pballoc); static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, 
union ixgbe_atr_input *input, uint8_t queue, - uint32_t fdircmd, uint32_t fdirhash); + uint32_t fdircmd, uint32_t fdirhash, + enum rte_fdir_mode mode); static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd, uint32_t fdirhash); @@ -243,9 +247,15 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) << IXGBE_FDIRCTRL_FLEX_SHIFT; - if (conf->mode == RTE_FDIR_MODE_PERFECT) { + if (conf->mode >= RTE_FDIR_MODE_PERFECT) { *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN + << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + else if (conf->mode == RTE_FDIR_MODE_PERFECT_CLOUD) + *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD + << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); } return 0; @@ -294,8 +304,18 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev, uint16_t dst_ipv6m = 0; uint16_t src_ipv6m = 0; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + PMD_INIT_FUNC_TRACE(); + /* set the default UDP port for VxLAN */ + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT); + + /* some bits must be set for mac vlan or cloud mode */ + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN + || mode == RTE_FDIR_MODE_PERFECT_CLOUD) + fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P; + /* * Program the relevant mask registers. If src/dst_port or src/dst_addr * are zero, then assume a full mask for that field. 
Also assume that @@ -323,26 +343,36 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev, IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); - /* store the TCP/UDP port masks, bit reversed from port layout */ - fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask, - input_mask->src_port_mask); - - /* write all the same so that UDP, TCP and SCTP use the same mask */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); - info->mask.src_port_mask = input_mask->src_port_mask; - info->mask.dst_port_mask = input_mask->dst_port_mask; + if (mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN && + mode != RTE_FDIR_MODE_PERFECT_CLOUD) { + /* + * store the TCP/UDP port masks, + * bit reversed from port layout + */ + fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask, + input_mask->src_port_mask); - /* Store source and destination IPv4 masks (big-endian) */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask->ipv4_mask.src_ip)); - IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask->ipv4_mask.dst_ip)); - info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip; - info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip; + /* + * write all the same so that UDP, + * TCP and SCTP use the same mask + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + info->mask.src_port_mask = input_mask->src_port_mask; + info->mask.dst_port_mask = input_mask->dst_port_mask; + + /* Store source and destination IPv4 masks (big-endian) */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, + ~(input_mask->ipv4_mask.src_ip)); + IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, + ~(input_mask->ipv4_mask.dst_ip)); + info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip; + info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip; + } - if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) { + if (mode == RTE_FDIR_MODE_SIGNATURE) 
{ /* - * IPv6 mask is only meaningful in signature mode * Store source and destination IPv6 masks (bit reversed) */ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m); @@ -354,6 +384,69 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev, info->mask.dst_ipv6_mask = dst_ipv6m; } + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN + || mode == RTE_FDIR_MODE_PERFECT_CLOUD) { + fdiripv6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); + fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE | + IXGBE_FDIRIP6M_TNI_VNI; + + switch (input_mask->mac_addr_mask & 0xFF) { + case 0x00: + /* Mask inner MAC */ + fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC; + break; + case 0xFF: + break; + default: + PMD_INIT_LOG(ERR, "invalid mac_addr_mask"); + return -EINVAL; + } + info->mask.mac_addr_mask = input_mask->mac_addr_mask; + + if (mode == RTE_FDIR_MODE_PERFECT_CLOUD) { + switch (input_mask->tunnel_type_mask) { + case 0: + /* Mask tunnel type */ + fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; + break; + case 1: + break; + default: + PMD_INIT_LOG(ERR, "invalid tunnel_type_mask"); + return -EINVAL; + } + info->mask.tunnel_type_mask = + input_mask->tunnel_type_mask; + + switch (input_mask->tni_vni_mask & 0xFFFFFFFF) { + case 0x0: + /* Mask vxlan id */ + fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI; + break; + case 0x00FFFFFF: + fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24; + break; + case 0xFFFFFFFF: + break; + default: + PMD_INIT_LOG(ERR, "invalid tni_vni_mask"); + return -EINVAL; + } + info->mask.tni_vni_mask = + input_mask->tni_vni_mask; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); + } + return IXGBE_SUCCESS; } @@ -431,6 +524,7 @@ ixgbe_fdir_configure(struct 
rte_eth_dev *dev) int err; uint32_t fdirctrl, pbsize; int i; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; PMD_INIT_FUNC_TRACE(); @@ -440,6 +534,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev) hw->mac.type != ixgbe_mac_X550EM_x) return -ENOSYS; + /* x550 supports mac-vlan and cloud mode but others not */ + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + mode != RTE_FDIR_MODE_SIGNATURE && + mode != RTE_FDIR_MODE_PERFECT) + return -ENOSYS; + err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl); if (err) return err; @@ -488,7 +589,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev) */ static int ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter, - union ixgbe_atr_input *input) + union ixgbe_atr_input *input, enum rte_fdir_mode mode) { input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci; input->formatted.flex_bytes = (uint16_t)( @@ -521,8 +622,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter, input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6; break; default: - PMD_DRV_LOG(ERR, " Error on flow_type input"); - return -EINVAL; + break; } switch (fdir_filter->input.flow_type) { @@ -558,8 +658,23 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter, sizeof(input->formatted.dst_ip)); break; default: - PMD_DRV_LOG(ERR, " Error on flow_type input"); - return -EINVAL; + break; + } + + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + rte_memcpy( + input->formatted.inner_mac, + fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes, + sizeof(input->formatted.inner_mac)); + } else if (mode == RTE_FDIR_MODE_PERFECT_CLOUD) { + rte_memcpy( + input->formatted.inner_mac, + fdir_filter->input.flow.cloud_flow.mac_addr.addr_bytes, + sizeof(input->formatted.inner_mac)); + input->formatted.tunnel_type = + fdir_filter->input.flow.cloud_flow.tunnel_type; + input->formatted.tni_vni = + 
fdir_filter->input.flow.cloud_flow.tni_vni; } return 0; @@ -743,20 +858,51 @@ atr_compute_sig_hash_82599(union ixgbe_atr_input *input, static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, uint8_t queue, - uint32_t fdircmd, uint32_t fdirhash) + uint32_t fdircmd, uint32_t fdirhash, + enum rte_fdir_mode mode) { uint32_t fdirport, fdirvlan; + u32 addr_low, addr_high; + u32 cloud_type = 0; int err = 0; - /* record the IPv4 address (big-endian) */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); - IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); - - /* record source and destination port (little-endian)*/ - fdirport = IXGBE_NTOHS(input->formatted.dst_port); - fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; - fdirport |= IXGBE_NTOHS(input->formatted.src_port); - IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + if (mode == RTE_FDIR_MODE_PERFECT) { + /* record the IPv4 address (big-endian) */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, + input->formatted.src_ip[0]); + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, + input->formatted.dst_ip[0]); + + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + } else { + /* for mac vlan and cloud modes */ + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0); + } else { + /* cloud mode */ + if (input->formatted.tunnel_type != + 
RTE_FDIR_TUNNEL_TYPE_NVGRE) + cloud_type = 0x80000000; + cloud_type |= addr_high; + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), cloud_type); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), + input->formatted.tni_vni); + } + } /* record vlan (little-endian) and flex_bytes(big-endian) */ fdirvlan = input->formatted.flex_bytes; @@ -917,12 +1063,13 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, return -ENOTSUP; } - if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) + if (dev->data->dev_conf.fdir_conf.mode >= RTE_FDIR_MODE_PERFECT) is_perfect = TRUE; memset(&input, 0, sizeof(input)); - err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input); + err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input, + dev->data->dev_conf.fdir_conf.mode); if (err) return err; @@ -966,7 +1113,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, if (is_perfect) { err = fdir_write_perfect_filter_82599(hw, &input, queue, - fdircmd_flags, fdirhash); + fdircmd_flags, fdirhash, + dev->data->dev_conf.fdir_conf.mode); } else { err = fdir_add_signature_filter_82599(hw, &input, queue, fdircmd_flags, fdirhash); @@ -1018,7 +1166,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; max_num = (1 << (FDIRENTRIES_NUM_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK))); - if (fdir_info->mode == RTE_FDIR_MODE_PERFECT) + if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT) fdir_info->guarant_spc = max_num; else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE) fdir_info->guarant_spc = max_num * 4; @@ -1032,11 +1180,20 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info fdir_info->mask.ipv6_mask.dst_ip); fdir_info->mask.src_port_mask = info->mask.src_port_mask; fdir_info->mask.dst_port_mask = info->mask.dst_port_mask; + fdir_info->mask.mac_addr_mask = info->mask.mac_addr_mask; + fdir_info->mask.tni_vni_mask = info->mask.tni_vni_mask; + 
fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask; fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN; - fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES; + + if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN + || fdir_info->mode == RTE_FDIR_MODE_PERFECT_CLOUD) + fdir_info->flow_types_mask[0] = 0; + else + fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES; + fdir_info->flex_payload_unit = sizeof(uint16_t); fdir_info->max_flex_payload_segment_num = 1; - fdir_info->flex_payload_limit = 62; + fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF; fdir_info->flex_conf.nb_payloads = 1; fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD; fdir_info->flex_conf.flex_set[0].src_offset[0] = offset; @@ -1095,7 +1252,7 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); max_num = (1 << (FDIRENTRIES_NUM_SHIFT + (reg & FDIRCTRL_PBALLOC_MASK))); - if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) + if (dev->data->dev_conf.fdir_conf.mode >= RTE_FDIR_MODE_PERFECT) fdir_stats->guarant_cnt = max_num - fdir_stats->free; else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;