[dpdk-dev,v2,6/6] ixgbe: implementation for fdir new modes' config

Message ID 1443504682-17752-7-git-send-email-wenzhuo.lu@intel.com (mailing list archive)
State Superseded, archived

Commit Message

Wenzhuo Lu Sept. 29, 2015, 5:31 a.m. UTC
  Implement the new CLIs for the fdir mac-vlan and tunnel modes, including
flow_director_filter and flow_director_mask. Set the fdir masks and add,
delete or update the filter entries.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.h |   3 +
 drivers/net/ixgbe/ixgbe_fdir.c   | 241 ++++++++++++++++++++++++++++++++-------
 2 files changed, 202 insertions(+), 42 deletions(-)
  

Comments

Ananyev, Konstantin Oct. 20, 2015, 1:55 p.m. UTC | #1
Hi Wenzhuo,
A few questions/comments from me, see below.
Thanks
Konstantin

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wenzhuo Lu
> Sent: Tuesday, September 29, 2015 6:31 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new modes' config
> 
> Implement the new CLIs for the fdir mac-vlan and tunnel modes, including
> flow_director_filter and flow_director_mask. Set the fdir masks and add,
> delete or update the filter entries.
> 
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.h |   3 +
>  drivers/net/ixgbe/ixgbe_fdir.c   | 241 ++++++++++++++++++++++++++++++++-------
>  2 files changed, 202 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
> index c3d4f4f..9cc45a0 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.h
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.h
> @@ -133,6 +133,9 @@ struct ixgbe_hw_fdir_mask {
>  	uint16_t src_port_mask;
>  	uint16_t dst_port_mask;
>  	uint16_t flex_bytes_mask;
> +	uint8_t  mac_addr_mask;
> +	uint32_t tunnel_id_mask;
> +	uint8_t  tunnel_type_mask;
>  };
> 
>  struct ixgbe_hw_fdir_info {
> diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
> index 5c8b833..87e7081 100644
> --- a/drivers/net/ixgbe/ixgbe_fdir.c
> +++ b/drivers/net/ixgbe/ixgbe_fdir.c
> @@ -105,6 +105,8 @@
>  	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
>  } while (0)
> 
> +#define DEFAULT_VXLAN_PORT 4789
> +
>  static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
>  static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
>  		const struct rte_eth_fdir_masks *input_mask);
> @@ -113,7 +115,8 @@ static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
>  static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
>  static int ixgbe_fdir_filter_to_atr_input(
>  		const struct rte_eth_fdir_filter *fdir_filter,
> -		union ixgbe_atr_input *input);
> +		union ixgbe_atr_input *input,
> +		enum rte_fdir_mode mode);
>  static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
>  				 uint32_t key);
>  static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
> @@ -122,7 +125,8 @@ static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
>  		enum rte_fdir_pballoc_type pballoc);
>  static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
>  			union ixgbe_atr_input *input, uint8_t queue,
> -			uint32_t fdircmd, uint32_t fdirhash);
> +			uint32_t fdircmd, uint32_t fdirhash,
> +			enum rte_fdir_mode mode);
>  static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
>  		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
>  		uint32_t fdirhash);
> @@ -243,9 +247,15 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
>  	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
>  		     IXGBE_FDIRCTRL_FLEX_SHIFT;
> 
> -	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
> +	if (conf->mode >= RTE_FDIR_MODE_PERFECT) {

I think it would be better as: if (conf->mode >= RTE_FDIR_MODE_PERFECT && conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
That makes sure a future expansion of RTE_FDIR_MODE_* won't break this code.
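
For illustration, that bounded check could live in one small helper so the other
">= RTE_FDIR_MODE_PERFECT" comparisons further down stay consistent with it
(the helper name is invented for this sketch and is not part of the patch):

/* Sketch only: treat as "perfect" just the perfect-match modes known today,
 * so that a future RTE_FDIR_MODE_* value is not silently accepted. */
static inline int
fdir_mode_is_perfect(enum rte_fdir_mode mode)
{
	return mode >= RTE_FDIR_MODE_PERFECT &&
	       mode <= RTE_FDIR_MODE_PERFECT_TUNNEL;
}

configure_fdir_flags(), ixgbe_add_del_fdir_filter(), ixgbe_fdir_info_get() and
ixgbe_fdir_stats_get() could then share this predicate instead of open-coding
the comparison.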

>  		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
>  		*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
> +		if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
> +					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
> +		else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
> +					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
>  	}
> 
>  	return 0;
> @@ -294,8 +304,18 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
>  	uint16_t dst_ipv6m = 0;
>  	uint16_t src_ipv6m = 0;
> 
> +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> +
>  	PMD_INIT_FUNC_TRACE();
> 
> +	/* set the default UDP port for VxLAN */
> +	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);

Hmm, why is that done unconditionally?
As I understand it, this is an x550-specific register and is not present in older HW (82599), no?
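
If the write really is x550-only, a minimal sketch of the guard could look like
this (the helper name is made up for the example):

/* Sketch: only x550/x550EM_x have IXGBE_VXLANCTRL, so program the VxLAN
 * UDP port there and skip the write on older parts such as 82599/X540. */
static void
ixgbe_fdir_set_vxlan_port(struct ixgbe_hw *hw, uint16_t port)
{
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x)
		IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
}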

> +
> +	/* some bits must be set for mac vlan or tunnel mode */
> +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> +		fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
> +
>  	/*
>  	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
>  	 * are zero, then assume a full mask for that field. Also assume that
> @@ -323,26 +343,36 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
> 
>  	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> 
> -	/* store the TCP/UDP port masks, bit reversed from port layout */
> -	fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
> -					 input_mask->src_port_mask);
> -
> -	/* write all the same so that UDP, TCP and SCTP use the same mask */
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> -	info->mask.src_port_mask = input_mask->src_port_mask;
> -	info->mask.dst_port_mask = input_mask->dst_port_mask;
> +	if (mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
> +		mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
> +		/*
> +		 * store the TCP/UDP port masks,
> +		 * bit reversed from port layout
> +		 */
> +		fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
> +						 input_mask->src_port_mask);
> 
> -	/* Store source and destination IPv4 masks (big-endian) */
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask->ipv4_mask.src_ip));
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask->ipv4_mask.dst_ip));
> -	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> -	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> +		/*
> +		 * write all the same so that UDP,
> +		 * TCP and SCTP use the same mask
> +		 */
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> +		info->mask.src_port_mask = input_mask->src_port_mask;
> +		info->mask.dst_port_mask = input_mask->dst_port_mask;
> +
> +		/* Store source and destination IPv4 masks (big-endian) */
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M,
> +				~(input_mask->ipv4_mask.src_ip));
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M,
> +				~(input_mask->ipv4_mask.dst_ip));
> +		info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> +		info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> +	}
> 
> -	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
> +	if (mode == RTE_FDIR_MODE_SIGNATURE) {
>  		/*
> -		 * IPv6 mask is only meaningful in signature mode
>  		 * Store source and destination IPv6 masks (bit reversed)
>  		 */
>  		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
> @@ -354,6 +384,69 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
>  		info->mask.dst_ipv6_mask = dst_ipv6m;
>  	}
> 
> +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> +		fdiripv6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
> +		fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
> +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> +			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
> +					IXGBE_FDIRIP6M_TNI_VNI;
> +
> +		switch (input_mask->mac_addr_mask & 0xFF) {
> +		case 0x00:
> +			/* Mask inner MAC */
> +			fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
> +			break;
> +		case 0xFF:
> +			break;
> +		default:
> +			PMD_INIT_LOG(ERR, "invalid mac_addr_mask");
> +			return -EINVAL;

I thought it was possible to mask any byte of the MAC address...
Am I missing something here?

> +		}
> +		info->mask.mac_addr_mask = input_mask->mac_addr_mask;
> +
> +		if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> +			switch (input_mask->tunnel_type_mask) {
> +			case 0:
> +				/* Mask turnnel type */
> +				fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
> +				break;
> +			case 1:
> +				break;
> +			default:
> +				PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
> +				return -EINVAL;
> +			}
> +			info->mask.tunnel_type_mask =
> +				input_mask->tunnel_type_mask;
> +
> +			switch (input_mask->tunnel_id_mask & 0xFFFFFFFF) {
> +			case 0x0:
> +				/* Mask vxlan id */
> +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
> +				break;
> +			case 0x00FFFFFF:
> +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
> +				break;
> +			case 0xFFFFFFFF:
> +				break;
> +			default:
> +				PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
> +				return -EINVAL;
> +			}
> +			info->mask.tunnel_id_mask =
> +				input_mask->tunnel_id_mask;
> +		}
> +
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
> +
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
> +	}

Probably worth putting this into a separate function: fdir_set_input_mask_x550() or something similar.
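
A rough outline of such a split (function names and the exact division of work
are only a suggestion, not something taken from the patch):

/* Sketch: dispatch on the configured mode and keep the x550-only
 * mac-vlan/tunnel mask programming out of the 82599 path. */
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask)
{
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
	    mode <= RTE_FDIR_MODE_PERFECT)
		return fdir_set_input_mask_82599(dev, input_mask);
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
	    mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		return fdir_set_input_mask_x550(dev, input_mask);

	PMD_DRV_LOG(ERR, "unsupported fdir mode %d", mode);
	return -ENOTSUP;
}

Here fdir_set_input_mask_82599() would keep the existing IP/L4 mask programming
and fdir_set_input_mask_x550() would take the new FDIRIP6M / inner-MAC / tunnel
block quoted above.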

> +
>  	return IXGBE_SUCCESS;
>  }
> 
> @@ -431,6 +524,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
>  	int err;
>  	uint32_t fdirctrl, pbsize;
>  	int i;
> +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> 
>  	PMD_INIT_FUNC_TRACE();
> 
> @@ -440,6 +534,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
>  		hw->mac.type != ixgbe_mac_X550EM_x)
>  		return -ENOSYS;
> 
> +	/* x550 supports mac-vlan and tunnel mode but other NICs not */
> +	if (hw->mac.type != ixgbe_mac_X550 &&
> +		hw->mac.type != ixgbe_mac_X550EM_x &&
> +		mode != RTE_FDIR_MODE_SIGNATURE &&
> +		mode != RTE_FDIR_MODE_PERFECT)
> +		return -ENOSYS;
> +
>  	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
>  	if (err)
>  		return err;
> @@ -488,7 +589,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
>   */
>  static int
>  ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
> -		union ixgbe_atr_input *input)
> +		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
>  {
>  	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
>  	input->formatted.flex_bytes = (uint16_t)(
> @@ -521,8 +622,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
>  		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
>  		break;
>  	default:
> -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> -		return -EINVAL;
> +		break;
>  	}
> 
>  	switch (fdir_filter->input.flow_type) {
> @@ -558,8 +658,23 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
>  			   sizeof(input->formatted.dst_ip));
>  		break;
>  	default:
> -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> -		return -EINVAL;
> +		break;
> +	}
> +
> +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> +		rte_memcpy(
> +			input->formatted.inner_mac,
> +			fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
> +			sizeof(input->formatted.inner_mac));
> +	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> +		rte_memcpy(
> +			input->formatted.inner_mac,
> +			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
> +			sizeof(input->formatted.inner_mac));
> +		input->formatted.tunnel_type =
> +			fdir_filter->input.flow.tunnel_flow.tunnel_type;
> +		input->formatted.tni_vni =
> +			fdir_filter->input.flow.tunnel_flow.tunnel_id;
>  	}
> 
>  	return 0;
> @@ -743,20 +858,51 @@ atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
>  static int
>  fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
>  			union ixgbe_atr_input *input, uint8_t queue,
> -			uint32_t fdircmd, uint32_t fdirhash)
> +			uint32_t fdircmd, uint32_t fdirhash,
> +			enum rte_fdir_mode mode)
>  {
>  	uint32_t fdirport, fdirvlan;
> +	u32 addr_low, addr_high;
> +	u32 tunnel_type = 0;
>  	int err = 0;
> 
> -	/* record the IPv4 address (big-endian) */
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
> -
> -	/* record source and destination port (little-endian)*/
> -	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> -	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> -	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> -	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> +	if (mode == RTE_FDIR_MODE_PERFECT) {
> +		/* record the IPv4 address (big-endian) */
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA,
> +				input->formatted.src_ip[0]);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA,
> +				input->formatted.dst_ip[0]);
> +
> +		/* record source and destination port (little-endian)*/
> +		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> +		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> +		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> +		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> +	} else {
else if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN || mode == RTE_FDIR_MODE_PERFECT_TUNNEL)

Again, to avoid breakage with future expansions of RTE_FDIR_MODE_*.
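
Spelled out, the shape being asked for is roughly the following (illustrative
only; the error handling is just one possibility):

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* record IPv4 addresses and L4 ports, as the code already does */
	} else if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
		   mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* record inner MAC, tunnel type and TNI/VNI, as in this patch */
	} else {
		/* a mode added later ends up here instead of silently taking
		 * the mac-vlan/tunnel branch */
		PMD_DRV_LOG(ERR, "unsupported fdir mode %d", mode);
		return -ENOTSUP;
	}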

> +		/* for mac vlan and tunnel modes */
> +		addr_low = ((u32)input->formatted.inner_mac[0] |
> +			    ((u32)input->formatted.inner_mac[1] << 8) |
> +			    ((u32)input->formatted.inner_mac[2] << 16) |
> +			    ((u32)input->formatted.inner_mac[3] << 24));
> +		addr_high = ((u32)input->formatted.inner_mac[4] |
> +			     ((u32)input->formatted.inner_mac[5] << 8));
> +
> +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
> +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
> +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
> +		} else {
> +			/* tunnel mode */
> +			if (input->formatted.tunnel_type !=
> +				RTE_FDIR_TUNNEL_TYPE_NVGRE)
> +				tunnel_type = 0x80000000;
> +			tunnel_type |= addr_high;
> +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
> +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
> +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
> +					input->formatted.tni_vni);
> +		}
> +	}
> 
>  	/* record vlan (little-endian) and flex_bytes(big-endian) */
>  	fdirvlan = input->formatted.flex_bytes;
> @@ -917,12 +1063,13 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
>  		return -ENOTSUP;
>  	}
> 
> -	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
> +	if (dev->data->dev_conf.fdir_conf.mode >= RTE_FDIR_MODE_PERFECT)
>  		is_perfect = TRUE;
> 
>  	memset(&input, 0, sizeof(input));
> 
> -	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
> +	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
> +					dev->data->dev_conf.fdir_conf.mode);
>  	if (err)
>  		return err;
> 
> @@ -966,7 +1113,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
> 
>  	if (is_perfect) {
>  		err = fdir_write_perfect_filter_82599(hw, &input, queue,
> -				fdircmd_flags, fdirhash);
> +				fdircmd_flags, fdirhash,
> +				dev->data->dev_conf.fdir_conf.mode);
>  	} else {
>  		err = fdir_add_signature_filter_82599(hw, &input, queue,
>  				fdircmd_flags, fdirhash);
> @@ -1018,7 +1166,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
>  	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
>  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
>  			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
> -	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
> +	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT)
>  		fdir_info->guarant_spc = max_num;
>  	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
>  		fdir_info->guarant_spc = max_num * 4;
> @@ -1032,11 +1180,20 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
>  			fdir_info->mask.ipv6_mask.dst_ip);
>  	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
>  	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
> +	fdir_info->mask.mac_addr_mask = info->mask.mac_addr_mask;
> +	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
> +	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
>  	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
> -	fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> +
> +	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> +		|| fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> +		fdir_info->flow_types_mask[0] = 0;
> +	else
> +		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> +
>  	fdir_info->flex_payload_unit = sizeof(uint16_t);
>  	fdir_info->max_flex_payload_segment_num = 1;
> -	fdir_info->flex_payload_limit = 62;
> +	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
>  	fdir_info->flex_conf.nb_payloads = 1;
>  	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
>  	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
> @@ -1095,7 +1252,7 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
>  	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
>  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
>  			(reg & FDIRCTRL_PBALLOC_MASK)));
> -	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
> +	if (dev->data->dev_conf.fdir_conf.mode >= RTE_FDIR_MODE_PERFECT)
>  			fdir_stats->guarant_cnt = max_num - fdir_stats->free;
>  	else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
>  		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
> --
> 1.9.3
  
Wenzhuo Lu Oct. 21, 2015, 1:48 a.m. UTC | #2
Hi Konstantin,

> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Tuesday, October 20, 2015 9:56 PM
> To: Lu, Wenzhuo; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new
> modes' config
> 
> Hi Wenzhuo,
> Few questions/comments from me, see below.
> Thanks
> Konstantin
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wenzhuo Lu
> > Sent: Tuesday, September 29, 2015 6:31 AM
> > To: dev@dpdk.org
> > Subject: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new
> > modes' config
> >
> > Implement the new CLIs for fdir mac vlan and tunnel modes, including
> > flow_director_filter and flow_director_mask. Set the mask of fdir.
> > Add, delete or update the entities of filter.
> >
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.h |   3 +
> >  drivers/net/ixgbe/ixgbe_fdir.c   | 241
> ++++++++++++++++++++++++++++++++-------
> >  2 files changed, 202 insertions(+), 42 deletions(-)
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h
> > b/drivers/net/ixgbe/ixgbe_ethdev.h
> > index c3d4f4f..9cc45a0 100644
> > --- a/drivers/net/ixgbe/ixgbe_ethdev.h
> > +++ b/drivers/net/ixgbe/ixgbe_ethdev.h
> > @@ -133,6 +133,9 @@ struct ixgbe_hw_fdir_mask {
> >  	uint16_t src_port_mask;
> >  	uint16_t dst_port_mask;
> >  	uint16_t flex_bytes_mask;
> > +	uint8_t  mac_addr_mask;
> > +	uint32_t tunnel_id_mask;
> > +	uint8_t  tunnel_type_mask;
> >  };
> >
> >  struct ixgbe_hw_fdir_info {
> > diff --git a/drivers/net/ixgbe/ixgbe_fdir.c
> > b/drivers/net/ixgbe/ixgbe_fdir.c index 5c8b833..87e7081 100644
> > --- a/drivers/net/ixgbe/ixgbe_fdir.c
> > +++ b/drivers/net/ixgbe/ixgbe_fdir.c
> > @@ -105,6 +105,8 @@
> >  	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\  } while (0)
> >
> > +#define DEFAULT_VXLAN_PORT 4789
> > +
> >  static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t
> > fdirhash);  static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
> >  		const struct rte_eth_fdir_masks *input_mask); @@ -113,7
> +115,8 @@
> > static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,  static
> > int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);  static
> > int ixgbe_fdir_filter_to_atr_input(
> >  		const struct rte_eth_fdir_filter *fdir_filter,
> > -		union ixgbe_atr_input *input);
> > +		union ixgbe_atr_input *input,
> > +		enum rte_fdir_mode mode);
> >  static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input
> *atr_input,
> >  				 uint32_t key);
> >  static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input
> > *input, @@ -122,7 +125,8 @@ static uint32_t
> atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
> >  		enum rte_fdir_pballoc_type pballoc);  static int
> > fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
> >  			union ixgbe_atr_input *input, uint8_t queue,
> > -			uint32_t fdircmd, uint32_t fdirhash);
> > +			uint32_t fdircmd, uint32_t fdirhash,
> > +			enum rte_fdir_mode mode);
> >  static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
> >  		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
> >  		uint32_t fdirhash);
> > @@ -243,9 +247,15 @@ configure_fdir_flags(const struct rte_fdir_conf
> *conf, uint32_t *fdirctrl)
> >  	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t))
> <<
> >  		     IXGBE_FDIRCTRL_FLEX_SHIFT;
> >
> > -	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
> > +	if (conf->mode >= RTE_FDIR_MODE_PERFECT) {
> 
> I think better  if (conf->mode >= RTE_FDIR_MODE_PERFECT  && conf->mode
> <= RTE_FDIR_MODE_PERFECT_TUNNEL) To make sure that future expansion
> of RTE_FDIR_MODE_* wouldn't break that code.
Yes, you're right. I'll change it.

> 
> >  		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
> >  		*fdirctrl |= (conf->drop_queue <<
> IXGBE_FDIRCTRL_DROP_Q_SHIFT);
> > +		if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> > +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
> > +					<<
> IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
> > +		else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
> > +					<<
> IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
> >  	}
> >
> >  	return 0;
> > @@ -294,8 +304,18 @@ fdir_set_input_mask_82599(struct rte_eth_dev
> *dev,
> >  	uint16_t dst_ipv6m = 0;
> >  	uint16_t src_ipv6m = 0;
> >
> > +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> > +
> >  	PMD_INIT_FUNC_TRACE();
> >
> > +	/* set the default UDP port for VxLAN */
> > +	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
> 
> Hmm, why is that done by default?
> As I understand it is x550 specific register and is not present in older HW
> (82599), no?
Yes, the older HW doesn't support VxLAN. I'll correct it.

> 
> > +
> > +	/* some bits must be set for mac vlan or tunnel mode */
> > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > +		fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
> > +
> >  	/*
> >  	 * Program the relevant mask registers.  If src/dst_port or
> src/dst_addr
> >  	 * are zero, then assume a full mask for that field. Also assume
> > that @@ -323,26 +343,36 @@ fdir_set_input_mask_82599(struct
> > rte_eth_dev *dev,
> >
> >  	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> >
> > -	/* store the TCP/UDP port masks, bit reversed from port layout */
> > -	fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
> > -					 input_mask->src_port_mask);
> > -
> > -	/* write all the same so that UDP, TCP and SCTP use the same mask
> */
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> > -	info->mask.src_port_mask = input_mask->src_port_mask;
> > -	info->mask.dst_port_mask = input_mask->dst_port_mask;
> > +	if (mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
> > +		mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > +		/*
> > +		 * store the TCP/UDP port masks,
> > +		 * bit reversed from port layout
> > +		 */
> > +		fdirtcpm = reverse_fdir_bitmasks(input_mask-
> >dst_port_mask,
> > +						 input_mask->src_port_mask);
> >
> > -	/* Store source and destination IPv4 masks (big-endian) */
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask-
> >ipv4_mask.src_ip));
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask-
> >ipv4_mask.dst_ip));
> > -	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> > -	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> > +		/*
> > +		 * write all the same so that UDP,
> > +		 * TCP and SCTP use the same mask
> > +		 */
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> > +		info->mask.src_port_mask = input_mask->src_port_mask;
> > +		info->mask.dst_port_mask = input_mask->dst_port_mask;
> > +
> > +		/* Store source and destination IPv4 masks (big-endian) */
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M,
> > +				~(input_mask->ipv4_mask.src_ip));
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M,
> > +				~(input_mask->ipv4_mask.dst_ip));
> > +		info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> > +		info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> > +	}
> >
> > -	if (dev->data->dev_conf.fdir_conf.mode ==
> RTE_FDIR_MODE_SIGNATURE) {
> > +	if (mode == RTE_FDIR_MODE_SIGNATURE) {
> >  		/*
> > -		 * IPv6 mask is only meaningful in signature mode
> >  		 * Store source and destination IPv6 masks (bit reversed)
> >  		 */
> >  		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip,
> src_ipv6m); @@
> > -354,6 +384,69 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
> >  		info->mask.dst_ipv6_mask = dst_ipv6m;
> >  	}
> >
> > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > +		fdiripv6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
> > +		fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
> > +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> > +			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
> > +					IXGBE_FDIRIP6M_TNI_VNI;
> > +
> > +		switch (input_mask->mac_addr_mask & 0xFF) {
> > +		case 0x00:
> > +			/* Mask inner MAC */
> > +			fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
> > +			break;
> > +		case 0xFF:
> > +			break;
> > +		default:
> > +			PMD_INIT_LOG(ERR, "invalid mac_addr_mask");
> > +			return -EINVAL;
> 
> I thought it is possible to mask any byte in MAC...
> Am I missing something here?
This just follows the behavior of the kernel driver, which only supports 0x00 and 0xFF.

> 
> > +		}
> > +		info->mask.mac_addr_mask = input_mask->mac_addr_mask;
> > +
> > +		if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > +			switch (input_mask->tunnel_type_mask) {
> > +			case 0:
> > +				/* Mask turnnel type */
> > +				fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
> > +				break;
> > +			case 1:
> > +				break;
> > +			default:
> > +				PMD_INIT_LOG(ERR, "invalid
> tunnel_type_mask");
> > +				return -EINVAL;
> > +			}
> > +			info->mask.tunnel_type_mask =
> > +				input_mask->tunnel_type_mask;
> > +
> > +			switch (input_mask->tunnel_id_mask & 0xFFFFFFFF) {
> > +			case 0x0:
> > +				/* Mask vxlan id */
> > +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
> > +				break;
> > +			case 0x00FFFFFF:
> > +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
> > +				break;
> > +			case 0xFFFFFFFF:
> > +				break;
> > +			default:
> > +				PMD_INIT_LOG(ERR, "invalid
> tunnel_id_mask");
> > +				return -EINVAL;
> > +			}
> > +			info->mask.tunnel_id_mask =
> > +				input_mask->tunnel_id_mask;
> > +		}
> > +
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
> > +
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
> > +	}
> 
> Probably worth to put into a separate function: fdir_set_input_mask_x550()
> or something.
Oh, it seems this function is too long and complex. I'll split it.

> 
> > +
> >  	return IXGBE_SUCCESS;
> >  }
> >
> > @@ -431,6 +524,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> >  	int err;
> >  	uint32_t fdirctrl, pbsize;
> >  	int i;
> > +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> >
> >  	PMD_INIT_FUNC_TRACE();
> >
> > @@ -440,6 +534,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> >  		hw->mac.type != ixgbe_mac_X550EM_x)
> >  		return -ENOSYS;
> >
> > +	/* x550 supports mac-vlan and tunnel mode but other NICs not */
> > +	if (hw->mac.type != ixgbe_mac_X550 &&
> > +		hw->mac.type != ixgbe_mac_X550EM_x &&
> > +		mode != RTE_FDIR_MODE_SIGNATURE &&
> > +		mode != RTE_FDIR_MODE_PERFECT)
> > +		return -ENOSYS;
> > +
> >  	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
> >  	if (err)
> >  		return err;
> > @@ -488,7 +589,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> >   */
> >  static int
> >  ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
> > -		union ixgbe_atr_input *input)
> > +		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
> >  {
> >  	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
> >  	input->formatted.flex_bytes = (uint16_t)( @@ -521,8 +622,7 @@
> > ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
> >  		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
> >  		break;
> >  	default:
> > -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> > -		return -EINVAL;
> > +		break;
> >  	}
> >
> >  	switch (fdir_filter->input.flow_type) { @@ -558,8 +658,23 @@
> > ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
> >  			   sizeof(input->formatted.dst_ip));
> >  		break;
> >  	default:
> > -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> > -		return -EINVAL;
> > +		break;
> > +	}
> > +
> > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> > +		rte_memcpy(
> > +			input->formatted.inner_mac,
> > +			fdir_filter-
> >input.flow.mac_vlan_flow.mac_addr.addr_bytes,
> > +			sizeof(input->formatted.inner_mac));
> > +	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > +		rte_memcpy(
> > +			input->formatted.inner_mac,
> > +			fdir_filter-
> >input.flow.tunnel_flow.mac_addr.addr_bytes,
> > +			sizeof(input->formatted.inner_mac));
> > +		input->formatted.tunnel_type =
> > +			fdir_filter->input.flow.tunnel_flow.tunnel_type;
> > +		input->formatted.tni_vni =
> > +			fdir_filter->input.flow.tunnel_flow.tunnel_id;
> >  	}
> >
> >  	return 0;
> > @@ -743,20 +858,51 @@ atr_compute_sig_hash_82599(union
> ixgbe_atr_input
> > *input,  static int  fdir_write_perfect_filter_82599(struct ixgbe_hw
> > *hw,
> >  			union ixgbe_atr_input *input, uint8_t queue,
> > -			uint32_t fdircmd, uint32_t fdirhash)
> > +			uint32_t fdircmd, uint32_t fdirhash,
> > +			enum rte_fdir_mode mode)
> >  {
> >  	uint32_t fdirport, fdirvlan;
> > +	u32 addr_low, addr_high;
> > +	u32 tunnel_type = 0;
> >  	int err = 0;
> >
> > -	/* record the IPv4 address (big-endian) */
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
> > -
> > -	/* record source and destination port (little-endian)*/
> > -	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> > -	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> > -	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> > +	if (mode == RTE_FDIR_MODE_PERFECT) {
> > +		/* record the IPv4 address (big-endian) */
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA,
> > +				input->formatted.src_ip[0]);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA,
> > +				input->formatted.dst_ip[0]);
> > +
> > +		/* record source and destination port (little-endian)*/
> > +		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> > +		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> > +		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> > +	} else {
> else if (mode == MAC_VLAN || mode == TUNNEL)
> 
> Again, to avoid breakage with future expansions.
Agree, I'll change it.

> 
> > +		/* for mac vlan and tunnel modes */
> > +		addr_low = ((u32)input->formatted.inner_mac[0] |
> > +			    ((u32)input->formatted.inner_mac[1] << 8) |
> > +			    ((u32)input->formatted.inner_mac[2] << 16) |
> > +			    ((u32)input->formatted.inner_mac[3] << 24));
> > +		addr_high = ((u32)input->formatted.inner_mac[4] |
> > +			     ((u32)input->formatted.inner_mac[5] << 8));
> > +
> > +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0),
> addr_low);
> > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1),
> addr_high);
> > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
> > +		} else {
> > +			/* tunnel mode */
> > +			if (input->formatted.tunnel_type !=
> > +				RTE_FDIR_TUNNEL_TYPE_NVGRE)
> > +				tunnel_type = 0x80000000;
> > +			tunnel_type |= addr_high;
> > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0),
> addr_low);
> > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1),
> tunnel_type);
> > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
> > +					input->formatted.tni_vni);
> > +		}
> > +	}
> >
> >  	/* record vlan (little-endian) and flex_bytes(big-endian) */
> >  	fdirvlan = input->formatted.flex_bytes; @@ -917,12 +1063,13 @@
> > ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
> >  		return -ENOTSUP;
> >  	}
> >
> > -	if (dev->data->dev_conf.fdir_conf.mode ==
> RTE_FDIR_MODE_PERFECT)
> > +	if (dev->data->dev_conf.fdir_conf.mode >=
> RTE_FDIR_MODE_PERFECT)
> >  		is_perfect = TRUE;
> >
> >  	memset(&input, 0, sizeof(input));
> >
> > -	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
> > +	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
> > +					dev->data->dev_conf.fdir_conf.mode);
> >  	if (err)
> >  		return err;
> >
> > @@ -966,7 +1113,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev
> > *dev,
> >
> >  	if (is_perfect) {
> >  		err = fdir_write_perfect_filter_82599(hw, &input, queue,
> > -				fdircmd_flags, fdirhash);
> > +				fdircmd_flags, fdirhash,
> > +				dev->data->dev_conf.fdir_conf.mode);
> >  	} else {
> >  		err = fdir_add_signature_filter_82599(hw, &input, queue,
> >  				fdircmd_flags, fdirhash);
> > @@ -1018,7 +1166,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev,
> struct rte_eth_fdir_info *fdir_info
> >  	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
> >  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
> >  			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
> > -	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
> > +	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT)
> >  		fdir_info->guarant_spc = max_num;
> >  	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
> >  		fdir_info->guarant_spc = max_num * 4; @@ -1032,11
> +1180,20 @@
> > ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info
> *fdir_info
> >  			fdir_info->mask.ipv6_mask.dst_ip);
> >  	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
> >  	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
> > +	fdir_info->mask.mac_addr_mask = info->mask.mac_addr_mask;
> > +	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
> > +	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
> >  	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
> > -	fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> > +
> > +	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > +		|| fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > +		fdir_info->flow_types_mask[0] = 0;
> > +	else
> > +		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> > +
> >  	fdir_info->flex_payload_unit = sizeof(uint16_t);
> >  	fdir_info->max_flex_payload_segment_num = 1;
> > -	fdir_info->flex_payload_limit = 62;
> > +	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
> >  	fdir_info->flex_conf.nb_payloads = 1;
> >  	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
> >  	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset; @@ -1095,7
> > +1252,7 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct
> rte_eth_fdir_stats *fdir_st
> >  	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
> >  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
> >  			(reg & FDIRCTRL_PBALLOC_MASK)));
> > -	if (dev->data->dev_conf.fdir_conf.mode ==
> RTE_FDIR_MODE_PERFECT)
> > +	if (dev->data->dev_conf.fdir_conf.mode >=
> RTE_FDIR_MODE_PERFECT)
> >  			fdir_stats->guarant_cnt = max_num - fdir_stats->free;
> >  	else if (dev->data->dev_conf.fdir_conf.mode ==
> RTE_FDIR_MODE_SIGNATURE)
> >  		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
> > --
> > 1.9.3
  
Ananyev, Konstantin Oct. 21, 2015, 10:19 a.m. UTC | #3
> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Wednesday, October 21, 2015 2:48 AM
> To: Ananyev, Konstantin; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new modes' config
> 
> Hi Konstantin,
> 
> > -----Original Message-----
> > From: Ananyev, Konstantin
> > Sent: Tuesday, October 20, 2015 9:56 PM
> > To: Lu, Wenzhuo; dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new
> > modes' config
> >
> > Hi Wenzhuo,
> > Few questions/comments from me, see below.
> > Thanks
> > Konstantin
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wenzhuo Lu
> > > Sent: Tuesday, September 29, 2015 6:31 AM
> > > To: dev@dpdk.org
> > > Subject: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new
> > > modes' config
> > >
> > > Implement the new CLIs for fdir mac vlan and tunnel modes, including
> > > flow_director_filter and flow_director_mask. Set the mask of fdir.
> > > Add, delete or update the entities of filter.
> > >
> > > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > > ---
> > >  drivers/net/ixgbe/ixgbe_ethdev.h |   3 +
> > >  drivers/net/ixgbe/ixgbe_fdir.c   | 241
> > ++++++++++++++++++++++++++++++++-------
> > >  2 files changed, 202 insertions(+), 42 deletions(-)
> > >
> > > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h
> > > b/drivers/net/ixgbe/ixgbe_ethdev.h
> > > index c3d4f4f..9cc45a0 100644
> > > --- a/drivers/net/ixgbe/ixgbe_ethdev.h
> > > +++ b/drivers/net/ixgbe/ixgbe_ethdev.h
> > > @@ -133,6 +133,9 @@ struct ixgbe_hw_fdir_mask {
> > >  	uint16_t src_port_mask;
> > >  	uint16_t dst_port_mask;
> > >  	uint16_t flex_bytes_mask;
> > > +	uint8_t  mac_addr_mask;
> > > +	uint32_t tunnel_id_mask;
> > > +	uint8_t  tunnel_type_mask;
> > >  };
> > >
> > >  struct ixgbe_hw_fdir_info {
> > > diff --git a/drivers/net/ixgbe/ixgbe_fdir.c
> > > b/drivers/net/ixgbe/ixgbe_fdir.c index 5c8b833..87e7081 100644
> > > --- a/drivers/net/ixgbe/ixgbe_fdir.c
> > > +++ b/drivers/net/ixgbe/ixgbe_fdir.c
> > > @@ -105,6 +105,8 @@
> > >  	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\  } while (0)
> > >
> > > +#define DEFAULT_VXLAN_PORT 4789
> > > +
> > >  static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t
> > > fdirhash);  static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
> > >  		const struct rte_eth_fdir_masks *input_mask); @@ -113,7
> > +115,8 @@
> > > static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,  static
> > > int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);  static
> > > int ixgbe_fdir_filter_to_atr_input(
> > >  		const struct rte_eth_fdir_filter *fdir_filter,
> > > -		union ixgbe_atr_input *input);
> > > +		union ixgbe_atr_input *input,
> > > +		enum rte_fdir_mode mode);
> > >  static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input
> > *atr_input,
> > >  				 uint32_t key);
> > >  static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input
> > > *input, @@ -122,7 +125,8 @@ static uint32_t
> > atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
> > >  		enum rte_fdir_pballoc_type pballoc);  static int
> > > fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
> > >  			union ixgbe_atr_input *input, uint8_t queue,
> > > -			uint32_t fdircmd, uint32_t fdirhash);
> > > +			uint32_t fdircmd, uint32_t fdirhash,
> > > +			enum rte_fdir_mode mode);
> > >  static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
> > >  		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
> > >  		uint32_t fdirhash);
> > > @@ -243,9 +247,15 @@ configure_fdir_flags(const struct rte_fdir_conf
> > *conf, uint32_t *fdirctrl)
> > >  	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t))
> > <<
> > >  		     IXGBE_FDIRCTRL_FLEX_SHIFT;
> > >
> > > -	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
> > > +	if (conf->mode >= RTE_FDIR_MODE_PERFECT) {
> >
> > I think better  if (conf->mode >= RTE_FDIR_MODE_PERFECT  && conf->mode
> > <= RTE_FDIR_MODE_PERFECT_TUNNEL) To make sure that future expansion
> > of RTE_FDIR_MODE_* wouldn't break that code.
> Yes, you're right. I'll change it.
> 
> >
> > >  		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
> > >  		*fdirctrl |= (conf->drop_queue <<
> > IXGBE_FDIRCTRL_DROP_Q_SHIFT);
> > > +		if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> > > +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
> > > +					<<
> > IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
> > > +		else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > > +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
> > > +					<<
> > IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
> > >  	}
> > >
> > >  	return 0;
> > > @@ -294,8 +304,18 @@ fdir_set_input_mask_82599(struct rte_eth_dev
> > *dev,
> > >  	uint16_t dst_ipv6m = 0;
> > >  	uint16_t src_ipv6m = 0;
> > >
> > > +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> > > +
> > >  	PMD_INIT_FUNC_TRACE();
> > >
> > > +	/* set the default UDP port for VxLAN */
> > > +	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
> >
> > Hmm, why is that done by default?
> > As I understand it is x550 specific register and is not present in older HW
> > (82599), no?
> Yes, the older HW doesn't support VxLAN. I'll correct it.
> 
> >
> > > +
> > > +	/* some bits must be set for mac vlan or tunnel mode */
> > > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > > +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > > +		fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
> > > +
> > >  	/*
> > >  	 * Program the relevant mask registers.  If src/dst_port or
> > src/dst_addr
> > >  	 * are zero, then assume a full mask for that field. Also assume
> > > that @@ -323,26 +343,36 @@ fdir_set_input_mask_82599(struct
> > > rte_eth_dev *dev,
> > >
> > >  	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> > >
> > > -	/* store the TCP/UDP port masks, bit reversed from port layout */
> > > -	fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
> > > -					 input_mask->src_port_mask);
> > > -
> > > -	/* write all the same so that UDP, TCP and SCTP use the same mask
> > */
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> > > -	info->mask.src_port_mask = input_mask->src_port_mask;
> > > -	info->mask.dst_port_mask = input_mask->dst_port_mask;
> > > +	if (mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
> > > +		mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > +		/*
> > > +		 * store the TCP/UDP port masks,
> > > +		 * bit reversed from port layout
> > > +		 */
> > > +		fdirtcpm = reverse_fdir_bitmasks(input_mask-
> > >dst_port_mask,
> > > +						 input_mask->src_port_mask);
> > >
> > > -	/* Store source and destination IPv4 masks (big-endian) */
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask-
> > >ipv4_mask.src_ip));
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask-
> > >ipv4_mask.dst_ip));
> > > -	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> > > -	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> > > +		/*
> > > +		 * write all the same so that UDP,
> > > +		 * TCP and SCTP use the same mask
> > > +		 */
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> > > +		info->mask.src_port_mask = input_mask->src_port_mask;
> > > +		info->mask.dst_port_mask = input_mask->dst_port_mask;
> > > +
> > > +		/* Store source and destination IPv4 masks (big-endian) */
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M,
> > > +				~(input_mask->ipv4_mask.src_ip));
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M,
> > > +				~(input_mask->ipv4_mask.dst_ip));
> > > +		info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> > > +		info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> > > +	}
> > >
> > > -	if (dev->data->dev_conf.fdir_conf.mode ==
> > RTE_FDIR_MODE_SIGNATURE) {
> > > +	if (mode == RTE_FDIR_MODE_SIGNATURE) {
> > >  		/*
> > > -		 * IPv6 mask is only meaningful in signature mode
> > >  		 * Store source and destination IPv6 masks (bit reversed)
> > >  		 */
> > >  		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip,
> > src_ipv6m); @@
> > > -354,6 +384,69 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
> > >  		info->mask.dst_ipv6_mask = dst_ipv6m;
> > >  	}
> > >
> > > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > > +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > +		fdiripv6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
> > > +		fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
> > > +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> > > +			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
> > > +					IXGBE_FDIRIP6M_TNI_VNI;
> > > +
> > > +		switch (input_mask->mac_addr_mask & 0xFF) {
> > > +		case 0x00:
> > > +			/* Mask inner MAC */
> > > +			fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
> > > +			break;
> > > +		case 0xFF:
> > > +			break;
> > > +		default:
> > > +			PMD_INIT_LOG(ERR, "invalid mac_addr_mask");
> > > +			return -EINVAL;
> >
> > I thought it is possible to mask any byte in MAC...
> > Am I missing something here?
> Just leverage the behavior of kernel driver. It only supports 0x00 and 0xFF.

OK, this is probably a case where we don't need to follow the kernel :)
My take: let's support all masks properly.
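
Purely as a hypothetical sketch of what accepting any mask could look like,
assuming the API mask has one bit per inner-MAC byte and that the hardware mask
field can really be programmed per byte (neither assumption is confirmed in
this thread, and IXGBE_FDIRIP6M_INNER_MAC_SHIFT below is an invented name, not
a macro from the driver):

	/* Hypothetical: bit i of mac_addr_mask set => byte i of the inner
	 * MAC is compared; bit clear => that byte is masked out. */
	uint32_t mac_mask = (~input_mask->mac_addr_mask) & 0x3F;

	fdiripv6m &= ~IXGBE_FDIRIP6M_INNER_MAC;
	fdiripv6m |= mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT;
	info->mask.mac_addr_mask = input_mask->mac_addr_mask;

Whether the silicon supports that granularity would have to be checked against
the x550 datasheet before dropping the 0x00/0xFF restriction.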

> 
> >
> > > +		}
> > > +		info->mask.mac_addr_mask = input_mask->mac_addr_mask;
> > > +
> > > +		if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > +			switch (input_mask->tunnel_type_mask) {
> > > +			case 0:
> > > +				/* Mask turnnel type */
> > > +				fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
> > > +				break;
> > > +			case 1:
> > > +				break;
> > > +			default:
> > > +				PMD_INIT_LOG(ERR, "invalid
> > tunnel_type_mask");
> > > +				return -EINVAL;
> > > +			}
> > > +			info->mask.tunnel_type_mask =
> > > +				input_mask->tunnel_type_mask;
> > > +
> > > +			switch (input_mask->tunnel_id_mask & 0xFFFFFFFF) {
> > > +			case 0x0:
> > > +				/* Mask vxlan id */
> > > +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
> > > +				break;
> > > +			case 0x00FFFFFF:
> > > +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
> > > +				break;
> > > +			case 0xFFFFFFFF:
> > > +				break;
> > > +			default:
> > > +				PMD_INIT_LOG(ERR, "invalid
> > tunnel_id_mask");
> > > +				return -EINVAL;
> > > +			}
> > > +			info->mask.tunnel_id_mask =
> > > +				input_mask->tunnel_id_mask;
> > > +		}
> > > +
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
> > > +
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
> > > +	}
> >
> > Probably worth to put into a separate function: fdir_set_input_mask_x550()
> > or something.
> O, seems this function is too long and complex. I'll split it.
> 
> >
> > > +
> > >  	return IXGBE_SUCCESS;
> > >  }
> > >
> > > @@ -431,6 +524,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> > >  	int err;
> > >  	uint32_t fdirctrl, pbsize;
> > >  	int i;
> > > +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> > >
> > >  	PMD_INIT_FUNC_TRACE();
> > >
> > > @@ -440,6 +534,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> > >  		hw->mac.type != ixgbe_mac_X550EM_x)
> > >  		return -ENOSYS;
> > >
> > > +	/* x550 supports mac-vlan and tunnel mode but other NICs not */
> > > +	if (hw->mac.type != ixgbe_mac_X550 &&
> > > +		hw->mac.type != ixgbe_mac_X550EM_x &&
> > > +		mode != RTE_FDIR_MODE_SIGNATURE &&
> > > +		mode != RTE_FDIR_MODE_PERFECT)
> > > +		return -ENOSYS;
> > > +
> > >  	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
> > >  	if (err)
> > >  		return err;
> > > @@ -488,7 +589,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> > >   */
> > >  static int
> > >  ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
> > > -		union ixgbe_atr_input *input)
> > > +		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
> > >  {
> > >  	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
> > >  	input->formatted.flex_bytes = (uint16_t)( @@ -521,8 +622,7 @@
> > > ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
> > >  		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
> > >  		break;
> > >  	default:
> > > -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> > > -		return -EINVAL;
> > > +		break;
> > >  	}
> > >
> > >  	switch (fdir_filter->input.flow_type) { @@ -558,8 +658,23 @@
> > > ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
> > >  			   sizeof(input->formatted.dst_ip));
> > >  		break;
> > >  	default:
> > > -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> > > -		return -EINVAL;
> > > +		break;
> > > +	}
> > > +
> > > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> > > +		rte_memcpy(
> > > +			input->formatted.inner_mac,
> > > +			fdir_filter-
> > >input.flow.mac_vlan_flow.mac_addr.addr_bytes,
> > > +			sizeof(input->formatted.inner_mac));
> > > +	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > +		rte_memcpy(
> > > +			input->formatted.inner_mac,
> > > +			fdir_filter-
> > >input.flow.tunnel_flow.mac_addr.addr_bytes,
> > > +			sizeof(input->formatted.inner_mac));
> > > +		input->formatted.tunnel_type =
> > > +			fdir_filter->input.flow.tunnel_flow.tunnel_type;
> > > +		input->formatted.tni_vni =
> > > +			fdir_filter->input.flow.tunnel_flow.tunnel_id;
> > >  	}
> > >
> > >  	return 0;
> > > @@ -743,20 +858,51 @@ atr_compute_sig_hash_82599(union
> > ixgbe_atr_input
> > > *input,  static int  fdir_write_perfect_filter_82599(struct ixgbe_hw
> > > *hw,
> > >  			union ixgbe_atr_input *input, uint8_t queue,
> > > -			uint32_t fdircmd, uint32_t fdirhash)
> > > +			uint32_t fdircmd, uint32_t fdirhash,
> > > +			enum rte_fdir_mode mode)
> > >  {
> > >  	uint32_t fdirport, fdirvlan;
> > > +	u32 addr_low, addr_high;
> > > +	u32 tunnel_type = 0;
> > >  	int err = 0;
> > >
> > > -	/* record the IPv4 address (big-endian) */
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
> > > -
> > > -	/* record source and destination port (little-endian)*/
> > > -	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> > > -	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> > > -	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> > > +	if (mode == RTE_FDIR_MODE_PERFECT) {
> > > +		/* record the IPv4 address (big-endian) */
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA,
> > > +				input->formatted.src_ip[0]);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA,
> > > +				input->formatted.dst_ip[0]);
> > > +
> > > +		/* record source and destination port (little-endian)*/
> > > +		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> > > +		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> > > +		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> > > +	} else {
> > else if (mode == MAC_VLAN || mode == TUNNEL)
> >
> > Again, to avoid breakage with future expansions.
> Agree, I'll change it.
> 
> >
> > > +		/* for mac vlan and tunnel modes */
> > > +		addr_low = ((u32)input->formatted.inner_mac[0] |
> > > +			    ((u32)input->formatted.inner_mac[1] << 8) |
> > > +			    ((u32)input->formatted.inner_mac[2] << 16) |
> > > +			    ((u32)input->formatted.inner_mac[3] << 24));
> > > +		addr_high = ((u32)input->formatted.inner_mac[4] |
> > > +			     ((u32)input->formatted.inner_mac[5] << 8));
> > > +
> > > +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0),
> > addr_low);
> > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1),
> > addr_high);
> > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
> > > +		} else {
> > > +			/* tunnel mode */
> > > +			if (input->formatted.tunnel_type !=
> > > +				RTE_FDIR_TUNNEL_TYPE_NVGRE)
> > > +				tunnel_type = 0x80000000;
> > > +			tunnel_type |= addr_high;
> > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0),
> > addr_low);
> > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1),
> > tunnel_type);
> > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
> > > +					input->formatted.tni_vni);
> > > +		}
> > > +	}
> > >
> > >  	/* record vlan (little-endian) and flex_bytes(big-endian) */
> > >  	fdirvlan = input->formatted.flex_bytes; @@ -917,12 +1063,13 @@
> > > ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
> > >  		return -ENOTSUP;
> > >  	}
> > >
> > > -	if (dev->data->dev_conf.fdir_conf.mode ==
> > RTE_FDIR_MODE_PERFECT)
> > > +	if (dev->data->dev_conf.fdir_conf.mode >=
> > RTE_FDIR_MODE_PERFECT)
> > >  		is_perfect = TRUE;
> > >
> > >  	memset(&input, 0, sizeof(input));
> > >
> > > -	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
> > > +	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
> > > +					dev->data->dev_conf.fdir_conf.mode);
> > >  	if (err)
> > >  		return err;
> > >
> > > @@ -966,7 +1113,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev
> > > *dev,
> > >
> > >  	if (is_perfect) {
> > >  		err = fdir_write_perfect_filter_82599(hw, &input, queue,
> > > -				fdircmd_flags, fdirhash);
> > > +				fdircmd_flags, fdirhash,
> > > +				dev->data->dev_conf.fdir_conf.mode);
> > >  	} else {
> > >  		err = fdir_add_signature_filter_82599(hw, &input, queue,
> > >  				fdircmd_flags, fdirhash);
> > > @@ -1018,7 +1166,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev,
> > struct rte_eth_fdir_info *fdir_info
> > >  	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
> > >  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
> > >  			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
> > > -	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
> > > +	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT)
> > >  		fdir_info->guarant_spc = max_num;
> > >  	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
> > >  		fdir_info->guarant_spc = max_num * 4; @@ -1032,11
> > +1180,20 @@
> > > ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info
> > *fdir_info
> > >  			fdir_info->mask.ipv6_mask.dst_ip);
> > >  	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
> > >  	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
> > > +	fdir_info->mask.mac_addr_mask = info->mask.mac_addr_mask;
> > > +	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
> > > +	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
> > >  	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
> > > -	fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> > > +
> > > +	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > > +		|| fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > > +		fdir_info->flow_types_mask[0] = 0;
> > > +	else
> > > +		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> > > +
> > >  	fdir_info->flex_payload_unit = sizeof(uint16_t);
> > >  	fdir_info->max_flex_payload_segment_num = 1;
> > > -	fdir_info->flex_payload_limit = 62;
> > > +	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
> > >  	fdir_info->flex_conf.nb_payloads = 1;
> > >  	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
> > >  	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset; @@ -1095,7
> > > +1252,7 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct
> > rte_eth_fdir_stats *fdir_st
> > >  	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
> > >  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
> > >  			(reg & FDIRCTRL_PBALLOC_MASK)));
> > > -	if (dev->data->dev_conf.fdir_conf.mode ==
> > RTE_FDIR_MODE_PERFECT)
> > > +	if (dev->data->dev_conf.fdir_conf.mode >=
> > RTE_FDIR_MODE_PERFECT)
> > >  			fdir_stats->guarant_cnt = max_num - fdir_stats->free;
> > >  	else if (dev->data->dev_conf.fdir_conf.mode ==
> > RTE_FDIR_MODE_SIGNATURE)
> > >  		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
> > > --
> > > 1.9.3
  
Wenzhuo Lu Oct. 22, 2015, 1:23 a.m. UTC | #4
Hi Konstantin,

> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Wednesday, October 21, 2015 6:19 PM
> To: Lu, Wenzhuo; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir new
> modes' config
> 
> 
> 
> > -----Original Message-----
> > From: Lu, Wenzhuo
> > Sent: Wednesday, October 21, 2015 2:48 AM
> > To: Ananyev, Konstantin; dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir
> > new modes' config
> >
> > Hi Konstantin,
> >
> > > -----Original Message-----
> > > From: Ananyev, Konstantin
> > > Sent: Tuesday, October 20, 2015 9:56 PM
> > > To: Lu, Wenzhuo; dev@dpdk.org
> > > Subject: RE: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for
> > > fdir new modes' config
> > >
> > > Hi Wenzhuo,
> > > Few questions/comments from me, see below.
> > > Thanks
> > > Konstantin
> > >
> > > > -----Original Message-----
> > > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wenzhuo Lu
> > > > Sent: Tuesday, September 29, 2015 6:31 AM
> > > > To: dev@dpdk.org
> > > > Subject: [dpdk-dev] [PATCH v2 6/6] ixgbe: implementation for fdir
> > > > new modes' config
> > > >
> > > > Implement the new CLIs for fdir mac vlan and tunnel modes,
> > > > including flow_director_filter and flow_director_mask. Set the mask of
> fdir.
> > > > Add, delete or update the entities of filter.
> > > >
> > > > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > > > ---
> > > >  drivers/net/ixgbe/ixgbe_ethdev.h |   3 +
> > > >  drivers/net/ixgbe/ixgbe_fdir.c   | 241
> > > ++++++++++++++++++++++++++++++++-------
> > > >  2 files changed, 202 insertions(+), 42 deletions(-)
> > > >
> > > > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h
> > > > b/drivers/net/ixgbe/ixgbe_ethdev.h
> > > > index c3d4f4f..9cc45a0 100644
> > > > --- a/drivers/net/ixgbe/ixgbe_ethdev.h
> > > > +++ b/drivers/net/ixgbe/ixgbe_ethdev.h
> > > > @@ -133,6 +133,9 @@ struct ixgbe_hw_fdir_mask {
> > > >  	uint16_t src_port_mask;
> > > >  	uint16_t dst_port_mask;
> > > >  	uint16_t flex_bytes_mask;
> > > > +	uint8_t  mac_addr_mask;
> > > > +	uint32_t tunnel_id_mask;
> > > > +	uint8_t  tunnel_type_mask;
> > > >  };
> > > >
> > > >  struct ixgbe_hw_fdir_info {
> > > > diff --git a/drivers/net/ixgbe/ixgbe_fdir.c
> > > > b/drivers/net/ixgbe/ixgbe_fdir.c index 5c8b833..87e7081 100644
> > > > --- a/drivers/net/ixgbe/ixgbe_fdir.c
> > > > +++ b/drivers/net/ixgbe/ixgbe_fdir.c
> > > > @@ -105,6 +105,8 @@
> > > >  	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\  } while
> > > > (0)
> > > >
> > > > +#define DEFAULT_VXLAN_PORT 4789
> > > > +
> > > >  static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t
> > > > fdirhash);  static int fdir_set_input_mask_82599(struct rte_eth_dev
> *dev,
> > > >  		const struct rte_eth_fdir_masks *input_mask); @@ -113,7
> > > +115,8 @@
> > > > static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
> > > > static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t
> > > > fdirctrl);  static int ixgbe_fdir_filter_to_atr_input(
> > > >  		const struct rte_eth_fdir_filter *fdir_filter,
> > > > -		union ixgbe_atr_input *input);
> > > > +		union ixgbe_atr_input *input,
> > > > +		enum rte_fdir_mode mode);
> > > >  static uint32_t ixgbe_atr_compute_hash_82599(union
> > > > ixgbe_atr_input
> > > *atr_input,
> > > >  				 uint32_t key);
> > > >  static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input
> > > > *input, @@ -122,7 +125,8 @@ static uint32_t
> > > atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
> > > >  		enum rte_fdir_pballoc_type pballoc);  static int
> > > > fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
> > > >  			union ixgbe_atr_input *input, uint8_t queue,
> > > > -			uint32_t fdircmd, uint32_t fdirhash);
> > > > +			uint32_t fdircmd, uint32_t fdirhash,
> > > > +			enum rte_fdir_mode mode);
> > > >  static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
> > > >  		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
> > > >  		uint32_t fdirhash);
> > > > @@ -243,9 +247,15 @@ configure_fdir_flags(const struct
> > > > rte_fdir_conf
> > > *conf, uint32_t *fdirctrl)
> > > >  	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t))
> > > <<
> > > >  		     IXGBE_FDIRCTRL_FLEX_SHIFT;
> > > >
> > > > -	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
> > > > +	if (conf->mode >= RTE_FDIR_MODE_PERFECT) {
> > >
> > > I think it would be better as: if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
> > > conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL), to make sure that future
> > > expansion of RTE_FDIR_MODE_* wouldn't break that code.
> > Yes, you're right. I'll change it.
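A minimal sketch of how the bounded check could look in configure_fdir_flags()
(just the shape of the change, not the final v3 code):

	/* Treat every perfect-match variant alike, but stop at the last
	 * mode defined today so a future RTE_FDIR_MODE_* value is not
	 * silently accepted here.
	 */
	if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
	    conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
		*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	}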
> >
> > >
> > > >  		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
> > > >  		*fdirctrl |= (conf->drop_queue <<
> > > IXGBE_FDIRCTRL_DROP_Q_SHIFT);
> > > > +		if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> > > > +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
> > > > +					<<
> > > IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
> > > > +		else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > > > +			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
> > > > +					<<
> > > IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
> > > >  	}
> > > >
> > > >  	return 0;
> > > > @@ -294,8 +304,18 @@ fdir_set_input_mask_82599(struct rte_eth_dev
> > > *dev,
> > > >  	uint16_t dst_ipv6m = 0;
> > > >  	uint16_t src_ipv6m = 0;
> > > >
> > > > +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> > > > +
> > > >  	PMD_INIT_FUNC_TRACE();
> > > >
> > > > +	/* set the default UDP port for VxLAN */
> > > > +	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
> > >
> > > Hmm, why is that done by default?
> > > As I understand it, this is an x550-specific register and is not present in
> > > older HW (82599), no?
> > Yes, the older HW doesn't support VxLAN. I'll correct it.
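A sketch of the corrected write (assuming only a MAC-type check is added around
the existing statement):

	/* VXLANCTRL only exists on x550; set the default VxLAN UDP port
	 * there and skip the write on older MACs such as 82599.
	 */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x)
		IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);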
> >
> > >
> > > > +
> > > > +	/* some bits must be set for mac vlan or tunnel mode */
> > > > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > > > +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > > > +		fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
> > > > +
> > > >  	/*
> > > >  	 * Program the relevant mask registers.  If src/dst_port or
> > > src/dst_addr
> > > >  	 * are zero, then assume a full mask for that field. Also assume
> > > > that @@ -323,26 +343,36 @@ fdir_set_input_mask_82599(struct
> > > > rte_eth_dev *dev,
> > > >
> > > >  	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
> > > >
> > > > -	/* store the TCP/UDP port masks, bit reversed from port layout */
> > > > -	fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
> > > > -					 input_mask->src_port_mask);
> > > > -
> > > > -	/* write all the same so that UDP, TCP and SCTP use the same mask
> > > */
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> > > > -	info->mask.src_port_mask = input_mask->src_port_mask;
> > > > -	info->mask.dst_port_mask = input_mask->dst_port_mask;
> > > > +	if (mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
> > > > +		mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > > +		/*
> > > > +		 * store the TCP/UDP port masks,
> > > > +		 * bit reversed from port layout
> > > > +		 */
> > > > +		fdirtcpm = reverse_fdir_bitmasks(input_mask-
> > > >dst_port_mask,
> > > > +						 input_mask->src_port_mask);
> > > >
> > > > -	/* Store source and destination IPv4 masks (big-endian) */
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask-
> > > >ipv4_mask.src_ip));
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask-
> > > >ipv4_mask.dst_ip));
> > > > -	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> > > > -	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> > > > +		/*
> > > > +		 * write all the same so that UDP,
> > > > +		 * TCP and SCTP use the same mask
> > > > +		 */
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
> > > > +		info->mask.src_port_mask = input_mask->src_port_mask;
> > > > +		info->mask.dst_port_mask = input_mask->dst_port_mask;
> > > > +
> > > > +		/* Store source and destination IPv4 masks (big-endian) */
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M,
> > > > +				~(input_mask->ipv4_mask.src_ip));
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M,
> > > > +				~(input_mask->ipv4_mask.dst_ip));
> > > > +		info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
> > > > +		info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
> > > > +	}
> > > >
> > > > -	if (dev->data->dev_conf.fdir_conf.mode ==
> > > RTE_FDIR_MODE_SIGNATURE) {
> > > > +	if (mode == RTE_FDIR_MODE_SIGNATURE) {
> > > >  		/*
> > > > -		 * IPv6 mask is only meaningful in signature mode
> > > >  		 * Store source and destination IPv6 masks (bit reversed)
> > > >  		 */
> > > >  		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip,
> > > src_ipv6m); @@
> > > > -354,6 +384,69 @@ fdir_set_input_mask_82599(struct rte_eth_dev
> *dev,
> > > >  		info->mask.dst_ipv6_mask = dst_ipv6m;
> > > >  	}
> > > >
> > > > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > > > +		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > > +		fdiripv6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
> > > > +		fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
> > > > +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
> > > > +			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
> > > > +					IXGBE_FDIRIP6M_TNI_VNI;
> > > > +
> > > > +		switch (input_mask->mac_addr_mask & 0xFF) {
> > > > +		case 0x00:
> > > > +			/* Mask inner MAC */
> > > > +			fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
> > > > +			break;
> > > > +		case 0xFF:
> > > > +			break;
> > > > +		default:
> > > > +			PMD_INIT_LOG(ERR, "invalid mac_addr_mask");
> > > > +			return -EINVAL;
> > >
> > > I thought it was possible to mask any byte of the MAC...
> > > Am I missing something here?
> > Just leveraging the behavior of the kernel driver; it only supports 0x00 and 0xFF.
> 
> Ok, this is probably a case where we don't need to follow the kernel :) My
> take: let's support all masks properly.
OK. I'll remove this limitation.

> 
> >
> > >
> > > > +		}
> > > > +		info->mask.mac_addr_mask = input_mask->mac_addr_mask;
> > > > +
> > > > +		if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > > +			switch (input_mask->tunnel_type_mask) {
> > > > +			case 0:
> > > > +				/* Mask tunnel type */
> > > > +				fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
> > > > +				break;
> > > > +			case 1:
> > > > +				break;
> > > > +			default:
> > > > +				PMD_INIT_LOG(ERR, "invalid
> > > tunnel_type_mask");
> > > > +				return -EINVAL;
> > > > +			}
> > > > +			info->mask.tunnel_type_mask =
> > > > +				input_mask->tunnel_type_mask;
> > > > +
> > > > +			switch (input_mask->tunnel_id_mask & 0xFFFFFFFF) {
> > > > +			case 0x0:
> > > > +				/* Mask vxlan id */
> > > > +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
> > > > +				break;
> > > > +			case 0x00FFFFFF:
> > > > +				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
> > > > +				break;
> > > > +			case 0xFFFFFFFF:
> > > > +				break;
> > > > +			default:
> > > > +				PMD_INIT_LOG(ERR, "invalid
> > > tunnel_id_mask");
> > > > +				return -EINVAL;
> > > > +			}
> > > > +			info->mask.tunnel_id_mask =
> > > > +				input_mask->tunnel_id_mask;
> > > > +		}
> > > > +
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
> > > > +
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
> > > > +	}
> > >
> > > Probably worth putting this into a separate function:
> > > fdir_set_input_mask_x550() or something.
> > Oh, it seems this function is too long and complex. I'll split it.
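One possible split, as a sketch only (fdir_set_input_mask_x550() is a
placeholder name for the new helper, matching the suggestion above, and the
wrapper itself is hypothetical):

	static int
	fdir_set_input_mask(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_masks *input_mask)
	{
		enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

		/* IP/L4 based modes keep the existing 82599 path */
		if (mode >= RTE_FDIR_MODE_SIGNATURE &&
		    mode <= RTE_FDIR_MODE_PERFECT)
			return fdir_set_input_mask_82599(dev, input_mask);
		/* mac vlan and tunnel masks go to an x550-only helper */
		if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		    mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
			return fdir_set_input_mask_x550(dev, input_mask);

		PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
		return -ENOTSUP;
	}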
> >
> > >
> > > > +
> > > >  	return IXGBE_SUCCESS;
> > > >  }
> > > >
> > > > @@ -431,6 +524,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> > > >  	int err;
> > > >  	uint32_t fdirctrl, pbsize;
> > > >  	int i;
> > > > +	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
> > > >
> > > >  	PMD_INIT_FUNC_TRACE();
> > > >
> > > > @@ -440,6 +534,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> > > >  		hw->mac.type != ixgbe_mac_X550EM_x)
> > > >  		return -ENOSYS;
> > > >
> > > > +	/* x550 supports mac-vlan and tunnel modes but other NICs do not */
> > > > +	if (hw->mac.type != ixgbe_mac_X550 &&
> > > > +		hw->mac.type != ixgbe_mac_X550EM_x &&
> > > > +		mode != RTE_FDIR_MODE_SIGNATURE &&
> > > > +		mode != RTE_FDIR_MODE_PERFECT)
> > > > +		return -ENOSYS;
> > > > +
> > > >  	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
> > > >  	if (err)
> > > >  		return err;
> > > > @@ -488,7 +589,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
> > > >   */
> > > >  static int
> > > >  ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter
> *fdir_filter,
> > > > -		union ixgbe_atr_input *input)
> > > > +		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
> > > >  {
> > > >  	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
> > > >  	input->formatted.flex_bytes = (uint16_t)( @@ -521,8 +622,7 @@
> > > > ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter
> *fdir_filter,
> > > >  		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
> > > >  		break;
> > > >  	default:
> > > > -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> > > > -		return -EINVAL;
> > > > +		break;
> > > >  	}
> > > >
> > > >  	switch (fdir_filter->input.flow_type) { @@ -558,8 +658,23 @@
> > > > ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter
> *fdir_filter,
> > > >  			   sizeof(input->formatted.dst_ip));
> > > >  		break;
> > > >  	default:
> > > > -		PMD_DRV_LOG(ERR, " Error on flow_type input");
> > > > -		return -EINVAL;
> > > > +		break;
> > > > +	}
> > > > +
> > > > +	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> > > > +		rte_memcpy(
> > > > +			input->formatted.inner_mac,
> > > > +			fdir_filter-
> > > >input.flow.mac_vlan_flow.mac_addr.addr_bytes,
> > > > +			sizeof(input->formatted.inner_mac));
> > > > +	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
> > > > +		rte_memcpy(
> > > > +			input->formatted.inner_mac,
> > > > +			fdir_filter-
> > > >input.flow.tunnel_flow.mac_addr.addr_bytes,
> > > > +			sizeof(input->formatted.inner_mac));
> > > > +		input->formatted.tunnel_type =
> > > > +			fdir_filter->input.flow.tunnel_flow.tunnel_type;
> > > > +		input->formatted.tni_vni =
> > > > +			fdir_filter->input.flow.tunnel_flow.tunnel_id;
> > > >  	}
> > > >
> > > >  	return 0;
> > > > @@ -743,20 +858,51 @@ atr_compute_sig_hash_82599(union
> > > ixgbe_atr_input
> > > > *input,  static int  fdir_write_perfect_filter_82599(struct
> > > > ixgbe_hw *hw,
> > > >  			union ixgbe_atr_input *input, uint8_t queue,
> > > > -			uint32_t fdircmd, uint32_t fdirhash)
> > > > +			uint32_t fdircmd, uint32_t fdirhash,
> > > > +			enum rte_fdir_mode mode)
> > > >  {
> > > >  	uint32_t fdirport, fdirvlan;
> > > > +	u32 addr_low, addr_high;
> > > > +	u32 tunnel_type = 0;
> > > >  	int err = 0;
> > > >
> > > > -	/* record the IPv4 address (big-endian) */
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
> > > > -
> > > > -	/* record source and destination port (little-endian)*/
> > > > -	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> > > > -	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> > > > -	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> > > > -	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> > > > +	if (mode == RTE_FDIR_MODE_PERFECT) {
> > > > +		/* record the IPv4 address (big-endian) */
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA,
> > > > +				input->formatted.src_ip[0]);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA,
> > > > +				input->formatted.dst_ip[0]);
> > > > +
> > > > +		/* record source and destination port (little-endian)*/
> > > > +		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
> > > > +		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
> > > > +		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
> > > > +		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
> > > > +	} else {
> > > else if (mode == MAC_VLAN || mode == TUNNEL)
> > >
> > > Again, to avoid breakage with future expansions.
> > Agree, I'll change it.
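The resulting structure in fdir_write_perfect_filter_82599() would then look
roughly like this (a sketch only, bodies elided to comments):

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* IPv4 addresses and L4 ports, as in the hunk above */
	} else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* inner MAC and tunnel id/type via the FDIRSIPv6 registers */
	} else {
		/* reject any mode this function was not written for */
		PMD_DRV_LOG(ERR, "Error on fdir mode - %d!", mode);
		return -EINVAL;
	}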
> >
> > >
> > > > +		/* for mac vlan and tunnel modes */
> > > > +		addr_low = ((u32)input->formatted.inner_mac[0] |
> > > > +			    ((u32)input->formatted.inner_mac[1] << 8) |
> > > > +			    ((u32)input->formatted.inner_mac[2] << 16) |
> > > > +			    ((u32)input->formatted.inner_mac[3] << 24));
> > > > +		addr_high = ((u32)input->formatted.inner_mac[4] |
> > > > +			     ((u32)input->formatted.inner_mac[5] << 8));
> > > > +
> > > > +		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
> > > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0),
> > > addr_low);
> > > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1),
> > > addr_high);
> > > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
> > > > +		} else {
> > > > +			/* tunnel mode */
> > > > +			if (input->formatted.tunnel_type !=
> > > > +				RTE_FDIR_TUNNEL_TYPE_NVGRE)
> > > > +				tunnel_type = 0x80000000;
> > > > +			tunnel_type |= addr_high;
> > > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0),
> > > addr_low);
> > > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1),
> > > tunnel_type);
> > > > +			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
> > > > +					input->formatted.tni_vni);
> > > > +		}
> > > > +	}
> > > >
> > > >  	/* record vlan (little-endian) and flex_bytes(big-endian) */
> > > >  	fdirvlan = input->formatted.flex_bytes; @@ -917,12 +1063,13 @@
> > > > ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
> > > >  		return -ENOTSUP;
> > > >  	}
> > > >
> > > > -	if (dev->data->dev_conf.fdir_conf.mode ==
> > > RTE_FDIR_MODE_PERFECT)
> > > > +	if (dev->data->dev_conf.fdir_conf.mode >=
> > > RTE_FDIR_MODE_PERFECT)
> > > >  		is_perfect = TRUE;
> > > >
> > > >  	memset(&input, 0, sizeof(input));
> > > >
> > > > -	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
> > > > +	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
> > > > +					dev->data->dev_conf.fdir_conf.mode);
> > > >  	if (err)
> > > >  		return err;
> > > >
> > > > @@ -966,7 +1113,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev
> > > > *dev,
> > > >
> > > >  	if (is_perfect) {
> > > >  		err = fdir_write_perfect_filter_82599(hw, &input, queue,
> > > > -				fdircmd_flags, fdirhash);
> > > > +				fdircmd_flags, fdirhash,
> > > > +				dev->data->dev_conf.fdir_conf.mode);
> > > >  	} else {
> > > >  		err = fdir_add_signature_filter_82599(hw, &input, queue,
> > > >  				fdircmd_flags, fdirhash);
> > > > @@ -1018,7 +1166,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev,
> > > struct rte_eth_fdir_info *fdir_info
> > > >  	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
> > > >  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
> > > >  			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
> > > > -	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
> > > > +	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT)
> > > >  		fdir_info->guarant_spc = max_num;
> > > >  	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
> > > >  		fdir_info->guarant_spc = max_num * 4; @@ -1032,11
> > > +1180,20 @@
> > > > ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct
> > > > rte_eth_fdir_info
> > > *fdir_info
> > > >  			fdir_info->mask.ipv6_mask.dst_ip);
> > > >  	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
> > > >  	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
> > > > +	fdir_info->mask.mac_addr_mask = info->mask.mac_addr_mask;
> > > > +	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
> > > > +	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
> > > >  	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
> > > > -	fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> > > > +
> > > > +	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
> > > > +		|| fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
> > > > +		fdir_info->flow_types_mask[0] = 0;
> > > > +	else
> > > > +		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
> > > > +
> > > >  	fdir_info->flex_payload_unit = sizeof(uint16_t);
> > > >  	fdir_info->max_flex_payload_segment_num = 1;
> > > > -	fdir_info->flex_payload_limit = 62;
> > > > +	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
> > > >  	fdir_info->flex_conf.nb_payloads = 1;
> > > >  	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
> > > >  	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset; @@
> > > > -1095,7
> > > > +1252,7 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct
> > > rte_eth_fdir_stats *fdir_st
> > > >  	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
> > > >  	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
> > > >  			(reg & FDIRCTRL_PBALLOC_MASK)));
> > > > -	if (dev->data->dev_conf.fdir_conf.mode ==
> > > RTE_FDIR_MODE_PERFECT)
> > > > +	if (dev->data->dev_conf.fdir_conf.mode >=
> > > RTE_FDIR_MODE_PERFECT)
> > > >  			fdir_stats->guarant_cnt = max_num - fdir_stats->free;
> > > >  	else if (dev->data->dev_conf.fdir_conf.mode ==
> > > RTE_FDIR_MODE_SIGNATURE)
> > > >  		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
> > > > --
> > > > 1.9.3
  

Patch

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..9cc45a0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -133,6 +133,9 @@  struct ixgbe_hw_fdir_mask {
 	uint16_t src_port_mask;
 	uint16_t dst_port_mask;
 	uint16_t flex_bytes_mask;
+	uint8_t  mac_addr_mask;
+	uint32_t tunnel_id_mask;
+	uint8_t  tunnel_type_mask;
 };
 
 struct ixgbe_hw_fdir_info {
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 5c8b833..87e7081 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -105,6 +105,8 @@ 
 	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
 } while (0)
 
+#define DEFAULT_VXLAN_PORT 4789
+
 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
 static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
 		const struct rte_eth_fdir_masks *input_mask);
@@ -113,7 +115,8 @@  static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
 static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static int ixgbe_fdir_filter_to_atr_input(
 		const struct rte_eth_fdir_filter *fdir_filter,
-		union ixgbe_atr_input *input);
+		union ixgbe_atr_input *input,
+		enum rte_fdir_mode mode);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 				 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
@@ -122,7 +125,8 @@  static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 		enum rte_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
-			uint32_t fdircmd, uint32_t fdirhash);
+			uint32_t fdircmd, uint32_t fdirhash,
+			enum rte_fdir_mode mode);
 static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
 		uint32_t fdirhash);
@@ -243,9 +247,15 @@  configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
 	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
 		     IXGBE_FDIRCTRL_FLEX_SHIFT;
 
-	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
+	if (conf->mode >= RTE_FDIR_MODE_PERFECT) {
 		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
 		*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+		if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
+					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+		else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
+					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
 	}
 
 	return 0;
@@ -294,8 +304,18 @@  fdir_set_input_mask_82599(struct rte_eth_dev *dev,
 	uint16_t dst_ipv6m = 0;
 	uint16_t src_ipv6m = 0;
 
+	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
 	PMD_INIT_FUNC_TRACE();
 
+	/* set the default UDP port for VxLAN */
+	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
+
+	/* some bits must be set for mac vlan or tunnel mode */
+	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
+		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
+
 	/*
 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
 	 * are zero, then assume a full mask for that field. Also assume that
@@ -323,26 +343,36 @@  fdir_set_input_mask_82599(struct rte_eth_dev *dev,
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
-	/* store the TCP/UDP port masks, bit reversed from port layout */
-	fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
-					 input_mask->src_port_mask);
-
-	/* write all the same so that UDP, TCP and SCTP use the same mask */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
-	info->mask.src_port_mask = input_mask->src_port_mask;
-	info->mask.dst_port_mask = input_mask->dst_port_mask;
+	if (mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+		mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
+		/*
+		 * store the TCP/UDP port masks,
+		 * bit reversed from port layout
+		 */
+		fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
+						 input_mask->src_port_mask);
 
-	/* Store source and destination IPv4 masks (big-endian) */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask->ipv4_mask.src_ip));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask->ipv4_mask.dst_ip));
-	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
-	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+		/*
+		 * write all the same so that UDP,
+		 * TCP and SCTP use the same mask
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+		info->mask.src_port_mask = input_mask->src_port_mask;
+		info->mask.dst_port_mask = input_mask->dst_port_mask;
+
+		/* Store source and destination IPv4 masks (big-endian) */
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M,
+				~(input_mask->ipv4_mask.src_ip));
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M,
+				~(input_mask->ipv4_mask.dst_ip));
+		info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+		info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+	}
 
-	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
+	if (mode == RTE_FDIR_MODE_SIGNATURE) {
 		/*
-		 * IPv6 mask is only meaningful in signature mode
 		 * Store source and destination IPv6 masks (bit reversed)
 		 */
 		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
@@ -354,6 +384,69 @@  fdir_set_input_mask_82599(struct rte_eth_dev *dev,
 		info->mask.dst_ipv6_mask = dst_ipv6m;
 	}
 
+	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
+		|| mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+		fdiripv6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+		fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
+					IXGBE_FDIRIP6M_TNI_VNI;
+
+		switch (input_mask->mac_addr_mask & 0xFF) {
+		case 0x00:
+			/* Mask inner MAC */
+			fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+			break;
+		case 0xFF:
+			break;
+		default:
+			PMD_INIT_LOG(ERR, "invalid mac_addr_mask");
+			return -EINVAL;
+		}
+		info->mask.mac_addr_mask = input_mask->mac_addr_mask;
+
+		if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+			switch (input_mask->tunnel_type_mask) {
+			case 0:
+				/* Mask tunnel type */
+				fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+				break;
+			case 1:
+				break;
+			default:
+				PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
+				return -EINVAL;
+			}
+			info->mask.tunnel_type_mask =
+				input_mask->tunnel_type_mask;
+
+			switch (input_mask->tunnel_id_mask & 0xFFFFFFFF) {
+			case 0x0:
+				/* Mask vxlan id */
+				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
+				break;
+			case 0x00FFFFFF:
+				fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+				break;
+			case 0xFFFFFFFF:
+				break;
+			default:
+				PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
+				return -EINVAL;
+			}
+			info->mask.tunnel_id_mask =
+				input_mask->tunnel_id_mask;
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
+
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+	}
+
 	return IXGBE_SUCCESS;
 }
 
@@ -431,6 +524,7 @@  ixgbe_fdir_configure(struct rte_eth_dev *dev)
 	int err;
 	uint32_t fdirctrl, pbsize;
 	int i;
+	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -440,6 +534,13 @@  ixgbe_fdir_configure(struct rte_eth_dev *dev)
 		hw->mac.type != ixgbe_mac_X550EM_x)
 		return -ENOSYS;
 
+	/* x550 supports mac-vlan and tunnel modes but other NICs do not */
+	if (hw->mac.type != ixgbe_mac_X550 &&
+		hw->mac.type != ixgbe_mac_X550EM_x &&
+		mode != RTE_FDIR_MODE_SIGNATURE &&
+		mode != RTE_FDIR_MODE_PERFECT)
+		return -ENOSYS;
+
 	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
 	if (err)
 		return err;
@@ -488,7 +589,7 @@  ixgbe_fdir_configure(struct rte_eth_dev *dev)
  */
 static int
 ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
-		union ixgbe_atr_input *input)
+		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
 {
 	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
 	input->formatted.flex_bytes = (uint16_t)(
@@ -521,8 +622,7 @@  ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
 		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
 		break;
 	default:
-		PMD_DRV_LOG(ERR, " Error on flow_type input");
-		return -EINVAL;
+		break;
 	}
 
 	switch (fdir_filter->input.flow_type) {
@@ -558,8 +658,23 @@  ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
 			   sizeof(input->formatted.dst_ip));
 		break;
 	default:
-		PMD_DRV_LOG(ERR, " Error on flow_type input");
-		return -EINVAL;
+		break;
+	}
+
+	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+		rte_memcpy(
+			input->formatted.inner_mac,
+			fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
+			sizeof(input->formatted.inner_mac));
+	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+		rte_memcpy(
+			input->formatted.inner_mac,
+			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
+			sizeof(input->formatted.inner_mac));
+		input->formatted.tunnel_type =
+			fdir_filter->input.flow.tunnel_flow.tunnel_type;
+		input->formatted.tni_vni =
+			fdir_filter->input.flow.tunnel_flow.tunnel_id;
 	}
 
 	return 0;
@@ -743,20 +858,51 @@  atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
 static int
 fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			union ixgbe_atr_input *input, uint8_t queue,
-			uint32_t fdircmd, uint32_t fdirhash)
+			uint32_t fdircmd, uint32_t fdirhash,
+			enum rte_fdir_mode mode)
 {
 	uint32_t fdirport, fdirvlan;
+	u32 addr_low, addr_high;
+	u32 tunnel_type = 0;
 	int err = 0;
 
-	/* record the IPv4 address (big-endian) */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
-	/* record source and destination port (little-endian)*/
-	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
-	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
-	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+	if (mode == RTE_FDIR_MODE_PERFECT) {
+		/* record the IPv4 address (big-endian) */
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA,
+				input->formatted.src_ip[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA,
+				input->formatted.dst_ip[0]);
+
+		/* record source and destination port (little-endian)*/
+		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+	} else {
+		/* for mac vlan and tunnel modes */
+		addr_low = ((u32)input->formatted.inner_mac[0] |
+			    ((u32)input->formatted.inner_mac[1] << 8) |
+			    ((u32)input->formatted.inner_mac[2] << 16) |
+			    ((u32)input->formatted.inner_mac[3] << 24));
+		addr_high = ((u32)input->formatted.inner_mac[4] |
+			     ((u32)input->formatted.inner_mac[5] << 8));
+
+		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
+		} else {
+			/* tunnel mode */
+			if (input->formatted.tunnel_type !=
+				RTE_FDIR_TUNNEL_TYPE_NVGRE)
+				tunnel_type = 0x80000000;
+			tunnel_type |= addr_high;
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
+					input->formatted.tni_vni);
+		}
+	}
 
 	/* record vlan (little-endian) and flex_bytes(big-endian) */
 	fdirvlan = input->formatted.flex_bytes;
@@ -917,12 +1063,13 @@  ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 		return -ENOTSUP;
 	}
 
-	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+	if (dev->data->dev_conf.fdir_conf.mode >= RTE_FDIR_MODE_PERFECT)
 		is_perfect = TRUE;
 
 	memset(&input, 0, sizeof(input));
 
-	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
+	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
+					dev->data->dev_conf.fdir_conf.mode);
 	if (err)
 		return err;
 
@@ -966,7 +1113,8 @@  ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 
 	if (is_perfect) {
 		err = fdir_write_perfect_filter_82599(hw, &input, queue,
-				fdircmd_flags, fdirhash);
+				fdircmd_flags, fdirhash,
+				dev->data->dev_conf.fdir_conf.mode);
 	} else {
 		err = fdir_add_signature_filter_82599(hw, &input, queue,
 				fdircmd_flags, fdirhash);
@@ -1018,7 +1166,7 @@  ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
 	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
 			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
-	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
+	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT)
 		fdir_info->guarant_spc = max_num;
 	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
 		fdir_info->guarant_spc = max_num * 4;
@@ -1032,11 +1180,20 @@  ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 			fdir_info->mask.ipv6_mask.dst_ip);
 	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
 	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
+	fdir_info->mask.mac_addr_mask = info->mask.mac_addr_mask;
+	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
+	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
 	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
-	fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+
+	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN
+		|| fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		fdir_info->flow_types_mask[0] = 0;
+	else
+		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+
 	fdir_info->flex_payload_unit = sizeof(uint16_t);
 	fdir_info->max_flex_payload_segment_num = 1;
-	fdir_info->flex_payload_limit = 62;
+	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
 	fdir_info->flex_conf.nb_payloads = 1;
 	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
 	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
@@ -1095,7 +1252,7 @@  ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
 	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
 	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
 			(reg & FDIRCTRL_PBALLOC_MASK)));
-	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+	if (dev->data->dev_conf.fdir_conf.mode >= RTE_FDIR_MODE_PERFECT)
 			fdir_stats->guarant_cnt = max_num - fdir_stats->free;
 	else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
 		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;