[dpdk-dev,v2,11/18] net/ixgbe: parse n-tuple filter

Message ID 1483084390-53159-12-git-send-email-wei.zhao1@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit

Checks

Context                Check    Description
ci/checkpatch          warning  coding style issues
ci/Intel compilation   fail     Compilation issues

Commit Message

Zhao1, Wei Dec. 30, 2016, 7:53 a.m. UTC
  Add a rule validation function: check if the rule is an n-tuple rule,
and get the n-tuple info.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

---

v2: add new error set function
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 414 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 409 insertions(+), 5 deletions(-)
  

Comments

Xing, Beilei Jan. 2, 2017, 10:41 a.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Zhao
> Sent: Friday, December 30, 2016 3:53 PM
> To: dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: [dpdk-dev] [PATCH v2 11/18] net/ixgbe: parse n-tuple filter
> 
> Add a rule validation function: check if the rule is an n-tuple rule, and get
> the n-tuple info.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> 
> ---
> 
> v2: add new error set function
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c | 414 ++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 409 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 0de1318..198cc4b 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -388,6 +388,24 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
>  					 struct rte_eth_udp_tunnel *udp_tunnel);
>  static int ixgbe_filter_restore(struct rte_eth_dev *dev);
>  static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
> +static int
> +cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> +					const struct rte_flow_item pattern[],
> +					const struct rte_flow_action actions[],
> +					struct rte_eth_ntuple_filter *filter,
> +					struct rte_flow_error *error);

Why do you declare cons_parse_ntuple_filter here? It also doesn't seem to follow the naming convention.
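
If the declaration is kept, a name following the driver's ixgbe_ prefix convention might look like the sketch below (the exact name is only a suggestion); alternatively, defining the function before its first use would remove the need for the forward declaration entirely:

    static int
    ixgbe_parse_ntuple_filter_common(const struct rte_flow_attr *attr,
    				 const struct rte_flow_item pattern[],
    				 const struct rte_flow_action actions[],
    				 struct rte_eth_ntuple_filter *filter,
    				 struct rte_flow_error *error);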

> +static int
> +ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
> +					const struct rte_flow_item pattern[],
> +					const struct rte_flow_action actions[],
> +					struct rte_eth_ntuple_filter *filter,
> +					struct rte_flow_error *error);
> +static int
> +ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
> +		const struct rte_flow_attr *attr,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		struct rte_flow_error *error);
>  static int ixgbe_flow_flush(struct rte_eth_dev *dev,
>  		struct rte_flow_error *error);
>  /*
> @@ -769,7 +787,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
>  #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /	\
>  		sizeof(rte_ixgbevf_stats_strings[0]))
>  static const struct rte_flow_ops ixgbe_flow_ops = {
> -	NULL,
> +	ixgbe_flow_validate,
>  	NULL,
>  	NULL,
>  	ixgbe_flow_flush,
> @@ -8072,6 +8090,390 @@ ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
>  	return 0;
>  }
> 
> +static inline uint32_t
> +rte_be_to_cpu_24(uint32_t x)
> +{
> +	return  ((x & 0x000000ffUL) << 16) |
> +		(x & 0x0000ff00UL) |
> +		((x & 0x00ff0000UL) >> 16);
> +}

Why do you define the function in the PMD with an rte_ prefix? Do you want to move it to the rte library?
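
If it stays in the PMD, a driver-local name would avoid squatting on the rte_ namespace; a minimal sketch with a hypothetical name:

    static inline uint32_t
    ixgbe_be_to_cpu_24(uint32_t x)
    {
    	return ((x & 0x000000ffUL) << 16) |
    		(x & 0x0000ff00UL) |
    		((x & 0x00ff0000UL) >> 16);
    }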

> +
> +
> +/**
> + * Parse the rule to see if it is a n-tuple rule.
> + * And get the n-tuple filter info BTW.
> + */
> +static int
> +cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> +			 const struct rte_flow_item pattern[],
> +			 const struct rte_flow_action actions[],
> +			 struct rte_eth_ntuple_filter *filter,
> +			 struct rte_flow_error *error)

How about splitting the function into three: parse pattern, parse actions, and parse attr?
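
Prototypes for such a split could look like this sketch (names hypothetical), with cons_parse_ntuple_filter reduced to calling the three helpers in turn:

    static int ixgbe_parse_ntuple_pattern(const struct rte_flow_item pattern[],
    				      struct rte_eth_ntuple_filter *filter,
    				      struct rte_flow_error *error);
    static int ixgbe_parse_ntuple_actions(const struct rte_flow_action actions[],
    				      struct rte_eth_ntuple_filter *filter,
    				      struct rte_flow_error *error);
    static int ixgbe_parse_ntuple_attr(const struct rte_flow_attr *attr,
    				   struct rte_eth_ntuple_filter *filter,
    				   struct rte_flow_error *error);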

> +
> +/* a specific function for ixgbe because the flags is specific */
> +static int ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
> +			  const struct rte_flow_item pattern[],
> +			  const struct rte_flow_action actions[],
> +			  struct rte_eth_ntuple_filter *filter,
> +			  struct rte_flow_error *error)
> +{
> +	int ret;
> +
> +	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
> +
> +	if (ret)
> +		return ret;
> +
> +	/* Ixgbe doesn't support tcp flags. */
> +	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM,
> +				   NULL, "Not supported by ntuple filter");
> +		return -rte_errno;
> +	}
> +
> +	/* Ixgbe doesn't support many priorities. */
> +	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
> +	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM,
> +			NULL, "Priority not supported by ntuple filter");
> +		return -rte_errno;
> +	}
> +
> +	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
> +		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
> +		filter->priority < IXGBE_5TUPLE_MIN_PRI)
> +		return -rte_errno;
> +
> +	/* fixed value for ixgbe */
> +	filter->flags = RTE_5TUPLE_FLAGS;
> +	return 0;
> +}
> +
> +/**
> + * Check if the flow rule is supported by ixgbe.
> + * It only checkes the format. Don't guarantee the rule can be

Typo: checkes -> checks

> +programmed into
> + * the HW. Because there can be no enough room for the rule.
> + */
> +static int
> +ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
> +		const struct rte_flow_attr *attr,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		struct rte_flow_error *error)
> +{
> +	struct rte_eth_ntuple_filter ntuple_filter;
> +	int ret;
> +
> +	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +	ret = ixgbe_parse_ntuple_filter(attr, pattern,
> +				actions, &ntuple_filter, error);
> +	if (!ret)
> +		return 0;
> +
> +	return ret;
> +}
> +
>  /*  Destroy all flow rules associated with a port on ixgbe. */
>  static int
>  ixgbe_flow_flush(struct rte_eth_dev *dev,
> @@ -8085,15 +8487,17 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
> 
>  	ret = ixgbe_clear_all_fdir_filter(dev);
>  	if (ret < 0) {
> -		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
> -					NULL, "Failed to flush rule");
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_HANDLE,
> +				NULL, "Failed to flush rule");
>  		return ret;
>  	}
> 
>  	ret = ixgbe_clear_all_l2_tn_filter(dev);
>  	if (ret < 0) {
> -		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
> -					NULL, "Failed to flush rule");
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_HANDLE,
> +				NULL, "Failed to flush rule");
>  		return ret;
>  	}
> 
> --
> 2.5.5
  
Xing, Beilei Jan. 2, 2017, 10:45 a.m. UTC | #2
> +
> +		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
> +		filter->src_port_mask  = tcp_mask->hdr.src_port;
> +		if (tcp_mask->hdr.tcp_flags == 0xFF) {

It's better to use UINT8_MAX here.
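
That is, using the stdint.h constant:

    		if (tcp_mask->hdr.tcp_flags == UINT8_MAX) {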

> +			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
> +		} else if (!tcp_mask->hdr.tcp_flags) {
> +			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
> +		} else {
> +			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by ntuple filter");
> +			return -rte_errno;
> +		}
> +
> +	if (attr->priority > 0xFFFF) {

How about UINT16_MAX?
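
Likewise:

    	if (attr->priority > UINT16_MAX) {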

> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> +				   attr, "Error priority.");
> +		return -rte_errno;
> +	}
> +	filter->priority = (uint16_t)attr->priority;
> +
> +	return 0;
> +}
> +
  
Ferruh Yigit Jan. 6, 2017, 4:55 p.m. UTC | #3
On 12/30/2016 7:53 AM, Wei Zhao wrote:
> Add a rule validation function: check if the rule is an n-tuple rule,
> and get the n-tuple info.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> 
> ---
> 
> v2: add new error set function
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c | 414 ++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 409 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 0de1318..198cc4b 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -388,6 +388,24 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
>  					 struct rte_eth_udp_tunnel *udp_tunnel);

<...>

> +
> +/**
> + * Parse the rule to see if it is a n-tuple rule.
> + * And get the n-tuple filter info BTW.
> + */

It would be nice to comment here on the valid/expected pattern values
(spec/mask/last). Otherwise it is hard to decode from the code; it is also
good to document the intention, which makes things easier if there is any defect.

Also valid actions.
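
For example, a header comment along the lines of this sketch (the accepted values are taken from the code in this patch) would capture the intention:

    /**
     * Parse the rule to see if it is a n-tuple rule.
     * pattern:
     * The first not void item can be ETH or IPV4.
     *   ETH: spec and mask must both be NULL (the item is only skipped).
     *   IPV4: spec and mask must be set; only src/dst address and
     *   next_proto_id may be non-zero in the mask.
     * The next not void item must be TCP or UDP, with spec and mask set;
     * only src/dst port (and tcp_flags for TCP) may be non-zero in the mask.
     * The next not void item must be END. No item may use "last" (ranges).
     * actions:
     * The first not void action must be QUEUE, followed by END.
     */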

> +static int
> +cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> +			 const struct rte_flow_item pattern[],
> +			 const struct rte_flow_action actions[],
> +			 struct rte_eth_ntuple_filter *filter,
> +			 struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item *item;
> +	const struct rte_flow_action *act;
> +	const struct rte_flow_item_ipv4 *ipv4_spec;
> +	const struct rte_flow_item_ipv4 *ipv4_mask;
> +	const struct rte_flow_item_tcp *tcp_spec;
> +	const struct rte_flow_item_tcp *tcp_mask;
> +	const struct rte_flow_item_udp *udp_spec;
> +	const struct rte_flow_item_udp *udp_mask;
> +	uint32_t index;
> +
> +	if (!pattern) {
> +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				   NULL, "NULL pattern.");
> +		return -rte_errno;
> +	}
> +
> +	/* parse pattern */
> +	index = 0;
> +
> +	/* the first not void item can be MAC or IPv4 */
> +	NEXT_ITEM_OF_PATTERN(item, pattern, index);
> +
> +	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM,
> +			item, "Not supported by ntuple filter");
> +		return -rte_errno;
> +	}
> +	/* Skip Ethernet */
> +	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
> +		/*Not supported last point for range*/
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				item, "Not supported last point for range");
> +			return -rte_errno;
> +
> +		}
> +		/* if the first item is MAC, the content should be NULL */
> +		if (item->spec || item->mask) {
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by ntuple filter");
> +			return -rte_errno;
> +		}
> +		/* check if the next not void item is IPv4 */
> +		index++;
> +		NEXT_ITEM_OF_PATTERN(item, pattern, index);
> +		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
> +			rte_flow_error_set(error,
> +			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> +			item, "Not supported by ntuple filter");

Wrong indentation.
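
That is, with the continuation lines indented under the call as elsewhere in the file:

    		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
    			rte_flow_error_set(error, EINVAL,
    				RTE_FLOW_ERROR_TYPE_ITEM,
    				item, "Not supported by ntuple filter");
    			return -rte_errno;
    		}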

> +			return -rte_errno;
> +		}
> +	}
> +
> +	/* get the IPv4 info */
> +	if (!item->spec || !item->mask) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM,
> +			item, "Invalid ntuple mask");
> +		return -rte_errno;
> +	}
> +	/*Not supported last point for range*/
> +	if (item->last) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +			item, "Not supported last point for range");
> +		return -rte_errno;
> +
> +	}
> +
> +	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
> +	/**
> +	 * Only support src & dst addresses, protocol,
> +	 * others should be masked.
> +	 */
> +	if (ipv4_mask->hdr.version_ihl ||
> +	    ipv4_mask->hdr.type_of_service ||
> +	    ipv4_mask->hdr.total_length ||
> +	    ipv4_mask->hdr.packet_id ||
> +	    ipv4_mask->hdr.fragment_offset ||
> +	    ipv4_mask->hdr.time_to_live ||
> +	    ipv4_mask->hdr.hdr_checksum) {
> +			rte_flow_error_set(error,
> +			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> +			item, "Not supported by ntuple filter");
> +		return -rte_errno;
> +	}
> +
> +	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
> +	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
> +	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
> +
> +	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
> +	filter->dst_ip = ipv4_spec->hdr.dst_addr;
> +	filter->src_ip = ipv4_spec->hdr.src_addr;
> +	filter->proto  = ipv4_spec->hdr.next_proto_id;
> +
> +	/* check if the next not void item is TCP or UDP */
> +	index++;
> +	NEXT_ITEM_OF_PATTERN(item, pattern, index);
> +	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
> +	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));

Sometimes the filter is memset before returning on error, sometimes not. Is
the memset required at all?
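
One way to make it consistent is a single cleanup path that clears the filter once; a sketch only:

    	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
    	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
    		rte_flow_error_set(error, EINVAL,
    			RTE_FLOW_ERROR_TYPE_ITEM,
    			item, "Not supported by ntuple filter");
    		goto error;
    	}
    	/* ... rest of the parsing ... */

    error:
    	memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
    	return -rte_errno;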

> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM,
> +			item, "Not supported by ntuple filter");
> +		return -rte_errno;
> +	}
> +
> +	/* get the TCP/UDP info */
> +	if (!item->spec || !item->mask) {

For example, there is no memset of the filter here.

> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM,
> +			item, "Invalid ntuple mask");
> +		return -rte_errno;
> +	}
> +
> +	/*Not supported last point for range*/
> +	if (item->last) {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +			item, "Not supported last point for range");
> +		return -rte_errno;
> +
> +	}
> +
> +	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
> +		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
> +
> +		/**
> +		 * Only support src & dst ports, tcp flags,
> +		 * others should be masked.
> +		 */
> +		if (tcp_mask->hdr.sent_seq ||
> +		    tcp_mask->hdr.recv_ack ||
> +		    tcp_mask->hdr.data_off ||
> +		    tcp_mask->hdr.rx_win ||
> +		    tcp_mask->hdr.cksum ||
> +		    tcp_mask->hdr.tcp_urp) {
> +			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by ntuple filter");
> +			return -rte_errno;
> +		}
> +
> +		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
> +		filter->src_port_mask  = tcp_mask->hdr.src_port;
> +		if (tcp_mask->hdr.tcp_flags == 0xFF) {
> +			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
> +		} else if (!tcp_mask->hdr.tcp_flags) {
> +			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
> +		} else {
> +			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by ntuple filter");
> +			return -rte_errno;
> +		}
> +
> +		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
> +		filter->dst_port  = tcp_spec->hdr.dst_port;
> +		filter->src_port  = tcp_spec->hdr.src_port;
> +		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
> +	} else {
> +		udp_mask = (const struct rte_flow_item_udp *)item->mask;
> +
> +		/**
> +		 * Only support src & dst ports,
> +		 * others should be masked.
> +		 */
> +		if (udp_mask->hdr.dgram_len ||
> +		    udp_mask->hdr.dgram_cksum) {
> +			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM,
> +				item, "Not supported by ntuple filter");
> +			return -rte_errno;
> +		}
> +
> +		filter->dst_port_mask = udp_mask->hdr.dst_port;
> +		filter->src_port_mask = udp_mask->hdr.src_port;
> +
> +		udp_spec = (const struct rte_flow_item_udp *)item->spec;
> +		filter->dst_port = udp_spec->hdr.dst_port;
> +		filter->src_port = udp_spec->hdr.src_port;
> +	}
> +
> +	/* check if the next not void item is END */
> +	index++;
> +	NEXT_ITEM_OF_PATTERN(item, pattern, index);
> +	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM,
> +			item, "Not supported by ntuple filter");
> +		return -rte_errno;
> +	}
> +
> +	/* parse action */
> +	index = 0;
> +
> +	if (!actions) {

Although there is no harm, I would do the input check at the beginning of
the function, to avoid doing extra work if we hit this case.
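
For example, hoisting it next to the existing pattern check; a sketch:

    	if (!pattern) {
    		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
    				   NULL, "NULL pattern.");
    		return -rte_errno;
    	}
    	if (!actions) {
    		rte_flow_error_set(error, EINVAL,
    				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
    				   NULL, "NULL action.");
    		return -rte_errno;
    	}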

> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> +				   NULL, "NULL action.");
> +		return -rte_errno;
> +	}
> +
> +	/**
> +	 * n-tuple only supports forwarding,
> +	 * check if the first not void action is QUEUE.
> +	 */
> +	NEXT_ITEM_OF_ACTION(act, actions, index);
> +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION,
> +			item, "Not supported action.");
> +		return -rte_errno;
> +	}
> +	filter->queue =
> +		((const struct rte_flow_action_queue *)act->conf)->index;
> +
> +	/* check if the next not void item is END */
> +	index++;
> +	NEXT_ITEM_OF_ACTION(act, actions, index);
> +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION,
> +			act, "Not supported action.");
> +		return -rte_errno;
> +	}
> +
> +	/* parse attr */
> +	/* must be input direction */

It may be a good idea to check if attr is NULL.
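
A sketch of such a check, which could also live at the top of the function with the other input checks:

    	if (!attr) {
    		rte_flow_error_set(error, EINVAL,
    				   RTE_FLOW_ERROR_TYPE_ATTR,
    				   NULL, "NULL attribute.");
    		return -rte_errno;
    	}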

> +	if (!attr->ingress) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> +				   attr, "Only support ingress.");
> +		return -rte_errno;
> +	}
> +
> +	/* not supported */
> +	if (attr->egress) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> +				   attr, "Not support egress.");
> +		return -rte_errno;
> +	}
> +
> +	if (attr->priority > 0xFFFF) {
> +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> +				   attr, "Error priority.");
> +		return -rte_errno;
> +	}
> +	filter->priority = (uint16_t)attr->priority;

Should we check attr->group? Do we support groups?
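
If groups are unsupported, rejecting a non-zero group explicitly would make that visible to the application; a sketch:

    	if (attr->group) {
    		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
    		rte_flow_error_set(error, EINVAL,
    				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
    				   attr, "Not supported group.");
    		return -rte_errno;
    	}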

> +
> +	return 0;
> +}
> +

<...>
  
Zhao1, Wei Jan. 11, 2017, 8:27 a.m. UTC | #4
Hi, Yigit

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Saturday, January 7, 2017 12:56 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v2 11/18] net/ixgbe: parse n-tuple filter
> 
> On 12/30/2016 7:53 AM, Wei Zhao wrote:
> > Add a rule validation function: check if the rule is an n-tuple rule,
> > and get the n-tuple info.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> >
> > ---
> >
> > v2: add new error set function
> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.c | 414 ++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 409 insertions(+), 5 deletions(-)
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> > index 0de1318..198cc4b 100644
> > --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> > +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> > @@ -388,6 +388,24 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
> >  					 struct rte_eth_udp_tunnel *udp_tunnel);
> 
> <...>
> 
> > +
> > +/**
> > + * Parse the rule to see if it is a n-tuple rule.
> > + * And get the n-tuple filter info BTW.
> > + */
> 
> It would be nice to comment here on the valid/expected pattern values
> (spec/mask/last). Otherwise it is hard to decode from the code; it is also
> good to document the intention, which makes things easier if there is any defect.
> 

I will do as you suggest in v3.

> Also valid actions.
> 
> > +static int
> > +cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> > +			 const struct rte_flow_item pattern[],
> > +			 const struct rte_flow_action actions[],
> > +			 struct rte_eth_ntuple_filter *filter,
> > +			 struct rte_flow_error *error)
> > +{
> > +	const struct rte_flow_item *item;
> > +	const struct rte_flow_action *act;
> > +	const struct rte_flow_item_ipv4 *ipv4_spec;
> > +	const struct rte_flow_item_ipv4 *ipv4_mask;
> > +	const struct rte_flow_item_tcp *tcp_spec;
> > +	const struct rte_flow_item_tcp *tcp_mask;
> > +	const struct rte_flow_item_udp *udp_spec;
> > +	const struct rte_flow_item_udp *udp_mask;
> > +	uint32_t index;
> > +
> > +	if (!pattern) {
> > +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > +				   NULL, "NULL pattern.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/* parse pattern */
> > +	index = 0;
> > +
> > +	/* the first not void item can be MAC or IPv4 */
> > +	NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +
> > +	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> > +	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Not supported by ntuple filter");
> > +		return -rte_errno;
> > +	}
> > +	/* Skip Ethernet */
> > +	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
> > +		/*Not supported last point for range*/
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +				item, "Not supported last point for range");
> > +			return -rte_errno;
> > +
> > +		}
> > +		/* if the first item is MAC, the content should be NULL */
> > +		if (item->spec || item->mask) {
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by ntuple filter");
> > +			return -rte_errno;
> > +		}
> > +		/* check if the next not void item is IPv4 */
> > +		index++;
> > +		NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
> > +			rte_flow_error_set(error,
> > +			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Not supported by ntuple filter");
> 
> Wrong indentation.

I will do as you suggest in v3.

> 
> > +			return -rte_errno;
> > +		}
> > +	}
> > +
> > +	/* get the IPv4 info */
> > +	if (!item->spec || !item->mask) {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Invalid ntuple mask");
> > +		return -rte_errno;
> > +	}
> > +	/*Not supported last point for range*/
> > +	if (item->last) {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +			item, "Not supported last point for range");
> > +		return -rte_errno;
> > +
> > +	}
> > +
> > +	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
> > +	/**
> > +	 * Only support src & dst addresses, protocol,
> > +	 * others should be masked.
> > +	 */
> > +	if (ipv4_mask->hdr.version_ihl ||
> > +	    ipv4_mask->hdr.type_of_service ||
> > +	    ipv4_mask->hdr.total_length ||
> > +	    ipv4_mask->hdr.packet_id ||
> > +	    ipv4_mask->hdr.fragment_offset ||
> > +	    ipv4_mask->hdr.time_to_live ||
> > +	    ipv4_mask->hdr.hdr_checksum) {
> > +			rte_flow_error_set(error,
> > +			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Not supported by ntuple filter");
> > +		return -rte_errno;
> > +	}
> > +
> > +	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
> > +	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
> > +	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
> > +
> > +	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
> > +	filter->dst_ip = ipv4_spec->hdr.dst_addr;
> > +	filter->src_ip = ipv4_spec->hdr.src_addr;
> > +	filter->proto  = ipv4_spec->hdr.next_proto_id;
> > +
> > +	/* check if the next not void item is TCP or UDP */
> > +	index++;
> > +	NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
> > +	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
> > +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> 
> Sometimes the filter is memset before returning on error, sometimes not. Is
> the memset required at all?

Not all of them are necessary. At the beginning of the function the filter has not been configured with any value yet, so it does not need a memset there.
> 
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Not supported by ntuple filter");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/* get the TCP/UDP info */
> > +	if (!item->spec || !item->mask) {
> 
> For example, there is no memset of the filter here.
> 
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Invalid ntuple mask");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/*Not supported last point for range*/
> > +	if (item->last) {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +			item, "Not supported last point for range");
> > +		return -rte_errno;
> > +
> > +	}
> > +
> > +	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
> > +		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
> > +
> > +		/**
> > +		 * Only support src & dst ports, tcp flags,
> > +		 * others should be masked.
> > +		 */
> > +		if (tcp_mask->hdr.sent_seq ||
> > +		    tcp_mask->hdr.recv_ack ||
> > +		    tcp_mask->hdr.data_off ||
> > +		    tcp_mask->hdr.rx_win ||
> > +		    tcp_mask->hdr.cksum ||
> > +		    tcp_mask->hdr.tcp_urp) {
> > +			memset(filter, 0, sizeof(struct
> rte_eth_ntuple_filter));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by ntuple filter");
> > +			return -rte_errno;
> > +		}
> > +
> > +		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
> > +		filter->src_port_mask  = tcp_mask->hdr.src_port;
> > +		if (tcp_mask->hdr.tcp_flags == 0xFF) {
> > +			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
> > +		} else if (!tcp_mask->hdr.tcp_flags) {
> > +			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
> > +		} else {
> > +			memset(filter, 0, sizeof(struct
> rte_eth_ntuple_filter));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by ntuple filter");
> > +			return -rte_errno;
> > +		}
> > +
> > +		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
> > +		filter->dst_port  = tcp_spec->hdr.dst_port;
> > +		filter->src_port  = tcp_spec->hdr.src_port;
> > +		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
> > +	} else {
> > +		udp_mask = (const struct rte_flow_item_udp *)item->mask;
> > +
> > +		/**
> > +		 * Only support src & dst ports,
> > +		 * others should be masked.
> > +		 */
> > +		if (udp_mask->hdr.dgram_len ||
> > +		    udp_mask->hdr.dgram_cksum) {
> > +			memset(filter, 0, sizeof(struct
> rte_eth_ntuple_filter));
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ITEM,
> > +				item, "Not supported by ntuple filter");
> > +			return -rte_errno;
> > +		}
> > +
> > +		filter->dst_port_mask = udp_mask->hdr.dst_port;
> > +		filter->src_port_mask = udp_mask->hdr.src_port;
> > +
> > +		udp_spec = (const struct rte_flow_item_udp *)item->spec;
> > +		filter->dst_port = udp_spec->hdr.dst_port;
> > +		filter->src_port = udp_spec->hdr.src_port;
> > +	}
> > +
> > +	/* check if the next not void item is END */
> > +	index++;
> > +	NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
> > +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ITEM,
> > +			item, "Not supported by ntuple filter");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/* parse action */
> > +	index = 0;
> > +
> > +	if (!actions) {
> 
> Although there is no harm, I would do the input check at the beginning of the
> function, to avoid doing extra work if we hit this case.

I will do as you suggest in v3.

> 
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> > +				   NULL, "NULL action.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/**
> > +	 * n-tuple only supports forwarding,
> > +	 * check if the first not void action is QUEUE.
> > +	 */
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
> > +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > +			item, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +	filter->queue =
> > +		((const struct rte_flow_action_queue *)act->conf)->index;
> > +
> > +	/* check if the next not void item is END */
> > +	index++;
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > +			act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/* parse attr */
> > +	/* must be input direction */
> 
> It may be a good idea to check if attr is NULL.

I will do as you suggest in v3, adding it at the beginning of the function.
> 
> > +	if (!attr->ingress) {
> > +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> > +				   attr, "Only support ingress.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	/* not supported */
> > +	if (attr->egress) {
> > +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> > +				   attr, "Not support egress.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	if (attr->priority > 0xFFFF) {
> > +		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> > +				   attr, "Error priority.");
> > +		return -rte_errno;
> > +	}
> > +	filter->priority = (uint16_t)attr->priority;
> 
> Should we check attr->group? Do we support groups?

No, we do not.
> 
> > +
> > +	return 0;
> > +}
> > +
> 
> <...>
  

Patch

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 0de1318..198cc4b 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -388,6 +388,24 @@  static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 					 struct rte_eth_udp_tunnel *udp_tunnel);
 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[],
+					struct rte_eth_ntuple_filter *filter,
+					struct rte_flow_error *error);
+static int
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[],
+					struct rte_eth_ntuple_filter *filter,
+					struct rte_flow_error *error);
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error);
 /*
@@ -769,7 +787,7 @@  static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /	\
 		sizeof(rte_ixgbevf_stats_strings[0]))
 static const struct rte_flow_ops ixgbe_flow_ops = {
-	NULL,
+	ixgbe_flow_validate,
 	NULL,
 	NULL,
 	ixgbe_flow_flush,
@@ -8072,6 +8090,390 @@  ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static inline uint32_t
+rte_be_to_cpu_24(uint32_t x)
+{
+	return  ((x & 0x000000ffUL) << 16) |
+		(x & 0x0000ff00UL) |
+		((x & 0x00ff0000UL) >> 16);
+}
+
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+	do {		\
+		item = pattern + index;\
+		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+		index++;				\
+		item = pattern + index;		\
+		}						\
+	} while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+	do {								\
+		act = actions + index;					\
+		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+		index++;					\
+		act = actions + index;				\
+		}							\
+	} while (0)
+
+/**
+ * Please aware there's an asumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item pattern[],
+			 const struct rte_flow_action actions[],
+			 struct rte_eth_ntuple_filter *filter,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	uint32_t index;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	/* parse pattern */
+	index = 0;
+
+	/* the first not void item can be MAC or IPv4 */
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+	/* Skip Ethernet */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+
+		}
+		/* if the first item is MAC, the content should be NULL */
+		if (item->spec || item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		index++;
+		NEXT_ITEM_OF_PATTERN(item, pattern, index);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+	}
+
+	/* get the IPv4 info */
+	if (!item->spec || !item->mask) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid ntuple mask");
+		return -rte_errno;
+	}
+	/*Not supported last point for range*/
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		return -rte_errno;
+
+	}
+
+	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+	/**
+	 * Only support src & dst addresses, protocol,
+	 * others should be masked.
+	 */
+	if (ipv4_mask->hdr.version_ihl ||
+	    ipv4_mask->hdr.type_of_service ||
+	    ipv4_mask->hdr.total_length ||
+	    ipv4_mask->hdr.packet_id ||
+	    ipv4_mask->hdr.fragment_offset ||
+	    ipv4_mask->hdr.time_to_live ||
+	    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+	filter->dst_ip = ipv4_spec->hdr.dst_addr;
+	filter->src_ip = ipv4_spec->hdr.src_addr;
+	filter->proto  = ipv4_spec->hdr.next_proto_id;
+
+	/* check if the next not void item is TCP or UDP */
+	index++;
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+	/* get the TCP/UDP info */
+	if (!item->spec || !item->mask) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid ntuple mask");
+		return -rte_errno;
+	}
+
+	/*Not supported last point for range*/
+	if (item->last) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		return -rte_errno;
+
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+		/**
+		 * Only support src & dst ports, tcp flags,
+		 * others should be masked.
+		 */
+		if (tcp_mask->hdr.sent_seq ||
+		    tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off ||
+		    tcp_mask->hdr.rx_win ||
+		    tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+		filter->src_port_mask  = tcp_mask->hdr.src_port;
+		if (tcp_mask->hdr.tcp_flags == 0xFF) {
+			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else if (!tcp_mask->hdr.tcp_flags) {
+			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+		filter->dst_port  = tcp_spec->hdr.dst_port;
+		filter->src_port  = tcp_spec->hdr.src_port;
+		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+	} else {
+		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+		/**
+		 * Only support src & dst ports,
+		 * others should be masked.
+		 */
+		if (udp_mask->hdr.dgram_len ||
+		    udp_mask->hdr.dgram_cksum) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_port_mask = udp_mask->hdr.dst_port;
+		filter->src_port_mask = udp_mask->hdr.src_port;
+
+		udp_spec = (const struct rte_flow_item_udp *)item->spec;
+		filter->dst_port = udp_spec->hdr.dst_port;
+		filter->src_port = udp_spec->hdr.src_port;
+	}
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_PATTERN(item, pattern, index);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+	/* parse action */
+	index = 0;
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	/**
+	 * n-tuple only supports forwarding,
+	 * check if the first not void action is QUEUE.
+	 */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			item, "Not supported action.");
+		return -rte_errno;
+	}
+	filter->queue =
+		((const struct rte_flow_action_queue *)act->conf)->index;
+
+	/* check if the next not void item is END */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Error priority.");
+		return -rte_errno;
+	}
+	filter->priority = (uint16_t)attr->priority;
+
+	return 0;
+}
+
+/* a specific function for ixgbe because the flags is specific */
+static int
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			  const struct rte_flow_item pattern[],
+			  const struct rte_flow_action actions[],
+			  struct rte_eth_ntuple_filter *filter,
+			  struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+	if (ret)
+		return ret;
+
+	/* Ixgbe doesn't support tcp flags. */
+	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+	/* Ixgbe doesn't support many priorities. */
+	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL, "Priority not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+		filter->priority < IXGBE_5TUPLE_MIN_PRI)
+		return -rte_errno;
+
+	/* fixed value for ixgbe */
+	filter->flags = RTE_5TUPLE_FLAGS;
+	return 0;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checkes the format. Don't guarantee the rule can be programmed into
+ * the HW. Because there can be no enough room for the rule.
+ */
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	struct rte_eth_ntuple_filter ntuple_filter;
+	int ret;
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = ixgbe_parse_ntuple_filter(attr, pattern,
+				actions, &ntuple_filter, error);
+	if (!ret)
+		return 0;
+
+	return ret;
+}
+
 /*  Destroy all flow rules associated with a port on ixgbe. */
 static int
 ixgbe_flow_flush(struct rte_eth_dev *dev,
@@ -8085,15 +8487,17 @@  ixgbe_flow_flush(struct rte_eth_dev *dev,
 
 	ret = ixgbe_clear_all_fdir_filter(dev);
 	if (ret < 0) {
-		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
-					NULL, "Failed to flush rule");
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE,
+				NULL, "Failed to flush rule");
 		return ret;
 	}
 
 	ret = ixgbe_clear_all_l2_tn_filter(dev);
 	if (ret < 0) {
-		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
-					NULL, "Failed to flush rule");
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE,
+				NULL, "Failed to flush rule");
 		return ret;
 	}