[dpdk-dev,v3,2/5] net/i40e: parse QinQ pattern

Message ID 1490718059-380-3-git-send-email-bernard.iremonger@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit

Checks

Context                Check     Description
ci/Intel-compilation   fail      apply patch file failure
ci/checkpatch          warning   coding style issues

Commit Message

Iremonger, Bernard March 28, 2017, 4:20 p.m. UTC
  add QinQ pattern.
add i40e_flow_parse_qinq_pattern function.
add i40e_flow_parse_qinq_filter function.

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
 drivers/net/i40e/i40e_flow.c | 187 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 185 insertions(+), 2 deletions(-)
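
For context, the sketch below shows a caller-side rule that the new pattern is meant to accept: an ETH / VLAN / VLAN pattern with a PF plus queue tunnel action. It is not part of the patch; the helper name, MAC address, TCI values and queue index are illustrative only.

  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Hypothetical helper: offload a QinQ rule matching outer TCI 0x0010 and
   * inner TCI 0x0020 for one destination MAC, steering to queue 1 on the PF.
   */
  static struct rte_flow *
  create_qinq_flow(uint8_t port_id, struct rte_flow_error *err)
  {
  	static const struct rte_flow_attr attr = { .ingress = 1 };

  	struct rte_flow_item_eth eth_spec = {
  		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
  	};
  	struct rte_flow_item_eth eth_mask = {
  		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
  	};
  	/* Both TCIs fully specified, as i40e_flow_parse_qinq_pattern expects. */
  	struct rte_flow_item_vlan o_vlan_spec = { .tci = rte_cpu_to_be_16(0x0010) };
  	struct rte_flow_item_vlan i_vlan_spec = { .tci = rte_cpu_to_be_16(0x0020) };
  	struct rte_flow_item_vlan vlan_mask = { .tci = rte_cpu_to_be_16(0xffff) };

  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH,  .spec = &eth_spec,    .mask = &eth_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &o_vlan_spec, .mask = &vlan_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &i_vlan_spec, .mask = &vlan_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};

  	/* Tunnel actions only support PF, VF and QUEUE. */
  	struct rte_flow_action_queue queue = { .index = 1 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_PF },
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }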
  

Comments

Wenzhuo Lu March 29, 2017, 1:25 a.m. UTC | #1
Hi Bernard,

> -----Original Message-----
> From: Iremonger, Bernard
> Sent: Wednesday, March 29, 2017 12:21 AM
> To: dev@dpdk.org; Xing, Beilei; Wu, Jingjing
> Cc: Zhang, Helin; Lu, Wenzhuo; Iremonger, Bernard
> Subject: [PATCH v3 2/5] net/i40e: parse QinQ pattern
> 
> add QinQ pattern.
> add i40e_flow_parse_qinq_pattern function.
> add i40e_flow_parse_qinq_filter function.
> 
> Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
> ---
>  drivers/net/i40e/i40e_flow.c | 187 ++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 185 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> index be243e172..39b09ead5 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c

> +	/* Check specification and mask to get the filter type */
> +	if (vlan_spec && vlan_mask &&
The previous code has already checked that vlan_spec and vlan_mask are not NULL, so it seems unnecessary to check them again.

> +	    (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
The vlan_mask here is the inner VLAN mask; the outer VLAN mask is lost. Should we store the outer VLAN mask and check it as well?
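
As a rough illustration of this suggestion, the revised check could look something like the sketch below, assuming the item loop also saves o_vlan_mask and i_vlan_mask pointers alongside the existing o_vlan_spec/i_vlan_spec (sketch only, not the actual v4 code; only the fully-masked branch is shown):

	/* Sketch: o_vlan_mask/i_vlan_mask are assumed to be stored in the
	 * RTE_FLOW_ITEM_TYPE_VLAN case the same way the *_spec pointers are.
	 */
	if (o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK) &&
	    i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) {
		/* Both the outer and inner VLAN are fully specified. */
		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci) & I40E_TCI_MASK;
		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci) & I40E_TCI_MASK;
	} else {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "Invalid TCI mask for QinQ filter");
		return -rte_errno;
	}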

> +			/* There is an inner and outer vlan */
> +		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
> +			& I40E_TCI_MASK;
> +		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
> +			& I40E_TCI_MASK;
> +		if (i_eth_spec && i_eth_mask)
> +			filter->filter_type =
> +				I40E_TUNNEL_FILTER_CUSTOM_QINQ;
> +		else {
> +			rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   NULL,
> +					   "Invalid filter type");
> +			return -rte_errno;
> +		}
> +	} else if ((!vlan_spec && !vlan_mask) ||
> +		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
> +		if (i_eth_spec && i_eth_mask) {
Similar concern as above.

> +			filter->filter_type = I40E_TUNNEL_FILTER_CUSTOM_QINQ;
> +		} else {
> +			rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> +				   "Invalid filter type");
> +			return -rte_errno;
> +		}
> +	} else {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> +				   "Not supported by tunnel filter.");
> +		return -rte_errno;
> +	}
> +
> +	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
> +
> +	return 0;
> +}
  
Xing, Beilei March 29, 2017, 2:46 a.m. UTC | #2
Hi Bernard,

> -----Original Message-----
> From: Iremonger, Bernard
> Sent: Wednesday, March 29, 2017 12:21 AM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>
> Cc: Zhang, Helin <helin.zhang@intel.com>; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; Iremonger, Bernard
> <bernard.iremonger@intel.com>
> Subject: [PATCH v3 2/5] net/i40e: parse QinQ pattern
> 
> add QinQ pattern.
> add i40e_flow_parse_qinq_pattern function.
> add i40e_flow_parse_qinq_filter function.
> 
> Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
> ---
> [...]
>  static int
> +i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
> +			      const struct rte_flow_item *pattern,
> +			      struct rte_flow_error *error,
> +			      struct i40e_tunnel_filter_conf *filter) {
> +	const struct rte_flow_item *item = pattern;
> +	const struct rte_flow_item_eth *eth_spec;
> +	const struct rte_flow_item_eth *eth_mask;
> +	const struct rte_flow_item_eth *i_eth_spec = NULL;
> +	const struct rte_flow_item_eth *i_eth_mask = NULL;
> +	const struct rte_flow_item_vlan *vlan_spec = NULL;
> +	const struct rte_flow_item_vlan *vlan_mask = NULL;
> +	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
> +	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
> +
> +	enum rte_flow_item_type item_type;
> +	bool vlan_flag = 0;

It seems vlan_flag is not actually used in the end.

  
Xing, Beilei March 29, 2017, 6:16 a.m. UTC | #3
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Xing, Beilei
> Sent: Wednesday, March 29, 2017 10:46 AM
> To: Iremonger, Bernard <bernard.iremonger@intel.com>; dev@dpdk.org;
> Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Helin <helin.zhang@intel.com>; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v3 2/5] net/i40e: parse QinQ pattern
> 
> Hi Bernard,
> 
> > -----Original Message-----
> > From: Iremonger, Bernard
> > Sent: Wednesday, March 29, 2017 12:21 AM
> > To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> > <jingjing.wu@intel.com>
> > Cc: Zhang, Helin <helin.zhang@intel.com>; Lu, Wenzhuo
> > <wenzhuo.lu@intel.com>; Iremonger, Bernard
> > <bernard.iremonger@intel.com>
> > Subject: [PATCH v3 2/5] net/i40e: parse QinQ pattern
> >
> > add QinQ pattern.
> > add i40e_flow_parse_qinq_pattern function.
> > add i40e_flow_parse_qinq_filter function.
> >
> > Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
> > ---
> >  drivers/net/i40e/i40e_flow.c | 187
> > ++++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 185 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c index be243e172..39b09ead5 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> > @@ -1,7 +1,7 @@
> >  /*-
> >   *   BSD LICENSE
> >   *
> > - *   Copyright (c) 2016 Intel Corporation. All rights reserved.
> > + *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
> >   *
> >   *   Redistribution and use in source and binary forms, with or without
> >   *   modification, are permitted provided that the following conditions
> > @@ -128,6 +128,18 @@ static int i40e_flow_destroy_tunnel_filter(struct
> > i40e_pf *pf,  static int i40e_flow_flush_fdir_filter(struct i40e_pf
> > *pf);  static int i40e_flow_flush_ethertype_filter(struct i40e_pf
> > *pf);  static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
> > +static int
> > +i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> > +			      const struct rte_flow_attr *attr,
> > +			      const struct rte_flow_item pattern[],
> > +			      const struct rte_flow_action actions[],
> > +			      struct rte_flow_error *error,
> > +			      union i40e_filter_t *filter); static int
> > +i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
> > +			      const struct rte_flow_item *pattern,
> > +			      struct rte_flow_error *error,
> > +			      struct i40e_tunnel_filter_conf *filter);
> >
> >  const struct rte_flow_ops i40e_flow_ops = {
> >  	.validate = i40e_flow_validate,
> > @@ -318,6 +330,14 @@ static enum rte_flow_item_type pattern_mpls_4[]
> =
> > {
> >  	RTE_FLOW_ITEM_TYPE_END,
> >  };
> >
> > +/* Pattern matched QINQ */
> > +static enum rte_flow_item_type pattern_qinq_1[] = {
> > +	RTE_FLOW_ITEM_TYPE_ETH,
> > +	RTE_FLOW_ITEM_TYPE_VLAN,
> > +	RTE_FLOW_ITEM_TYPE_VLAN,
> > +	RTE_FLOW_ITEM_TYPE_END,
> > +};
> > +
> >  static struct i40e_valid_pattern i40e_supported_patterns[] = {
> >  	/* Ethertype */
> >  	{ pattern_ethertype, i40e_flow_parse_ethertype_filter }, @@ -348,6
> > +368,8 @@ static struct i40e_valid_pattern i40e_supported_patterns[] =
> > +{
> >  	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
> >  	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
> >  	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
> > +	/* QINQ */
> > +	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
> >  };
> >
> >  #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
> > @@ -1171,7 +1193,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev
> > *dev,
> >  	return 0;
> >  }
> >
> > -/* Parse to get the action info of a tunnle filter
> > +/* Parse to get the action info of a tunnel filter
> >   * Tunnel action only supports PF, VF and QUEUE.
> >   */
> >  static int
> > @@ -1748,6 +1770,167 @@ i40e_flow_parse_mpls_filter(struct
> rte_eth_dev
> > *dev,  }
> >
> >  static int
> > +i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
> > +			      const struct rte_flow_item *pattern,
> > +			      struct rte_flow_error *error,
> > +			      struct i40e_tunnel_filter_conf *filter) {
> > +	const struct rte_flow_item *item = pattern;
> > +	const struct rte_flow_item_eth *eth_spec;
> > +	const struct rte_flow_item_eth *eth_mask;
> > +	const struct rte_flow_item_eth *i_eth_spec = NULL;
> > +	const struct rte_flow_item_eth *i_eth_mask = NULL;
> > +	const struct rte_flow_item_vlan *vlan_spec = NULL;
> > +	const struct rte_flow_item_vlan *vlan_mask = NULL;
> > +	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
> > +	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
> > +
> > +	enum rte_flow_item_type item_type;
> > +	bool vlan_flag = 0;
> 
> It seems vlan_flag is not actually used in the end.

Please ignore this comment, sorry, I missed that vlan_flag is used to distinguish the outer and inner VLANs.
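
(For reference, the toggle in the submitted patch does distinguish the two VLAN items; annotated excerpt from i40e_flow_parse_qinq_pattern, comments added editorially:)

	if (!vlan_flag) {
		/* First VLAN item in the pattern: record the outer tag. */
		o_vlan_spec = vlan_spec;
		vlan_flag = 1;
	} else {
		/* Second VLAN item: record the inner tag. */
		i_vlan_spec = vlan_spec;
		vlan_flag = 0;
	}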

> 
> > +
> > +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM,
> > +					   item,
> > +					   "Not support range");
> > +			return -rte_errno;
> > +		}
> > +		item_type = item->type;
> > +		switch (item_type) {
> > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > +			eth_spec = (const struct rte_flow_item_eth *)item-
> > >spec;
> > +			eth_mask = (const struct rte_flow_item_eth *)item-
> > >mask;
> > +			if ((!eth_spec && eth_mask) ||
> > +			    (eth_spec && !eth_mask)) {
> > +				rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item,
> > +						   "Invalid ether spec/mask");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (eth_spec && eth_mask) {
> > +				/* DST address of inner MAC shouldn't be
> > masked.
> > +				 * SRC address of Inner MAC should be
> > masked.
> > +				 */
> > +				if (!is_broadcast_ether_addr(&eth_mask-
> > >dst) ||
> > +				    !is_zero_ether_addr(&eth_mask->src) ||
> > +				    eth_mask->type) {
> > +					rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item,
> > +						   "Invalid ether spec/mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				rte_memcpy(&filter->outer_mac,
> > +						   &eth_spec->dst,
> > +						   ETHER_ADDR_LEN);
> > +			}
> > +
> > +			i_eth_spec = eth_spec;
> > +			i_eth_mask = eth_mask;
> > +			break;
> > +		case RTE_FLOW_ITEM_TYPE_VLAN:
> > +			vlan_spec =
> > +				(const struct rte_flow_item_vlan *)item-
> > >spec;
> > +			vlan_mask =
> > +				(const struct rte_flow_item_vlan *)item-
> > >mask;
> > +
> > +			if (!(vlan_spec && vlan_mask)) {
> > +				rte_flow_error_set(error, EINVAL,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM,
> > +					   item,
> > +					   "Invalid vlan item");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (!vlan_flag) {
> > +				o_vlan_spec = vlan_spec;
> > +				vlan_flag = 1;
> > +			} else {
> > +				i_vlan_spec = vlan_spec;
> > +				vlan_flag = 0;
> > +			}
> > +			break;
> > +
> > +		default:
> > +			break;
> > +		}
> > +	}
> > +
> > +	/* Check specification and mask to get the filter type */
> > +	if (vlan_spec && vlan_mask &&
> > +	    (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
> > +			/* There is an inner and outer vlan */
> > +		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
> > +			& I40E_TCI_MASK;
> > +		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
> > +			& I40E_TCI_MASK;
> > +		if (i_eth_spec && i_eth_mask)
> > +			filter->filter_type =
> > +				I40E_TUNNEL_FILTER_CUSTOM_QINQ;
> > +		else {
> > +			rte_flow_error_set(error, EINVAL,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM,
> > +					   NULL,
> > +					   "Invalid filter type");
> > +			return -rte_errno;
> > +		}
> > +	} else if ((!vlan_spec && !vlan_mask) ||
> > +		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
> > +		if (i_eth_spec && i_eth_mask) {
> > +			filter->filter_type =
> > I40E_TUNNEL_FILTER_CUSTOM_QINQ;
> > +		} else {
> > +			rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> > +				   "Invalid filter type");
> > +			return -rte_errno;
> > +		}
> > +	} else {
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> > +				   "Not supported by tunnel filter.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> > +			      const struct rte_flow_attr *attr,
> > +			      const struct rte_flow_item pattern[],
> > +			      const struct rte_flow_action actions[],
> > +			      struct rte_flow_error *error,
> > +			      union i40e_filter_t *filter) {
> > +	struct i40e_tunnel_filter_conf *tunnel_filter =
> > +		&filter->consistent_tunnel_filter;
> > +	int ret;
> > +
> > +	ret = i40e_flow_parse_qinq_pattern(dev, pattern,
> > +					     error, tunnel_filter);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i40e_flow_parse_tunnel_action(dev, actions, error,
> > tunnel_filter);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i40e_flow_parse_attr(attr, error);
> > +	if (ret)
> > +		return ret;
> > +
> > +	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
> > +
> > +	return ret;
> > +}
> > +
> > +static int
> >  i40e_flow_validate(struct rte_eth_dev *dev,
> >  		   const struct rte_flow_attr *attr,
> >  		   const struct rte_flow_item pattern[],
> > --
> > 2.11.0
  
Iremonger, Bernard March 29, 2017, 3:10 p.m. UTC | #4
Hi Wenzhuo,

> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Wednesday, March 29, 2017 2:25 AM
> To: Iremonger, Bernard <bernard.iremonger@intel.com>; dev@dpdk.org;
> Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Helin <helin.zhang@intel.com>
> Subject: RE: [PATCH v3 2/5] net/i40e: parse QinQ pattern
> 
> Hi Bernard,
> 
> > -----Original Message-----
> > From: Iremonger, Bernard
> > Sent: Wednesday, March 29, 2017 12:21 AM
> > To: dev@dpdk.org; Xing, Beilei; Wu, Jingjing
> > Cc: Zhang, Helin; Lu, Wenzhuo; Iremonger, Bernard
> > Subject: [PATCH v3 2/5] net/i40e: parse QinQ pattern
> >
> > add QinQ pattern.
> > add i40e_flow_parse_qinq_pattern function.
> > add i40e_flow_parse_qinq_filter function.
> >
> > Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
> > ---
> >  drivers/net/i40e/i40e_flow.c | 187 ++++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 185 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> > index be243e172..39b09ead5 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> 
> > +	/* Check specification and mask to get the filter type */
> > +	if (vlan_spec && vlan_mask &&
> The previous code has already checked that vlan_spec and vlan_mask are not
> NULL, so it seems unnecessary to check them again.

I will remove this check.

> > +	    (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
> The vlan_mask here is the inner VLAN mask; the outer VLAN mask is lost.
> Should we store the outer VLAN mask and check it as well?

Yes, I will store and check both inner and outer vlan masks.
 
> > +			/* There is an inner and outer vlan */
> > +		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
> > +			& I40E_TCI_MASK;
> > +		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
> > +			& I40E_TCI_MASK;
> > +		if (i_eth_spec && i_eth_mask)
> > +			filter->filter_type =
> > +				I40E_TUNNEL_FILTER_CUSTOM_QINQ;
> > +		else {
> > +			rte_flow_error_set(error, EINVAL,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM,
> > +					   NULL,
> > +					   "Invalid filter type");
> > +			return -rte_errno;
> > +		}
> > +	} else if ((!vlan_spec && !vlan_mask) ||
> > +		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
> > +		if (i_eth_spec && i_eth_mask) {
> Similar concern as above.

I will change this in the same way as above.
  

> 
> > +			filter->filter_type = I40E_TUNNEL_FILTER_CUSTOM_QINQ;
> > +		} else {
> > +			rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> > +				   "Invalid filter type");
> > +			return -rte_errno;
> > +		}
> > +	} else {
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> > +				   "Not supported by tunnel filter.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
> > +
> > +	return 0;
> > +}

Regards,

Bernard.
  

Patch

diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index be243e172..39b09ead5 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -128,6 +128,18 @@  static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      struct rte_flow_error *error,
+			      union i40e_filter_t *filter);
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+			      const struct rte_flow_item *pattern,
+			      struct rte_flow_error *error,
+			      struct i40e_tunnel_filter_conf *filter);
 
 const struct rte_flow_ops i40e_flow_ops = {
 	.validate = i40e_flow_validate,
@@ -318,6 +330,14 @@  static enum rte_flow_item_type pattern_mpls_4[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+/* Pattern matched QINQ */
+static enum rte_flow_item_type pattern_qinq_1[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	/* Ethertype */
 	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
@@ -348,6 +368,8 @@  static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
 	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+	/* QINQ */
+	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
 
 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
@@ -1171,7 +1193,7 @@  i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
-/* Parse to get the action info of a tunnle filter
+/* Parse to get the action info of a tunnel filter
  * Tunnel action only supports PF, VF and QUEUE.
  */
 static int
@@ -1748,6 +1770,167 @@  i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
 }
 
 static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+			      const struct rte_flow_item *pattern,
+			      struct rte_flow_error *error,
+			      struct i40e_tunnel_filter_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_eth *i_eth_spec = NULL;
+	const struct rte_flow_item_eth *i_eth_mask = NULL;
+	const struct rte_flow_item_vlan *vlan_spec = NULL;
+	const struct rte_flow_item_vlan *vlan_mask = NULL;
+	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
+	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
+
+	enum rte_flow_item_type item_type;
+	bool vlan_flag = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			if ((!eth_spec && eth_mask) ||
+			    (eth_spec && !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ether spec/mask");
+				return -rte_errno;
+			}
+
+			if (eth_spec && eth_mask) {
+				/* DST address of inner MAC shouldn't be masked.
+				 * SRC address of Inner MAC should be masked.
+				 */
+				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+				    !is_zero_ether_addr(&eth_mask->src) ||
+				    eth_mask->type) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ether spec/mask");
+					return -rte_errno;
+				}
+
+				rte_memcpy(&filter->outer_mac,
+						   &eth_spec->dst,
+						   ETHER_ADDR_LEN);
+			}
+
+			i_eth_spec = eth_spec;
+			i_eth_mask = eth_mask;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec =
+				(const struct rte_flow_item_vlan *)item->spec;
+			vlan_mask =
+				(const struct rte_flow_item_vlan *)item->mask;
+
+			if (!(vlan_spec && vlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid vlan item");
+				return -rte_errno;
+			}
+
+			if (!vlan_flag) {
+				o_vlan_spec = vlan_spec;
+				vlan_flag = 1;
+			} else {
+				i_vlan_spec = vlan_spec;
+				vlan_flag = 0;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	/* Check specification and mask to get the filter type */
+	if (vlan_spec && vlan_mask &&
+	    (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+			/* There is an inner and outer vlan */
+		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
+			& I40E_TCI_MASK;
+		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
+			& I40E_TCI_MASK;
+		if (i_eth_spec && i_eth_mask)
+			filter->filter_type =
+				I40E_TUNNEL_FILTER_CUSTOM_QINQ;
+		else {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL,
+					   "Invalid filter type");
+			return -rte_errno;
+		}
+	} else if ((!vlan_spec && !vlan_mask) ||
+		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+		if (i_eth_spec && i_eth_mask) {
+			filter->filter_type = I40E_TUNNEL_FILTER_CUSTOM_QINQ;
+		} else {
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				   "Invalid filter type");
+			return -rte_errno;
+		}
+	} else {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				   "Not supported by tunnel filter.");
+		return -rte_errno;
+	}
+
+	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      struct rte_flow_error *error,
+			      union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_qinq_pattern(dev, pattern,
+					     error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
+static int
 i40e_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_attr *attr,
 		   const struct rte_flow_item pattern[],