diff mbox series

[v1,2/2] net/ice: support inner/outer L2/L3 field for FDIR

Message ID 20200928063146.668003-3-zhirun.yan@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers show
Series support FDIR inner/outer field for tunnel packet | expand

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/travis-robot success Travis build: passed
ci/checkpatch success coding style OK

Commit Message

Zhirun Yan Sept. 28, 2020, 6:31 a.m. UTC
Distinguish inner/outer fields for parse pattern. So FDIR for tunnel
can be more flexible. Enable VXLAN inner/outer L3/L4 different fields
for FDIR.

Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
---
 drivers/net/ice/ice_fdir_filter.c | 59 +++++++++++++++++++++++++++++--
 1 file changed, 56 insertions(+), 3 deletions(-)

Comments

Cao, Yahui Oct. 15, 2020, 9:28 a.m. UTC | #1
> -----Original Message-----
> From: Yan, Zhirun <zhirun.yan@intel.com>
> Sent: Monday, September 28, 2020 2:32 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; dev@dpdk.org
> Cc: Cao, Yahui <yahui.cao@intel.com>; Wang, Xiao W
> <xiao.w.wang@intel.com>; Su, Simei <simei.su@intel.com>; Guo, Junfeng
> <junfeng.guo@intel.com>; Yan, Zhirun <zhirun.yan@intel.com>
> Subject: [PATCH v1 2/2] net/ice: support inner/outer L2/L3 field for FDIR
> 
> Distinguish inner/outer fields for parse pattern. So FDIR for tunnel
> can be more flexible. Enable VXLAN inner/outer L3/L4 different fields
> for FDIR.
> 
> Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
> ---
>  drivers/net/ice/ice_fdir_filter.c | 59 +++++++++++++++++++++++++++++--
>  1 file changed, 56 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
> index 175abcdd5..961528d17 100644
> --- a/drivers/net/ice/ice_fdir_filter.c
> +++ b/drivers/net/ice/ice_fdir_filter.c
> @@ -56,6 +56,11 @@
>  	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
> 
>  #define ICE_FDIR_INSET_VXLAN_IPV4 (\
> +	ICE_FDIR_INSET_ETH | \
> +	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
> +	ICE_INSET_IPV4_TOS | \
> +	ICE_INSET_UDP_DST_PORT | \
> +	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_SMAC | \
>  	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
> 
>  #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
> @@ -907,6 +912,7 @@ ice_fdir_input_set_parse(uint64_t inset, enum
> ice_flow_field *field)
>  	};
>  	static const struct ice_inset_map ice_inset_map[] = {
>  		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
> +		{ICE_INSET_SMAC, ICE_FLOW_FIELD_IDX_ETH_SA},
>  		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
>  		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
>  		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
> @@ -1655,6 +1661,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  	uint32_t vtc_flow_cpu;
>  	uint16_t ether_type;
>  	enum rte_flow_item_type next_type;
> +	bool is_outer_part = true;
> +
> +	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
> +			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
> +			break;
> +		}
> +	}
> 
>  	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
>  		if (item->last) {
> @@ -1672,7 +1686,25 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  			eth_mask = item->mask;
>  			next_type = (item + 1)->type;
> 
> -			if (eth_spec && eth_mask) {
> +			if (!(eth_spec && eth_mask))
> +				break;
> +
> +			/* handle outer L2 fields */
> +			if (is_outer_part && tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) {
> +				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
> +					filter->outer_input_set |= ICE_INSET_DMAC;
> +					rte_memcpy(&filter->input.ext_data_outer.dst_mac,
> +						   &eth_spec->dst,
> +						   RTE_ETHER_ADDR_LEN);
> +				}
> +
> +				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
> +					filter->outer_input_set |= ICE_INSET_SMAC;
> +					rte_memcpy(&filter->input.ext_data_outer.src_mac,
> +						   &eth_spec->src,
> +						   RTE_ETHER_ADDR_LEN);
> +				}
> +			} else {
>  				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
>  					input_set |= ICE_INSET_DMAC;
>  					rte_memcpy(&filter->input.ext_data.dst_mac,
> @@ -1714,7 +1746,27 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  			ipv4_spec = item->spec;
>  			ipv4_mask = item->mask;
> 
> -			if (ipv4_spec && ipv4_mask) {
> +			if (!(ipv4_spec && ipv4_mask))
> +				break;
> +
[Cao, Yahui] 
It seems below code has quite some duplication with code within the else condition
 Can you use a pointer *input_set and *ip to point to the different input_set bits and input_set value,
So that existing code can be reused.
> +			/* handle outer L3 fields */
> +			if (is_outer_part && tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) {
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> +					filter->outer_input_set |= ICE_INSET_IPV4_DST;
> +					filter->input.ip_outer.v4.dst_ip =
> +						ipv4_spec->hdr.dst_addr;
> +				}
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> +					filter->outer_input_set |= ICE_INSET_IPV4_SRC;
> +					filter->input.ip_outer.v4.src_ip =
> +						ipv4_spec->hdr.src_addr;
> +				}
> +				if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
> +					input_set |= ICE_INSET_IPV4_TOS;
[Cao, Yahui] 
Why use input_set instead of filter->outer_input_set in the VXLAN outer section ?

> +					filter->input.ip_outer.v4.tos =
> +						ipv4_spec->hdr.type_of_service;
> +				}
> +			} else {
>  				/* Check IPv4 mask and update input set */
>  				if (ipv4_mask->hdr.version_ihl ||
>  				    ipv4_mask->hdr.total_length ||
> @@ -1944,6 +1996,8 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  			break;
>  		case RTE_FLOW_ITEM_TYPE_VXLAN:
>  			l3 = RTE_FLOW_ITEM_TYPE_END;
> +			is_outer_part = false;
> +
>  			vxlan_spec = item->spec;
>  			vxlan_mask = item->mask;
> 
> @@ -1955,7 +2009,6 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
>  				return -rte_errno;
>  			}
> 
> -			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
>  			break;
>  		case RTE_FLOW_ITEM_TYPE_GTPU:
>  			l3 = RTE_FLOW_ITEM_TYPE_END;
> --
> 2.25.1
Zhirun Yan Nov. 2, 2020, 8:23 a.m. UTC | #2
> -----Original Message-----
> From: Cao, Yahui
> Sent: Thursday, October 15, 2020 5:29 PM
> To: Yan, Zhirun <zhirun.yan@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> dev@dpdk.org
> Cc: Wang, Xiao W <xiao.w.wang@intel.com>; Su, Simei
> <simei.su@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> Subject: RE: [PATCH v1 2/2] net/ice: support inner/outer L2/L3 field for FDIR
> 
> 
> 
> > -----Original Message-----
> > From: Yan, Zhirun <zhirun.yan@intel.com>
> > Sent: Monday, September 28, 2020 2:32 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; dev@dpdk.org
> > Cc: Cao, Yahui <yahui.cao@intel.com>; Wang, Xiao W
> > <xiao.w.wang@intel.com>; Su, Simei <simei.su@intel.com>; Guo, Junfeng
> > <junfeng.guo@intel.com>; Yan, Zhirun <zhirun.yan@intel.com>
> > Subject: [PATCH v1 2/2] net/ice: support inner/outer L2/L3 field for
> > FDIR
> >
> > Distinguish inner/outer fields for parse pattern. So FDIR for tunnel
> > can be more flexible. Enable VXLAN inner/outer L3/L4 different fields
> > for FDIR.
> >
> > Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
> > ---
> >  drivers/net/ice/ice_fdir_filter.c | 59
> > +++++++++++++++++++++++++++++--
> >  1 file changed, 56 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_fdir_filter.c
> > b/drivers/net/ice/ice_fdir_filter.c
> > index 175abcdd5..961528d17 100644
> > --- a/drivers/net/ice/ice_fdir_filter.c
> > +++ b/drivers/net/ice/ice_fdir_filter.c
> > @@ -56,6 +56,11 @@
> >  	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
> >
> >  #define ICE_FDIR_INSET_VXLAN_IPV4 (\
> > +	ICE_FDIR_INSET_ETH | \
> > +	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
> > +	ICE_INSET_IPV4_TOS | \
> > +	ICE_INSET_UDP_DST_PORT | \
> > +	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_SMAC | \
> >  	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
> >
> >  #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\ @@ -907,6 +912,7 @@
> > ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
> >  	};
> >  	static const struct ice_inset_map ice_inset_map[] = {
> >  		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
> > +		{ICE_INSET_SMAC, ICE_FLOW_FIELD_IDX_ETH_SA},
> >  		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
> >  		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
> >  		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, @@
> -1655,6
> > +1661,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter
> *ad,
> >  	uint32_t vtc_flow_cpu;
> >  	uint16_t ether_type;
> >  	enum rte_flow_item_type next_type;
> > +	bool is_outer_part = true;
> > +
> > +	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END;
> item++)
> > {
> > +		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
> > +			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
> > +			break;
> > +		}
> > +	}
> >
> >  	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END;
> item++) {
> >  		if (item->last) {
> > @@ -1672,7 +1686,25 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
> >  			eth_mask = item->mask;
> >  			next_type = (item + 1)->type;
> >
> > -			if (eth_spec && eth_mask) {
> > +			if (!(eth_spec && eth_mask))
> > +				break;
> > +
> > +			/* handle outer L2 fields */
> > +			if (is_outer_part && tunnel_type ==
> ICE_FDIR_TUNNEL_TYPE_VXLAN) {
> > +				if (!rte_is_zero_ether_addr(&eth_mask-
> >dst))
> > {
> > +					filter->outer_input_set |=
> ICE_INSET_DMAC;
> > +					rte_memcpy(&filter-
> >input.ext_data_outer.dst_mac,
> > +						   &eth_spec->dst,
> > +						   RTE_ETHER_ADDR_LEN);
> > +				}
> > +
> > +				if (!rte_is_zero_ether_addr(&eth_mask-
> >src)) {
> > +					filter->outer_input_set
> |=ICE_INSET_SMAC;
> > +					rte_memcpy(&filter-
> >input.ext_data_outer.src_mac,
> > +						   &eth_spec->src,
> > +						   RTE_ETHER_ADDR_LEN);
> > +				}
> > +			} else {
> >  				if (!rte_is_zero_ether_addr(&eth_mask-
> >dst))
> > {
> >  					input_set |= ICE_INSET_DMAC;
> >  					rte_memcpy(&filter-
> >input.ext_data.dst_mac,
> > @@ -1714,7 +1746,27 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
> >  			ipv4_spec = item->spec;
> >  			ipv4_mask = item->mask;
> >
> > -			if (ipv4_spec && ipv4_mask) {
> > +			if (!(ipv4_spec && ipv4_mask))
> > +				break;
> > +
> [Cao, Yahui]
> It seems below code has quite some duplication with code within the else
> condition  Can you use a pointer *input_set and *ip to point to the different
> input_set bits and input_set value, So that existing code can be reused.

Yes, you are right. And I will refactor this part in V2. Thanks.

> > +			/* handle outer L3 fields */
> > +			if (is_outer_part && tunnel_type ==
> ICE_FDIR_TUNNEL_TYPE_VXLAN) {
> > +				if (ipv4_mask->hdr.dst_addr ==
> UINT32_MAX) {
> > +					filter->outer_input_set |=
> ICE_INSET_IPV4_DST;
> > +					filter->input.ip_outer.v4.dst_ip =
> > +						ipv4_spec->hdr.dst_addr;
> > +				}
> > +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> {
> > +					filter->outer_input_set |=
> ICE_INSET_IPV4_SRC;
> > +					filter->input.ip_outer.v4.src_ip =
> > +						ipv4_spec->hdr.src_addr;
> > +				}
> > +				if (ipv4_mask->hdr.type_of_service ==
> UINT8_MAX) {
> > +					input_set |= ICE_INSET_IPV4_TOS;
> [Cao, Yahui]
> Why use input_set instead of filter->outer_input_set in the VXLAN outer
> section ?
> 

Typo, will fix it in V2. Thanks.

> > +					filter->input.ip_outer.v4.tos =
> > +						ipv4_spec-
> >hdr.type_of_service;
> > +				}
> > +			} else {
> >  				/* Check IPv4 mask and update input set */
> >  				if (ipv4_mask->hdr.version_ihl ||
> >  				    ipv4_mask->hdr.total_length || @@ -
> 1944,6 +1996,8 @@
> > ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
> >  			break;
> >  		case RTE_FLOW_ITEM_TYPE_VXLAN:
> >  			l3 = RTE_FLOW_ITEM_TYPE_END;
> > +			is_outer_part = false;
> > +
> >  			vxlan_spec = item->spec;
> >  			vxlan_mask = item->mask;
> >
> > @@ -1955,7 +2009,6 @@ ice_fdir_parse_pattern(__rte_unused struct
> ice_adapter *ad,
> >  				return -rte_errno;
> >  			}
> >
> > -			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
> >  			break;
> >  		case RTE_FLOW_ITEM_TYPE_GTPU:
> >  			l3 = RTE_FLOW_ITEM_TYPE_END;
> > --
> > 2.25.1
diff mbox series

Patch

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 175abcdd5..961528d17 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -56,6 +56,11 @@ 
 	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
 
 #define ICE_FDIR_INSET_VXLAN_IPV4 (\
+	ICE_FDIR_INSET_ETH | \
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_IPV4_TOS | \
+	ICE_INSET_UDP_DST_PORT | \
+	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_SMAC | \
 	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
 
 #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
@@ -907,6 +912,7 @@  ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
 	};
 	static const struct ice_inset_map ice_inset_map[] = {
 		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
+		{ICE_INSET_SMAC, ICE_FLOW_FIELD_IDX_ETH_SA},
 		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
 		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
 		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
@@ -1655,6 +1661,14 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	uint32_t vtc_flow_cpu;
 	uint16_t ether_type;
 	enum rte_flow_item_type next_type;
+	bool is_outer_part = true;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
+			break;
+		}
+	}
 
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -1672,7 +1686,25 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			eth_mask = item->mask;
 			next_type = (item + 1)->type;
 
-			if (eth_spec && eth_mask) {
+			if (!(eth_spec && eth_mask))
+				break;
+
+			/* handle outer L2 fields */
+			if (is_outer_part && tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) {
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					filter->outer_input_set |= ICE_INSET_DMAC;
+					rte_memcpy(&filter->input.ext_data_outer.dst_mac,
+						   &eth_spec->dst,
+						   RTE_ETHER_ADDR_LEN);
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					filter->outer_input_set |= ICE_INSET_SMAC;
+					rte_memcpy(&filter->input.ext_data_outer.src_mac,
+						   &eth_spec->src,
+						   RTE_ETHER_ADDR_LEN);
+				}
+			} else {
 				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
 					input_set |= ICE_INSET_DMAC;
 					rte_memcpy(&filter->input.ext_data.dst_mac,
@@ -1714,7 +1746,27 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			ipv4_spec = item->spec;
 			ipv4_mask = item->mask;
 
-			if (ipv4_spec && ipv4_mask) {
+			if (!(ipv4_spec && ipv4_mask))
+				break;
+
+			/* handle outer L3 fields */
+			if (is_outer_part && tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) {
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					filter->outer_input_set |= ICE_INSET_IPV4_DST;
+					filter->input.ip_outer.v4.dst_ip =
+						ipv4_spec->hdr.dst_addr;
+				}
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					filter->outer_input_set |= ICE_INSET_IPV4_SRC;
+					filter->input.ip_outer.v4.src_ip =
+						ipv4_spec->hdr.src_addr;
+				}
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
+					filter->outer_input_set |= ICE_INSET_IPV4_TOS;
+					filter->input.ip_outer.v4.tos =
+						ipv4_spec->hdr.type_of_service;
+				}
+			} else {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
@@ -1944,6 +1996,8 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			l3 = RTE_FLOW_ITEM_TYPE_END;
+			is_outer_part = false;
+
 			vxlan_spec = item->spec;
 			vxlan_mask = item->mask;
 
@@ -1955,7 +2009,6 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 				return -rte_errno;
 			}
 
-			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GTPU:
 			l3 = RTE_FLOW_ITEM_TYPE_END;