[v5,3/5] net/ice: support switch flow for specific L4 type

Message ID 20200629051030.3541-4-wei.zhao1@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: enable more PPPoE packet type for switch

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Zhao1, Wei June 29, 2020, 5:10 a.m. UTC
  This patch adds more specific tunnel types for IPv4/IPv6 packets.
It enables the TCP/UDP layer of IPv4/IPv6 to be matched as the L4
payload, but without the L4 dst/src port numbers as part of the input
set for the switch filter rule.

Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: stable@dpdk.org

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/ice_switch_filter.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
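
Usage note (not part of the patch): with the tunnel types selected below
(ICE_SW_IPV4_TCP, ICE_SW_IPV6_UDP, etc.), an application can install such a
rule through the generic rte_flow API by giving a TCP/UDP item with no
spec/mask, so only the protocol layers, not the port numbers, enter the
input set. The following is a minimal sketch under assumptions of my own:
the source address match, queue index and port id are illustrative values,
not taken from the patch.

/* Illustrative sketch only: match IPv4 + TCP with no L4 port values in the
 * input set; this patch maps such a pattern to the ICE_SW_IPV4_TCP type.
 */
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_flow.h>

static struct rte_flow *
create_ipv4_tcp_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match on the IPv4 source address only (example value). */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		/* TCP item present but with no spec/mask: the L4 protocol
		 * type is part of the rule, the dst/src ports are not.
		 */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}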
  

Patch

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index c607e8d17..7d1cd98f5 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -474,8 +474,10 @@  ice_switch_inset_get(const struct rte_flow_item pattern[],
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
 	bool pppoe_prot_valid = 0;
-	bool profile_rule = 0;
 	bool tunnel_valid = 0;
+	bool profile_rule = 0;
+	bool nvgre_valid = 0;
+	bool vxlan_valid = 0;
 	bool ipv6_valiad = 0;
 	bool ipv4_valiad = 0;
 	bool udp_valiad = 0;
@@ -923,7 +925,7 @@  ice_switch_inset_get(const struct rte_flow_item pattern[],
 					   "Invalid VXLAN item");
 				return 0;
 			}
-
+			vxlan_valid = 1;
 			tunnel_valid = 1;
 			if (vxlan_spec && vxlan_mask) {
 				list[t].type = ICE_VXLAN;
@@ -960,6 +962,7 @@  ice_switch_inset_get(const struct rte_flow_item pattern[],
 					   "Invalid NVGRE item");
 				return 0;
 			}
+			nvgre_valid = 1;
 			tunnel_valid = 1;
 			if (nvgre_spec && nvgre_mask) {
 				list[t].type = ICE_NVGRE;
@@ -1325,6 +1328,21 @@  ice_switch_inset_get(const struct rte_flow_item pattern[],
 			*tun_type = ICE_SW_TUN_PPPOE;
 	}
 
+	if (*tun_type == ICE_NON_TUN) {
+		if (vxlan_valid)
+			*tun_type = ICE_SW_TUN_VXLAN;
+		else if (nvgre_valid)
+			*tun_type = ICE_SW_TUN_NVGRE;
+		else if (ipv4_valiad && tcp_valiad)
+			*tun_type = ICE_SW_IPV4_TCP;
+		else if (ipv4_valiad && udp_valiad)
+			*tun_type = ICE_SW_IPV4_UDP;
+		else if (ipv6_valiad && tcp_valiad)
+			*tun_type = ICE_SW_IPV6_TCP;
+		else if (ipv6_valiad && udp_valiad)
+			*tun_type = ICE_SW_IPV6_UDP;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
@@ -1536,10 +1554,6 @@  ice_switch_parse_pattern_action(struct ice_adapter *ad,
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		item_num++;
-		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
-			tun_type = ICE_SW_TUN_VXLAN;
-		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
-			tun_type = ICE_SW_TUN_NVGRE;
 		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			const struct rte_flow_item_eth *eth_mask;
 			if (item->mask)