[v6,4/4] net/ice: enable protocol agnostic flow offloading in FDIR

Message ID 20211028083416.1490834-5-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series enable protocol agnostic flow offloading in FDIR

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/iol-spell-check-testing warning Testing issues
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/Intel-compilation fail Compilation issues
ci/intel-Testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS

Commit Message

Junfeng Guo Oct. 28, 2021, 8:34 a.m. UTC
This patch enables protocol agnostic flow offloading in Flow Director,
based on the Parser Library and using the existing rte_flow raw API.

Note that a raw flow rule requires:
1. a byte string of the raw target packet bits.
2. a byte string of the mask for the target packet.

Here is an example:
an FDIR rule matching IPv4 destination address 1.2.3.4 and redirecting to queue 3:

flow create 0 ingress pattern raw \
pattern spec \
00000000000000000000000008004500001400004000401000000000000001020304 \
pattern mask \
000000000000000000000000000000000000000000000000000000000000ffffffff \
/ end actions queue index 3 / mark id 3 / end

Note that the mask for some key bits (e.g., 0x0800, the IPv4 EtherType)
is optional in our case. To avoid redundancy, the 0xFFFF mask for the
0x0800 field is simply omitted from the mask byte string in the example
above. The '0x' prefix for the spec and mask (hex) byte strings is also
omitted here.
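
For reference, the sketch below (not part of this patch) shows a
hypothetical helper that creates the same rule programmatically through
the rte_flow API. It assumes the application hands the spec and mask to
the PMD as ASCII hex strings in rte_flow_item_raw.pattern, which is what
the parsing code added in this patch expects: it calls strlen() on the
pattern and converts the hex characters to bytes in place, so the
strings must live in writable memory.

#include <rte_flow.h>

static int
create_raw_fdir_rule(uint16_t port_id)
{
	/* Spec and mask as hex strings; non-const arrays because the
	 * PMD rewrites them into raw bytes in place.
	 */
	static char spec_hex[] =
		"00000000000000000000000008004500001400004000401000000000000001020304";
	static char mask_hex[] =
		"000000000000000000000000000000000000000000000000000000000000ffffffff";

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_raw raw_spec = {
		.pattern = (const uint8_t *)spec_hex,
	};
	struct rte_flow_item_raw raw_mask = {
		.pattern = (const uint8_t *)mask_hex,
	};
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_RAW,
			.spec = &raw_spec,
			.mask = &raw_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Redirect matched packets to queue 3 and mark them with id 3. */
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action_mark mark = { .id = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
		0 : -1;
}

This mirrors the testpmd command above; error handling is reduced to a
pass/fail return for brevity.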

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/rel_notes/release_21_11.rst |   1 +
 drivers/net/ice/ice_ethdev.h           |  17 ++
 drivers/net/ice/ice_fdir_filter.c      | 257 +++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.c     |   7 +
 drivers/net/ice/ice_generic_flow.h     |   3 +
 5 files changed, 285 insertions(+)
  

Patch

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 9c13ceed1c..cc449a4340 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -167,6 +167,7 @@  New Features
 
 * **Updated Intel ice driver.**
 
+  * Added protocol agnostic flow offloading support in Flow Director.
   * Added 1PPS out support by a devargs.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added DEV_RX_OFFLOAD_TIMESTAMP support.
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 599e0028f7..441242ee89 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -318,6 +318,11 @@  struct ice_fdir_filter_conf {
 	uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
 	uint64_t input_set_i; /* only for tunnel inner fields */
 	uint32_t mark_flag;
+
+	struct ice_parser_profile *prof;
+	const u8 *pkt_buf;
+	bool parser_ena;
+	u8 pkt_len;
 };
 
 #define ICE_MAX_FDIR_FILTER_NUM		(1024 * 16)
@@ -487,6 +492,17 @@  struct ice_devargs {
 	uint8_t pps_out_ena;
 };
 
+/**
+ * Structure to store fdir fv entry.
+ */
+struct ice_fdir_prof_info {
+	struct LIST_ENTRY_TYPE l_entry;
+
+	struct ice_parser_profile prof;
+	u16 ptg;
+	u64 fdir_actived_cnt;
+};
+
 /**
  * Structure to store private data for each PF/VF instance.
  */
@@ -509,6 +525,7 @@  struct ice_adapter {
 	struct rte_timecounter rx_tstamp_tc;
 	struct rte_timecounter tx_tstamp_tc;
 	bool ptp_ena;
+	struct LIST_HEAD_TYPE fdir_prof_list;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index bd627e3aa8..d5ae84b86d 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -107,6 +107,7 @@ 
 	ICE_INSET_NAT_T_ESP_SPI)
 
 static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
+	{pattern_raw,					ICE_INSET_NONE,			ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_ethertype,				ICE_FDIR_INSET_ETH,		ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv4,				ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,				ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE,			ICE_INSET_NONE},
@@ -1158,6 +1159,8 @@  ice_fdir_init(struct ice_adapter *ad)
 	if (ret)
 		return ret;
 
+	INIT_LIST_HEAD(&ad->fdir_prof_list);
+
 	parser = &ice_fdir_parser;
 
 	return ice_register_parser(parser, ad);
@@ -1188,6 +1191,24 @@  ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
 		return 0;
 }
 
+static int
+ice_fdir_add_del_raw(struct ice_pf *pf,
+		     struct ice_fdir_filter_conf *filter,
+		     bool add)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);
+
+	struct ice_fltr_desc desc;
+	memset(&desc, 0, sizeof(desc));
+	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
+	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
+
+	return ice_fdir_programming(pf, &desc);
+}
+
 static int
 ice_fdir_add_del_filter(struct ice_pf *pf,
 			struct ice_fdir_filter_conf *filter,
@@ -1303,6 +1324,97 @@  ice_fdir_create_filter(struct ice_adapter *ad,
 	struct ice_fdir_fltr_pattern key;
 	bool is_tun;
 	int ret;
+	int i;
+
+	if (filter->parser_ena) {
+		struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+		int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
+		u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
+		u16 main_vsi = pf->main_vsi->idx;
+		bool fv_found = false;
+		u16 vsi_num;
+
+		struct ice_fdir_prof_info *pi;
+		LIST_FOR_EACH_ENTRY(pi, &ad->fdir_prof_list,
+				    ice_fdir_prof_info, l_entry) {
+			if (pi->ptg != hw->blk[ICE_BLK_FD].xlt1.t[id])
+				continue;
+			if (!pi->fdir_actived_cnt) {
+				vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
+				ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+							   vsi_num, id);
+				if (ret)
+					return -rte_errno;
+
+				vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
+				ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+							   vsi_num, id);
+				if (ret)
+					return -rte_errno;
+			}
+			for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+				if (pi->prof.fv[i].proto_id !=
+				    filter->prof->fv[i].proto_id ||
+				    pi->prof.fv[i].offset !=
+				    filter->prof->fv[i].offset)
+					break;
+			if (i == ICE_MAX_FV_WORDS) {
+				fv_found = true;
+				pi->fdir_actived_cnt++;
+				break;
+			}
+		}
+
+		if (!fv_found) {
+			ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
+						   filter->prof, ICE_BLK_FD);
+			if (ret)
+				return -rte_errno;
+		}
+
+		ret = ice_fdir_add_del_raw(pf, filter, true);
+		if (ret)
+			return -rte_errno;
+
+		if (!fv_found) {
+			pi = (struct ice_fdir_prof_info *)
+				ice_malloc(hw, sizeof(*pi));
+			if (!pi)
+				return ICE_ERR_NO_MEMORY;
+
+			memset(&pi->prof, 0, sizeof(pi->prof));
+
+			for (i = 0; i < filter->prof->fv_num; i++) {
+				pi->prof.fv[i].proto_id =
+					filter->prof->fv[i].proto_id;
+				pi->prof.fv[i].offset =
+					filter->prof->fv[i].offset;
+				pi->prof.fv[i].spec = filter->prof->fv[i].spec;
+				pi->prof.fv[i].msk = filter->prof->fv[i].msk;
+			}
+			pi->ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+			pi->fdir_actived_cnt = 1;
+
+			LIST_ADD(&pi->l_entry, &ad->fdir_prof_list);
+		}
+
+		if (filter->mark_flag == 1)
+			ice_fdir_rx_parsing_enable(ad, 1);
+
+		entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
+		if (!entry)
+			return -rte_errno;
+
+		rte_memcpy(entry, filter, sizeof(*filter));
+
+		filter->prof = NULL;
+		filter->pkt_buf = NULL;
+
+		flow->rule = entry;
+
+		return 0;
+	}
 
 	ice_fdir_extract_fltr_key(&key, filter);
 	node = ice_fdir_entry_lookup(fdir_info, &key);
@@ -1397,6 +1509,44 @@  ice_fdir_destroy_filter(struct ice_adapter *ad,
 
 	filter = (struct ice_fdir_filter_conf *)flow->rule;
 
+	if (filter->parser_ena) {
+		struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+		ret = ice_fdir_add_del_raw(pf, filter, false);
+		if (ret)
+			return -rte_errno;
+
+		int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
+		int i;
+		struct ice_fdir_prof_info *pi;
+		LIST_FOR_EACH_ENTRY(pi, &ad->fdir_prof_list,
+				    ice_fdir_prof_info, l_entry) {
+			if (pi->ptg != hw->blk[ICE_BLK_FD].xlt1.t[id])
+				continue;
+			for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+				if (pi->prof.fv[i].proto_id !=
+				    filter->prof->fv[i].proto_id ||
+				    pi->prof.fv[i].offset !=
+				    filter->prof->fv[i].offset)
+					break;
+			if (i == ICE_MAX_FV_WORDS) {
+				pi->fdir_actived_cnt--;
+				break;
+			}
+		}
+
+		if (filter->mark_flag == 1)
+			ice_fdir_rx_parsing_enable(ad, 0);
+
+		flow->rule = NULL;
+		filter->prof = NULL;
+		filter->pkt_buf = NULL;
+
+		rte_free(filter);
+
+		return 0;
+	}
+
 	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
 
 	if (filter->counter) {
@@ -1675,6 +1825,7 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
 	enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
 	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -1702,6 +1853,9 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	struct ice_fdir_extra *p_ext_data;
 	struct ice_fdir_v4 *p_v4 = NULL;
 	struct ice_fdir_v6 *p_v6 = NULL;
+	struct ice_parser_result rslt;
+	struct ice_parser *psr;
+	uint8_t item_num = 0;
 
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@@ -1713,6 +1867,7 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 		    item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
 			is_outer = false;
 		}
+		item_num++;
 	}
 
 	/* This loop parse flow pattern and distinguish Non-tunnel and tunnel
@@ -1733,6 +1888,101 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			    &input_set_i : &input_set_o;
 
 		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_RAW:
+			raw_spec = item->spec;
+			raw_mask = item->mask;
+
+			if (item_num != 1)
+				break;
+
+			/* convert raw spec & mask from byte string to int */
+			unsigned char *tmp_spec =
+				(uint8_t *)(uintptr_t)raw_spec->pattern;
+			unsigned char *tmp_mask =
+				(uint8_t *)(uintptr_t)raw_mask->pattern;
+			uint16_t udp_port = 0;
+			uint16_t tmp_val = 0;
+			uint8_t pkt_len = 0;
+			uint8_t tmp = 0;
+			int i, j;
+
+			pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
+			if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
+				pkt_len)
+				return -rte_errno;
+
+			for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+				tmp = tmp_spec[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = tmp_spec[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_spec[j] = tmp_val + tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_spec[j] = tmp_val + tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_spec[j] = tmp_val + tmp - '0';
+
+				tmp = tmp_mask[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = tmp_mask[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_mask[j] = tmp_val + tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_mask[j] = tmp_val + tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_mask[j] = tmp_val + tmp - '0';
+			}
+
+			pkt_len /= 2;
+
+			if (ice_parser_create(&ad->hw, &psr))
+				return -rte_errno;
+			if (ice_get_open_tunnel_port(&ad->hw, TNL_VXLAN,
+						     &udp_port))
+				ice_parser_vxlan_tunnel_set(psr, udp_port,
+							    true);
+			if (ice_parser_run(psr, tmp_spec, pkt_len, &rslt))
+				return -rte_errno;
+			ice_parser_destroy(psr);
+
+			if (!tmp_mask)
+				return -rte_errno;
+
+			filter->prof = (struct ice_parser_profile *)
+				ice_malloc(&ad->hw, sizeof(*filter->prof));
+			if (!filter->prof)
+				return -ENOMEM;
+
+			if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
+				pkt_len, ICE_BLK_FD, true, filter->prof))
+				return -rte_errno;
+
+			u8 *pkt_buf = (u8 *)ice_malloc(&ad->hw, pkt_len + 1);
+			if (!pkt_buf)
+				return -ENOMEM;
+			rte_memcpy(pkt_buf, tmp_spec, pkt_len);
+			filter->pkt_buf = pkt_buf;
+
+			filter->pkt_len = pkt_len;
+
+			filter->parser_ena = true;
+
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
 			eth_spec = item->spec;
@@ -2198,6 +2448,7 @@  ice_fdir_parse(struct ice_adapter *ad,
 	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
 	struct ice_pattern_match_item *item = NULL;
 	uint64_t input_set;
+	bool raw = false;
 	int ret;
 
 	memset(filter, 0, sizeof(*filter));
@@ -2213,7 +2464,13 @@  ice_fdir_parse(struct ice_adapter *ad,
 	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
 	if (ret)
 		goto error;
+
+	if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
+		raw = true;
+
 	input_set = filter->input_set_o | filter->input_set_i;
+	input_set = raw ? ~input_set : input_set;
+
 	if (!input_set || filter->input_set_o &
 	    ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
 	    filter->input_set_i & ~item->input_set_mask_i) {
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 02f854666a..d3391c86c0 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -65,6 +65,12 @@  enum rte_flow_item_type pattern_empty[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+/* raw */
+enum rte_flow_item_type pattern_raw[] = {
+	RTE_FLOW_ITEM_TYPE_RAW,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 /* L2 */
 enum rte_flow_item_type pattern_ethertype[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
@@ -2081,6 +2087,7 @@  struct ice_ptype_match {
 };
 
 static struct ice_ptype_match ice_ptype_map[] = {
+	{pattern_raw,					ICE_PTYPE_IPV4_PAY},
 	{pattern_eth_ipv4,				ICE_PTYPE_IPV4_PAY},
 	{pattern_eth_ipv4_udp,				ICE_PTYPE_IPV4_UDP_PAY},
 	{pattern_eth_ipv4_tcp,				ICE_PTYPE_IPV4_TCP_PAY},
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 8845a3e156..1b030c0466 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -124,6 +124,9 @@ 
 /* empty pattern */
 extern enum rte_flow_item_type pattern_empty[];
 
+/* raw pattern */
+extern enum rte_flow_item_type pattern_raw[];
+
 /* L2 */
 extern enum rte_flow_item_type pattern_ethertype[];
 extern enum rte_flow_item_type pattern_ethertype_vlan[];