[02/25] net/nfp: add the offload support of IPv6 VXLAN item

Message ID 1666063359-34283-3-git-send-email-chaoyong.he@corigine.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series add the extend rte_flow offload support of nfp PMD

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Chaoyong He Oct. 18, 2022, 3:22 a.m. UTC
  Add the corresponding data structures and logic to support offloading
of the IPv6 VXLAN item.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower_cmsg.h |  42 ++++++++++++
 drivers/net/nfp/nfp_flow.c               | 113 ++++++++++++++++++++++++-------
 2 files changed, 129 insertions(+), 26 deletions(-)
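
For context, the flows this patch enables are matches on an outer IPv6 header
carrying a VXLAN tunnel. A minimal rte_flow sketch of such a rule is given
below (illustrative only: the function name, hop limit, VNI value and the drop
action are made up, and the masks are left NULL so the PMD falls back to its
default masks):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Build an ETH / IPV6 / UDP / VXLAN pattern that exercises the new
 * outer-IPv6 tunnel path and drop the matching packets.
 */
static struct rte_flow *
ipv6_vxlan_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv6 ipv6_spec = {
		.hdr.hop_limits = 64,              /* example outer hop limit */
	};
	struct rte_flow_item_vxlan vxlan_spec = {
		.hdr.vx_vni = RTE_BE32(123 << 8),  /* example VNI 123, upper 24 bits */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6, .spec = &ipv6_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}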
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index 08e2873..996ba3b 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -329,6 +329,11 @@  struct nfp_flower_tun_ipv4 {
 	rte_be32_t dst;
 };
 
+struct nfp_flower_tun_ipv6 {
+	uint8_t ipv6_src[16];
+	uint8_t ipv6_dst[16];
+};
+
 struct nfp_flower_tun_ip_ext {
 	uint8_t tos;
 	uint8_t ttl;
@@ -359,6 +364,43 @@  struct nfp_flower_ipv4_udp_tun {
 	rte_be32_t tun_id;
 };
 
+/*
+ * Flow Frame IPv6 UDP TUNNEL --> Tunnel details (11W/44B)
+ * -----------------------------------------------------------------
+ *    3                   2                   1
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,   31 - 0                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,  63 - 32                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src,  95 - 64                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_src, 127 - 96                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,   31 - 0                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,  63 - 32                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst,  95 - 64                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  ipv6_addr_dst, 127 - 96                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |           Reserved            |      tos      |      ttl      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                            Reserved                           |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                     VNI                       |   Reserved    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_udp_tun {
+	struct nfp_flower_tun_ipv6 ipv6;
+	rte_be16_t reserved1;
+	struct nfp_flower_tun_ip_ext ip_ext;
+	rte_be32_t reserved2;
+	rte_be32_t tun_id;
+};
+
 struct nfp_fl_act_head {
 	uint8_t jump_id;
 	uint8_t len_lw;
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 5f6f800..1673518 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -505,6 +505,7 @@  struct nfp_mask_id_entry {
 {
 	struct rte_eth_dev *ethdev;
 	bool outer_ip4_flag = false;
+	bool outer_ip6_flag = false;
 	const struct rte_flow_item *item;
 	struct nfp_flower_representor *representor;
 	const struct rte_flow_item_port_id *port_id;
@@ -547,6 +548,8 @@  struct nfp_mask_id_entry {
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected");
 			key_ls->key_layer |= NFP_FLOWER_LAYER_IPV6;
 			key_ls->key_size += sizeof(struct nfp_flower_ipv6);
+			if (!outer_ip6_flag)
+				outer_ip6_flag = true;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_TCP detected");
@@ -565,8 +568,9 @@  struct nfp_mask_id_entry {
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_VXLAN detected");
-			/* Clear IPv4 bits */
+			/* Clear IPv4 and IPv6 bits */
 			key_ls->key_layer &= ~NFP_FLOWER_LAYER_IPV4;
+			key_ls->key_layer &= ~NFP_FLOWER_LAYER_IPV6;
 			key_ls->tun_type = NFP_FL_TUN_VXLAN;
 			key_ls->key_layer |= NFP_FLOWER_LAYER_VXLAN;
 			if (outer_ip4_flag) {
@@ -576,6 +580,19 @@  struct nfp_mask_id_entry {
 				 * in `struct nfp_flower_ipv4_udp_tun`
 				 */
 				key_ls->key_size -= sizeof(struct nfp_flower_ipv4);
+			} else if (outer_ip6_flag) {
+				key_ls->key_layer |= NFP_FLOWER_LAYER_EXT_META;
+				key_ls->key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+				key_ls->key_size += sizeof(struct nfp_flower_ext_meta);
+				key_ls->key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+				/*
+				 * The outer l3 layer information is
+				 * in `struct nfp_flower_ipv6_udp_tun`
+				 */
+				key_ls->key_size -= sizeof(struct nfp_flower_ipv6);
+			} else {
+				PMD_DRV_LOG(ERR, "No outer IP layer for VXLAN tunnel.");
+				return -EINVAL;
 			}
 			break;
 		default:
@@ -902,42 +919,61 @@  struct nfp_mask_id_entry {
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
 		bool is_mask,
-		__rte_unused bool is_outer_layer)
+		bool is_outer_layer)
 {
 	struct nfp_flower_ipv6 *ipv6;
 	const struct rte_ipv6_hdr *hdr;
 	struct nfp_flower_meta_tci *meta_tci;
 	const struct rte_flow_item_ipv6 *spec;
 	const struct rte_flow_item_ipv6 *mask;
+	struct nfp_flower_ipv6_udp_tun *ipv6_udp_tun;
 
 	spec = item->spec;
 	mask = item->mask ? item->mask : proc->mask_default;
 	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
 
-	if (spec == NULL) {
-		PMD_DRV_LOG(DEBUG, "nfp flow merge ipv6: no item->spec!");
-		goto ipv6_end;
-	}
+	if (is_outer_layer && nfp_flow_is_tunnel(nfp_flow)) {
+		if (spec == NULL) {
+			PMD_DRV_LOG(DEBUG, "nfp flow merge ipv6: no item->spec!");
+			return 0;
+		}
 
-	/*
-	 * reserve space for L4 info.
-	 * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4
-	 */
-	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
-		*mbuf_off += sizeof(struct nfp_flower_tp_ports);
+		hdr = is_mask ? &mask->hdr : &spec->hdr;
+		ipv6_udp_tun = (struct nfp_flower_ipv6_udp_tun *)*mbuf_off;
+
+		ipv6_udp_tun->ip_ext.tos = (hdr->vtc_flow &
+				RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
+		ipv6_udp_tun->ip_ext.ttl = hdr->hop_limits;
+		memcpy(ipv6_udp_tun->ipv6.ipv6_src, hdr->src_addr,
+				sizeof(ipv6_udp_tun->ipv6.ipv6_src));
+		memcpy(ipv6_udp_tun->ipv6.ipv6_dst, hdr->dst_addr,
+				sizeof(ipv6_udp_tun->ipv6.ipv6_dst));
+	} else {
+		if (spec == NULL) {
+			PMD_DRV_LOG(DEBUG, "nfp flow merge ipv6: no item->spec!");
+			goto ipv6_end;
+		}
 
-	hdr = is_mask ? &mask->hdr : &spec->hdr;
-	ipv6 = (struct nfp_flower_ipv6 *)*mbuf_off;
+		/*
+		 * reserve space for L4 info.
+		 * rte_flow has ipv6 before L4 but NFP flower fw requires L4 before ipv6
+		 */
+		if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+			*mbuf_off += sizeof(struct nfp_flower_tp_ports);
+
+		hdr = is_mask ? &mask->hdr : &spec->hdr;
+		ipv6 = (struct nfp_flower_ipv6 *)*mbuf_off;
 
-	ipv6->ip_ext.tos   = (hdr->vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
-			RTE_IPV6_HDR_TC_SHIFT;
-	ipv6->ip_ext.proto = hdr->proto;
-	ipv6->ip_ext.ttl   = hdr->hop_limits;
-	memcpy(ipv6->ipv6_src, hdr->src_addr, sizeof(ipv6->ipv6_src));
-	memcpy(ipv6->ipv6_dst, hdr->dst_addr, sizeof(ipv6->ipv6_dst));
+		ipv6->ip_ext.tos   = (hdr->vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
+				RTE_IPV6_HDR_TC_SHIFT;
+		ipv6->ip_ext.proto = hdr->proto;
+		ipv6->ip_ext.ttl   = hdr->hop_limits;
+		memcpy(ipv6->ipv6_src, hdr->src_addr, sizeof(ipv6->ipv6_src));
+		memcpy(ipv6->ipv6_dst, hdr->dst_addr, sizeof(ipv6->ipv6_dst));
 
 ipv6_end:
-	*mbuf_off += sizeof(struct nfp_flower_ipv6);
+		*mbuf_off += sizeof(struct nfp_flower_ipv6);
+	}
 
 	return 0;
 }
@@ -1106,7 +1142,7 @@  struct nfp_mask_id_entry {
 }
 
 static int
-nfp_flow_merge_vxlan(__rte_unused struct rte_flow *nfp_flow,
+nfp_flow_merge_vxlan(struct rte_flow *nfp_flow,
 		char **mbuf_off,
 		const struct rte_flow_item *item,
 		const struct nfp_flow_item_proc *proc,
@@ -1115,8 +1151,15 @@  struct nfp_mask_id_entry {
 {
 	const struct rte_vxlan_hdr *hdr;
 	struct nfp_flower_ipv4_udp_tun *tun4;
+	struct nfp_flower_ipv6_udp_tun *tun6;
+	struct nfp_flower_meta_tci *meta_tci;
 	const struct rte_flow_item_vxlan *spec;
 	const struct rte_flow_item_vxlan *mask;
+	struct nfp_flower_ext_meta *ext_meta = NULL;
+
+	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+		ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
 
 	spec = item->spec;
 	if (spec == NULL) {
@@ -1127,11 +1170,21 @@  struct nfp_mask_id_entry {
 	mask = item->mask ? item->mask : proc->mask_default;
 	hdr = is_mask ? &mask->hdr : &spec->hdr;
 
-	tun4 = (struct nfp_flower_ipv4_udp_tun *)*mbuf_off;
-	tun4->tun_id = hdr->vx_vni;
+	if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+			NFP_FLOWER_LAYER2_TUN_IPV6)) {
+		tun6 = (struct nfp_flower_ipv6_udp_tun *)*mbuf_off;
+		tun6->tun_id = hdr->vx_vni;
+	} else {
+		tun4 = (struct nfp_flower_ipv4_udp_tun *)*mbuf_off;
+		tun4->tun_id = hdr->vx_vni;
+	}
 
 vxlan_end:
-	*mbuf_off += sizeof(struct nfp_flower_ipv4_udp_tun);
+	if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+			NFP_FLOWER_LAYER2_TUN_IPV6))
+		*mbuf_off += sizeof(struct nfp_flower_ipv6_udp_tun);
+	else
+		*mbuf_off += sizeof(struct nfp_flower_ipv4_udp_tun);
 
 	return 0;
 }
@@ -1140,7 +1193,8 @@  struct nfp_mask_id_entry {
 static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 	[RTE_FLOW_ITEM_TYPE_END] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH,
-			RTE_FLOW_ITEM_TYPE_IPV4),
+			RTE_FLOW_ITEM_TYPE_IPV4,
+			RTE_FLOW_ITEM_TYPE_IPV6),
 	},
 	[RTE_FLOW_ITEM_TYPE_ETH] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
@@ -1413,6 +1467,7 @@  struct nfp_mask_id_entry {
 	char *mbuf_off_exact;
 	bool is_tun_flow = false;
 	bool is_outer_layer = true;
+	struct nfp_flower_meta_tci *meta_tci;
 	const struct rte_flow_item *loop_item;
 
 	mbuf_off_exact = nfp_flow->payload.unmasked_data +
@@ -1422,6 +1477,12 @@  struct nfp_mask_id_entry {
 			sizeof(struct nfp_flower_meta_tci) +
 			sizeof(struct nfp_flower_in_port);
 
+	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) {
+		mbuf_off_exact += sizeof(struct nfp_flower_ext_meta);
+		mbuf_off_mask += sizeof(struct nfp_flower_ext_meta);
+	}
+
 	/* Check if this is a tunnel flow and get the inner item*/
 	is_tun_flow = nfp_flow_inner_item_get(items, &loop_item);
 	if (is_tun_flow)