[08/25] net/nfp: prepare for the decap action of IPv6 UDP tunnel

Message ID 1666063359-34283-9-git-send-email-chaoyong.he@corigine.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers
Series add the extend rte_flow offload support of nfp PMD |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chaoyong He Oct. 18, 2022, 3:22 a.m. UTC
  Add the related data structures and functions, preparing for
the decap action of IPv6 UDP tunnel.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower_cmsg.c |  42 +++++++++
 drivers/net/nfp/flower/nfp_flower_cmsg.h |  24 +++++
 drivers/net/nfp/nfp_flow.c               | 145 ++++++++++++++++++++++++++++++-
 drivers/net/nfp/nfp_flow.h               |   9 ++
 4 files changed, 217 insertions(+), 3 deletions(-)
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c
index f18f3de..76815cf 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c
@@ -348,6 +348,48 @@ 
 }
 
 int
+nfp_flower_cmsg_tun_off_v6(struct nfp_app_fw_flower *app_fw_flower)
+{
+	uint16_t cnt;
+	uint32_t count = 0;
+	struct rte_mbuf *mbuf;
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv6_addr_entry *entry;
+	struct nfp_flower_cmsg_tun_ipv6_addr *msg;
+
+	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+	if (mbuf == NULL) {
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun addr");
+		return -ENOMEM;
+	}
+
+	msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6, sizeof(*msg));
+
+	/* Pack every currently offloaded IPv6 address into one cmsg. */
+	priv = app_fw_flower->flow_priv;
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
+		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+			rte_spinlock_unlock(&priv->ipv6_off_lock);
+			PMD_DRV_LOG(ERR, "IPv6 offload exceeds limit.");
+			/* Do not leak the cmsg mbuf on the error path. */
+			rte_pktmbuf_free(mbuf);
+			return -ERANGE;
+		}
+		memcpy(&msg->ipv6_addr[count * 16], entry->ipv6_addr, 16UL);
+		count++;
+	}
+	msg->count = rte_cpu_to_be_32(count);
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+	if (cnt == 0) {
+		PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+		rte_pktmbuf_free(mbuf);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int
 nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_fl_rule_metadata *nfp_flow_meta,
 		uint16_t mac_idx,
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index 0933dac..61f2f83 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -280,6 +280,29 @@  struct nfp_flower_cmsg_tun_ipv4_addr {
 	rte_be32_t ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
 };
 
+#define NFP_FL_IPV6_ADDRS_MAX        4
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6
+ *    Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ *    -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *          +---------------------------------------------------------------+
+ *        0 |                    Number of IP Addresses                     |
+ *          +---------------------------------------------------------------+
+ *        1 |                        IP Address1 #1                         |
+ *          +---------------------------------------------------------------+
+ *        2 |                        IP Address1 #2                         |
+ *          +---------------------------------------------------------------+
+ *          |                             ...                               |
+ *          +---------------------------------------------------------------+
+ *       16 |                        IP Address4 #4                         |
+ *          +---------------------------------------------------------------+
+ *    Each IPv6 address occupies four consecutive 32-bit words (#1..#4).
+ */
+struct nfp_flower_cmsg_tun_ipv6_addr {
+	rte_be32_t count;       /**< Number of valid addresses, big-endian */
+	uint8_t ipv6_addr[NFP_FL_IPV6_ADDRS_MAX * 16];  /**< Packed 16-byte addresses */
+};
+
 /*
  * NFP_FLOWER_CMSG_TYPE_FLOW_STATS
  *    Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
@@ -802,6 +825,7 @@  int nfp_flower_cmsg_tun_neigh_v4_rule(struct nfp_app_fw_flower *app_fw_flower,
 int nfp_flower_cmsg_tun_neigh_v6_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_flower_cmsg_tun_neigh_v6 *payload);
 int nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower);
+int nfp_flower_cmsg_tun_off_v6(struct nfp_app_fw_flower *app_fw_flower);
 int nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_fl_rule_metadata *nfp_flow_meta,
 		uint16_t mac_idx,
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 816c733..cc63aa5 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -487,16 +487,95 @@  struct nfp_pre_tun_entry {
 	return 0;
 }
 
+/* Take a reference on an offloaded tunnel IPv6 address, adding it (and
+ * re-syncing the whole list to firmware) when seen for the first time.
+ */
+__rte_unused static int
+nfp_tun_add_ipv6_off(struct nfp_app_fw_flower *app_fw_flower,
+		uint8_t ipv6[])
+{
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv6_addr_entry *entry;
+	struct nfp_ipv6_addr_entry *tmp_entry;
+
+	priv = app_fw_flower->flow_priv;
+
+	/* Fast path: address already offloaded, just bump its refcount. */
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
+		if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) {
+			entry->ref_count++;
+			rte_spinlock_unlock(&priv->ipv6_off_lock);
+			return 0;
+		}
+	}
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	/*
+	 * NOTE(review): the lock is dropped across the allocation, so two
+	 * concurrent adds of the same address could insert duplicates —
+	 * confirm callers are serialized (the IPv4 variant follows the
+	 * same pattern).
+	 */
+	tmp_entry = rte_zmalloc("nfp_ipv6_off", sizeof(struct nfp_ipv6_addr_entry), 0);
+	if (tmp_entry == NULL) {
+		PMD_DRV_LOG(ERR, "Mem error when offloading IP6 address.");
+		return -ENOMEM;
+	}
+	memcpy(tmp_entry->ipv6_addr, ipv6, sizeof(tmp_entry->ipv6_addr));
+	tmp_entry->ref_count = 1;
+
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_INSERT_HEAD(&priv->ipv6_off_list, tmp_entry, next);
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	/* Push the updated address list to the firmware. */
+	return nfp_flower_cmsg_tun_off_v6(app_fw_flower);
+}
+
+/* Drop a reference on an offloaded tunnel IPv6 address; when the last
+ * reference goes away the entry is removed and the shortened list is
+ * re-synced to firmware. Unknown addresses are silently ignored.
+ */
+static int
+nfp_tun_del_ipv6_off(struct nfp_app_fw_flower *app_fw_flower,
+		uint8_t ipv6[])
+{
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv6_addr_entry *entry;
+
+	priv = app_fw_flower->flow_priv;
+
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
+		if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) {
+			entry->ref_count--;
+			if (entry->ref_count == 0) {
+				LIST_REMOVE(entry, next);
+				rte_free(entry);
+				rte_spinlock_unlock(&priv->ipv6_off_lock);
+				/* Last user gone: tell firmware the new list. */
+				return nfp_flower_cmsg_tun_off_v6(app_fw_flower);
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	return 0;
+}
+
 static int
 nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr,
 		struct rte_flow *nfp_flow)
 {
 	int ret;
+	uint32_t key_layer2 = 0;
 	struct nfp_flower_ipv4_udp_tun *udp4;
+	struct nfp_flower_ipv6_udp_tun *udp6;
+	struct nfp_flower_meta_tci *meta_tci;
+	struct nfp_flower_ext_meta *ext_meta = NULL;
 
-	udp4 = (struct nfp_flower_ipv4_udp_tun *)(nfp_flow->payload.mask_data -
-			sizeof(struct nfp_flower_ipv4_udp_tun));
-	ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, udp4->ipv4.dst);
+	/* The extended-meta word, when present, directly follows meta_tci. */
+	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+		ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
+
+	if (ext_meta != NULL)
+		key_layer2 = rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2);
+
+	/* The UDP tunnel match sits at the tail of mask_data; pick the
+	 * v4/v6 shape from key_layer2 and drop the matching address ref.
+	 */
+	if (key_layer2 & NFP_FLOWER_LAYER2_TUN_IPV6) {
+		udp6 = (struct nfp_flower_ipv6_udp_tun *)(nfp_flow->payload.mask_data -
+				sizeof(struct nfp_flower_ipv6_udp_tun));
+		ret = nfp_tun_del_ipv6_off(repr->app_fw_flower, udp6->ipv6.ipv6_dst);
+	} else {
+		udp4 = (struct nfp_flower_ipv4_udp_tun *)(nfp_flow->payload.mask_data -
+				sizeof(struct nfp_flower_ipv4_udp_tun));
+		ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, udp4->ipv4.dst);
+	}
 
 	return ret;
 }
@@ -2096,6 +2175,59 @@  struct nfp_pre_tun_entry {
 	return nfp_flower_cmsg_tun_neigh_v6_rule(app_fw_flower, &payload);
 }
 
+/* Record (or refcount) a tunnel-neighbour entry for an IPv6 decap flow
+ * and, for a new entry, send the neighbour rule cmsg to firmware. For
+ * decap the flow's src/dst are swapped when stored as the tunnel endpoint.
+ */
+__rte_unused static int
+nfp_flower_add_tun_neigh_v6_decap(struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow)
+{
+	struct nfp_fl_tun *tmp;
+	struct nfp_fl_tun *tun;
+	struct nfp_flow_priv *priv;
+	struct nfp_flower_ipv6 *ipv6;
+	struct nfp_flower_mac_mpls *eth;
+	struct nfp_flower_in_port *port;
+	struct nfp_flower_meta_tci *meta_tci;
+	struct nfp_flower_cmsg_tun_neigh_v6 payload;
+
+	/*
+	 * NOTE(review): unlike nfp_tun_check_ip_off_del(), this walk does
+	 * not skip a NFP_FLOWER_LAYER_EXT_META word between meta_tci and
+	 * in_port — confirm decap flows reaching here never carry EXT_META.
+	 */
+	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+	port = (struct nfp_flower_in_port *)(meta_tci + 1);
+	eth = (struct nfp_flower_mac_mpls *)(port + 1);
+
+	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+		ipv6 = (struct nfp_flower_ipv6 *)((char *)eth +
+				sizeof(struct nfp_flower_mac_mpls) +
+				sizeof(struct nfp_flower_tp_ports));
+	else
+		ipv6 = (struct nfp_flower_ipv6 *)((char *)eth +
+				sizeof(struct nfp_flower_mac_mpls));
+
+	/* Decap direction: flow src/dst become tunnel dst/src respectively. */
+	tun = &nfp_flow->tun;
+	tun->payload.v6_flag = 1;
+	memcpy(tun->payload.dst.dst_ipv6, ipv6->ipv6_src, sizeof(tun->payload.dst.dst_ipv6));
+	memcpy(tun->payload.src.src_ipv6, ipv6->ipv6_dst, sizeof(tun->payload.src.src_ipv6));
+	memcpy(tun->payload.dst_addr, eth->mac_src, RTE_ETHER_ADDR_LEN);
+	memcpy(tun->payload.src_addr, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+
+	/* Reuse an identical neighbour entry when one already exists. */
+	tun->ref_cnt = 1;
+	priv = app_fw_flower->flow_priv;
+	LIST_FOREACH(tmp, &priv->nn_list, next) {
+		if (memcmp(&tmp->payload, &tun->payload, sizeof(struct nfp_fl_tun_entry)) == 0) {
+			tmp->ref_cnt++;
+			return 0;
+		}
+	}
+
+	LIST_INSERT_HEAD(&priv->nn_list, tun, next);
+
+	/* New entry: program the neighbour rule into the firmware. */
+	memset(&payload, 0, sizeof(struct nfp_flower_cmsg_tun_neigh_v6));
+	memcpy(payload.dst_ipv6, ipv6->ipv6_src, sizeof(payload.dst_ipv6));
+	memcpy(payload.src_ipv6, ipv6->ipv6_dst, sizeof(payload.src_ipv6));
+	memcpy(payload.common.dst_mac, eth->mac_src, RTE_ETHER_ADDR_LEN);
+	memcpy(payload.common.src_mac, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+	payload.common.port_id = port->in_port;
+
+	return nfp_flower_cmsg_tun_neigh_v6_rule(app_fw_flower, &payload);
+}
+
 static int
 nfp_flower_del_tun_neigh_v6(struct nfp_app_fw_flower *app_fw_flower,
 		uint8_t *ipv6)
@@ -2419,6 +2551,9 @@  struct nfp_pre_tun_entry {
 	nfp_mac_idx = (find_entry->mac_index << 8) |
 			NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT |
 			NFP_TUN_PRE_TUN_IDX_BIT;
+	if (nfp_flow->tun.payload.v6_flag != 0)
+		nfp_mac_idx |= NFP_TUN_PRE_TUN_IPV6_BIT;
+
 	ret = nfp_flower_cmsg_tun_mac_rule(repr->app_fw_flower, &repr->mac_addr,
 			nfp_mac_idx, true);
 	if (ret != 0) {
@@ -3267,6 +3402,10 @@  struct nfp_pre_tun_entry {
 	rte_spinlock_init(&priv->ipv4_off_lock);
 	LIST_INIT(&priv->ipv4_off_list);
 
+	/* ipv6 off list */
+	rte_spinlock_init(&priv->ipv6_off_lock);
+	LIST_INIT(&priv->ipv6_off_list);
+
 	/* neighbor next list */
 	LIST_INIT(&priv->nn_list);
 
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index 84a3005..1b4a51f 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -176,6 +176,12 @@  struct nfp_ipv4_addr_entry {
 	int ref_count;
 };
 
+/* One offloaded tunnel IPv6 address, refcounted per using flow. */
+struct nfp_ipv6_addr_entry {
+	LIST_ENTRY(nfp_ipv6_addr_entry) next;
+	uint8_t ipv6_addr[16];  /**< Raw 16-byte IPv6 address */
+	int ref_count;          /**< Flows currently using this address */
+};
+
 #define NFP_TUN_PRE_TUN_RULE_LIMIT  32
 
 struct nfp_flow_priv {
@@ -200,6 +206,9 @@  struct nfp_flow_priv {
 	/* IPv4 off */
 	LIST_HEAD(, nfp_ipv4_addr_entry) ipv4_off_list; /**< Store ipv4 off */
 	rte_spinlock_t ipv4_off_lock; /**< Lock the ipv4 off list */
+	/* IPv6 off */
+	LIST_HEAD(, nfp_ipv6_addr_entry) ipv6_off_list; /**< Store ipv6 off */
+	rte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */
 	/* neighbor next */
 	LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
 };