[v5,12/14] net/nfp: handle bond packets from firmware

Message ID 20231226023745.3144143-13-chaoyong.he@corigine.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series Enhance the bond framework to support offload

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Chaoyong He Dec. 26, 2023, 2:37 a.m. UTC
  From: Long Wu <long.wu@corigine.com>

The firmware sends bond packets to the driver, and the driver
needs to handle them in the ctrl VNIC service.

Signed-off-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower_bond.c |  53 ++++++++++++
 drivers/net/nfp/flower/nfp_flower_bond.h |  14 +++
 drivers/net/nfp/flower/nfp_flower_ctrl.c | 106 +++++++++++++++++++++--
 3 files changed, 168 insertions(+), 5 deletions(-)
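
For illustration, a minimal self-contained C sketch of how a LAG_CONFIG
control message is dispatched on its ctrl_flags, mirroring the new
nfp_flower_bond_unprocessed_msg() handler in the diff below. The flag
values and the helper names (store_for_retransmit, flush_stored,
recover_groups) are hypothetical stand-ins, not driver APIs, and the
sketch is not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_DATA 0x1 /* message carries data to retransmit later */
#define FLAG_XON  0x2 /* firmware is ready again: flush stored messages */
#define FLAG_SYNC 0x4 /* firmware lost state: rebuild all group config */

/* Hypothetical stand-ins for the driver's queue and recovery helpers. */
static bool store_for_retransmit(void) { puts("store message"); return true; }
static void flush_stored(void)         { puts("resend stored messages"); }
static void recover_groups(void)       { puts("mark groups dirty and reset"); }

/* Returns true when the caller must not free the message buffer. */
static bool handle_lag_cmsg(uint8_t ctrl_flags)
{
	bool stored = false;

	if (ctrl_flags & FLAG_DATA)  /* store for a later retransmit */
		stored = store_for_retransmit();
	if (ctrl_flags & FLAG_XON)   /* firmware can accept messages again */
		flush_stored();
	if (ctrl_flags & FLAG_SYNC)  /* full state recovery */
		recover_groups();

	return stored;
}

int main(void)
{
	/*
	 * With DATA and XON both set, the message is stored first and then
	 * replayed together with the rest of the stored messages, which is
	 * the behaviour described by the comment in the handler.
	 */
	bool stored = handle_lag_cmsg(FLAG_DATA | FLAG_XON);

	printf("caller must not free the buffer: %s\n", stored ? "yes" : "no");
	return 0;
}

In the driver itself, any stored mbuf that fails to transmit during the
XON flush is freed on the spot, so buffers are never leaked.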
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower_bond.c b/drivers/net/nfp/flower/nfp_flower_bond.c
index 523e0025ad..73b616360a 100644
--- a/drivers/net/nfp/flower/nfp_flower_bond.c
+++ b/drivers/net/nfp/flower/nfp_flower_bond.c
@@ -161,6 +161,8 @@  nfp_flower_bond_init(struct nfp_app_fw_flower *app_fw_flower)
 	LIST_INIT(&nfp_bond->group_list);
 	nfp_flower_bond_increment_version(nfp_bond);
 	nfp_bond->app_fw_flower = app_fw_flower;
+	nfp_bond->retrans.head = 0;
+	nfp_bond->retrans.rear = 0;
 
 	return 0;
 }
@@ -168,6 +170,7 @@  nfp_flower_bond_init(struct nfp_app_fw_flower *app_fw_flower)
 void
 nfp_flower_bond_cleanup(struct nfp_app_fw_flower *app_fw_flower)
 {
+	struct rte_mbuf *mbuf_tmp;
 	struct nfp_bond_group *entry;
 	struct nfp_flower_bond *nfp_bond = app_fw_flower->nfp_bond;
 
@@ -177,6 +180,12 @@  nfp_flower_bond_cleanup(struct nfp_app_fw_flower *app_fw_flower)
 		LIST_REMOVE(entry, next);
 		rte_free(entry);
 	}
+
+	mbuf_tmp = nfp_fl_bond_get_unprocessed(nfp_bond);
+	for (; mbuf_tmp != NULL;
+			mbuf_tmp = nfp_fl_bond_get_unprocessed(nfp_bond))
+		rte_pktmbuf_free(mbuf_tmp);
+
 	pthread_mutex_unlock(&nfp_bond->mutex);
 
 	pthread_mutex_destroy(&nfp_bond->mutex);
@@ -587,3 +596,47 @@  nfp_flower_bond_all_member_are_phyrepr(struct rte_eth_dev *bond_dev)
 
 	return true;
 }
+
+int
+nfp_fl_bond_put_unprocessed(struct nfp_flower_bond *nfp_bond,
+		struct rte_mbuf *mbuf)
+{
+	uint16_t rear_real;
+	struct nfp_flower_bond_retrans *retrans;
+	struct nfp_flower_cmsg_bond_config *cmsg_payload;
+
+	cmsg_payload = rte_pktmbuf_mtod_offset(mbuf,
+			struct nfp_flower_cmsg_bond_config *,
+			NFP_FLOWER_CMSG_HLEN);
+
+	if (rte_be_to_cpu_32(cmsg_payload->group_id) >= NFP_FL_BOND_GROUP_MAX)
+		return -EINVAL;
+
+	retrans = &nfp_bond->retrans;
+	rear_real = (retrans->rear + 1) % NFP_FL_BOND_RETRANS_LIMIT;
+	if (rear_real == retrans->head)
+		return -ENOSPC;
+
+	retrans->mbufs[retrans->rear] = mbuf;
+
+	retrans->rear = rear_real;
+
+	return 0;
+}
+
+struct rte_mbuf *
+nfp_fl_bond_get_unprocessed(struct nfp_flower_bond *nfp_bond)
+{
+	struct rte_mbuf *mbuf;
+	struct nfp_flower_bond_retrans *retrans;
+
+	retrans = &nfp_bond->retrans;
+	if (retrans->rear == retrans->head)
+		return NULL;
+
+	mbuf = retrans->mbufs[retrans->head];
+
+	retrans->head = (retrans->head + 1) % NFP_FL_BOND_RETRANS_LIMIT;
+
+	return mbuf;
+}
diff --git a/drivers/net/nfp/flower/nfp_flower_bond.h b/drivers/net/nfp/flower/nfp_flower_bond.h
index f73589265f..59d1c211f2 100644
--- a/drivers/net/nfp/flower/nfp_flower_bond.h
+++ b/drivers/net/nfp/flower/nfp_flower_bond.h
@@ -41,6 +41,9 @@ 
 #define NFP_FL_BOND_PORT_TX_ENABLED    RTE_BIT32(1)
 #define NFP_FL_BOND_PORT_CHANGED       RTE_BIT32(2)
 
+/** Queue stores at most 100 mbufs; one slot is kept empty, so the array size is 101 */
+#define NFP_FL_BOND_RETRANS_LIMIT      101
+
 enum nfp_flower_bond_batch {
 	NFP_FLOWER_BOND_BATCH_FIRST,
 	NFP_FLOWER_BOND_BATCH_MEMBER,
@@ -53,6 +56,12 @@  enum nfp_flower_bond_event {
 	NFP_FLOWER_UNREGISTER
 };
 
+struct nfp_flower_bond_retrans {
+	struct rte_mbuf *mbufs[NFP_FL_BOND_RETRANS_LIMIT];
+	uint16_t head;
+	uint16_t rear;
+};
+
 /* Control message payload for bond config */
 struct nfp_flower_cmsg_bond_config {
 	/** Configuration flags */
@@ -113,6 +122,8 @@  struct nfp_flower_bond {
 	bool group_id_map[NFP_FL_BOND_GROUP_MAX];
 	/** Pointer to the flower app */
 	struct nfp_app_fw_flower *app_fw_flower;
+	/** Store bond offload packets from firmware */
+	struct nfp_flower_bond_retrans retrans;
 };
 
 struct nfp_flower_bond_cmsg_args {
@@ -139,5 +150,8 @@  int nfp_flower_bond_event_handle(struct nfp_flower_bond *nfp_bond,
 		enum nfp_flower_bond_event event);
 void nfp_flower_bond_do_work(struct nfp_flower_bond *nfp_bond);
 bool nfp_flower_bond_all_member_are_phyrepr(struct rte_eth_dev *bond_dev);
+struct rte_mbuf *nfp_fl_bond_get_unprocessed(struct nfp_flower_bond *nfp_bond);
+int nfp_fl_bond_put_unprocessed(struct nfp_flower_bond *nfp_bond,
+		struct rte_mbuf *mbuf);
 
 #endif /* __NFP_FLOWER_BOND_H__ */
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index c25487c277..75c8c0b20a 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -15,6 +15,36 @@ 
 
 #define MAX_PKT_BURST 32
 
+static void
+nfp_flower_bond_recover_status(struct nfp_flower_bond *nfp_bond)
+{
+	int err;
+	struct rte_mbuf *mbuf_tmp;
+	struct nfp_bond_group *entry;
+
+	pthread_mutex_lock(&nfp_bond->mutex);
+
+	/* Clear all unprocessed messages */
+	mbuf_tmp = nfp_fl_bond_get_unprocessed(nfp_bond);
+	for (; mbuf_tmp != NULL;
+			mbuf_tmp = nfp_fl_bond_get_unprocessed(nfp_bond))
+		rte_pktmbuf_free(mbuf_tmp);
+
+	/* Mark all groups dirty */
+	LIST_FOREACH(entry, &nfp_bond->group_list, next)
+		entry->dirty = true;
+
+	/* Reset NFP group config */
+	err = nfp_flower_bond_reset(nfp_bond);
+	if (err != 0)
+		PMD_DRV_LOG(ERR, "Mem err in group reset msg");
+
+	pthread_mutex_unlock(&nfp_bond->mutex);
+
+	/* Schedule a LAG/BOND config update */
+	nfp_flower_bond_do_work(nfp_bond);
+}
+
 static uint16_t
 nfp_flower_ctrl_vnic_recv(void *rx_queue,
 		struct rte_mbuf **rx_pkts,
@@ -450,6 +480,55 @@  nfp_flower_cmsg_port_mod_rx(struct nfp_app_fw_flower *app_fw_flower,
 	return 0;
 }
 
+static bool
+nfp_flower_bond_unprocessed_msg(void *app_flower,
+		struct rte_mbuf *mbuf)
+{
+	uint16_t cnt;
+	uint8_t flags;
+	bool store_tag = false;
+	struct rte_mbuf *mbuf_tmp;
+	struct nfp_flower_bond *nfp_bond;
+	struct nfp_flower_cmsg_bond_config *cmsg_payload;
+	struct nfp_app_fw_flower *app_fw_flower = app_flower;
+
+	cmsg_payload = rte_pktmbuf_mtod_offset(mbuf,
+			struct nfp_flower_cmsg_bond_config *,
+			NFP_FLOWER_CMSG_HLEN);
+
+	flags = cmsg_payload->ctrl_flags;
+
+	/*
+	 * Note that the flag checks below intentionally all run. If DATA and
+	 * XON are both set, the message will be stored and then sent again
+	 * together with the rest of the unprocessed messages.
+	 */
+	nfp_bond = app_fw_flower->nfp_bond;
+
+	/* Store */
+	if ((flags & NFP_FL_BOND_DATA) != 0) {
+		if (nfp_fl_bond_put_unprocessed(nfp_bond, mbuf) == 0)
+			store_tag = true;
+	}
+
+	/* Send stored */
+	if ((flags & NFP_FL_BOND_XON) != 0) {
+		mbuf_tmp = nfp_fl_bond_get_unprocessed(nfp_bond);
+		for (; mbuf_tmp != NULL;
+				mbuf_tmp = nfp_fl_bond_get_unprocessed(nfp_bond)) {
+			cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf_tmp);
+			if (cnt == 0)
+				rte_pktmbuf_free(mbuf_tmp);
+		}
+	}
+
+	/* Resend all */
+	if ((flags & NFP_FL_BOND_SYNC) != 0)
+		nfp_flower_bond_recover_status(nfp_bond);
+
+	return store_tag;
+}
+
 static void
 nfp_flower_cmsg_rx(struct nfp_app_fw_flower *app_fw_flower,
 		struct rte_mbuf **pkts_burst,
@@ -457,6 +536,7 @@  nfp_flower_cmsg_rx(struct nfp_app_fw_flower *app_fw_flower,
 {
 	uint16_t i;
 	char *meta;
+	bool mbuf_stored;
 	uint32_t meta_type;
 	uint32_t meta_info;
 	struct nfp_mtr_priv *mtr_priv;
@@ -486,18 +566,34 @@  nfp_flower_cmsg_rx(struct nfp_app_fw_flower *app_fw_flower,
 			continue;
 		}
 
-		if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
+		mbuf_stored = false;
+
+		switch (cmsg_hdr->type) {
+		case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
 			/* We need to deal with stats updates from HW asap */
 			nfp_flower_cmsg_rx_stats(flow_priv, pkts_burst[i]);
-		} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_QOS_STATS) {
+			break;
+		case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
 			/* Handle meter stats */
 			nfp_flower_cmsg_rx_qos_stats(mtr_priv, pkts_burst[i]);
-		} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) {
-			/* Handle changes to port configuration/status */
+			break;
+		case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
+			/* Handle changes to port configuration/status */
 			nfp_flower_cmsg_port_mod_rx(app_fw_flower, pkts_burst[i]);
+			break;
+		case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
+			/* Handle LAG/BOND related packets */
+			if (nfp_flower_support_bond_offload(app_fw_flower))
+				mbuf_stored = nfp_flower_bond_unprocessed_msg(app_fw_flower,
+						pkts_burst[i]);
+			break;
+		default:
+			PMD_DRV_LOG(INFO, "Unhandled cmsg type: %u",
+					cmsg_hdr->type);
 		}
 
-		rte_pktmbuf_free(pkts_burst[i]);
+		if (!mbuf_stored)
+			rte_pktmbuf_free(pkts_burst[i]);
 	}
 }
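
For reference, a standalone sketch (hypothetical names, no DPDK
dependencies) of the one-slot-empty circular queue behind
nfp_fl_bond_put_unprocessed() and nfp_fl_bond_get_unprocessed(): with an
array of NFP_FL_BOND_RETRANS_LIMIT (101) entries, at most 100 mbufs can be
stored, because head == rear means the queue is empty and
(rear + 1) % limit == head means it is full.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define RETRANS_LIMIT 101 /* array size; usable capacity is 100 */

struct retrans_queue {
	void *items[RETRANS_LIMIT];
	unsigned int head; /* next slot to read  */
	unsigned int rear; /* next slot to write */
};

static int queue_put(struct retrans_queue *q, void *item)
{
	unsigned int rear_next = (q->rear + 1) % RETRANS_LIMIT;

	if (rear_next == q->head) /* full: keep one slot empty */
		return -1;

	q->items[q->rear] = item;
	q->rear = rear_next;
	return 0;
}

static void *queue_get(struct retrans_queue *q)
{
	void *item;

	if (q->rear == q->head) /* empty */
		return NULL;

	item = q->items[q->head];
	q->head = (q->head + 1) % RETRANS_LIMIT;
	return item;
}

int main(void)
{
	static struct retrans_queue q; /* zero-initialized: head == rear == 0 */
	static int dummy[RETRANS_LIMIT];
	unsigned int stored = 0;

	/* Exactly RETRANS_LIMIT - 1 (100) items fit before put fails. */
	while (queue_put(&q, &dummy[stored]) == 0)
		stored++;
	printf("stored %u items (array size %d)\n", stored, RETRANS_LIMIT);
	assert(stored == RETRANS_LIMIT - 1);

	/* Drain the queue, as the cleanup and recovery paths do. */
	while (queue_get(&q) != NULL)
		;
	return 0;
}

Keeping one slot empty lets full and empty states be told apart without a
separate element counter, which is why the limit is 101 while the usable
capacity is 100.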