[v5,11/14] net/nfp: support bond member notification

Message ID 20231226023745.3144143-12-chaoyong.he@corigine.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers
Series Enhance the bond framework to support offload |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chaoyong He Dec. 26, 2023, 2:37 a.m. UTC
  From: Long Wu <long.wu@corigine.com>

An NFP representor port can receive a bond notification. The driver
then parses this notification into one of these two events:
1. Bonding port configuration may have changed.
This includes creation of a bonding port, removal/addition of
a member port, changing the bond mode, etc.
2. Bonding port is deleted.

Signed-off-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower_bond.c      | 183 +++++++++++++++++-
 drivers/net/nfp/flower/nfp_flower_bond.h      |   2 +
 .../net/nfp/flower/nfp_flower_representor.c   |  21 ++
 3 files changed, 205 insertions(+), 1 deletion(-)
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower_bond.c b/drivers/net/nfp/flower/nfp_flower_bond.c
index c814c0d4ea..523e0025ad 100644
--- a/drivers/net/nfp/flower/nfp_flower_bond.c
+++ b/drivers/net/nfp/flower/nfp_flower_bond.c
@@ -46,6 +46,39 @@  nfp_flower_bond_increment_version(struct nfp_flower_bond *nfp_bond)
 		nfp_bond->batch_ver += 2;
 }
 
+/**
+ * Return a bond group ID to the free pool.
+ *
+ * IDs outside [NFP_FL_BOND_GROUP_MIN, NFP_FL_BOND_GROUP_MAX) are
+ * rejected with an error log since they can never have come from
+ * nfp_flower_bond_group_id_get().
+ */
+static void
+nfp_flower_bond_group_id_clear(struct nfp_flower_bond *nfp_bond,
+		uint32_t id_clear)
+{
+	bool *group_id_map = nfp_bond->group_id_map;
+
+	if (id_clear >= NFP_FL_BOND_GROUP_MAX || id_clear < NFP_FL_BOND_GROUP_MIN) {
+		PMD_DRV_LOG(ERR, "Try to clear invalid group id %u.", id_clear);
+		return;
+	}
+
+	/* Unconditional store: clearing an already-free slot is harmless. */
+	group_id_map[id_clear] = false;
+}
+
+/**
+ * Allocate the lowest free bond group ID.
+ *
+ * Scans group_id_map in [NFP_FL_BOND_GROUP_MIN, NFP_FL_BOND_GROUP_MAX)
+ * for a slot still marked false, marks it used and hands it back.
+ *
+ * NOTE(review): the map itself is not locked here; callers appear to
+ * run under nfp_bond->mutex — confirm.
+ *
+ * @return 0 on success with *id_ret set, -ENOSPC when all IDs are in use.
+ */
+static int
+nfp_flower_bond_group_id_get(struct nfp_flower_bond *nfp_bond,
+		uint32_t *id_ret)
+{
+	uint32_t id;
+	bool *group_id_map = nfp_bond->group_id_map;
+
+	for (id = NFP_FL_BOND_GROUP_MIN; id < NFP_FL_BOND_GROUP_MAX; id++) {
+		if (!group_id_map[id]) {
+			group_id_map[id] = true;
+			*id_ret = id;
+			return 0;
+		}
+	}
+
+	return -ENOSPC;
+}
+
 static enum nfp_flower_bond_batch
 nfp_flower_bond_remove_node(struct nfp_flower_bond *nfp_bond,
 		struct nfp_bond_group *entry,
@@ -65,6 +98,7 @@  nfp_flower_bond_remove_node(struct nfp_flower_bond *nfp_bond,
 	entry->offloaded = false;
 
 	if (entry->to_destroy) {
+		nfp_flower_bond_group_id_clear(nfp_bond, entry->group_id);
 		LIST_REMOVE(entry, next);
 		rte_free(entry);
 	}
@@ -139,6 +173,7 @@  nfp_flower_bond_cleanup(struct nfp_app_fw_flower *app_fw_flower)
 
 	pthread_mutex_lock(&nfp_bond->mutex);
 	LIST_FOREACH(entry, &nfp_bond->group_list, next) {
+		nfp_flower_bond_group_id_clear(nfp_bond, entry->group_id);
 		LIST_REMOVE(entry, next);
 		rte_free(entry);
 	}
@@ -271,16 +306,162 @@  nfp_flower_bond_change_linkstatus_event(struct nfp_flower_bond *nfp_bond,
 	*bond_port_flags |= NFP_FL_BOND_PORT_CHANGED;
 }
 
+/**
+ * Allocate and initialize a bond group node for @bond_dev.
+ *
+ * Reserves a group ID, allocates the node on the local NUMA socket and
+ * links it into nfp_bond->group_list. The reserved ID is released again
+ * if the allocation fails.
+ *
+ * NOTE(review): callers appear to hold nfp_bond->mutex — confirm.
+ *
+ * @return the new group, or NULL on ID exhaustion or allocation failure.
+ */
+static struct nfp_bond_group *
+nfp_flower_bond_group_create(struct nfp_flower_bond *nfp_bond,
+		struct rte_eth_dev *bond_dev)
+{
+	uint32_t id;
+	unsigned int numa_node;
+	struct nfp_bond_group *group;
+
+	if (nfp_flower_bond_group_id_get(nfp_bond, &id) < 0)
+		return NULL;
+
+	numa_node = rte_socket_id();
+	group = rte_zmalloc_socket(NULL, sizeof(struct nfp_bond_group),
+			RTE_CACHE_LINE_SIZE, numa_node);
+	if (group == NULL) {
+		PMD_DRV_LOG(ERR, "Unable to malloc memory for nfp bond group");
+		nfp_flower_bond_group_id_clear(nfp_bond, id);
+		return NULL;
+	}
+
+	group->group_id = id;
+	group->main_dev = bond_dev;
+	group->dirty = true;
+	group->offloaded = false;
+	group->to_remove = false;
+	group->to_destroy = false;
+	group->member_cnt = 0;
+	group->group_inst = ++nfp_bond->global_inst;
+	LIST_INSERT_HEAD(&nfp_bond->group_list, group, next);
+
+	return group;
+}
+
+/**
+ * Handle a CHANGEUPPER event for a bonding device.
+ *
+ * Walks the bonding device's member list and decides whether the group
+ * can be offloaded: every member must be an NFP physical representor
+ * belonging to the same flower app, and the bond mode must be
+ * active-backup, or balance/802.3ad with a layer3+4 transmit policy.
+ * A new group node is created on first sight of an offloadable bond;
+ * a non-offloadable group that was previously offloaded is flagged
+ * for removal instead.
+ *
+ * @return 0 on success, negative errno on failure.
+ */
+static int
+nfp_flower_bond_changeupper_event(struct nfp_flower_bond *nfp_bond,
+		struct rte_eth_dev *bond_dev)
+{
+	uint32_t i;
+	uint16_t port_id;
+	bool can_offload = true;
+	uint16_t nfp_member_count;
+	struct rte_eth_dev *eth_dev;
+	struct nfp_bond_group *group;
+	struct bond_dev_private *internals;
+	struct nfp_flower_representor *repr;
+	struct nfp_app_fw_flower *app_flower;
+
+	internals = bond_dev->data->dev_private;
+	/*
+	 * Check for NULL before the member walk below; previously
+	 * 'internals' was dereferenced in the loop and only tested
+	 * for NULL afterwards, making that test useless.
+	 */
+	if (internals == NULL)
+		return -EINVAL;
+
+	app_flower = nfp_bond->app_fw_flower;
+	nfp_member_count = 0;
+
+	for (i = 0; i < internals->member_count; i++) {
+		port_id = internals->members[i].port_id;
+		eth_dev = &rte_eth_devices[port_id];
+		if (!nfp_flower_is_phy_repr(eth_dev)) {
+			can_offload = false;
+			break;
+		}
+
+		repr = eth_dev->data->dev_private;
+		if (repr->app_fw_flower != app_flower) {
+			can_offload = false;
+			break;
+		}
+
+		/* Active-backup counts only the primary port. */
+		if (internals->current_primary_port == port_id ||
+				internals->mode != BONDING_MODE_ACTIVE_BACKUP)
+			nfp_member_count++;
+	}
+
+	if (internals->mode != BONDING_MODE_ACTIVE_BACKUP &&
+			((internals->mode != BONDING_MODE_BALANCE &&
+			internals->mode != BONDING_MODE_8023AD) ||
+			internals->balance_xmit_policy != BALANCE_XMIT_POLICY_LAYER34)) {
+		can_offload = false;
+		PMD_DRV_LOG(WARNING, "Unable to offload mode %u hash %u.",
+				internals->mode,
+				internals->balance_xmit_policy);
+	}
+
+	pthread_mutex_lock(&nfp_bond->mutex);
+
+	group = nfp_flower_bond_find_group(nfp_bond, bond_dev);
+	if (nfp_member_count == 0 || !can_offload) {
+		/* A previously offloaded group must be removed from firmware. */
+		if (group != NULL && group->offloaded)
+			group->to_remove = true;
+
+		pthread_mutex_unlock(&nfp_bond->mutex);
+		return 0;
+	}
+
+	if (group == NULL) {
+		group = nfp_flower_bond_group_create(nfp_bond, bond_dev);
+		if (group == NULL) {
+			pthread_mutex_unlock(&nfp_bond->mutex);
+			return -ENOMEM;
+		}
+	}
+
+	group->dirty = true;
+	group->member_cnt = nfp_member_count;
+	group->to_remove = false;
+
+	pthread_mutex_unlock(&nfp_bond->mutex);
+
+	return 0;
+}
+
+/**
+ * Mark the bond group tracked for @bond_dev for removal and destruction.
+ *
+ * Only sets the to_remove/to_destroy flags under the mutex; the actual
+ * firmware removal and freeing of the node happen elsewhere (see the
+ * to_destroy handling in nfp_flower_bond_remove_node()). A device with
+ * no tracked group only produces a warning.
+ */
+static void
+nfp_flower_bond_group_delete(struct nfp_flower_bond *nfp_bond,
+		struct rte_eth_dev *bond_dev)
+{
+	struct nfp_bond_group *group;
+
+	pthread_mutex_lock(&nfp_bond->mutex);
+
+	group = nfp_flower_bond_find_group(nfp_bond, bond_dev);
+	if (group == NULL) {
+		pthread_mutex_unlock(&nfp_bond->mutex);
+		PMD_DRV_LOG(WARNING, "Untracked bond got unregistered %s",
+				bond_dev->device->name);
+		return;
+	}
+
+	group->to_remove = true;
+	group->to_destroy = true;
+
+	pthread_mutex_unlock(&nfp_bond->mutex);
+}
+
 int
 nfp_flower_bond_event_handle(struct nfp_flower_bond *nfp_bond,
 		struct rte_eth_dev *bond_dev,
 		struct rte_eth_dev *nfp_dev,
 		enum nfp_flower_bond_event event)
 {
+	int ret = 0;
+
 	switch (event) {
+	case NFP_FLOWER_CHANGEUPPER:
+		ret = nfp_flower_bond_changeupper_event(nfp_bond, bond_dev);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Change upper event can not work.");
+			return ret;
+		}
+		break;
 	case NFP_FLOWER_CHANGELINKSTATUS:
 		nfp_flower_bond_change_linkstatus_event(nfp_bond, bond_dev, nfp_dev);
 		break;
+	case NFP_FLOWER_UNREGISTER:
+		nfp_flower_bond_group_delete(nfp_bond, bond_dev);
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid bond offload event.");
 		return -1;
@@ -288,7 +469,7 @@  nfp_flower_bond_event_handle(struct nfp_flower_bond *nfp_bond,
 
 	nfp_flower_bond_do_work(nfp_bond);
 
-	return 0;
+	return ret;
 }
 
 enum nfp_flower_bond_batch
diff --git a/drivers/net/nfp/flower/nfp_flower_bond.h b/drivers/net/nfp/flower/nfp_flower_bond.h
index 80f56a6780..f73589265f 100644
--- a/drivers/net/nfp/flower/nfp_flower_bond.h
+++ b/drivers/net/nfp/flower/nfp_flower_bond.h
@@ -109,6 +109,8 @@  struct nfp_flower_bond {
 	uint32_t global_inst;
 	/** Incremented for each config packet sent */
 	uint32_t pkt_num;
+	/** Bond group ID usage map; true means the ID is in use */
+	bool group_id_map[NFP_FL_BOND_GROUP_MAX];
 	/** Pointer to the flower app */
 	struct nfp_app_fw_flower *app_fw_flower;
 };
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 2810a7a271..43106da90e 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -390,6 +390,25 @@  nfp_flower_repr_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * bond_notify_member callback for NFP representor ports.
+ *
+ * If any member of the bonding device is not an NFP physical
+ * representor the bond is treated as unregistered (UNREGISTER event);
+ * otherwise a CHANGEUPPER event is handed to the bond offload logic.
+ * When the flower app lacks bond offload support the notification is
+ * silently ignored and 0 is returned.
+ */
+static int
+nfp_flower_repr_bond_notify_member(struct rte_eth_dev *dev,
+		struct rte_eth_dev *bond_dev)
+{
+	struct nfp_flower_representor *repr;
+	enum nfp_flower_bond_event event = NFP_FLOWER_CHANGEUPPER;
+
+	if (!nfp_flower_bond_all_member_are_phyrepr(bond_dev))
+		event = NFP_FLOWER_UNREGISTER;
+
+	repr = dev->data->dev_private;
+
+	if (nfp_flower_support_bond_offload(repr->app_fw_flower))
+		return nfp_flower_bond_event_handle(repr->app_fw_flower->nfp_bond,
+				bond_dev, dev, event);
+
+	return 0;
+}
+
 static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = {
 	.dev_infos_get        = nfp_flower_repr_dev_infos_get,
 
@@ -437,6 +456,8 @@  static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
 
 	.flow_ops_get         = nfp_flow_ops_get,
 	.mtr_ops_get          = nfp_net_mtr_ops_get,
+
+	.bond_notify_member   = nfp_flower_repr_bond_notify_member,
 };
 
 static uint32_t