[2/2] net/mlx5: fix destroying external representor matched flows

Message ID 20231109085547.1313003-3-suanmingm@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: fix flow rules for external SQ

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/github-robot: build success github build: passed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-sample-apps-testing success Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS

Commit Message

Suanming Mou Nov. 9, 2023, 8:55 a.m. UTC
  The external representor matched SQ flows are managed by the external
SQ; the PMD traffic enable/disable paths should not touch these flows.

This commit adds an extra external list for the external representor
matched SQ flows.

Fixes: 26e1eaf2dac4 ("net/mlx5: support device control for E-Switch default rule")

Cc: stable@dpdk.org

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |  1 +
 drivers/net/mlx5/mlx5_flow.h    |  4 +--
 drivers/net/mlx5/mlx5_flow_hw.c | 45 +++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_trigger.c |  4 +--
 drivers/net/mlx5/mlx5_txq.c     |  4 +--
 5 files changed, 39 insertions(+), 19 deletions(-)
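
For orientation, a minimal sketch of the bookkeeping introduced by this patch, using the names from the diff below (the trimmed structure and helper here are illustrative only, not the actual driver code): control flows created for PMD-owned SQs stay on hw_ctrl_flows and are removed by the PMD traffic disable path, while flows created for externally managed SQs go to the new hw_ext_ctrl_flows list and are only removed together with all control flows.

#include <sys/queue.h>
#include <stdbool.h>

/* Stand-in for the driver's control-flow entry (illustrative). */
struct mlx5_hw_ctrl_flow {
	LIST_ENTRY(mlx5_hw_ctrl_flow) next;
	/* ... flow handle and owner info in the real structure ... */
};

/* Condensed view of the two lists kept in mlx5_priv (sketch only). */
struct mlx5_priv_sketch {
	/* Control flows owned by the PMD; flushed on traffic disable. */
	LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
	/* Control flows matching externally managed SQs; kept across
	 * traffic enable/disable, flushed only with all control flows.
	 */
	LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows;
};

/* The creation path selects the list from the new "external" flag. */
static void
ctrl_flow_track(struct mlx5_priv_sketch *priv,
		struct mlx5_hw_ctrl_flow *entry, bool external)
{
	if (external)
		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
	else
		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
}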
  

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 45ad0701f1..795748eddc 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1855,6 +1855,7 @@  struct mlx5_priv {
 	void *root_drop_action; /* Pointer to root drop action. */
 	rte_spinlock_t hw_ctrl_lock;
 	LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
+	LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows;
 	struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
 	struct rte_flow_template_table *hw_esw_sq_miss_tbl;
 	struct rte_flow_template_table *hw_esw_zero_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index d57b3b5465..8c0b9a4b60 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2874,12 +2874,12 @@  int flow_null_counter_query(struct rte_eth_dev *dev,
 int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
 
 int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
-					 uint32_t sqn);
+					 uint32_t sqn, bool external);
 int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
 					  uint32_t sqn);
 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
 int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
-int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn);
+int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
 int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
 		const struct rte_flow_actions_template_attr *attr,
 		const struct rte_flow_action actions[],
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index d512889682..8a23c7c281 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9189,6 +9189,7 @@  flow_hw_configure(struct rte_eth_dev *dev,
 	priv->nb_queue = nb_q_updated;
 	rte_spinlock_init(&priv->hw_ctrl_lock);
 	LIST_INIT(&priv->hw_ctrl_flows);
+	LIST_INIT(&priv->hw_ext_ctrl_flows);
 	ret = flow_hw_create_ctrl_rx_tables(dev);
 	if (ret) {
 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -11343,6 +11344,8 @@  const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
  *   Index of an action template associated with @p table.
  * @param info
  *   Additional info about control flow rule.
+ * @param external
+ *   External ctrl flow.
  *
  * @return
  *   0 on success, negative errno value otherwise and rte_errno set.
@@ -11355,7 +11358,8 @@  flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
 			 uint8_t item_template_idx,
 			 struct rte_flow_action actions[],
 			 uint8_t action_template_idx,
-			 struct mlx5_hw_ctrl_flow_info *info)
+			 struct mlx5_hw_ctrl_flow_info *info,
+			 bool external)
 {
 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
 	uint32_t queue = CTRL_QUEUE_ID(priv);
@@ -11406,7 +11410,10 @@  flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
 		entry->info = *info;
 	else
 		entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
-	LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
+	if (external)
+		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
+	else
+		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
 	return 0;
 error:
@@ -11580,11 +11587,23 @@  flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
 		mlx5_free(cf);
 		cf = cf_next;
 	}
+	cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
+	while (cf != NULL) {
+		cf_next = LIST_NEXT(cf, next);
+		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
+		if (ret) {
+			rte_errno = ret;
+			return -ret;
+		}
+		LIST_REMOVE(cf, next);
+		mlx5_free(cf);
+		cf = cf_next;
+	}
 	return 0;
 }
 
 int
-mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
+mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
 {
 	uint16_t port_id = dev->data->port_id;
 	struct rte_flow_item_ethdev esw_mgr_spec = {
@@ -11668,7 +11687,7 @@  mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
 		.type = RTE_FLOW_ACTION_TYPE_END,
 	};
 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl,
-				       items, 0, actions, 0, &flow_info);
+				       items, 0, actions, 0, &flow_info, external);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
 			port_id, sqn, ret);
@@ -11699,7 +11718,7 @@  mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
 	};
 	flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl,
-				       items, 0, actions, 0, &flow_info);
+				       items, 0, actions, 0, &flow_info, external);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
 			port_id, sqn, ret);
@@ -11821,7 +11840,7 @@  mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
 	}
 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
 					proxy_priv->hw_esw_zero_tbl,
-					items, 0, actions, 0, &flow_info);
+					items, 0, actions, 0, &flow_info, false);
 }
 
 int
@@ -11876,11 +11895,11 @@  mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
 		return 0;
 	return flow_hw_create_ctrl_flow(dev, dev,
 					priv->hw_tx_meta_cpy_tbl,
-					eth_all, 0, copy_reg_action, 0, &flow_info);
+					eth_all, 0, copy_reg_action, 0, &flow_info, false);
 }
 
 int
-mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn)
+mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rte_flow_item_sq sq_spec = {
@@ -11934,7 +11953,7 @@  mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn)
 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
 	}
 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
-					items, 0, actions, 0, &flow_info);
+					items, 0, actions, 0, &flow_info, external);
 }
 
 static uint32_t
@@ -12065,7 +12084,7 @@  __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
 	/* Without VLAN filtering, only a single flow rule must be created. */
-	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info);
+	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
 }
 
 static int
@@ -12106,7 +12125,7 @@  __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
 		};
 
 		items[1].spec = &vlan_spec;
-		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info))
+		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
 			return -rte_errno;
 	}
 	return 0;
@@ -12150,7 +12169,7 @@  __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
 		if (!memcmp(mac, &cmp, sizeof(*mac)))
 			continue;
 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
-		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info))
+		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
 			return -rte_errno;
 	}
 	return 0;
@@ -12204,7 +12223,7 @@  __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
 
 			items[1].spec = &vlan_spec;
 			if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
-						     &flow_info))
+						     &flow_info, false))
 				return -rte_errno;
 		}
 	}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7bdb897612..d7ecb149fa 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1494,13 +1494,13 @@  mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
 			continue;
 		queue = mlx5_txq_get_sqn(txq);
 		if ((priv->representor || priv->master) && config->dv_esw_en) {
-			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue)) {
+			if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) {
 				mlx5_txq_release(dev, i);
 				goto error;
 			}
 		}
 		if (config->dv_esw_en && config->repr_matching) {
-			if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue)) {
+			if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue, false)) {
 				mlx5_txq_release(dev, i);
 				goto error;
 			}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index ccdf2ffb14..1ac43548b2 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1311,10 +1311,10 @@  rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
 	}
 #ifdef HAVE_MLX5_HWS_SUPPORT
 	if (priv->sh->config.dv_flow_en == 2) {
-		if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num))
+		if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true))
 			return -rte_errno;
 		if (priv->sh->config.repr_matching &&
-		    mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num)) {
+		    mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) {
 			mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
 			return -rte_errno;
 		}
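
For reference, a minimal application-side sketch of the path fixed by this last hunk (the wrapper function and its arguments are placeholders, not part of the patch): rte_pmd_mlx5_external_sq_enable() now requests the control rules with external = true, so they are tracked on hw_ext_ctrl_flows and are no longer destroyed by the PMD's own traffic enable/disable cycle.

#include <stdint.h>
#include <rte_errno.h>
#include <rte_pmd_mlx5.h>

/* Register an externally created SQ with the mlx5 PMD so that the
 * SQ-miss and representor-matching control rules are installed for it.
 * Returns 0 on success, a negative errno value otherwise.
 */
static int
register_external_sq(uint16_t port_id, uint32_t sq_num)
{
	if (rte_pmd_mlx5_external_sq_enable(port_id, sq_num) != 0)
		return -rte_errno;
	return 0;
}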