[2/2] net/mlx5: improve pattern template validation

Message ID 20240202150617.328603-3-getelson@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers
Series net/mlx5: update pattern validations |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/intel-Functional success Functional PASS

Commit Message

Gregory Etelson Feb. 2, 2024, 3:06 p.m. UTC
  The current PMD implementation successfully validates pattern templates
that will always be rejected later, during table template creation.

This patch adds basic HWS verifications to pattern template validation to
ensure that the pattern can be used in a table template.

The PMD updates `rte_errno` if pattern template validation fails:

E2BIG - pattern too big for PMD
ENOTSUP - pattern not supported by PMD
ENOMEM - PMD allocation failure

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |   1 +
 drivers/net/mlx5/mlx5_flow_hw.c | 121 ++++++++++++++++++++++++++++++--
 2 files changed, 117 insertions(+), 5 deletions(-)
  

Comments

Dariusz Sosnowski Feb. 6, 2024, 12:26 p.m. UTC | #1
Hi Gregory,

> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Friday, February 2, 2024 16:06
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Maayan Kashani
> <mkashani@nvidia.com>; Dariusz Sosnowski <dsosnowski@nvidia.com>;
> Slava Ovsiienko <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>;
> Suanming Mou <suanmingm@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Subject: [PATCH 2/2] net/mlx5: improve pattern template validation
> 
> Current PMD implementation validates pattern templates that will always be
> rejected during table template creation.
> 
> The patch adds basic HWS verifications to pattern validation to ensure that the
> pattern can be used in table template.
> 
> PMD updates `rte_errno` if pattern template validation failed:
> 
> E2BIG - pattern too big for PMD
> ENOTSUP - pattern not supported by PMD
> ENOMEM - PMD allocation failure
> 
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
>
> [snip]
>
> --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
> index da873ae2e2..443aa5fcf0 100644
> --- a/drivers/net/mlx5/mlx5_flow_hw.c
> +++ b/drivers/net/mlx5/mlx5_flow_hw.c
> @@ -6840,6 +6840,46 @@ flow_hw_pattern_has_sq_match(const struct
> rte_flow_item *items)
>  	return false;
>  }
> 
> +static int
> +pattern_template_validate(struct rte_eth_dev *dev,
> +			  struct rte_flow_pattern_template *pt[], uint32_t
> pt_num) {
> +	uint32_t group = 0;
> +	struct rte_flow_error error;
> +	struct rte_flow_template_table_attr tbl_attr = {
> +		.nb_flows = 64,
> +		.insertion_type =
> RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
> +		.hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
> +		.flow_attr = {
> +			.ingress = pt[0]->attr.ingress,
> +			.egress = pt[0]->attr.egress,
> +			.transfer = pt[0]->attr.transfer
> +		}
> +	};
> +	struct mlx5_priv *priv = dev->data->dev_private;
> +	struct rte_flow_actions_template *action_template;
> +
> +	if (pt[0]->attr.ingress)
> +		action_template = priv-
> >action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
> +	else if (pt[0]->attr.egress)
> +		action_template = priv-
> >action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
> +	else if (pt[0]->attr.transfer)
> +		action_template = priv-
> >action_template_drop[MLX5DR_TABLE_TYPE_FDB];
> +	else
> +		return EINVAL;
> +	do {
> +		struct rte_flow_template_table *tmpl_tbl;
> +
> +		tbl_attr.flow_attr.group = group;
> +		tmpl_tbl = flow_hw_template_table_create(dev, &tbl_attr, pt, pt_num,
flow_hw_table_create() should be called here.
If E-Switch is enabled, flow_hw_template_table_create() will perform internal group translation for the FDB and TX domains,
so group 0 will be left untested.

> +							 &action_template, 1,
> NULL);
> +		if (!tmpl_tbl)
> +			return rte_errno;
> +		flow_hw_table_destroy(dev, tmpl_tbl, &error);
I don't think that passing the error struct is needed here, since this error is not propagated up.

> [snip]
>
> @@ -9184,6 +9235,66 @@ flow_hw_compare_config(const struct
> mlx5_flow_hw_attr *hw_attr,
>  	return true;
>  }
> 
> +/*
> + * No need to explicitly release drop action templates on port stop.
> + * Drop action templates release with other action templates during
> + * mlx5_dev_close -> flow_hw_resource_release ->
> +flow_hw_actions_template_destroy  */ static void
> +action_template_drop_release(struct rte_eth_dev *dev) {
> +	int i;
> +	struct mlx5_priv *priv = dev->data->dev_private;
> +
> +	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
> +		if (!priv->action_template_drop[i])
> +			continue;
> +		flow_hw_actions_template_destroy(dev,
> +						 priv-
> >action_template_drop[i],
> +						 NULL);
> +	}
I'd suggest zeroing the action_template_drop array entries here.
In case of a failure inside rte_flow_configure(), the rollback code called on error must
reset the affected fields in private data to allow a safe subsequent call to rte_flow_configure().

> [snip]
>
> @@ -9621,10 +9735,7 @@ flow_hw_resource_release(struct rte_eth_dev
> *dev)
>  	flow_hw_flush_all_ctrl_flows(dev);
>  	flow_hw_cleanup_tx_repr_tagging(dev);
>  	flow_hw_cleanup_ctrl_rx_tables(dev);
> -	while (!LIST_EMPTY(&priv->flow_hw_grp)) {
> -		grp = LIST_FIRST(&priv->flow_hw_grp);
> -		flow_hw_group_unset_miss_group(dev, grp, NULL);
> -	}
> +	action_template_drop_release(dev);
Why is the miss-actions cleanup code removed? It does not seem related to this patch.

Best regards,
Dariusz Sosnowski
  

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f2e2e04429..e98db91888 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1965,6 +1965,7 @@  struct mlx5_priv {
 	struct mlx5_aso_mtr_pool *hws_mpool; /* HW steering's Meter pool. */
 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
 	/**< HW steering templates used to create control flow rules. */
+	struct rte_flow_actions_template *action_template_drop[MLX5DR_TABLE_TYPE_MAX];
 #endif
 	struct rte_eth_dev *shared_host; /* Host device for HW steering. */
 	uint16_t shared_refcnt; /* HW steering host reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index da873ae2e2..443aa5fcf0 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -6840,6 +6840,46 @@  flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
 	return false;
 }
 
+static int
+pattern_template_validate(struct rte_eth_dev *dev,
+			  struct rte_flow_pattern_template *pt[], uint32_t pt_num)
+{
+	uint32_t group = 0;
+	struct rte_flow_error error;
+	struct rte_flow_template_table_attr tbl_attr = {
+		.nb_flows = 64,
+		.insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
+		.hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
+		.flow_attr = {
+			.ingress = pt[0]->attr.ingress,
+			.egress = pt[0]->attr.egress,
+			.transfer = pt[0]->attr.transfer
+		}
+	};
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_actions_template *action_template;
+
+	if (pt[0]->attr.ingress)
+		action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
+	else if (pt[0]->attr.egress)
+		action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
+	else if (pt[0]->attr.transfer)
+		action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
+	else
+		return EINVAL;
+	do {
+		struct rte_flow_template_table *tmpl_tbl;
+
+		tbl_attr.flow_attr.group = group;
+		tmpl_tbl = flow_hw_template_table_create(dev, &tbl_attr, pt, pt_num,
+							 &action_template, 1, NULL);
+		if (!tmpl_tbl)
+			return rte_errno;
+		flow_hw_table_destroy(dev, tmpl_tbl, &error);
+	} while (++group <= 1);
+	return 0;
+}
+
 /**
  * Create flow item template.
  *
@@ -6975,8 +7015,19 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		}
 	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+	rte_errno = pattern_template_validate(dev, &it, 1);
+	if (rte_errno)
+		goto error;
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
+error:
+	flow_hw_flex_item_release(dev, &it->flex_item);
+	claim_zero(mlx5dr_match_template_destroy(it->mt));
+	mlx5_free(it);
+	rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Failed to create pattern template");
+	return NULL;
+
 }
 
 /**
@@ -9184,6 +9235,66 @@  flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
 	return true;
 }
 
+/*
+ * No need to explicitly release drop action templates on port stop.
+ * Drop action templates are released together with the other action templates during
+ * mlx5_dev_close -> flow_hw_resource_release -> flow_hw_actions_template_destroy
+ */
+static void
+action_template_drop_release(struct rte_eth_dev *dev)
+{
+	int i;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
+		if (!priv->action_template_drop[i])
+			continue;
+		flow_hw_actions_template_destroy(dev,
+						 priv->action_template_drop[i],
+						 NULL);
+	}
+}
+
+static int
+action_template_drop_init(struct rte_eth_dev *dev,
+			  struct rte_flow_error *error)
+{
+	const struct rte_flow_action drop[2] = {
+		[0] = { .type = RTE_FLOW_ACTION_TYPE_DROP },
+		[1] = { .type = RTE_FLOW_ACTION_TYPE_END },
+	};
+	const struct rte_flow_action *actions = drop;
+	const struct rte_flow_action *masks = drop;
+	const struct rte_flow_actions_template_attr attr[MLX5DR_TABLE_TYPE_MAX] = {
+		[MLX5DR_TABLE_TYPE_NIC_RX] = { .ingress = 1 },
+		[MLX5DR_TABLE_TYPE_NIC_TX] = { .egress = 1 },
+		[MLX5DR_TABLE_TYPE_FDB] = { .transfer = 1 }
+	};
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX] =
+		flow_hw_actions_template_create(dev,
+						&attr[MLX5DR_TABLE_TYPE_NIC_RX],
+						actions, masks, error);
+	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX])
+		return -1;
+	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX] =
+		flow_hw_actions_template_create(dev,
+						&attr[MLX5DR_TABLE_TYPE_NIC_TX],
+						actions, masks, error);
+	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX])
+		return -1;
+	if (priv->sh->config.dv_esw_en && priv->master) {
+		priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB] =
+			flow_hw_actions_template_create(dev,
+							&attr[MLX5DR_TABLE_TYPE_FDB],
+							actions, masks, error);
+		if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB])
+			return -1;
+	}
+	return 0;
+}
+
 /**
  * Configure port HWS resources.
  *
@@ -9426,6 +9537,9 @@  flow_hw_configure(struct rte_eth_dev *dev,
 	rte_spinlock_init(&priv->hw_ctrl_lock);
 	LIST_INIT(&priv->hw_ctrl_flows);
 	LIST_INIT(&priv->hw_ext_ctrl_flows);
+	ret = action_template_drop_init(dev, error);
+	if (ret)
+		goto err;
 	ret = flow_hw_create_ctrl_rx_tables(dev);
 	if (ret) {
 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -9559,6 +9673,7 @@  flow_hw_configure(struct rte_eth_dev *dev,
 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
 		priv->hws_cpool = NULL;
 	}
+	action_template_drop_release(dev);
 	mlx5_flow_quota_destroy(dev);
 	flow_hw_destroy_send_to_kernel_action(priv);
 	flow_hw_free_vport_actions(priv);
@@ -9612,7 +9727,6 @@  flow_hw_resource_release(struct rte_eth_dev *dev)
 	struct rte_flow_template_table *tbl;
 	struct rte_flow_pattern_template *it;
 	struct rte_flow_actions_template *at;
-	struct mlx5_flow_group *grp;
 	uint32_t i;
 
 	if (!priv->dr_ctx)
@@ -9621,10 +9735,7 @@  flow_hw_resource_release(struct rte_eth_dev *dev)
 	flow_hw_flush_all_ctrl_flows(dev);
 	flow_hw_cleanup_tx_repr_tagging(dev);
 	flow_hw_cleanup_ctrl_rx_tables(dev);
-	while (!LIST_EMPTY(&priv->flow_hw_grp)) {
-		grp = LIST_FIRST(&priv->flow_hw_grp);
-		flow_hw_group_unset_miss_group(dev, grp, NULL);
-	}
+	action_template_drop_release(dev);
 	while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {
 		tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
 		flow_hw_table_destroy(dev, tbl, NULL);