[4/4] net/mlx5: fix flow configure validation

Message ID 20240306202150.79577-4-dsosnowski@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series [1/4] net/mlx5/hws: fix direct index insert on dep wqe

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS
ci/github-robot: build success github build: passed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-sample-apps-testing success Testing PASS
ci/iol-broadcom-Performance fail Performance Testing issues
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS

Commit Message

Dariusz Sosnowski March 6, 2024, 8:21 p.m. UTC
  There is an existing limitation in the mlx5 PMD that all configured flow
queues must have the same size. Even though this condition is checked,
some allocations are done before that check. This led to a segmentation
fault during rollback on error in the rte_flow_configure() implementation.

This patch fixes that by reorganizing the validation so that configuration
options are validated before any allocations are done, and by adding the
necessary NULL checks to the error rollback.
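
To illustrate the constraint the new validation enforces, below is a minimal
sketch of how an application might call rte_flow_configure() with uniform
queue sizes. The helper name configure_flow_queues and the attribute values
are illustrative assumptions, not part of this patch:

	#include <stdint.h>
	#include <rte_flow.h>

	/* Sketch only: configure nb_queues HWS flow queues on port_id,
	 * all with the same size as required by the mlx5 PMD.
	 */
	static int
	configure_flow_queues(uint16_t port_id, uint16_t nb_queues,
			      uint32_t queue_size)
	{
		const struct rte_flow_port_attr port_attr = {
			.nb_counters = 0, /* illustrative value */
		};
		const struct rte_flow_queue_attr queue_attr = {
			.size = queue_size, /* same size for every queue */
		};
		const struct rte_flow_queue_attr *attr_list[nb_queues];
		struct rte_flow_error error;
		uint16_t i;

		/* Point every queue at the same attributes so sizes match. */
		for (i = 0; i < nb_queues; i++)
			attr_list[i] = &queue_attr;
		return rte_flow_configure(port_id, &port_attr, nb_queues,
					  attr_list, &error);
	}

With mismatched sizes in attr_list, the call now fails cleanly with -EINVAL
instead of risking a crash during rollback.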

Bugzilla ID: 1199
Fixes: b401400db24e ("net/mlx5: add port flow configuration")
Cc: stable@dpdk.org

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c | 62 +++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 19 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 17ab3a98fe..407a843578 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -10253,6 +10253,38 @@  mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char
 			       RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 }
 
+static int
+flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
+			    uint16_t nb_queue,
+			    const struct rte_flow_queue_attr *queue_attr[],
+			    struct rte_flow_error *error)
+{
+	uint32_t size;
+	unsigned int i;
+
+	if (port_attr == NULL)
+		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Port attributes must be non-NULL");
+
+	if (nb_queue == 0)
+		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "At least one flow queue is required");
+
+	if (queue_attr == NULL)
+		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Queue attributes must be non-NULL");
+
+	size = queue_attr[0]->size;
+	for (i = 1; i < nb_queue; ++i) {
+		if (queue_attr[i]->size != size)
+			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+						  NULL,
+						  "All flow queues must have the same size");
+	}
+
+	return 0;
+}
+
 /**
  * Configure port HWS resources.
  *
@@ -10304,10 +10336,8 @@  flow_hw_configure(struct rte_eth_dev *dev,
 	int ret = 0;
 	uint32_t action_flags;
 
-	if (!port_attr || !nb_queue || !queue_attr) {
-		rte_errno = EINVAL;
-		goto err;
-	}
+	if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error))
+		return -rte_errno;
 	/*
 	 * Calling rte_flow_configure() again is allowed if and only if
 	 * provided configuration matches the initially provided one.
@@ -10354,14 +10384,6 @@  flow_hw_configure(struct rte_eth_dev *dev,
 	/* Allocate the queue job descriptor LIFO. */
 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
 	for (i = 0; i < nb_q_updated; i++) {
-		/*
-		 * Check if the queues' size are all the same as the
-		 * limitation from HWS layer.
-		 */
-		if (_queue_attr[i]->size != _queue_attr[0]->size) {
-			rte_errno = EINVAL;
-			goto err;
-		}
 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
 			     sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;
 	}
@@ -10643,14 +10665,16 @@  flow_hw_configure(struct rte_eth_dev *dev,
 		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
 		priv->shared_host = NULL;
 	}
-	for (i = 0; i < nb_q_updated; i++) {
-		rte_ring_free(priv->hw_q[i].indir_iq);
-		rte_ring_free(priv->hw_q[i].indir_cq);
-		rte_ring_free(priv->hw_q[i].flow_transfer_pending);
-		rte_ring_free(priv->hw_q[i].flow_transfer_completed);
+	if (priv->hw_q) {
+		for (i = 0; i < nb_q_updated; i++) {
+			rte_ring_free(priv->hw_q[i].indir_iq);
+			rte_ring_free(priv->hw_q[i].indir_cq);
+			rte_ring_free(priv->hw_q[i].flow_transfer_pending);
+			rte_ring_free(priv->hw_q[i].flow_transfer_completed);
+		}
+		mlx5_free(priv->hw_q);
+		priv->hw_q = NULL;
 	}
-	mlx5_free(priv->hw_q);
-	priv->hw_q = NULL;
 	if (priv->acts_ipool) {
 		mlx5_ipool_destroy(priv->acts_ipool);
 		priv->acts_ipool = NULL;