[v2,3/3] net/mlx5: fix invalid counter query
diff mbox series

Message ID 1595404727-164521-3-git-send-email-suanmingm@mellanox.com
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers show
Series
  • [v2,1/3] net/mlx5: separate aging counter pool range
Related show

Checks

Context Check Description
ci/travis-robot success Travis build: passed
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Suanming Mou July 22, 2020, 7:58 a.m. UTC
Currently, the counter query requires that the counter ID be 4-aligned.
In non-batch mode, the counter pool might get a counter ID that is not
4-aligned. In this case, the counter should be skipped, or the query
will fail.

Skip any counter whose ID is not 4-aligned as the first counter in a
non-batch counter pool, to avoid an invalid counter query. Once a new
min_dcs ID smaller than the skipped counters' IDs appears in the pool,
the skipped counters are returned to the pool free list for use.

Fixes: 5382d28c2110 ("net/mlx5: accelerate DV flow counter transactions")
Cc: stable@dpdk.org

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5.h         |  6 ++-
 drivers/net/mlx5/mlx5_flow.c    |  6 +++
 drivers/net/mlx5/mlx5_flow_dv.c | 94 ++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 103 insertions(+), 3 deletions(-)

Patch
diff mbox series

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5d7d609..0ecfc76 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -366,8 +366,9 @@  struct mlx5_flow_counter {
 struct mlx5_flow_counter_ext {
 	uint32_t shared:1; /**< Share counter ID with other flow rules. */
 	uint32_t batch: 1;
 	/**< Whether the counter was allocated by batch command. */
+	uint32_t skipped:1; /* This counter is skipped or not. */
-	uint32_t ref_cnt:30; /**< Reference counter. */
+	uint32_t ref_cnt:29; /**< Reference counter. */
 	uint32_t id; /**< User counter ID. */
 	union {  /**< Holds the counters for the rule. */
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
@@ -390,8 +391,9 @@  struct mlx5_flow_counter_pool {
 		rte_atomic64_t a64_dcs;
 	};
 	/* The devx object of the minimum counter ID. */
-	uint32_t index:29; /* Pool index in container. */
+	uint32_t index:28; /* Pool index in container. */
 	uint32_t type:2; /* Memory type behind the counter array. */
+	uint32_t skip_cnt:1; /* Pool contains skipped counter. */
 	volatile uint32_t query_gen:1; /* Query round. */
 	rte_spinlock_t sl; /* The pool lock. */
 	struct mlx5_counter_stats_raw *raw;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index b56bee4..40a8575 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5974,6 +5974,11 @@  struct mlx5_meter_domains_infos *
 		goto set_alarm;
 	dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
 							      (&pool->a64_dcs);
+	if (dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) {
+		/* Pool without valid counter. */
+		pool->raw_hw = NULL;
+		goto next_pool;
+	}
 	offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
 	/*
 	 * Identify the counters released between query trigger and query
@@ -5998,6 +6003,7 @@  struct mlx5_meter_domains_infos *
 	pool->raw_hw->min_dcs_id = dcs->id;
 	LIST_REMOVE(pool->raw_hw, next);
 	sh->cmng.pending_queries++;
+next_pool:
 	pool_index++;
 	if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
 		batch ^= 0x1;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2fc4457..a2b7329 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4408,6 +4408,66 @@  struct field_modify_info modify_tcp[] = {
 }
 
 /**
+ * Restore skipped counters in the pool.
+ *
+ * As the counter pool query requires the first counter dcs
+ * ID to be 4-aligned, if the pool counters with the
+ * min_dcs ID are not 4-aligned, the counters will
+ * be skipped.
+ * Once another min_dcs ID smaller than these skipped
+ * counters' dcs IDs appears, the skipped counters will be
+ * safe to use.
+ * Should be called when min_dcs is updated.
+ *
+ * @param[in] pool
+ *   Current counter pool.
+ * @param[in] last_min_dcs
+ *   Last min_dcs.
+ */
+static void
+flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool,
+			struct mlx5_devx_obj *last_min_dcs)
+{
+	struct mlx5_flow_counter_ext *cnt_ext;
+	uint32_t offset, new_offset;
+	uint32_t skip_cnt = 0;
+	uint32_t i;
+
+	if (!pool->skip_cnt)
+		return;
+	/*
+	 * If last min_dcs is not valid, the skipped counters may be even
+	 * after last min_dcs; set the offset to the whole pool.
+	 */
+	if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
+		offset = MLX5_COUNTERS_PER_POOL;
+	else
+		offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL;
+	new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL;
+	/*
+	 * Check the counters from 1 to the last_min_dcs range. Counters
+	 * before the new min_dcs indicate the pool still has skipped
+	 * counters. Counters skipped after the new min_dcs will be ready
+	 * to use. The offset 0 counter must be empty or min_dcs, so start
+	 */
+	for (i = 1; i < offset; i++) {
+		cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+		if (cnt_ext->skipped) {
+			if (i > new_offset) {
+				cnt_ext->skipped = 0;
+				TAILQ_INSERT_TAIL
+					(&pool->counters[pool->query_gen],
+					 MLX5_POOL_GET_CNT(pool, i), next);
+			} else {
+				skip_cnt++;
+			}
+		}
+	}
+	if (!skip_cnt)
+		pool->skip_cnt = 0;
+}
+
+/**
  * Prepare a new counter and/or a new counter pool.
  *
  * @param[in] dev
@@ -4432,6 +4492,7 @@  struct field_modify_info modify_tcp[] = {
 	struct mlx5_pools_container *cont;
 	struct mlx5_flow_counter_pool *pool;
 	struct mlx5_counters tmp_tq;
+	struct mlx5_devx_obj *last_min_dcs;
 	struct mlx5_devx_obj *dcs = NULL;
 	struct mlx5_flow_counter *cnt;
 	uint32_t add2other;
@@ -4466,13 +4527,44 @@  struct field_modify_info modify_tcp[] = {
 				}
 			}
 		}
-		if (dcs->id < pool->min_dcs->id)
+		if ((dcs->id < pool->min_dcs->id ||
+		    pool->min_dcs->id &
+		    (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) &&
+		    !(dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) {
+			/*
+			 * Update the pool min_dcs only if the current dcs is
+			 * valid and the existing min_dcs is not valid or is
+			 * greater than the new dcs.
+			 */
+			last_min_dcs = pool->min_dcs;
 			rte_atomic64_set(&pool->a64_dcs,
 					 (int64_t)(uintptr_t)dcs);
+			/*
+			 * Restore any skipped counters if the new min_dcs
+			 * ID is smaller or min_dcs is not valid.
+			 */
+			if (dcs->id < last_min_dcs->id ||
+			    last_min_dcs->id &
+			    (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
+				flow_dv_counter_restore(pool, last_min_dcs);
+		}
 		i = dcs->id % MLX5_COUNTERS_PER_POOL;
 		cnt = MLX5_POOL_GET_CNT(pool, i);
 		cnt->pool = pool;
 		MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
+		/*
+		 * If min_dcs is not valid, it means the newly allocated dcs
+		 * also failed to become the valid min_dcs, so just skip it.
+		 * Or if min_dcs is valid and the new dcs ID is smaller than
+		 * min_dcs but did not become the min_dcs, also skip it.
+		 */
+		if (pool->min_dcs->id &
+		    (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) ||
+		    dcs->id < pool->min_dcs->id) {
+			MLX5_GET_POOL_CNT_EXT(pool, i)->skipped = 1;
+			pool->skip_cnt = 1;
+			goto retry;
+		}
 		if (add2other) {
 			TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen],
 					  cnt, next);