[v3,03/22] net/mlx5: add index pool foreach define

Message ID 20210702061816.10454-4-suanmingm@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: insertion rate optimization

Checks

ci/checkpatch: warning (coding style issues)

Commit Message

Suanming Mou July 2, 2021, 6:17 a.m. UTC
  In some cases, the application may want to know all the allocated
indexes in order to apply some operations to them.

This commit adds indexed pool functions to support the foreach
operation.
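
A minimal usage sketch (illustration only: "pool", "struct my_entry"
and use_entry() are hypothetical, while MLX5_IPOOL_FOREACH and the
iterator functions come from this patch):

	uint32_t idx;
	struct my_entry *entry;

	/* Flushes the pool caches, then visits every allocated entry. */
	MLX5_IPOOL_FOREACH(pool, idx, entry) {
		/* "idx" is the 1-based pool index of "entry". */
		use_entry(entry, idx);
	}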

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_utils.c | 98 ++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_utils.h | 12 +++++
 2 files changed, 110 insertions(+)
  

Patch

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 215024632d..0ed279e162 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -839,6 +839,104 @@ mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
 	return 0;
 }
 
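+/*
+ * Rebuild the bitmap of allocated indexes: start with all bits set,
+ * then clear the bit of every index sitting in the global cache or in
+ * a per-lcore cache, since cached indexes are free, not allocated.
+ */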
+void
+mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
+{
+	uint32_t i, j;
+	struct mlx5_indexed_cache *gc;
+	struct rte_bitmap *ibmp;
+	uint32_t bmp_num, mem_size;
+
+	if (!pool->cfg.per_core_cache)
+		return;
+	gc = pool->gc;
+	if (!gc)
+		return;
+	/* Rebuild the bitmap of allocated indexes. */
+	bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
+	mem_size = rte_bitmap_get_memory_footprint(bmp_num);
+	pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
+					 RTE_CACHE_LINE_SIZE, rte_socket_id());
+	if (!pool->bmp_mem) {
+		DRV_LOG(ERR, "Ipool bitmap memory allocation failed.");
+		return;
+	}
+	ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
+	if (!ibmp) {
+		pool->cfg.free(pool->bmp_mem);
+		pool->bmp_mem = NULL;
+		DRV_LOG(ERR, "Ipool bitmap creation failed.");
+		return;
+	}
+	pool->ibmp = ibmp;
+	/* Clear the bits of indexes cached in the global cache. */
+	for (i = 0; i < gc->len; i++)
+		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
+	/* Clear the bits of indexes cached in each per-lcore cache. */
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];
+
+		if (!ilc)
+			continue;
+		for (j = 0; j < ilc->len; j++)
+			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
+	}
+}
+
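+/*
+ * Scan the bitmap built by mlx5_ipool_flush_cache() for the next
+ * allocated index. Bitmap positions are 0-based while pool indexes
+ * are 1-based, hence the increment before the lookup. The bitmap
+ * memory is released once the scan runs out of set bits.
+ */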
+static void *
+mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
+{
+	struct rte_bitmap *ibmp;
+	uint64_t slab = 0;
+	uint32_t iidx = *pos;
+
+	ibmp = pool->ibmp;
+	if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
+		if (pool->bmp_mem) {
+			pool->cfg.free(pool->bmp_mem);
+			pool->bmp_mem = NULL;
+			pool->ibmp = NULL;
+		}
+		return NULL;
+	}
+	iidx += __builtin_ctzll(slab);
+	rte_bitmap_clear(ibmp, iidx);
+	iidx++;
+	*pos = iidx;
+	return mlx5_ipool_get_cache(pool, iidx);
+}
+
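+/* Return the next allocated entry at or after *pos, updating *pos. */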
+void *
+mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
+{
+	uint32_t idx = *pos;
+	void *entry;
+
+	if (pool->cfg.per_core_cache)
+		return mlx5_ipool_get_next_cache(pool, pos);
+	while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
+		entry = mlx5_ipool_get(pool, idx);
+		if (entry) {
+			*pos = idx;
+			return entry;
+		}
+		idx++;
+	}
+	return NULL;
+}
+
 void
 mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
 {
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 0469062695..737dd7052d 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -261,6 +261,9 @@ struct mlx5_indexed_pool {
 			/* Global cache. */
 			struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE];
 			/* Local cache. */
+			struct rte_bitmap *ibmp;
+			void *bmp_mem;
+			/* Allocated objects bitmap, used during flush. */
 		};
 	};
 #ifdef POOL_DEBUG
@@ -862,4 +865,13 @@ struct {								\
 	     (entry);							\
 	     idx++, (entry) = mlx5_l3t_get_next((tbl), &idx))
 
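+/*
+ * Iterate over all allocated entries. The caches are flushed into a
+ * bitmap first, so iteration must not run concurrently with allocs/frees.
+ */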
+#define MLX5_IPOOL_FOREACH(ipool, idx, entry)				\
+	for ((idx) = 0, mlx5_ipool_flush_cache((ipool)),		\
+	    (entry) = mlx5_ipool_get_next((ipool), &idx);		\
+	    (entry); idx++, (entry) = mlx5_ipool_get_next((ipool), &idx))
+
 #endif /* RTE_PMD_MLX5_UTILS_H_ */