diff mbox series

[v3,13/14] net/mlx5: make ASO meter queue thread-safe

Message ID 20210413001033.1999355-14-lizh@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers show
Series Add ASO meter support in MLX5 PMD | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Li Zhang April 13, 2021, 12:10 a.m. UTC
The ASO meter send queue can be accessed from multiple
threads. Synchronize the WQE enqueue and completion-handling
paths with a per-SQ spinlock.

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h          |  1 +
 drivers/net/mlx5/mlx5_flow_aso.c | 16 +++++++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 99ef83dd4b..61957d0017 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -493,6 +493,7 @@  struct mlx5_aso_sq_elem {
 
 struct mlx5_aso_sq {
 	uint16_t log_desc_n;
+	rte_spinlock_t sqsl;
 	struct mlx5_aso_cq cq;
 	struct mlx5_devx_sq sq_obj;
 	volatile uint64_t *uar_addr;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index fe5c9912f2..cd2cc016b9 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -274,6 +274,7 @@  mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
 	sq->tail = 0;
 	sq->sqn = sq->sq_obj.sq->id;
 	sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
+	rte_spinlock_init(&sq->sqsl);
 	return 0;
 error:
 	mlx5_aso_destroy_sq(sq);
@@ -665,12 +666,15 @@  mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
 	struct mlx5_flow_meter_info *fm = NULL;
 	uint16_t size = 1 << sq->log_desc_n;
 	uint16_t mask = size - 1;
-	uint16_t res = size - (uint16_t)(sq->head - sq->tail);
+	uint16_t res;
 	uint32_t dseg_idx = 0;
 	struct mlx5_aso_mtr_pool *pool = NULL;
 
+	rte_spinlock_lock(&sq->sqsl);
+	res = size - (uint16_t)(sq->head - sq->tail);
 	if (unlikely(!res)) {
 		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
+		rte_spinlock_unlock(&sq->sqsl);
 		return 0;
 	}
 	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
@@ -707,6 +711,7 @@  mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
 	rte_wmb();
 	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
 	rte_wmb();
+	rte_spinlock_unlock(&sq->sqsl);
 	return 1;
 }
 
@@ -737,12 +742,16 @@  mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
 	const unsigned int mask = cq_size - 1;
 	uint32_t idx;
 	uint32_t next_idx = cq->cq_ci & mask;
-	const uint16_t max = (uint16_t)(sq->head - sq->tail);
+	uint16_t max;
 	uint16_t n = 0;
 	int ret;
 
-	if (unlikely(!max))
+	rte_spinlock_lock(&sq->sqsl);
+	max = (uint16_t)(sq->head - sq->tail);
+	if (unlikely(!max)) {
+		rte_spinlock_unlock(&sq->sqsl);
 		return;
+	}
 	do {
 		idx = next_idx;
 		next_idx = (cq->cq_ci + 1) & mask;
@@ -769,6 +778,7 @@  mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
 		rte_io_wmb();
 		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
 	}
+	rte_spinlock_unlock(&sq->sqsl);
 }
 
 /**