From patchwork Wed Apr 14 02:57:34 2021
From: Li Zhang <lizh@nvidia.com>
To: dekelp@nvidia.com, orika@nvidia.com, viacheslavo@nvidia.com,
	matan@nvidia.com, shahafs@nvidia.com
Cc: dev@dpdk.org, thomas@monjalon.net, rasland@nvidia.com, roniba@nvidia.com
Date: Wed, 14 Apr 2021 05:57:34 +0300
Message-Id: <20210414025736.31142-14-lizh@nvidia.com>
In-Reply-To: <20210414025736.31142-1-lizh@nvidia.com>
References: <20210331073632.1443011-1-lizh@nvidia.com>
	<20210414025736.31142-1-lizh@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 13/14] net/mlx5: make ASO meter queue
	thread-safe

Synchronize ASO meter queue accesses from different threads using
a spinlock.
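As background (not part of the submitted patch), here is a minimal
standalone sketch of the locking pattern this change introduces: a single
rte_spinlock_t serializes every reader and writer of the SQ head/tail
indexes, so the free-slot computation and the index update can never
interleave between threads. The names demo_sq, demo_enqueue and
demo_complete are illustrative only, not part of the driver.

#include <stdint.h>
#include <rte_spinlock.h>

/* Illustrative stand-in for mlx5_aso_sq: only the fields the lock guards. */
struct demo_sq {
	rte_spinlock_t sqsl;	/* serializes head/tail accesses */
	uint16_t log_desc_n;	/* ring holds 1 << log_desc_n descriptors */
	uint16_t head;		/* producer index, advanced on enqueue */
	uint16_t tail;		/* consumer index, advanced on completion */
};

/* Producer side; returns 1 on success, 0 when the ring is full. */
static int
demo_enqueue(struct demo_sq *sq)
{
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t res;

	rte_spinlock_lock(&sq->sqsl);
	/* head - tail wraps correctly in unsigned 16-bit arithmetic. */
	res = size - (uint16_t)(sq->head - sq->tail);
	if (res == 0) {
		rte_spinlock_unlock(&sq->sqsl);
		return 0;
	}
	/* The real driver builds a WQE and rings the doorbell here. */
	sq->head++;
	rte_spinlock_unlock(&sq->sqsl);
	return 1;
}

/* Consumer side: mirrors mlx5_aso_mtr_completion_handle() below. */
static void
demo_complete(struct demo_sq *sq, uint16_t n)
{
	rte_spinlock_lock(&sq->sqsl);
	sq->tail += n;	/* the real driver also updates the CQ doorbell here */
	rte_spinlock_unlock(&sq->sqsl);
}

Note that the enqueue hunk in the patch keeps the UAR doorbell write inside
the critical section, so WQEs are also made visible to the device in ring
order.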
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h          |  1 +
 drivers/net/mlx5/mlx5_flow_aso.c | 16 +++++++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 99ef83dd4b..61957d0017 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -493,6 +493,7 @@ struct mlx5_aso_sq_elem {
 
 struct mlx5_aso_sq {
 	uint16_t log_desc_n;
+	rte_spinlock_t sqsl;
 	struct mlx5_aso_cq cq;
 	struct mlx5_devx_sq sq_obj;
 	volatile uint64_t *uar_addr;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index fe5c9912f2..cd2cc016b9 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -274,6 +274,7 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
 	sq->tail = 0;
 	sq->sqn = sq->sq_obj.sq->id;
 	sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
+	rte_spinlock_init(&sq->sqsl);
 	return 0;
 error:
 	mlx5_aso_destroy_sq(sq);
@@ -665,12 +666,15 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
 	struct mlx5_flow_meter_info *fm = NULL;
 	uint16_t size = 1 << sq->log_desc_n;
 	uint16_t mask = size - 1;
-	uint16_t res = size - (uint16_t)(sq->head - sq->tail);
+	uint16_t res;
 	uint32_t dseg_idx = 0;
 	struct mlx5_aso_mtr_pool *pool = NULL;
 
+	rte_spinlock_lock(&sq->sqsl);
+	res = size - (uint16_t)(sq->head - sq->tail);
 	if (unlikely(!res)) {
 		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
+		rte_spinlock_unlock(&sq->sqsl);
 		return 0;
 	}
 	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
@@ -707,6 +711,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
 	rte_wmb();
 	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
 	rte_wmb();
+	rte_spinlock_unlock(&sq->sqsl);
 	return 1;
 }
 
@@ -737,12 +742,16 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
 	const unsigned int mask = cq_size - 1;
 	uint32_t idx;
 	uint32_t next_idx = cq->cq_ci & mask;
-	const uint16_t max = (uint16_t)(sq->head - sq->tail);
+	uint16_t max;
 	uint16_t n = 0;
 	int ret;
 
-	if (unlikely(!max))
+	rte_spinlock_lock(&sq->sqsl);
+	max = (uint16_t)(sq->head - sq->tail);
+	if (unlikely(!max)) {
+		rte_spinlock_unlock(&sq->sqsl);
 		return;
+	}
 	do {
 		idx = next_idx;
 		next_idx = (cq->cq_ci + 1) & mask;
@@ -769,6 +778,7 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
 		rte_io_wmb();
 		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
 	}
+	rte_spinlock_unlock(&sq->sqsl);
 }
 
 /**