From patchwork Tue Oct 27 15:28:21 2020
X-Patchwork-Submitter: Alexander Kozyrev
X-Patchwork-Id: 82358
X-Patchwork-Delegate: rasland@nvidia.com
From: Alexander Kozyrev
To: dev@dpdk.org
Cc: rasland@nvidia.com, matan@nvidia.com, viacheslavo@nvidia.com
Date: Tue, 27 Oct 2020 15:28:21 +0000
Message-Id: <20201027152824.15232-2-akozyrev@nvidia.com>
In-Reply-To: <20201027152824.15232-1-akozyrev@nvidia.com>
References: <20201027152824.15232-1-akozyrev@nvidia.com>
Subject: [dpdk-dev] [PATCH 1/4] common/mlx5: use C11 atomics for memory allocation

The rte_atomic API is deprecated and needs to be replaced with
C11 atomic builtins. Use the relaxed ordering for mlx5 mallocs.

Signed-off-by: Alexander Kozyrev
Acked-by: Viacheslav Ovsiienko
---
 drivers/common/mlx5/mlx5_malloc.c | 91 ++++++++++++++++---------------
 1 file changed, 47 insertions(+), 44 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index 44899717e0..f64c15fceb 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -8,8 +8,6 @@
 #include
 #include
 
-#include <rte_atomic.h>
-
 #include "mlx5_common_utils.h"
 #include "mlx5_malloc.h"
 
@@ -17,27 +15,24 @@ struct mlx5_sys_mem {
 	uint32_t init:1; /* Memory allocator initialized. */
 	uint32_t enable:1; /* System memory select. */
 	uint32_t reserve:30; /* Reserve. */
-	union {
-		struct rte_memseg_list *last_msl;
-		rte_atomic64_t a64_last_msl;
-	};
+	struct rte_memseg_list *last_msl;
 	/* last allocated rte memory memseg list. */
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-	rte_atomic64_t malloc_sys;
+	uint64_t malloc_sys;
 	/* Memory allocated from system count. */
-	rte_atomic64_t malloc_rte;
+	uint64_t malloc_rte;
 	/* Memory allocated from hugepage count. */
-	rte_atomic64_t realloc_sys;
+	uint64_t realloc_sys;
 	/* Memory reallocate from system count. */
-	rte_atomic64_t realloc_rte;
+	uint64_t realloc_rte;
 	/* Memory reallocate from hugepage count. */
-	rte_atomic64_t free_sys;
+	uint64_t free_sys;
 	/* Memory free to system count. */
-	rte_atomic64_t free_rte;
+	uint64_t free_rte;
 	/* Memory free to hugepage count. */
-	rte_atomic64_t msl_miss;
+	uint64_t msl_miss;
 	/* MSL miss count. */
-	rte_atomic64_t msl_update;
+	uint64_t msl_update;
 	/* MSL update count. */
 #endif
 };
 
@@ -47,14 +42,14 @@ static struct mlx5_sys_mem mlx5_sys_mem = {
 	.init = 0,
 	.enable = 0,
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-	.malloc_sys = RTE_ATOMIC64_INIT(0),
-	.malloc_rte = RTE_ATOMIC64_INIT(0),
-	.realloc_sys = RTE_ATOMIC64_INIT(0),
-	.realloc_rte = RTE_ATOMIC64_INIT(0),
-	.free_sys = RTE_ATOMIC64_INIT(0),
-	.free_rte = RTE_ATOMIC64_INIT(0),
-	.msl_miss = RTE_ATOMIC64_INIT(0),
-	.msl_update = RTE_ATOMIC64_INIT(0),
+	.malloc_sys = 0,
+	.malloc_rte = 0,
+	.realloc_sys = 0,
+	.realloc_rte = 0,
+	.free_sys = 0,
+	.free_rte = 0,
+	.msl_miss = 0,
+	.msl_update = 0,
 #endif
 };
 
@@ -97,12 +92,14 @@ mlx5_mem_update_msl(void *addr)
	 * different with the cached msl.
	 */
	if (addr && !mlx5_mem_check_msl(addr,
-	    (struct rte_memseg_list *)(uintptr_t)rte_atomic64_read
-	    (&mlx5_sys_mem.a64_last_msl))) {
-		rte_atomic64_set(&mlx5_sys_mem.a64_last_msl,
-			(int64_t)(uintptr_t)rte_mem_virt2memseg_list(addr));
+	    (struct rte_memseg_list *)__atomic_load_n
+	    (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+		__atomic_store_n(&mlx5_sys_mem.last_msl,
+			rte_mem_virt2memseg_list(addr),
+			__ATOMIC_RELAXED);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		rte_atomic64_inc(&mlx5_sys_mem.msl_update);
+		__atomic_add_fetch(&mlx5_sys_mem.msl_update, 1,
+				   __ATOMIC_RELAXED);
 #endif
	}
 }
@@ -123,12 +120,12 @@ mlx5_mem_is_rte(void *addr)
	 * Check if the last cache msl matches. Drop to slow path
	 * to check if the memory belongs to rte memory.
	 */
-	if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)(uintptr_t)
-	    rte_atomic64_read(&mlx5_sys_mem.a64_last_msl))) {
+	if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
+	    __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
		if (!rte_mem_virt2memseg_list(addr))
			return false;
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		rte_atomic64_inc(&mlx5_sys_mem.msl_miss);
+		__atomic_add_fetch(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
 #endif
	}
	return true;
@@ -190,7 +187,8 @@ mlx5_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
		mlx5_mem_update_msl(addr);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
		if (addr)
-			rte_atomic64_inc(&mlx5_sys_mem.malloc_rte);
+			__atomic_add_fetch(&mlx5_sys_mem.malloc_rte, 1,
+					   __ATOMIC_RELAXED);
 #endif
		return addr;
	}
@@ -203,7 +201,8 @@ mlx5_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
	addr = malloc(size);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
	if (addr)
-		rte_atomic64_inc(&mlx5_sys_mem.malloc_sys);
+		__atomic_add_fetch(&mlx5_sys_mem.malloc_sys, 1,
+				   __ATOMIC_RELAXED);
 #endif
	return addr;
 }
@@ -236,7 +235,8 @@ mlx5_realloc(void *addr, uint32_t flags, size_t size, unsigned int align,
		mlx5_mem_update_msl(new_addr);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
		if (new_addr)
-			rte_atomic64_inc(&mlx5_sys_mem.realloc_rte);
+			__atomic_add_fetch(&mlx5_sys_mem.realloc_rte, 1,
+					   __ATOMIC_RELAXED);
 #endif
		return new_addr;
	}
@@ -248,7 +248,8 @@ mlx5_realloc(void *addr, uint32_t flags, size_t size, unsigned int align,
	new_addr = realloc(addr, size);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
	if (new_addr)
-		rte_atomic64_inc(&mlx5_sys_mem.realloc_sys);
+		__atomic_add_fetch(&mlx5_sys_mem.realloc_sys, 1,
+				   __ATOMIC_RELAXED);
 #endif
	return new_addr;
 }
@@ -260,12 +261,14 @@ mlx5_free(void *addr)
		return;
	if (!mlx5_mem_is_rte(addr)) {
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		rte_atomic64_inc(&mlx5_sys_mem.free_sys);
+		__atomic_add_fetch(&mlx5_sys_mem.free_sys, 1,
+				   __ATOMIC_RELAXED);
 #endif
		free(addr);
	} else {
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		rte_atomic64_inc(&mlx5_sys_mem.free_rte);
+		__atomic_add_fetch(&mlx5_sys_mem.free_rte, 1,
+				   __ATOMIC_RELAXED);
 #endif
		rte_free(addr);
	}
@@ -279,14 +282,14 @@ mlx5_memory_stat_dump(void)
		" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
		" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
		" update:%"PRIi64"",
-		rte_atomic64_read(&mlx5_sys_mem.malloc_sys),
-		rte_atomic64_read(&mlx5_sys_mem.realloc_sys),
-		rte_atomic64_read(&mlx5_sys_mem.free_sys),
-		rte_atomic64_read(&mlx5_sys_mem.malloc_rte),
-		rte_atomic64_read(&mlx5_sys_mem.realloc_rte),
-		rte_atomic64_read(&mlx5_sys_mem.free_rte),
-		rte_atomic64_read(&mlx5_sys_mem.msl_miss),
-		rte_atomic64_read(&mlx5_sys_mem.msl_update)),
+		__atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
+		__atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
+		__atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
+		__atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
+		__atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
+		__atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
+		__atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
+		__atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
 #endif
 }
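For readers who have not used the GCC/Clang __atomic builtins that replace
rte_atomic64_t above, a minimal stand-alone sketch of the relaxed-counter
idiom follows. The struct and field names only mirror mlx5_malloc.c; the
program is an illustration under that assumption, not part of the patch.

/* Illustrative only: the relaxed statistics-counter idiom from patch 1. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static struct {
	uint64_t malloc_sys; /* Counts allocations served by the system heap. */
} stats; /* Hypothetical stand-in for mlx5_sys_mem. */

static void count_sys_malloc(void)
{
	/* Statistics tolerate reordering against other memory accesses,
	 * so relaxed ordering is enough; the read-modify-write itself
	 * is still atomic across threads. */
	__atomic_add_fetch(&stats.malloc_sys, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	count_sys_malloc();
	printf("malloc_sys: %" PRIu64 "\n",
	       __atomic_load_n(&stats.malloc_sys, __ATOMIC_RELAXED));
	return 0;
}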
From patchwork Tue Oct 27 15:28:22 2020
X-Patchwork-Submitter: Alexander Kozyrev
X-Patchwork-Id: 82357
X-Patchwork-Delegate: rasland@nvidia.com
From: Alexander Kozyrev
To: dev@dpdk.org
Cc: rasland@nvidia.com, matan@nvidia.com, viacheslavo@nvidia.com
Date: Tue, 27 Oct 2020 15:28:22 +0000
Message-Id: <20201027152824.15232-3-akozyrev@nvidia.com>
In-Reply-To: <20201027152824.15232-1-akozyrev@nvidia.com>
References: <20201027152824.15232-1-akozyrev@nvidia.com>
Subject: [dpdk-dev] [PATCH 2/4] common/mlx5: use C11 atomics for netlink sequence

The rte_atomic API is deprecated and needs to be replaced with
C11 atomic builtins. Use __atomic_add_fetch instead of
rte_atomic32_add_return to generate a Netlink sequence number.

Signed-off-by: Alexander Kozyrev
Acked-by: Viacheslav Ovsiienko
---
 drivers/common/mlx5/linux/mlx5_nl.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 0ecd9c7014..40d8620300 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -18,7 +18,6 @@
 #include
 #include
-#include <rte_atomic.h>
 
 #include "mlx5_nl.h"
 #include "mlx5_common_utils.h"
@@ -169,10 +168,10 @@ struct mlx5_nl_ifindex_data {
	uint32_t portnum; /**< IB device max port number (out). */
 };
 
-rte_atomic32_t atomic_sn = RTE_ATOMIC32_INIT(0);
+uint32_t atomic_sn;
 
 /* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE ((uint32_t)rte_atomic32_add_return(&atomic_sn, 1))
+#define MLX5_NL_SN_GENERATE __atomic_add_fetch(&atomic_sn, 1, __ATOMIC_RELAXED)
 
 /**
  * Opens a Netlink socket.
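A stand-alone sketch of the sequence-number idiom from this patch may be
useful: a relaxed __atomic_add_fetch keeps the read-modify-write atomic, so
every caller draws a distinct value, and no ordering with surrounding memory
is required. Names mirror mlx5_nl.c; the program is illustrative only.

/* Illustrative only: unique sequence numbers via a relaxed atomic add. */
#include <stdint.h>
#include <stdio.h>

static uint32_t atomic_sn;

static uint32_t sn_generate(void)
{
	/* Each caller gets a distinct value even under contention. */
	return __atomic_add_fetch(&atomic_sn, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	printf("%u\n", sn_generate()); /* 1 */
	printf("%u\n", sn_generate()); /* 2 */
	return 0;
}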
From patchwork Tue Oct 27 15:28:23 2020
X-Patchwork-Submitter: Alexander Kozyrev
X-Patchwork-Id: 82355
X-Patchwork-Delegate: rasland@nvidia.com
From: Alexander Kozyrev
To: dev@dpdk.org
Cc: rasland@nvidia.com, matan@nvidia.com, viacheslavo@nvidia.com
Date: Tue, 27 Oct 2020 15:28:23 +0000
Message-Id: <20201027152824.15232-4-akozyrev@nvidia.com>
In-Reply-To: <20201027152824.15232-1-akozyrev@nvidia.com>
References: <20201027152824.15232-1-akozyrev@nvidia.com>
Subject: [dpdk-dev] [PATCH 3/4] net/mlx5: use C11 atomics for RxQ/TxQ refcounts

The rte_atomic API is deprecated and needs to be replaced with
C11 atomic builtins. Use the relaxed ordering for RxQ/TxQ refcounts.
Signed-off-by: Alexander Kozyrev
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/linux/mlx5_ethdev_os.c |  1 -
 drivers/net/mlx5/mlx5.c                 |  9 +++-----
 drivers/net/mlx5/mlx5.h                 |  6 +++---
 drivers/net/mlx5/mlx5_ethdev.c          |  1 -
 drivers/net/mlx5/mlx5_flow_dv.c         |  3 ++-
 drivers/net/mlx5/mlx5_rxq.c             | 28 ++++++++++++-------------
 drivers/net/mlx5/mlx5_txq.c             |  8 +++----
 7 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 593b0d08ac..19b281925f 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -24,7 +24,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 91aaee3d8c..27c9c2abb6 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1232,8 +1232,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
	if (err)
		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	__atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
	table_key.direction = 1;
	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
			       SOCKET_ID_ANY);
@@ -1245,8 +1244,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
	if (err)
		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	__atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
	table_key.direction = 0;
	table_key.domain = 1;
	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
@@ -1259,8 +1257,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
	if (err)
		goto error;
-	rte_atomic32_init(&tbl_data->tbl.refcnt);
-	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	__atomic_store_n(&tbl_data->tbl.refcnt, 1, __ATOMIC_RELAXED);
	return err;
 error:
	mlx5_free_table_hash_list(priv);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 74298115fc..0141c0670e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -719,7 +719,7 @@ struct mlx5_rxq_obj {
 /* Indirection table. */
 struct mlx5_ind_table_obj {
	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
	RTE_STD_C11
	union {
		void *ind_table; /**< Indirection table. */
@@ -733,7 +733,7 @@ struct mlx5_ind_table_obj {
 __extension__
 struct mlx5_hrxq {
	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
	uint32_t shared:1; /* This object used in shared action. */
	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
	RTE_STD_C11
@@ -872,7 +872,7 @@ struct mlx5_priv {
	/* Indirection tables. */
	LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
	/* Pointer to next element. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	/**< Verbs modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint8_t max_lro_msg_size;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index fc04fc8224..8f39e84e08 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -10,7 +10,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index dafe07f42e..3f1ccf8fe0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10283,7 +10283,8 @@ __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
		if (hrxq_idx) {
			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					       hrxq_idx);
-			rte_atomic32_inc(&(*hrxq)->refcnt);
+			__atomic_fetch_add(&(*hrxq)->refcnt, 1,
+					   __ATOMIC_RELAXED);
		}
	} else {
		struct mlx5_flow_rss_desc *rss_desc =
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ddd5df7c37..8d05315e61 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -461,7 +461,6 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
	}
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
-
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1669,7 +1668,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
 #endif
	tmpl->rxq.idx = idx;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
 error:
@@ -1716,7 +1715,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->rxq.idx = idx;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
 }
@@ -1741,7 +1740,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 
	if (rxq_data) {
		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-		__atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
	}
	return rxq_ctrl;
 }
@@ -1916,7 +1915,7 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
	if (ind_tbl) {
		unsigned int i;
 
-		rte_atomic32_inc(&ind_tbl->refcnt);
+		__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
		for (i = 0; i != ind_tbl->queues_n; ++i)
			mlx5_rxq_get(dev, ind_tbl->queues[i]);
	}
@@ -1941,11 +1940,11 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
 
-	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+	if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
		priv->obj_ops.ind_table_destroy(ind_tbl);
	for (i = 0; i != ind_tbl->queues_n; ++i)
		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
-	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
		LIST_REMOVE(ind_tbl, next);
		mlx5_free(ind_tbl);
		return 0;
@@ -2019,7 +2018,7 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
	if (ret < 0)
		goto error;
-	rte_atomic32_inc(&ind_tbl->refcnt);
+	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
	return ind_tbl;
 error:
@@ -2078,7 +2077,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
			mlx5_ind_table_obj_release(dev, ind_tbl);
			continue;
		}
-		rte_atomic32_inc(&hrxq->refcnt);
+		__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
		return idx;
	}
	return 0;
@@ -2184,7 +2183,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	if (!hrxq)
		return 0;
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+	if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
@@ -2257,7 +2256,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
		rte_errno = errno;
		goto error;
	}
-	rte_atomic32_inc(&hrxq->refcnt);
+	__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
		     hrxq, next);
	return hrxq_idx;
@@ -2287,7 +2286,8 @@ mlx5_drop_action_create(struct rte_eth_dev *dev)
	int ret;
 
	if (priv->drop_queue.hrxq) {
-		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+		__atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1,
+				   __ATOMIC_RELAXED);
		return priv->drop_queue.hrxq;
	}
	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
@@ -2308,7 +2308,7 @@ mlx5_drop_action_create(struct rte_eth_dev *dev)
	ret = priv->obj_ops.drop_action_create(dev);
	if (ret < 0)
		goto error;
-	rte_atomic32_set(&hrxq->refcnt, 1);
+	__atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
	return hrxq;
 error:
	if (hrxq) {
@@ -2332,7 +2332,7 @@ mlx5_drop_action_destroy(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
 
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+	if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
		priv->obj_ops.drop_action_destroy(dev);
		mlx5_free(priv->drop_queue.rxq);
		mlx5_free(hrxq->ind_table);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index dca9c05951..7cd100813c 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1141,7 +1141,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		rte_errno = ENOMEM;
		goto error;
	}
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
@@ -1185,7 +1185,7 @@ mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	tmpl->txq.idx = idx;
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
 }
@@ -1210,7 +1210,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 
	if (txq_data) {
		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-		__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
	}
	return ctrl;
 }
@@ -1235,7 +1235,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
	if (!(*priv->txqs)[idx])
		return 0;
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	if (txq_ctrl->obj) {
		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
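The refcount shape this patch settles on can be summarized in a small sketch,
given under the assumption (as in the driver control path) that creation and
final teardown are serialized by the caller; obj_destroy() is a hypothetical
stand-in for the real queue teardown, not a DPDK API.

/* Illustrative only: relaxed get/release refcounting as in patch 3. */
#include <stdint.h>
#include <stdlib.h>

struct obj {
	uint32_t refcnt;
};

static void obj_destroy(struct obj *o) /* placeholder for real teardown */
{
	free(o);
}

static void obj_get(struct obj *o)
{
	/* The counter only tracks ownership; setup/teardown is serialized
	 * by the control path, so relaxed ordering suffices. */
	__atomic_fetch_add(&o->refcnt, 1, __ATOMIC_RELAXED);
}

static int obj_release(struct obj *o)
{
	if (__atomic_sub_fetch(&o->refcnt, 1, __ATOMIC_RELAXED) != 0)
		return 1; /* still referenced */
	obj_destroy(o);
	return 0;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	obj_get(o);
	obj_get(o);
	obj_release(o);		/* returns 1, object kept */
	return obj_release(o);	/* returns 0, object freed */
}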
From patchwork Tue Oct 27 15:28:24 2020
X-Patchwork-Submitter: Alexander Kozyrev
X-Patchwork-Id: 82356
X-Patchwork-Delegate: rasland@nvidia.com
From: Alexander Kozyrev
To: dev@dpdk.org
Cc: rasland@nvidia.com, matan@nvidia.com, viacheslavo@nvidia.com
Date: Tue, 27 Oct 2020 15:28:24 +0000
Message-Id: <20201027152824.15232-5-akozyrev@nvidia.com>
In-Reply-To: <20201027152824.15232-1-akozyrev@nvidia.com>
References: <20201027152824.15232-1-akozyrev@nvidia.com>
Subject: [dpdk-dev] [PATCH 4/4] net/mlx5: use C11 atomics for RTE flow tables

The rte_atomic API is deprecated and needs to be replaced with
C11 atomic builtins. Use the relaxed ordering for RTE flow tables.
Enforce Acquire/Release model for managing DevX pools.

Signed-off-by: Alexander Kozyrev
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.h         |   9 +--
 drivers/net/mlx5/mlx5_flow.h    |  15 ++--
 drivers/net/mlx5/mlx5_flow_dv.c | 123 ++++++++++++++++++--------------
 3 files changed, 78 insertions(+), 69 deletions(-)
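Before the diff itself, a sketch of the Acquire/Release publication idiom the
commit message refers to, modeled on flow_dv_jump_tbl_resource_register in
the diff below; create_action() is a hypothetical stand-in for the rdma-core
call, and registration is assumed to be serialized by the caller, as the
in-tree comments note, so the fences only order the publication.

/* Illustrative only: acquire/release pairing around a lazily created
 * resource, as patch 4 applies to the jump-table action. */
#include <stdint.h>

struct jump_res {
	uint32_t refcnt;
	void *action;
};

static void *create_action(void) /* hypothetical stand-in */
{
	static int dummy;
	return &dummy;
}

static void jump_res_register(struct jump_res *r)
{
	/* Acquire pairs with the release increment below: observing a
	 * nonzero count guarantees the action pointer written by the
	 * creating thread is visible to this one. */
	if (__atomic_load_n(&r->refcnt, __ATOMIC_ACQUIRE) == 0)
		r->action = create_action();
	__atomic_fetch_add(&r->refcnt, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	struct jump_res r = { 0, 0 };

	jump_res_register(&r); /* creates the action, count becomes 1 */
	jump_res_register(&r); /* reuses it, count becomes 2 */
	return r.action == 0;
}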
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0141c0670e..1bd8cd4b63 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -402,10 +402,7 @@ TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
 struct mlx5_flow_counter_pool {
	TAILQ_ENTRY(mlx5_flow_counter_pool) next;
	struct mlx5_counters counters[2]; /* Free counter list. */
-	union {
-		struct mlx5_devx_obj *min_dcs;
-		rte_atomic64_t a64_dcs;
-	};
+	struct mlx5_devx_obj *min_dcs;
	/* The devx object of the minimum counter ID. */
	uint64_t time_of_last_age_check;
	/* System time (from rte_rdtsc()) read in the last aging check. */
@@ -464,7 +461,7 @@ struct mlx5_flow_counter_mng {
 /* Default miss action resource structure. */
 struct mlx5_flow_default_miss_resource {
	void *action; /* Pointer to the rdma-core action. */
-	rte_atomic32_t refcnt; /* Default miss action reference counter. */
+	uint32_t refcnt; /* Default miss action reference counter. */
 };
 
 #define MLX5_AGE_EVENT_NEW 1
@@ -515,7 +512,7 @@ union mlx5_flow_tbl_key {
 /* Table structure. */
 struct mlx5_flow_tbl_resource {
	void *obj; /**< Pointer to DR table object. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
 };
 
 #define MLX5_MAX_TABLES UINT16_MAX
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8b5a93f173..ce315758ad 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -11,7 +11,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 
@@ -396,7 +395,7 @@ struct mlx5_flow_dv_matcher {
	/**< Pointer to the next element. */
	struct mlx5_flow_tbl_resource *tbl;
	/**< Pointer to the table(group) the matcher associated with. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	void *matcher_object; /**< Pointer to DV matcher */
	uint16_t crc; /**< CRC of key. */
	uint16_t priority; /**< Priority of matcher. */
@@ -421,7 +420,7 @@ union mlx5_flow_encap_decap_key {
 struct mlx5_flow_dv_encap_decap_resource {
	struct mlx5_hlist_entry entry;
	/* Pointer to next element. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Encap/decap action object. */
	uint8_t buf[MLX5_ENCAP_MAX_LEN];
@@ -438,7 +437,7 @@ struct mlx5_flow_dv_tag_resource {
	/**< hash list entry for tag resource, tag value as the key. */
	void *action;
	/**< Tag action object. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	uint32_t idx; /**< Index for the index memory pool. */
 };
 
@@ -459,7 +458,7 @@ struct mlx5_flow_dv_tag_resource {
 struct mlx5_flow_dv_modify_hdr_resource {
	struct mlx5_hlist_entry entry;
	/* Pointer to next element. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
@@ -482,7 +481,7 @@ union mlx5_flow_modify_hdr_key {
 /* Jump action resource structure. */
 struct mlx5_flow_dv_jump_tbl_resource {
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	void *action; /**< Pointer to the rdma core action. */
 };
@@ -491,7 +490,7 @@ struct mlx5_flow_dv_jump_tbl_resource {
 struct mlx5_flow_dv_port_id_action_resource {
	ILIST_ENTRY(uint32_t)next;
	/* Pointer to next element. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	void *action;
	/**< Action object. */
	uint32_t port_id; /**< Port ID value. */
@@ -501,7 +500,7 @@ struct mlx5_flow_dv_port_id_action_resource {
 struct mlx5_flow_dv_push_vlan_action_resource {
	ILIST_ENTRY(uint32_t)next;
	/* Pointer to next element. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t refcnt; /**< Reference counter. */
	void *action; /**< Action object. */
	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
	rte_be32_t vlan_tag; /**< VLAN tag value. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 3f1ccf8fe0..caa85e87a4 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2873,8 +2873,10 @@ flow_dv_encap_decap_resource_register
				struct mlx5_flow_dv_encap_decap_resource, entry);
		DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
			(void *)cache_resource,
-			rte_atomic32_read(&cache_resource->refcnt));
-		rte_atomic32_inc(&cache_resource->refcnt);
+			__atomic_load_n(&cache_resource->refcnt,
+					__ATOMIC_RELAXED));
+		__atomic_fetch_add(&cache_resource->refcnt, 1,
+				   __ATOMIC_RELAXED);
		dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
		dev_flow->dv.encap_decap = cache_resource;
		return 0;
@@ -2897,8 +2899,7 @@ flow_dv_encap_decap_resource_register
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
				 flow_dv_encap_decap_resource_match,
				 (void *)cache_resource)) {
@@ -2913,7 +2914,7 @@ flow_dv_encap_decap_resource_register
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	return 0;
 }
 
@@ -2944,7 +2945,7 @@ flow_dv_jump_tbl_resource_register
	int cnt, ret;
 
	MLX5_ASSERT(tbl);
-	cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
+	cnt = __atomic_load_n(&tbl_data->jump.refcnt, __ATOMIC_ACQUIRE);
	if (!cnt) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
				(tbl->obj, &tbl_data->jump.action);
@@ -2961,7 +2962,7 @@ flow_dv_jump_tbl_resource_register
		DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
			(void *)&tbl_data->jump, cnt);
	}
-	rte_atomic32_inc(&tbl_data->jump.refcnt);
+	__atomic_fetch_add(&tbl_data->jump.refcnt, 1, __ATOMIC_RELEASE);
	dev_flow->handle->rix_jump = tbl_data->idx;
	dev_flow->dv.jump = &tbl_data->jump;
	return 0;
@@ -2986,7 +2987,7 @@ flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_default_miss_resource *cache_resource =
			&sh->default_miss;
-	int cnt = rte_atomic32_read(&cache_resource->refcnt);
+	int cnt = __atomic_load_n(&cache_resource->refcnt, __ATOMIC_ACQUIRE);
 
	if (!cnt) {
		MLX5_ASSERT(cache_resource->action);
@@ -2999,7 +3000,7 @@ flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
		DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
			(void *)cache_resource->action, cnt);
	}
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_fetch_add(&cache_resource->refcnt, 1, __ATOMIC_RELEASE);
	return 0;
 }
 
@@ -3038,8 +3039,10 @@ flow_dv_port_id_action_resource_register
			DRV_LOG(DEBUG, "port id action resource resource %p: "
				"refcnt %d++",
				(void *)cache_resource,
-				rte_atomic32_read(&cache_resource->refcnt));
-			rte_atomic32_inc(&cache_resource->refcnt);
+				__atomic_load_n(&cache_resource->refcnt,
+						__ATOMIC_RELAXED));
+			__atomic_fetch_add(&cache_resource->refcnt, 1,
+					   __ATOMIC_RELAXED);
			dev_flow->handle->rix_port_id_action = idx;
			dev_flow->dv.port_id_action = cache_resource;
			return 0;
@@ -3062,15 +3065,14 @@ flow_dv_port_id_action_resource_register
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
		     dev_flow->handle->rix_port_id_action, cache_resource,
		     next);
	dev_flow->dv.port_id_action = cache_resource;
	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	return 0;
 }
 
@@ -3111,8 +3113,10 @@ flow_dv_push_vlan_action_resource_register
			DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
				"refcnt %d++",
				(void *)cache_resource,
-				rte_atomic32_read(&cache_resource->refcnt));
-			rte_atomic32_inc(&cache_resource->refcnt);
+				__atomic_load_n(&cache_resource->refcnt,
+						__ATOMIC_RELAXED));
+			__atomic_fetch_add(&cache_resource->refcnt, 1,
+					   __ATOMIC_RELAXED);
			dev_flow->handle->dvh.rix_push_vlan = idx;
			dev_flow->dv.push_vlan_res = cache_resource;
			return 0;
@@ -3141,8 +3145,7 @@ flow_dv_push_vlan_action_resource_register
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
		     &sh->push_vlan_action_list,
		     dev_flow->handle->dvh.rix_push_vlan,
@@ -3150,7 +3153,7 @@ flow_dv_push_vlan_action_resource_register
	dev_flow->dv.push_vlan_res = cache_resource;
	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	return 0;
 }
 /**
@@ -4550,8 +4553,10 @@ flow_dv_modify_hdr_resource_register
					entry);
		DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
			(void *)cache_resource,
-			rte_atomic32_read(&cache_resource->refcnt));
-		rte_atomic32_inc(&cache_resource->refcnt);
+			__atomic_load_n(&cache_resource->refcnt,
+					__ATOMIC_RELAXED));
+		__atomic_fetch_add(&cache_resource->refcnt, 1,
+				   __ATOMIC_RELAXED);
		dev_flow->handle->dvh.modify_hdr = cache_resource;
		return 0;
	}
@@ -4575,8 +4580,7 @@ flow_dv_modify_hdr_resource_register
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry,
				 flow_dv_modify_hdr_resource_match,
				 (void *)cache_resource)) {
@@ -4590,7 +4594,7 @@ flow_dv_modify_hdr_resource_register
	dev_flow->handle->dvh.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	return 0;
 }
 
@@ -8021,7 +8025,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
					entry);
		tbl = &tbl_data->tbl;
-		rte_atomic32_inc(&tbl->refcnt);
+		__atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
		return tbl;
	}
	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
@@ -8056,9 +8060,9 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
	 * No multi-threads now, but still better to initialize the reference
	 * count before insert it into the hash list.
	 */
-	rte_atomic32_init(&tbl->refcnt);
+	__atomic_store_n(&tbl->refcnt, 0, __ATOMIC_RELAXED);
	/* Jump action reference count is initialized here. */
-	rte_atomic32_init(&tbl_data->jump.refcnt);
+	__atomic_store_n(&tbl_data->jump.refcnt, 0, __ATOMIC_RELAXED);
	pos->key = table_key.v64;
	ret = mlx5_hlist_insert(sh->flow_tbls, pos);
	if (ret < 0) {
@@ -8068,7 +8072,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
	}
-	rte_atomic32_inc(&tbl->refcnt);
+	__atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
	return tbl;
 }
 
@@ -8094,7 +8098,7 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
 
	if (!tbl)
		return 0;
-	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+	if (__atomic_sub_fetch(&tbl->refcnt, 1, __ATOMIC_RELAXED) == 0) {
		struct mlx5_hlist_entry *pos = &tbl_data->entry;
 
		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
@@ -8197,8 +8201,10 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
			cache_matcher->priority,
			key->direction ? "tx" : "rx", (void *)cache_matcher,
-			rte_atomic32_read(&cache_matcher->refcnt));
-		rte_atomic32_inc(&cache_matcher->refcnt);
+			__atomic_load_n(&cache_matcher->refcnt,
+					__ATOMIC_RELAXED));
+		__atomic_fetch_add(&cache_matcher->refcnt, 1,
+				   __ATOMIC_RELAXED);
		dev_flow->handle->dvh.matcher = cache_matcher;
		/* old matcher should not make the table ref++. */
		flow_dv_tbl_resource_release(dev, tbl);
@@ -8233,16 +8239,15 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
	}
	/* Save the table information */
	cache_matcher->tbl = tbl;
-	rte_atomic32_init(&cache_matcher->refcnt);
	/* only matcher ref++, table ref++ already done above in get API. */
-	rte_atomic32_inc(&cache_matcher->refcnt);
+	__atomic_store_n(&cache_matcher->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
	dev_flow->handle->dvh.matcher = cache_matcher;
	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
		key->domain ? "FDB" : "NIC", key->table_id,
		cache_matcher->priority,
		key->direction ? "tx" : "rx", (void *)cache_matcher,
-		rte_atomic32_read(&cache_matcher->refcnt));
+		__atomic_load_n(&cache_matcher->refcnt, __ATOMIC_RELAXED));
	return 0;
 }
 
@@ -8279,12 +8284,14 @@ flow_dv_tag_resource_register
	if (entry) {
		cache_resource = container_of
			(entry, struct mlx5_flow_dv_tag_resource, entry);
-		rte_atomic32_inc(&cache_resource->refcnt);
+		__atomic_fetch_add(&cache_resource->refcnt, 1,
+				   __ATOMIC_RELAXED);
		dev_flow->handle->dvh.rix_tag = cache_resource->idx;
		dev_flow->dv.tag_resource = cache_resource;
		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
			(void *)cache_resource,
-			rte_atomic32_read(&cache_resource->refcnt));
+			__atomic_load_n(&cache_resource->refcnt,
+					__ATOMIC_RELAXED));
		return 0;
	}
	/* Register new resource. */
@@ -8303,8 +8310,7 @@ flow_dv_tag_resource_register
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
		mlx5_flow_os_destroy_flow_action(cache_resource->action);
		mlx5_free(cache_resource);
@@ -8315,7 +8321,7 @@ flow_dv_tag_resource_register
	dev_flow->dv.tag_resource = cache_resource;
	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
	return 0;
 }
 
@@ -8343,8 +8349,8 @@ flow_dv_tag_release(struct rte_eth_dev *dev,
		return 0;
	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
		dev->data->port_id, (void *)tag,
-		rte_atomic32_read(&tag->refcnt));
-	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
+		__atomic_load_n(&tag->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&tag->refcnt, 1, __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
		mlx5_hlist_remove(sh->tag_table, &tag->entry);
		DRV_LOG(DEBUG, "port %u tag %p: removed",
@@ -10463,8 +10469,8 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
	MLX5_ASSERT(matcher->matcher_object);
	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
		dev->data->port_id, (void *)matcher,
-		rte_atomic32_read(&matcher->refcnt));
-	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
+		__atomic_load_n(&matcher->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&matcher->refcnt, 1, __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (matcher->matcher_object));
		LIST_REMOVE(matcher, next);
@@ -10504,8 +10510,9 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
	MLX5_ASSERT(cache_resource->action);
	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_flow_os_destroy_flow_action
			   (cache_resource->action));
		mlx5_hlist_remove(priv->sh->encaps_decaps,
@@ -10545,8 +10552,9 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
	MLX5_ASSERT(cache_resource->action);
	DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_flow_os_destroy_flow_action
			   (cache_resource->action));
		/* jump action memory free is inside the table release. */
@@ -10577,8 +10585,10 @@ flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
	MLX5_ASSERT(cache_resource->action);
	DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
		(void *)cache_resource->action,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt,
+				__ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_glue->destroy_flow_action
			   (cache_resource->action));
		DRV_LOG(DEBUG, "default miss resource %p: removed",
@@ -10610,8 +10620,9 @@ flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
	MLX5_ASSERT(cache_resource->action);
	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_flow_os_destroy_flow_action
			   (cache_resource->action));
		mlx5_hlist_remove(priv->sh->modify_cmds,
@@ -10650,8 +10661,9 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
	MLX5_ASSERT(cache_resource->action);
	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_flow_os_destroy_flow_action
			   (cache_resource->action));
		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
@@ -10691,8 +10703,9 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
	MLX5_ASSERT(cache_resource->action);
	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
		claim_zero(mlx5_flow_os_destroy_flow_action
			   (cache_resource->action));
		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],