From patchwork Wed Mar 27 22:37:34 2024
X-Patchwork-Submitter: Tyler Retzlaff
X-Patchwork-Id: 138897
X-Patchwork-Delegate: thomas@monjalon.net
From: Tyler Retzlaff
To: dev@dpdk.org
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer, Ajit Khaparde,
    Alok Prasad, Anatoly Burakov, Andrew Rybchenko, Anoob Joseph,
    Bruce Richardson, Byron Marohn, Chenbo Xia, Chengwen Feng,
    Ciara Loftus, Ciara Power, Dariusz Sosnowski, David Hunt,
    Devendra Singh Rawat, Erik Gabriel Carrillo, Guoyang Zhou,
    Harman Kalra, Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
    Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
    Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
    Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj, Matan Azrad,
    Maxime Coquelin, Nicolas Chautru, Ori Kam, Pavan Nikhilesh,
    Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan, Rosen Xu,
    Ruifeng Wang, Rushil Gupta, Sameh Gobriel, Sivaprasad Tummala,
    Somnath Kotur, Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
    Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru, Viacheslav Ovsiienko,
    Vladimir Medvedkin, Xiaoyun Wang, Yipeng Wang, Yisen Zhuang,
    Yuying Zhang, Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Subject: [PATCH v3 21/45] vdpa/mlx5: use rte stdatomic API
Date: Wed, 27 Mar 2024 15:37:34 -0700
Message-Id: <1711579078-10624-22-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>
References: <1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>
 <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
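The conversion pattern applied throughout the series is, roughly, the
minimal sketch below. It is illustrative only; the "counter" variable and
"bump_counter()" helper are hypothetical and not part of this patch, and it
simply assumes the rte_stdatomic.h wrappers used by the diff.

/*
 * Hypothetical example, not code from this patch: "counter" and
 * "bump_counter()" exist only to show the before/after forms.
 *
 * Before this series the variable would have been a plain uint32_t
 * manipulated with the gcc builtins, e.g.:
 *	__atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);
 *	__atomic_load_n(&counter, __ATOMIC_RELAXED);
 */
#include <stdint.h>

#include <rte_stdatomic.h>

/* The type is now marked atomic with the RTE_ATOMIC() specifier. */
static RTE_ATOMIC(uint32_t) counter;

static inline uint32_t
bump_counter(void)
{
	/* Accesses go through the *_explicit wrappers with rte_memory_order_*. */
	rte_atomic_fetch_add_explicit(&counter, 1, rte_memory_order_relaxed);
	return rte_atomic_load_explicit(&counter, rte_memory_order_relaxed);
}

The RTE_ATOMIC() specifier is what marks a variable or field as atomic for
the wrapper API, which is why the counter declarations and function
prototypes in the diff below change type as well.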
Signed-off-by: Tyler Retzlaff
Acked-by: Stephen Hemminger
---
 drivers/vdpa/mlx5/mlx5_vdpa.c         | 24 +++++++++---------
 drivers/vdpa/mlx5/mlx5_vdpa.h         | 14 +++++------
 drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
 drivers/vdpa/mlx5/mlx5_vdpa_lm.c      |  4 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_mem.c     |  4 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   |  4 ++-
 6 files changed, 52 insertions(+), 44 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
 	uint32_t timeout = 0;
 
 	/* Check and wait all close tasks done. */
-	while (__atomic_load_n(&priv->dev_close_progress,
-		__ATOMIC_RELAXED) != 0 && timeout < 1000) {
+	while (rte_atomic_load_explicit(&priv->dev_close_progress,
+		rte_memory_order_relaxed) != 0 && timeout < 1000) {
 		rte_delay_us_sleep(10000);
 		timeout++;
 	}
@@ -294,8 +294,8 @@
 		priv->last_c_thrd_idx = 0;
 	else
 		priv->last_c_thrd_idx++;
-	__atomic_store_n(&priv->dev_close_progress,
-		1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&priv->dev_close_progress,
+		1, rte_memory_order_relaxed);
 	if (mlx5_vdpa_task_add(priv,
 		priv->last_c_thrd_idx,
 		MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
 	if (!priv->connected)
 		mlx5_vdpa_dev_cache_clean(priv);
 	priv->vid = 0;
-	__atomic_store_n(&priv->dev_close_progress, 0,
-		__ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+		rte_memory_order_relaxed);
 	priv->state = MLX5_VDPA_STATE_PROBED;
 	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
 	return ret;
@@ -664,7 +664,9 @@
 static int
 mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
 {
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	uint32_t max_queues, index, thrd_idx, data[1];
 	struct mlx5_vdpa_virtq *virtq;
@@ -847,8 +849,8 @@
 		if (conf_thread_mng.initializer_priv == priv)
 			if (mlx5_vdpa_mult_threads_create())
 				goto error;
-		__atomic_fetch_add(&conf_thread_mng.refcnt, 1,
-			__ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+			rte_memory_order_relaxed);
 	}
 	if (mlx5_vdpa_create_dev_resources(priv))
 		goto error;
@@ -937,8 +939,8 @@
 	if (priv->vdev)
 		rte_vdpa_unregister_device(priv->vdev);
 	if (priv->use_c_thread)
-		if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
-				1, __ATOMIC_RELAXED) == 1)
+		if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+				1, rte_memory_order_relaxed) == 1)
 			mlx5_vdpa_mult_threads_destroy(true);
 	rte_free(priv);
 }
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 7b37c98..0cc67ed 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
 struct mlx5_vdpa_task {
 	struct mlx5_vdpa_priv *priv;
 	enum mlx5_vdpa_task_type type;
-	uint32_t *remaining_cnt;
-	uint32_t *err_cnt;
+	RTE_ATOMIC(uint32_t) *remaining_cnt;
+	RTE_ATOMIC(uint32_t) *err_cnt;
 	uint32_t idx;
 } __rte_packed __rte_aligned(4);
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
 struct mlx5_vdpa_conf_thread_mng {
 	void *initializer_priv;
-	uint32_t refcnt;
+	RTE_ATOMIC(uint32_t) refcnt;
 	uint32_t max_thrds;
 	pthread_mutex_t cthrd_lock;
 	struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
 };
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
 	uint64_t features; /* Negotiated features. */
 	uint16_t log_max_rqt_size;
 	uint16_t last_c_thrd_idx;
-	uint16_t dev_close_progress;
+	RTE_ATOMIC(uint16_t) dev_close_progress;
 	uint16_t num_mrs; /* Number of memory regions. */
 	struct mlx5_vdpa_steer steer;
 	struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
 		uint32_t thrd_idx,
 		enum mlx5_vdpa_task_type task_type,
-		uint32_t *remaining_cnt, uint32_t *err_cnt,
+		RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
 		void **task_data, uint32_t num);
 int
 mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
 bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
-	uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+	RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
 int
 mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
 void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
 		uint32_t thrd_idx,
 		enum mlx5_vdpa_task_type task_type,
-		uint32_t *remaining_cnt, uint32_t *err_cnt,
+		RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
 		void **task_data, uint32_t num)
 {
 	struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
 		return -1;
 	for (i = 0 ; i < num; i++)
 		if (task[i].remaining_cnt)
-			__atomic_fetch_add(task[i].remaining_cnt, 1,
-				__ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+				rte_memory_order_relaxed);
 	/* wake up conf thread. */
 	pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
 	pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
 }
 
 bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
-	uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+	RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
 {
 	/* Check and wait all tasks done. */
-	while (__atomic_load_n(remaining_cnt,
-		__ATOMIC_RELAXED) != 0) {
+	while (rte_atomic_load_explicit(remaining_cnt,
+		rte_memory_order_relaxed) != 0) {
 		rte_delay_us_sleep(sleep_time);
 	}
-	if (__atomic_load_n(err_cnt,
-		__ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(err_cnt,
+		rte_memory_order_relaxed)) {
 		DRV_LOG(ERR, "Tasks done with error.");
 		return true;
 	}
@@ -137,8 +137,8 @@
 			if (ret) {
 				DRV_LOG(ERR,
 				"Failed to register mr %d.", task.idx);
-				__atomic_fetch_add(task.err_cnt, 1,
-				__ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+				rte_memory_order_relaxed);
 			}
 			break;
 		case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
 			if (ret) {
 				DRV_LOG(ERR,
 					"Failed to setup virtq %d.", task.idx);
-				__atomic_fetch_add(
-					task.err_cnt, 1, __ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(
+					task.err_cnt, 1, rte_memory_order_relaxed);
 			}
 			virtq->enable = 1;
 			pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
 				DRV_LOG(ERR,
 				"Failed to stop virtq %d.",
 				task.idx);
-				__atomic_fetch_add(
+				rte_atomic_fetch_add_explicit(
 					task.err_cnt, 1,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 				pthread_mutex_unlock(&virtq->virtq_lock);
 				break;
 			}
@@ -176,9 +176,9 @@
 				DRV_LOG(ERR,
 				"Failed to get negotiated features virtq %d.",
 				task.idx);
-				__atomic_fetch_add(
+				rte_atomic_fetch_add_explicit(
 					task.err_cnt, 1,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 				pthread_mutex_unlock(&virtq->virtq_lock);
 				break;
 			}
@@ -200,9 +200,9 @@
 			if (!priv->connected)
 				mlx5_vdpa_dev_cache_clean(priv);
 			priv->vid = 0;
-			__atomic_store_n(
+			rte_atomic_store_explicit(
 			&priv->dev_close_progress, 0,
-			__ATOMIC_RELAXED);
+			rte_memory_order_relaxed);
 			break;
 		case MLX5_VDPA_TASK_PREPARE_VIRTQ:
 			ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
 				DRV_LOG(ERR,
 				"Failed to prepare virtq %d.",
 				task.idx);
-				__atomic_fetch_add(
+				rte_atomic_fetch_add_explicit(
 					task.err_cnt, 1,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 			}
 			break;
 		default:
@@ -222,8 +222,8 @@
 			break;
 		}
 		if (task.remaining_cnt)
-			__atomic_fetch_sub(task.remaining_cnt,
-				1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+				1, rte_memory_order_relaxed);
 	}
 	return 0;
 }
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
 int
 mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 {
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	uint32_t i, thrd_idx, data[1];
 	struct mlx5_vdpa_virtq *virtq;
 	uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
 	uint8_t mode = 0;
 	int ret = -rte_errno;
 	uint32_t i, thrd_idx, data[1];
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
 		(priv->vid, &mode, &priv->vmem_info.size,
 		&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
 {
 	int ret = rte_vhost_get_negotiated_features(priv->vid,
 			&priv->features);
 	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	uint32_t i, thrd_idx, data[1];
 	struct mlx5_vdpa_virtq *virtq;
 	struct rte_vhost_vring vq;