From patchwork Mon Oct 16 23:08:57 2023
X-Patchwork-Submitter: Tyler Retzlaff <roretzla@linux.microsoft.com>
X-Patchwork-Id: 132678
X-Patchwork-Delegate: david.marchand@redhat.com
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal, Anatoly Burakov, Andrew Rybchenko, Bruce Richardson,
 Chenbo Xia, Ciara Power, David Christensen, David Hunt, Dmitry Kozlyuk,
 Dmitry Malloy, Elena Agostini, Erik Gabriel Carrillo, Fan Zhang,
 Ferruh Yigit, Harman Kalra, Harry van Haaren, Honnappa Nagarahalli,
 Jerin Jacob, Konstantin Ananyev, Matan Azrad, Maxime Coquelin,
 Narcisa Ana Maria Vasile, Nicolas Chautru, Olivier Matz, Ori Kam,
 Pallavi Kadam, Pavan Nikhilesh, Reshma Pattan, Sameh Gobriel,
 Shijith Thotton, Sivaprasad Tummala, Stephen Hemminger, Suanming Mou,
 Sunil Kumar Kori, Thomas Monjalon, Viacheslav Ovsiienko,
 Vladimir Medvedkin, Yipeng Wang, Tyler Retzlaff
Subject: [PATCH 13/21] stack: use rte optional stdatomic API
Date: Mon, 16 Oct 2023 16:08:57 -0700
Message-Id: <1697497745-20664-14-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
References: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/stack/rte_stack.h            |  2 +-
 lib/stack/rte_stack_lf_c11.h     | 24 ++++++++++++------------
 lib/stack/rte_stack_lf_generic.h | 18 +++++++++---------
 3 files changed, 22 insertions(+), 22 deletions(-)
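
For reviewers new to the optional stdatomic API, a minimal before/after
sketch of the conversion pattern this patch applies. Illustrative only,
not part of the patch; it assumes the RTE_ATOMIC() specifier and the
rte_atomic_*_explicit() helpers from rte_stdatomic.h (DPDK 23.11), and
the variable names are made up:

	/* Before: gcc builtins operating on a plain variable. */
	uint64_t count;
	uint64_t v = __atomic_load_n(&count, __ATOMIC_RELAXED);
	__atomic_fetch_add(&count, 1, __ATOMIC_RELEASE);

	/* After: the variable is declared with RTE_ATOMIC() so it is
	 * _Atomic when RTE_ENABLE_STDATOMIC is defined (and plain
	 * otherwise), and each operation names a rte_memory_order_*.
	 */
	RTE_ATOMIC(uint64_t) acount;
	uint64_t w = rte_atomic_load_explicit(&acount, rte_memory_order_relaxed);
	rte_atomic_fetch_add_explicit(&acount, 1, rte_memory_order_release);
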
diff --git a/lib/stack/rte_stack.h b/lib/stack/rte_stack.h
index 921d29a..a379300 100644
--- a/lib/stack/rte_stack.h
+++ b/lib/stack/rte_stack.h
@@ -44,7 +44,7 @@ struct rte_stack_lf_list {
 	/** List head */
 	struct rte_stack_lf_head head __rte_aligned(16);
 	/** List len */
-	uint64_t len;
+	RTE_ATOMIC(uint64_t) len;
 };
 
 /* Structure containing two lock-free LIFO lists: the stack itself and a list
diff --git a/lib/stack/rte_stack_lf_c11.h b/lib/stack/rte_stack_lf_c11.h
index 687a6f6..9cb6998 100644
--- a/lib/stack/rte_stack_lf_c11.h
+++ b/lib/stack/rte_stack_lf_c11.h
@@ -26,8 +26,8 @@
 	 * elements. If the mempool is near-empty to the point that this is a
 	 * concern, the user should consider increasing the mempool size.
 	 */
-	return (unsigned int)__atomic_load_n(&s->stack_lf.used.len,
-			__ATOMIC_RELAXED);
+	return (unsigned int)rte_atomic_load_explicit(&s->stack_lf.used.len,
+			rte_memory_order_relaxed);
 }
 
 static __rte_always_inline void
@@ -59,14 +59,14 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				1, __ATOMIC_RELEASE,
-				__ATOMIC_RELAXED);
+				1, rte_memory_order_release,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 
 	/* Ensure the stack modifications are not reordered with respect
 	 * to the LIFO len update.
 	 */
-	__atomic_fetch_add(&list->len, num, __ATOMIC_RELEASE);
+	rte_atomic_fetch_add_explicit(&list->len, num, rte_memory_order_release);
 }
 
 static __rte_always_inline struct rte_stack_lf_elem *
@@ -80,7 +80,7 @@
 	int success;
 
 	/* Reserve num elements, if available */
-	len = __atomic_load_n(&list->len, __ATOMIC_RELAXED);
+	len = rte_atomic_load_explicit(&list->len, rte_memory_order_relaxed);
 
 	while (1) {
 		/* Does the list contain enough elements? */
@@ -88,10 +88,10 @@
 			return NULL;
 
 		/* len is updated on failure */
-		if (__atomic_compare_exchange_n(&list->len,
+		if (rte_atomic_compare_exchange_weak_explicit(&list->len,
 				&len, len - num,
-				1, __ATOMIC_ACQUIRE,
-				__ATOMIC_RELAXED))
+				rte_memory_order_acquire,
+				rte_memory_order_relaxed))
 			break;
 	}
 
@@ -110,7 +110,7 @@
 	 * elements are properly ordered with respect to the head
 	 * pointer read.
 	 */
-	__atomic_thread_fence(__ATOMIC_ACQUIRE);
+	rte_atomic_thread_fence(rte_memory_order_acquire);
 
 	rte_prefetch0(old_head.top);
 
@@ -159,8 +159,8 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				0, __ATOMIC_RELAXED,
-				__ATOMIC_RELAXED);
+				0, rte_memory_order_relaxed,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 
 	return old_head.top;
diff --git a/lib/stack/rte_stack_lf_generic.h b/lib/stack/rte_stack_lf_generic.h
index 39f7ff3..cc69e4d 100644
--- a/lib/stack/rte_stack_lf_generic.h
+++ b/lib/stack/rte_stack_lf_generic.h
@@ -27,7 +27,7 @@
 	 * concern, the user should consider increasing the mempool size.
 	 */
 	/* NOTE: review for potential ordering optimization */
-	return __atomic_load_n(&s->stack_lf.used.len, __ATOMIC_SEQ_CST);
+	return rte_atomic_load_explicit(&s->stack_lf.used.len, rte_memory_order_seq_cst);
 }
 
 static __rte_always_inline void
@@ -64,11 +64,11 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				1, __ATOMIC_RELEASE,
-				__ATOMIC_RELAXED);
+				1, rte_memory_order_release,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 
 	/* NOTE: review for potential ordering optimization */
-	__atomic_fetch_add(&list->len, num, __ATOMIC_SEQ_CST);
+	rte_atomic_fetch_add_explicit(&list->len, num, rte_memory_order_seq_cst);
 }
 
 static __rte_always_inline struct rte_stack_lf_elem *
@@ -83,15 +83,15 @@
 	/* Reserve num elements, if available */
 	while (1) {
 		/* NOTE: review for potential ordering optimization */
-		uint64_t len = __atomic_load_n(&list->len, __ATOMIC_SEQ_CST);
+		uint64_t len = rte_atomic_load_explicit(&list->len, rte_memory_order_seq_cst);
 
 		/* Does the list contain enough elements? */
 		if (unlikely(len < num))
 			return NULL;
 
 		/* NOTE: review for potential ordering optimization */
-		if (__atomic_compare_exchange_n(&list->len, &len, len - num,
-				0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+		if (rte_atomic_compare_exchange_strong_explicit(&list->len, &len, len - num,
+				rte_memory_order_seq_cst, rte_memory_order_seq_cst))
 			break;
 	}
 
@@ -143,8 +143,8 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				1, __ATOMIC_RELEASE,
-				__ATOMIC_RELAXED);
+				1, rte_memory_order_release,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 
 	return old_head.top;
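
As additional context for the pop-side pattern: a self-contained sketch
of the length-reservation loop as it reads after this change. The
struct and function names (lf_list_demo, lf_list_try_reserve) are
hypothetical, invented for illustration; the rte_* calls assume the
rte_stdatomic.h wrappers from DPDK 23.11:

	#include <stdint.h>
	#include <rte_stdatomic.h>

	struct lf_list_demo {
		RTE_ATOMIC(uint64_t) len;
	};

	/* Try to reserve num elements; returns 0 on success, -1 if the
	 * list holds too few. Mirrors the loop in the c11 variant of
	 * __rte_stack_lf_pop_elems() above.
	 */
	static int
	lf_list_try_reserve(struct lf_list_demo *l, unsigned int num)
	{
		uint64_t len = rte_atomic_load_explicit(&l->len,
				rte_memory_order_relaxed);

		while (1) {
			if (len < num)
				return -1;
			/* On failure the weak CAS stores the observed
			 * value back into len, so no explicit re-read
			 * is needed before retrying.
			 */
			if (rte_atomic_compare_exchange_weak_explicit(&l->len,
					&len, len - num,
					rte_memory_order_acquire,
					rte_memory_order_relaxed))
				return 0;
		}
	}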