From patchwork Thu Aug 17 21:42:14 2023
X-Patchwork-Submitter: Tyler Retzlaff
X-Patchwork-Id: 130478
X-Patchwork-Delegate: thomas@monjalon.net
From: Tyler Retzlaff
To: dev@dpdk.org
Cc: techboard@dpdk.org, Bruce Richardson, Honnappa Nagarahalli,
 Ruifeng Wang, Jerin Jacob, Sunil Kumar Kori, Mattias Rönnblom,
 Joyce Kong, David Christensen, Konstantin Ananyev, David Hunt,
 Thomas Monjalon, David Marchand, Tyler Retzlaff
Subject: [PATCH v5 3/6] eal: add rte atomic qualifier with casts
Date: Thu, 17 Aug 2023 14:42:14 -0700
Message-Id: <1692308537-2646-4-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1692308537-2646-1-git-send-email-roretzla@linux.microsoft.com>
References: <1691717521-1025-1-git-send-email-roretzla@linux.microsoft.com>
 <1692308537-2646-1-git-send-email-roretzla@linux.microsoft.com>

Introduce __rte_atomic qualifying casts in the optional rte atomics
inline functions so that the need to pass __rte_atomic qualified
arguments does not cascade into their callers.

Warning: this is implementation dependent and is done temporarily to
avoid having to convert more of the DPDK libraries and tests in the
initial series that introduces the API. The casts assume the ABI of
the __rte_atomic qualified and unqualified types is the same; if that
assumption does not hold, the risk is only realized when
enable_stdatomic=true.
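
To illustrate (a sketch only, not part of the patch; struct counter
and counter_add are illustrative names): with enable_stdatomic=true,
__rte_atomic expands to the C11 _Atomic qualifier and
rte_atomic_fetch_add_explicit maps to the stdatomic.h generic, so
without the cast the qualifier would have to spread from the member
to every caller:

    #include <stdatomic.h>
    #include <stdint.h>

    /* legacy layout: cnt is volatile but not _Atomic qualified */
    struct counter { volatile int16_t cnt; };

    static inline void
    counter_add(struct counter *v, int16_t inc)
    {
        /* atomic_fetch_add_explicit(&v->cnt, inc, ...) would not
         * compile here: the C11 generic requires an _Atomic object,
         * and qualifying cnt would cascade into every caller. */

        /* the cast confines the qualifier to the call site, assuming
         * the _Atomic and plain int16_t representations match */
        atomic_fetch_add_explicit((volatile _Atomic int16_t *)&v->cnt,
            inc, memory_order_seq_cst);
    }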
Signed-off-by: Tyler Retzlaff
Reviewed-by: Morten Brørup
---
 lib/eal/include/generic/rte_atomic.h | 48 ++++++++++++++++++++++++------------
 lib/eal/include/generic/rte_pause.h  |  9 ++++---
 lib/eal/x86/rte_power_intrinsics.c   |  7 +++---
 3 files changed, 42 insertions(+), 22 deletions(-)

diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index 5940e7e..709bf15 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -274,7 +274,8 @@
 static inline void
 rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
 {
-	rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);
+	rte_atomic_fetch_add_explicit((volatile __rte_atomic int16_t *)&v->cnt, inc,
+		rte_memory_order_seq_cst);
 }
 
 /**
@@ -288,7 +289,8 @@
 static inline void
 rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
 {
-	rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);
+	rte_atomic_fetch_sub_explicit((volatile __rte_atomic int16_t *)&v->cnt, dec,
+		rte_memory_order_seq_cst);
 }
 
 /**
@@ -341,7 +343,8 @@
 static inline int16_t
 rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;
+	return rte_atomic_fetch_add_explicit((volatile __rte_atomic int16_t *)&v->cnt, inc,
+		rte_memory_order_seq_cst) + inc;
 }
 
 /**
@@ -361,7 +364,8 @@
 static inline int16_t
 rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;
+	return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int16_t *)&v->cnt, dec,
+		rte_memory_order_seq_cst) - dec;
 }
 
 /**
@@ -380,7 +384,8 @@
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_seq_cst) + 1 == 0;
+	return rte_atomic_fetch_add_explicit((volatile __rte_atomic int16_t *)&v->cnt, 1,
+		rte_memory_order_seq_cst) + 1 == 0;
 }
 #endif
 
@@ -400,7 +405,8 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_seq_cst) - 1 == 0;
+	return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int16_t *)&v->cnt, 1,
+		rte_memory_order_seq_cst) - 1 == 0;
 }
 #endif
 
@@ -553,7 +559,8 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline void
 rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
 {
-	rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);
+	rte_atomic_fetch_add_explicit((volatile __rte_atomic int32_t *)&v->cnt, inc,
+		rte_memory_order_seq_cst);
 }
 
 /**
@@ -567,7 +574,8 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline void
 rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
 {
-	rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);
+	rte_atomic_fetch_sub_explicit((volatile __rte_atomic int32_t *)&v->cnt, dec,
+		rte_memory_order_seq_cst);
 }
 
 /**
@@ -620,7 +628,8 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int32_t
 rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;
+	return rte_atomic_fetch_add_explicit((volatile __rte_atomic int32_t *)&v->cnt, inc,
+		rte_memory_order_seq_cst) + inc;
 }
 
 /**
@@ -640,7 +649,8 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int32_t
 rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;
+	return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int32_t *)&v->cnt, dec,
+		rte_memory_order_seq_cst) - dec;
 }
 
 /**
@@ -659,7 +669,8 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_seq_cst) + 1 == 0;
+	return rte_atomic_fetch_add_explicit((volatile __rte_atomic int32_t *)&v->cnt, 1,
+		rte_memory_order_seq_cst) + 1 == 0;
 }
 #endif
 
@@ -679,7 +690,8 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_seq_cst) - 1 == 0;
+	return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int32_t *)&v->cnt, 1,
+		rte_memory_order_seq_cst) - 1 == 0;
 }
 #endif
 
@@ -885,7 +897,8 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline void
 rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
 {
-	rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);
+	rte_atomic_fetch_add_explicit((volatile __rte_atomic int64_t *)&v->cnt, inc,
+		rte_memory_order_seq_cst);
 }
 #endif
 
@@ -904,7 +917,8 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
 {
-	rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);
+	rte_atomic_fetch_sub_explicit((volatile __rte_atomic int64_t *)&v->cnt, dec,
+		rte_memory_order_seq_cst);
 }
 #endif
 
@@ -962,7 +976,8 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline int64_t
 rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;
+	return rte_atomic_fetch_add_explicit((volatile __rte_atomic int64_t *)&v->cnt, inc,
+		rte_memory_order_seq_cst) + inc;
 }
 #endif
 
@@ -986,7 +1001,8 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;
+	return rte_atomic_fetch_sub_explicit((volatile __rte_atomic int64_t *)&v->cnt, dec,
+		rte_memory_order_seq_cst) - dec;
 }
 #endif
 
diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
index 256309e..b7b059f 100644
--- a/lib/eal/include/generic/rte_pause.h
+++ b/lib/eal/include/generic/rte_pause.h
@@ -81,7 +81,8 @@
 {
 	assert(memorder == rte_memory_order_acquire ||
 		memorder == rte_memory_order_relaxed);
 
-	while (rte_atomic_load_explicit(addr, memorder) != expected)
+	while (rte_atomic_load_explicit((volatile __rte_atomic uint16_t *)addr, memorder)
+		!= expected)
 		rte_pause();
 }
 
@@ -91,7 +92,8 @@
 {
 	assert(memorder == rte_memory_order_acquire ||
 		memorder == rte_memory_order_relaxed);
 
-	while (rte_atomic_load_explicit(addr, memorder) != expected)
+	while (rte_atomic_load_explicit((volatile __rte_atomic uint32_t *)addr, memorder)
+		!= expected)
 		rte_pause();
 }
 
@@ -101,7 +103,8 @@
 {
 	assert(memorder == rte_memory_order_acquire ||
 		memorder == rte_memory_order_relaxed);
 
-	while (rte_atomic_load_explicit(addr, memorder) != expected)
+	while (rte_atomic_load_explicit((volatile __rte_atomic uint64_t *)addr, memorder)
+		!= expected)
 		rte_pause();
 }
 
diff --git a/lib/eal/x86/rte_power_intrinsics.c b/lib/eal/x86/rte_power_intrinsics.c
index cf70e33..fb8539f 100644
--- a/lib/eal/x86/rte_power_intrinsics.c
+++ b/lib/eal/x86/rte_power_intrinsics.c
@@ -23,9 +23,10 @@
 	uint64_t val;
 
 	/* trigger a write but don't change the value */
-	val = rte_atomic_load_explicit((volatile uint64_t *)addr, rte_memory_order_relaxed);
-	rte_atomic_compare_exchange_strong_explicit((volatile uint64_t *)addr, &val, val,
-		rte_memory_order_relaxed, rte_memory_order_relaxed);
+	val = rte_atomic_load_explicit((volatile __rte_atomic uint64_t *)addr,
+		rte_memory_order_relaxed);
+	rte_atomic_compare_exchange_strong_explicit((volatile __rte_atomic uint64_t *)addr,
+		&val, val, rte_memory_order_relaxed, rte_memory_order_relaxed);
 }
 
 static bool wait_supported;
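
For reference, a hypothetical caller (illustrative names, not from the
patch): because the casts live inside the inline functions, existing
code that operates on plain, unqualified objects keeps compiling
unchanged regardless of the enable_stdatomic setting:

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_pause.h>

    static rte_atomic32_t refcnt;   /* cnt member not __rte_atomic */
    static uint32_t ready_flag;     /* plain uint32_t, unqualified */

    static void
    example(void)
    {
        /* no __rte_atomic qualifier needed at the call sites */
        rte_atomic32_add(&refcnt, 1);
        rte_wait_until_equal_32(&ready_flag, 1,
            rte_memory_order_acquire);
    }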