[v2,3/6] eal: add rte atomic qualifier with casts

Message ID 1691775136-6460-4-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series RFC optional rte optional stdatomics API

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Tyler Retzlaff Aug. 11, 2023, 5:32 p.m. UTC
Introduce __rte_atomic qualifying casts in the optional rte atomics inline
functions to avoid cascading the requirement to pass __rte_atomic qualified
arguments.

Warning: this is implementation dependent and is being done temporarily to
avoid converting more of the DPDK libraries and tests in the initial series
that introduces the API. The casts assume the __rte_atomic qualified and
unqualified types share the same ABI; if they do not, the risk is only
realized when enable_stdatomic=true.
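
To make the cascade concrete: when enable_stdatomic=true,
rte_atomic_fetch_add_explicit maps onto C11 atomic_fetch_add_explicit (as
introduced earlier in this series), which accepts only a pointer to an
_Atomic qualified object. Without a qualifying cast at the call site, the
qualifier would have to spread into rte_atomic16_t itself and from there
into every caller. A minimal sketch of the pattern in plain C11, outside
DPDK (counter16_t and counter16_add are hypothetical stand-ins for the
real types):

#include <stdatomic.h>
#include <stdint.h>

/* Legacy-style counter: the member is volatile but not _Atomic. */
typedef struct { volatile int16_t cnt; } counter16_t;

static inline void
counter16_add(counter16_t *v, int16_t inc)
{
	/*
	 * The cast confines the _Atomic qualifier to this call site, so
	 * callers keep passing the unqualified legacy type. It is only
	 * safe under the assumption above: the qualified and unqualified
	 * types share the same size and representation.
	 */
	atomic_fetch_add_explicit((volatile _Atomic int16_t *)&v->cnt,
	    inc, memory_order_seq_cst);
}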

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/eal/include/generic/rte_atomic.h | 48 ++++++++++++++++++++++++------------
 lib/eal/include/generic/rte_pause.h  |  9 ++++---
 lib/eal/x86/rte_power_intrinsics.c   |  7 +++---
 3 files changed, 42 insertions(+), 22 deletions(-)
  

Comments

Morten Brørup Aug. 14, 2023, 8:05 a.m. UTC | #1
> From: Tyler Retzlaff [mailto:roretzla@linux.microsoft.com]
> Sent: Friday, 11 August 2023 19.32
> 
> Introduce __rte_atomic qualifying casts in the optional rte atomics inline
> functions to avoid cascading the requirement to pass __rte_atomic qualified
> arguments.
> 
> Warning: this is implementation dependent and is being done temporarily to
> avoid converting more of the DPDK libraries and tests in the initial series
> that introduces the API. The casts assume the __rte_atomic qualified and
> unqualified types share the same ABI; if they do not, the risk is only
> realized when enable_stdatomic=true.
> 
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
>  lib/eal/include/generic/rte_atomic.h | 48 ++++++++++++++++++++++++------------
>  lib/eal/include/generic/rte_pause.h  |  9 ++++---
>  lib/eal/x86/rte_power_intrinsics.c   |  7 +++---
>  3 files changed, 42 insertions(+), 22 deletions(-)
> 
> diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
> index f6c4b3e..4f954e0 100644
> --- a/lib/eal/include/generic/rte_atomic.h
> +++ b/lib/eal/include/generic/rte_atomic.h
> @@ -274,7 +274,8 @@
>  static inline void
>  rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
>  {
> -	rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);
> +	rte_atomic_fetch_add_explicit((volatile int16_t __rte_atomic *)&v->cnt, inc,
> +	    rte_memory_order_seq_cst);

As mentioned in my review of the 2/6 patch, I think __rte_atomic should come before the type, like this:
(volatile __rte_atomic int16_t *)

Same with all the changes.

Otherwise good.

Reviewed-by: Morten Brørup <mb@smartsharesystems.com>
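
For reference, both spellings name the same pointer type: C11 allows the
_Atomic qualifier on either side of the type specifier, so the suggestion
is about stylistic consistency with where const and volatile are
conventionally written. A minimal sketch demonstrating the equivalence
(assuming __rte_atomic expands to _Atomic when enable_stdatomic=true, as
introduced earlier in this series):

#include <stdint.h>

typedef volatile int16_t _Atomic *after_t;   /* qualifier after the type */
typedef volatile _Atomic int16_t *before_t;  /* qualifier before the type */

/* Compiles only because both typedefs denote the same type. */
_Static_assert(_Generic((after_t)0, before_t: 1), "same type");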
  

Patch

diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index f6c4b3e..4f954e0 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -274,7 +274,8 @@ 
 static inline void
 rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
 {
-	rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);
+	rte_atomic_fetch_add_explicit((volatile int16_t __rte_atomic *)&v->cnt, inc,
+	    rte_memory_order_seq_cst);
 }
 
 /**
@@ -288,7 +289,8 @@ 
 static inline void
 rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
 {
-	rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);
+	rte_atomic_fetch_sub_explicit((volatile int16_t __rte_atomic *)&v->cnt, dec,
+	    rte_memory_order_seq_cst);
 }
 
 /**
@@ -341,7 +343,8 @@ 
 static inline int16_t
 rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;
+	return rte_atomic_fetch_add_explicit((volatile int16_t __rte_atomic *)&v->cnt, inc,
+	    rte_memory_order_seq_cst) + inc;
 }
 
 /**
@@ -361,7 +364,8 @@ 
 static inline int16_t
 rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;
+	return rte_atomic_fetch_sub_explicit((volatile int16_t __rte_atomic *)&v->cnt, dec,
+	    rte_memory_order_seq_cst) - dec;
 }
 
 /**
@@ -380,7 +384,8 @@ 
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_seq_cst) + 1 == 0;
+	return rte_atomic_fetch_add_explicit((volatile int16_t __rte_atomic *)&v->cnt, 1,
+	    rte_memory_order_seq_cst) + 1 == 0;
 }
 #endif
 
@@ -400,7 +405,8 @@  static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_seq_cst) - 1 == 0;
+	return rte_atomic_fetch_sub_explicit((volatile int16_t __rte_atomic *)&v->cnt, 1,
+	    rte_memory_order_seq_cst) - 1 == 0;
 }
 #endif
 
@@ -553,7 +559,8 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline void
 rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
 {
-	rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);
+	rte_atomic_fetch_add_explicit((volatile int32_t __rte_atomic *)&v->cnt, inc,
+	    rte_memory_order_seq_cst);
 }
 
 /**
@@ -567,7 +574,8 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline void
 rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
 {
-	rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);
+	rte_atomic_fetch_sub_explicit((volatile int32_t __rte_atomic *)&v->cnt, dec,
+	    rte_memory_order_seq_cst);
 }
 
 /**
@@ -620,7 +628,8 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int32_t
 rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;
+	return rte_atomic_fetch_add_explicit((volatile int32_t __rte_atomic *)&v->cnt, inc,
+	    rte_memory_order_seq_cst) + inc;
 }
 
 /**
@@ -640,7 +649,8 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int32_t
 rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;
+	return rte_atomic_fetch_sub_explicit((volatile int32_t __rte_atomic *)&v->cnt, dec,
+	    rte_memory_order_seq_cst) - dec;
 }
 
 /**
@@ -659,7 +669,8 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_seq_cst) + 1 == 0;
+	return rte_atomic_fetch_add_explicit((volatile int32_t __rte_atomic *)&v->cnt, 1,
+	    rte_memory_order_seq_cst) + 1 == 0;
 }
 #endif
 
@@ -679,7 +690,8 @@  static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_seq_cst) - 1 == 0;
+	return rte_atomic_fetch_sub_explicit((volatile int32_t __rte_atomic *)&v->cnt, 1,
+	    rte_memory_order_seq_cst) - 1 == 0;
 }
 #endif
 
@@ -885,7 +897,8 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline void
 rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
 {
-	rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);
+	rte_atomic_fetch_add_explicit((volatile int64_t __rte_atomic *)&v->cnt, inc,
+	    rte_memory_order_seq_cst);
 }
 #endif
 
@@ -904,7 +917,8 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
 {
-	rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);
+	rte_atomic_fetch_sub_explicit((volatile int64_t __rte_atomic *)&v->cnt, dec,
+	    rte_memory_order_seq_cst);
 }
 #endif
 
@@ -962,7 +976,8 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline int64_t
 rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
 {
-	return rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;
+	return rte_atomic_fetch_add_explicit((volatile int64_t __rte_atomic *)&v->cnt, inc,
+	    rte_memory_order_seq_cst) + inc;
 }
 #endif
 
@@ -986,7 +1001,8 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
 {
-	return rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;
+	return rte_atomic_fetch_sub_explicit((volatile int64_t __rte_atomic *)&v->cnt, dec,
+	    rte_memory_order_seq_cst) - dec;
 }
 #endif
 
diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
index c816e7d..c261689 100644
--- a/lib/eal/include/generic/rte_pause.h
+++ b/lib/eal/include/generic/rte_pause.h
@@ -87,7 +87,8 @@ 
 {
 	assert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed);
 
-	while (rte_atomic_load_explicit(addr, memorder) != expected)
+	while (rte_atomic_load_explicit((volatile uint16_t __rte_atomic *)addr, memorder)
+	    != expected)
 		rte_pause();
 }
 
@@ -97,7 +98,8 @@ 
 {
 	assert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed);
 
-	while (rte_atomic_load_explicit(addr, memorder) != expected)
+	while (rte_atomic_load_explicit((volatile uint32_t __rte_atomic *)addr, memorder)
+	    != expected)
 		rte_pause();
 }
 
@@ -107,7 +109,8 @@ 
 {
 	assert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed);
 
-	while (rte_atomic_load_explicit(addr, memorder) != expected)
+	while (rte_atomic_load_explicit((volatile uint64_t __rte_atomic *)addr, memorder)
+	    != expected)
 		rte_pause();
 }
 
diff --git a/lib/eal/x86/rte_power_intrinsics.c b/lib/eal/x86/rte_power_intrinsics.c
index cf70e33..6c192f0 100644
--- a/lib/eal/x86/rte_power_intrinsics.c
+++ b/lib/eal/x86/rte_power_intrinsics.c
@@ -23,9 +23,10 @@ 
 	uint64_t val;
 
 	/* trigger a write but don't change the value */
-	val = rte_atomic_load_explicit((volatile uint64_t *)addr, rte_memory_order_relaxed);
-	rte_atomic_compare_exchange_strong_explicit((volatile uint64_t *)addr, &val, val,
-			rte_memory_order_relaxed, rte_memory_order_relaxed);
+	val = rte_atomic_load_explicit((volatile uint64_t __rte_atomic *)addr,
+	    rte_memory_order_relaxed);
+	rte_atomic_compare_exchange_strong_explicit((volatile uint64_t __rte_atomic *)addr,
+	    &val, val, rte_memory_order_relaxed, rte_memory_order_relaxed);
 }
 
 static bool wait_supported;
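
With the casts confined to the inline wrappers, existing callers of the
legacy API compile unchanged whether or not enable_stdatomic is set. A
hedged usage sketch (the caller shown is hypothetical):

#include <rte_atomic.h>

static rte_atomic16_t refcnt = RTE_ATOMIC16_INIT(0);

void
take_ref(void)
{
	/* No __rte_atomic qualifier is needed at the call site. */
	rte_atomic16_add(&refcnt, 1);
}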