[v3,13/16] eal: use previous value atomic fetch operations

Message ID: 1679338836-21321-14-git-send-email-roretzla@linux.microsoft.com
State: Accepted, archived
Delegated to: David Marchand
Series: replace __atomic operations returning new value

Checks

Context         Check      Description
ci/checkpatch   success    coding style OK

Commit Message

Tyler Retzlaff March 20, 2023, 7 p.m. UTC
Use __atomic_fetch_{add,and,or,sub,xor} instead of
__atomic_{add,and,or,sub,xor}_fetch, adding the code needed to
recompute the new value from the returned previous value wherever
the result is consumed (see the sketch after the diffstat below).

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 lib/eal/include/generic/rte_rwlock.h |  8 ++++----
 lib/eal/ppc/include/rte_atomic.h     | 16 ++++++++--------
 2 files changed, 12 insertions(+), 12 deletions(-)
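
For reference, a minimal standalone sketch (not part of the patch)
illustrating the transformation: __atomic_add_fetch() returns the value
*after* the operation, while __atomic_fetch_add() returns the value
*before* it, so the new value is recovered by reapplying the operand to
the returned result.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t cnt;

		/* Old form: returns the value after the addition (6). */
		cnt = 5;
		uint32_t new_val = __atomic_add_fetch(&cnt, 1, __ATOMIC_ACQUIRE);

		/* New form: returns the value before the addition (5);
		 * adding the operand back yields the post-operation value.
		 */
		cnt = 5;
		uint32_t old_val = __atomic_fetch_add(&cnt, 1, __ATOMIC_ACQUIRE);

		printf("%u == %u\n", new_val, old_val + 1); /* prints 6 == 6 */
		return 0;
	}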
  

Patch

diff --git a/lib/eal/include/generic/rte_rwlock.h b/lib/eal/include/generic/rte_rwlock.h
index d45c22c..71e2d8d 100644
--- a/lib/eal/include/generic/rte_rwlock.h
+++ b/lib/eal/include/generic/rte_rwlock.h
@@ -97,8 +97,8 @@ 
 			rte_pause();
 
 		/* Try to get read lock */
-		x = __atomic_add_fetch(&rwl->cnt, RTE_RWLOCK_READ,
-				       __ATOMIC_ACQUIRE);
+		x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
+				       __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;
 
 		/* If no writer, then acquire was successful */
 		if (likely(!(x & RTE_RWLOCK_MASK)))
@@ -134,8 +134,8 @@ 
 		return -EBUSY;
 
 	/* Try to get read lock */
-	x = __atomic_add_fetch(&rwl->cnt, RTE_RWLOCK_READ,
-			       __ATOMIC_ACQUIRE);
+	x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
+			       __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;
 
 	/* Back out if writer raced in */
 	if (unlikely(x & RTE_RWLOCK_MASK)) {
diff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h
index 663b4d3..ffabd98 100644
--- a/lib/eal/ppc/include/rte_atomic.h
+++ b/lib/eal/ppc/include/rte_atomic.h
@@ -71,12 +71,12 @@  static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
 
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+	return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0;
 }
 
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+	return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0;
 }
 
 static inline uint16_t
@@ -113,12 +113,12 @@  static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
 
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
-	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+	return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0;
 }
 
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
-	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+	return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0;
 }
 
 static inline uint32_t
@@ -181,23 +181,23 @@  static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 static inline int64_t
 rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
 {
-	return __atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
+	return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE) + inc;
 }
 
 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
 {
-	return __atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
+	return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE) - dec;
 }
 
 static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
 {
-	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+	return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0;
 }
 
 static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
 {
-	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+	return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0;
 }
 
 static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
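
For context, a minimal standalone sketch (hypothetical refcnt counter,
not taken from the patch) showing that the rewritten *_dec_and_test
form still reports true exactly when the counter reaches zero:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t refcnt = 1;

		/* The previous value (1) is fetched, the operand is then
		 * subtracted from the returned result, and the comparison
		 * against zero matches the old
		 * __atomic_sub_fetch(...) == 0 behaviour.
		 */
		int last = __atomic_fetch_sub(&refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0;

		printf("last reference dropped: %d\n", last); /* prints 1 */
		return 0;
	}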