[v4,24/45] event/octeontx: use rte stdatomic API

Message ID 1713568003-30453-25-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Superseded
Delegated to: Thomas Monjalon
Headers
Series use stdatomic API |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Tyler Retzlaff April 19, 2024, 11:06 p.m. UTC
  Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/event/octeontx/timvf_evdev.h  |  8 ++++----
 drivers/event/octeontx/timvf_worker.h | 36 +++++++++++++++++------------------
 2 files changed, 22 insertions(+), 22 deletions(-)
  

Patch

diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index e7a63e4..3a2dc47 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -126,15 +126,15 @@  enum timvf_clk_src {
 struct __rte_aligned(8) tim_mem_bucket {
 	uint64_t first_chunk;
 	union {
-		uint64_t w1;
+		RTE_ATOMIC(uint64_t) w1;
 		struct {
-			uint32_t nb_entry;
+			RTE_ATOMIC(uint32_t) nb_entry;
 			uint8_t sbt:1;
 			uint8_t hbt:1;
 			uint8_t bsk:1;
 			uint8_t rsvd:5;
-			uint8_t lock;
-			int16_t chunk_remainder;
+			RTE_ATOMIC(uint8_t) lock;
+			RTE_ATOMIC(int16_t) chunk_remainder;
 		};
 	};
 	uint64_t current_chunk;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index e4b923e..de9f1b0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -19,22 +19,22 @@ 
 static inline int16_t
 timr_bkt_get_rem(struct tim_mem_bucket *bktp)
 {
-	return __atomic_load_n(&bktp->chunk_remainder,
-			__ATOMIC_ACQUIRE);
+	return rte_atomic_load_explicit(&bktp->chunk_remainder,
+			rte_memory_order_acquire);
 }
 
 static inline void
 timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
 {
-	__atomic_store_n(&bktp->chunk_remainder, v,
-			__ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&bktp->chunk_remainder, v,
+			rte_memory_order_release);
 }
 
 static inline void
 timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
 {
-	__atomic_fetch_sub(&bktp->chunk_remainder, v,
-			__ATOMIC_RELEASE);
+	rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
+			rte_memory_order_release);
 }
 
 static inline uint8_t
@@ -47,14 +47,14 @@ 
 timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
 {
 	const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
-	return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+	return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
 }
 
 static inline uint64_t
 timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
 {
 	const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
-	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+	return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
 }
 
 static inline uint8_t
@@ -81,34 +81,34 @@ 
 {
 	/*Clear everything except lock. */
 	const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
-	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+	return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
 }
 
 static inline uint64_t
 timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
 {
-	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
-			__ATOMIC_ACQ_REL);
+	return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+			rte_memory_order_acq_rel);
 }
 
 static inline uint64_t
 timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
 {
-	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
-			__ATOMIC_RELAXED);
+	return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
+			rte_memory_order_relaxed);
 }
 
 static inline uint64_t
 timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
 {
 	const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
-	return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+	return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
 }
 
 static inline void
 timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
 {
-	__atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+	rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
 }
 
 static inline uint32_t
@@ -121,13 +121,13 @@ 
 static inline void
 timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
 {
-	__atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
 }
 
 static inline void
 timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
 {
-	__atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
 }
 
 static inline uint64_t
@@ -135,7 +135,7 @@ 
 {
 	const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
 			TIM_BUCKET_W1_S_NUM_ENTRIES);
-	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+	return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
 }
 
 static inline struct tim_mem_entry *