[43/46] app/test-eventdev: use rte stdatomic API

Message ID 1710967892-7046-44-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: use stdatomic API

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Tyler Retzlaff March 20, 2024, 8:51 p.m. UTC
  Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx operations from the optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 app/test-eventdev/test_order_atq.c    | 4 ++--
 app/test-eventdev/test_order_common.c | 5 +++--
 app/test-eventdev/test_order_common.h | 8 ++++----
 app/test-eventdev/test_order_queue.c  | 4 ++--
 app/test-eventdev/test_perf_common.h  | 6 +++---
 5 files changed, 14 insertions(+), 13 deletions(-)
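
For reference, the substitution applied throughout this patch, shown as a minimal standalone sketch (not part of the patch itself); it assumes <rte_stdatomic.h> provides the RTE_ATOMIC() specifier, the rte_atomic_*_explicit() operations and the rte_memory_order_* constants used in the hunks below:

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

/* the counter type gains the RTE_ATOMIC() specifier */
static RTE_ATOMIC(uint64_t) outstand_pkts;

static inline void
consume_pkt(void)
{
	/* was: __atomic_fetch_sub(&outstand_pkts, 1, __ATOMIC_RELAXED); */
	rte_atomic_fetch_sub_explicit(&outstand_pkts, 1,
	    rte_memory_order_relaxed);
}

static inline bool
all_pkts_done(void)
{
	/* was: __atomic_load_n(&outstand_pkts, __ATOMIC_RELAXED) == 0 */
	return rte_atomic_load_explicit(&outstand_pkts,
	    rte_memory_order_relaxed) == 0;
}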
  

Patch

diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 2fee4b4..128d3f2 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@ 
 		uint16_t event = rte_event_dequeue_burst(dev_id, port,
 					&ev, 1, 0);
 		if (!event) {
-			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+			if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
 				break;
 			rte_pause();
 			continue;
@@ -64,7 +64,7 @@ 
 				BURST_SIZE, 0);
 
 		if (nb_rx == 0) {
-			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+			if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
 				break;
 			rte_pause();
 			continue;
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index a9894c6..0fceace 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -189,7 +189,7 @@ 
 		evt_err("failed to allocate t->expected_flow_seq memory");
 		goto exp_nomem;
 	}
-	__atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
 	t->err = false;
 	t->nb_pkts = opt->nb_pkts;
 	t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@ 
 
 	while (t->err == false) {
 		uint64_t new_cycles = rte_get_timer_cycles();
-		int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+		int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+		    rte_memory_order_relaxed);
 
 		if (remaining <= 0) {
 			t->result = EVT_TEST_SUCCESS;
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 1507265..65878d1 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@  struct test_order {
 	 * The atomic_* is an expensive operation,Since it is a functional test,
 	 * We are using the atomic_ operation to reduce the code complexity.
 	 */
-	uint64_t outstand_pkts;
+	RTE_ATOMIC(uint64_t) outstand_pkts;
 	enum evt_test_result result;
 	uint32_t nb_flows;
 	uint64_t nb_pkts;
@@ -95,7 +95,7 @@  struct test_order {
 order_process_stage_1(struct test_order *const t,
 		struct rte_event *const ev, const uint32_t nb_flows,
 		uint32_t *const expected_flow_seq,
-		uint64_t *const outstand_pkts)
+		RTE_ATOMIC(uint64_t) *const outstand_pkts)
 {
 	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
 	/* compare the seqn against expected value */
@@ -113,7 +113,7 @@  struct test_order {
 	 */
 	expected_flow_seq[flow]++;
 	rte_pktmbuf_free(ev->mbuf);
-	__atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
 }
 
 static __rte_always_inline void
@@ -132,7 +132,7 @@  struct test_order {
 	const uint8_t port = w->port_id;\
 	const uint32_t nb_flows = t->nb_flows;\
 	uint32_t *expected_flow_seq = t->expected_flow_seq;\
-	uint64_t *outstand_pkts = &t->outstand_pkts;\
+	RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
 	if (opt->verbose_level > 1)\
 		printf("%s(): lcore %d dev_id %d port=%d\n",\
 			__func__, rte_lcore_id(), dev_id, port)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 80eaea5..a282ab2 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@ 
 		uint16_t event = rte_event_dequeue_burst(dev_id, port,
 					&ev, 1, 0);
 		if (!event) {
-			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+			if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
 				break;
 			rte_pause();
 			continue;
@@ -64,7 +64,7 @@ 
 				BURST_SIZE, 0);
 
 		if (nb_rx == 0) {
-			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+			if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
 				break;
 			rte_pause();
 			continue;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 2b4f572..7f7c823 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -225,7 +225,7 @@  struct perf_elt {
 	 * stored before updating the number of
 	 * processed packets for worker lcores
 	 */
-	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+	rte_atomic_thread_fence(rte_memory_order_release);
 	w->processed_pkts++;
 
 	if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@  struct perf_elt {
 	/* Release fence here ensures event_prt is stored before updating the number of processed
 	 * packets for worker lcores.
 	 */
-	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+	rte_atomic_thread_fence(rte_memory_order_release);
 	w->processed_pkts++;
 
 	if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@  struct perf_elt {
 	/* Release fence here ensures event_prt is stored before updating the number of processed
 	 * packets for worker lcores.
 	 */
-	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+	rte_atomic_thread_fence(rte_memory_order_release);
 	w->processed_pkts += vec->nb_elem;
 
 	if (enable_fwd_latency) {
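
The test_perf_common.h hunks above only rename the memory-order constant; the release fence keeps its original role of ordering the stored element before the processed-packet counter update. A minimal sketch of that pattern, with hypothetical names (record_processed(), slot and processed_pkts are placeholders, not the file's real fields):

#include <stdint.h>

#include <rte_stdatomic.h>

struct perf_elt;	/* opaque here; defined in test_perf_common.h */

static inline void
record_processed(struct perf_elt **slot, struct perf_elt *elt,
		 uint64_t *processed_pkts)
{
	*slot = elt;	/* store the element first */
	/* release fence: keeps the store above ordered before the counter
	 * update below, as the comments in the hunks describe
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
	(*processed_pkts)++;
}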