[v2,03/45] net/iavf: use rte stdatomic API

Message ID 1711048652-7512-4-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series: use stdatomic API

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Tyler Retzlaff March 21, 2024, 7:16 p.m. UTC
  Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/net/iavf/iavf.h               | 16 ++++++++--------
 drivers/net/iavf/iavf_rxtx.c          |  4 ++--
 drivers/net/iavf/iavf_rxtx_vec_neon.c |  2 +-
 drivers/net/iavf/iavf_vchnl.c         | 14 +++++++-------
 4 files changed, 18 insertions(+), 18 deletions(-)
  

Patch

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 824ae4a..6b977e5 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -238,8 +238,8 @@  struct iavf_info {
 	struct virtchnl_vlan_caps vlan_v2_caps;
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
-	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
-	uint32_t pend_cmd_count;
+	volatile RTE_ATOMIC(enum virtchnl_ops) pend_cmd; /* pending command not finished */
+	RTE_ATOMIC(uint32_t) pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -456,13 +456,13 @@  struct iavf_cmd_info {
 _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 {
 	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
-	int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
-			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+	int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+			rte_memory_order_acquire, rte_memory_order_acquire);
 
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
-	__atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->pend_cmd_count, 1, rte_memory_order_relaxed);
 
 	return !ret;
 }
@@ -472,13 +472,13 @@  struct iavf_cmd_info {
 _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 {
 	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
-	int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
-			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+	int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+			rte_memory_order_acquire, rte_memory_order_acquire);
 
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
-	__atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->pend_cmd_count, 2, rte_memory_order_relaxed);
 
 	return !ret;
 }
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 0a5246d..d1d4e9f 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2025,7 +2025,7 @@  struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 
 		/* This barrier is to order loads of different words in the descriptor */
-		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+		rte_atomic_thread_fence(rte_memory_order_acquire);
 
 		/* Compute how many contiguous DD bits were set */
 		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
@@ -2152,7 +2152,7 @@  struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 		}
 
 		/* This barrier is to order loads of different words in the descriptor */
-		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+		rte_atomic_thread_fence(rte_memory_order_acquire);
 
 		/* Compute how many contiguous DD bits were set */
 		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..20b656e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -273,7 +273,7 @@ 
 		descs[0] =  vld1q_u64((uint64_t *)(rxdp));
 
 		/* Use acquire fence to order loads of descriptor qwords */
-		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+		rte_atomic_thread_fence(rte_memory_order_acquire);
 		/* A.2 reload qword0 to make it ordered after qword1 load */
 		descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
 		descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1111d30..6d5969f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -41,7 +41,7 @@  struct iavf_event_element {
 };
 
 struct iavf_event_handler {
-	uint32_t ndev;
+	RTE_ATOMIC(uint32_t) ndev;
 	rte_thread_t tid;
 	int fd[2];
 	pthread_mutex_t lock;
@@ -129,7 +129,7 @@  struct iavf_event_handler {
 {
 	struct iavf_event_handler *handler = &event_handler;
 
-	if (__atomic_fetch_add(&handler->ndev, 1, __ATOMIC_RELAXED) + 1 != 1)
+	if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
 		return 0;
 #if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
 	int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
@@ -137,7 +137,7 @@  struct iavf_event_handler {
 	int err = pipe(handler->fd);
 #endif
 	if (err != 0) {
-		__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
 		return -1;
 	}
 
@@ -146,7 +146,7 @@  struct iavf_event_handler {
 
 	if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
 				iavf_dev_event_handle, NULL)) {
-		__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
 		return -1;
 	}
 
@@ -158,7 +158,7 @@  struct iavf_event_handler {
 {
 	struct iavf_event_handler *handler = &event_handler;
 
-	if (__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED) - 1 != 0)
+	if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
 		return;
 
 	int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
@@ -574,8 +574,8 @@  struct iavf_event_handler {
 				/* read message and it's expected one */
 				if (msg_opc == vf->pend_cmd) {
 					uint32_t cmd_count =
-					__atomic_fetch_sub(&vf->pend_cmd_count,
-							1, __ATOMIC_RELAXED) - 1;
+					rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
+							1, rte_memory_order_relaxed) - 1;
 					if (cmd_count == 0)
 						_notify_cmd(vf, msg_ret);
 				} else {