[02/46] net/ixgbe: use rte stdatomic API

Message ID: 1710967892-7046-3-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State: Superseded, archived
Delegated to: Thomas Monjalon
Series: use stdatomic API

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Tyler Retzlaff March 20, 2024, 8:50 p.m. UTC
  Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   |  4 ++--
 3 files changed, 11 insertions(+), 9 deletions(-)
  
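For context, the replacements in this patch follow the same mapping used across the series: the flag is declared with the RTE_ATOMIC() type specifier so it can be passed to the rte_atomic_*_explicit() accessors with rte_memory_order_* arguments, and atomic fences take rte_memory_order values instead of __ATOMIC_* constants. A minimal sketch of that mapping, using an illustrative flag name rather than the driver's own:

#include <rte_atomic.h>
#include <rte_stdatomic.h>

/* A plain bool becomes an RTE_ATOMIC()-qualified object so it can be
 * used with the rte_atomic_*_explicit() accessors (illustrative name). */
static RTE_ATOMIC(bool) flag_running;

static void
atomic_mapping_sketch(void)
{
	/* was: __atomic_clear(&flag_running, __ATOMIC_SEQ_CST) */
	rte_atomic_store_explicit(&flag_running, 0, rte_memory_order_seq_cst);

	/* was: __atomic_load_n(&flag_running, __ATOMIC_SEQ_CST) */
	(void)rte_atomic_load_explicit(&flag_running, rte_memory_order_seq_cst);

	/* was: __atomic_test_and_set(&flag_running, __ATOMIC_SEQ_CST) */
	(void)rte_atomic_exchange_explicit(&flag_running, 1,
	    rte_memory_order_seq_cst);

	/* was: rte_atomic_thread_fence(__ATOMIC_ACQUIRE) */
	rte_atomic_thread_fence(rte_memory_order_acquire);
}
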

Patch

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c61c52b..e63ae1a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@  struct rte_ixgbe_xstats_name_off {
 	}
 
 	/* NOTE: review for potential ordering optimization */
-	__atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+	rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
 	ixgbe_parse_devargs(eth_dev->data->dev_private,
 			    pci_dev->device.devargs);
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@  static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* NOTE: review for potential ordering optimization */
-	__atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+	rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
 	ixgbevf_parse_devargs(eth_dev->data->dev_private,
 			      pci_dev->device.devargs);
 
@@ -4203,7 +4203,7 @@  static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
 
 	/* NOTE: review for potential ordering optimization */
-	while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+	while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
 		msec_delay(1);
 		timeout--;
 
@@ -4240,7 +4240,7 @@  static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
 	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 	/* NOTE: review for potential ordering optimization */
-	__atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+	rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
 	return 0;
 }
 
@@ -4336,7 +4336,8 @@  static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
 			ixgbe_dev_wait_setup_link_complete(dev, 0);
 			/* NOTE: review for potential ordering optimization */
-			if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+			if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+			    rte_memory_order_seq_cst)) {
 				/* To avoid race condition between threads, set
 				 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only
 				 * when there is no link thread running.
@@ -4348,7 +4349,8 @@  static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 					PMD_DRV_LOG(ERR,
 						"Create link thread failed!");
 					/* NOTE: review for potential ordering optimization */
-					__atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+					rte_atomic_store_explicit(&ad->link_thread_running, 0,
+					    rte_memory_order_seq_cst);
 				}
 			} else {
 				PMD_DRV_LOG(ERR,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 22fc3be..8ad841e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@  struct ixgbe_adapter {
 	 */
 	uint8_t pflink_fullchk;
 	uint8_t mac_ctrl_frame_fwd;
-	bool link_thread_running;
+	RTE_ATOMIC(bool) link_thread_running;
 	rte_thread_t link_thread_tid;
 };
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f6c17d4..e7dfd6f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@ 
 		 * Use acquire fence to ensure that status_error which includes
 		 * DD bit is loaded before loading of other descriptor words.
 		 */
-		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+		rte_atomic_thread_fence(rte_memory_order_acquire);
 
 		rxd = *rxdp;
 
@@ -2114,7 +2114,7 @@ 
 		 * Use acquire fence to ensure that status_error which includes
 		 * DD bit is loaded before loading of other descriptor words.
 		 */
-		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+		rte_atomic_thread_fence(rte_memory_order_acquire);
 
 		rxd = *rxdp;
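
The only hunk above that is not a one-to-one rename is the __atomic_test_and_set() replacement: rte_atomic_exchange_explicit() returns the value the flag held before the exchange, so negating the result still admits only the thread that flips the flag from 0 to 1. A minimal sketch of that claim-a-flag pattern, assuming an RTE_ATOMIC(bool) flag as declared in ixgbe_ethdev.h (the helper name is illustrative):

/* Returns true for exactly one caller: the one that observed the flag
 * clear and set it, matching the old __atomic_test_and_set() guard. */
static bool
link_thread_try_claim(RTE_ATOMIC(bool) *running)
{
	return !rte_atomic_exchange_explicit(running, 1,
	    rte_memory_order_seq_cst);
}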