@@ -84,8 +84,7 @@ rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
* to spin on me->locked until the previous lock holder resets
* the me->locked using mcslock_unlock().
*/
- while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE))
- rte_pause();
+ rte_wait_event_32(&me->locked, UINT32_MAX, 0, !=, __ATOMIC_ACQUIRE);
}
/**
@@ -117,8 +116,13 @@ rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
/* More nodes added to the queue by other CPUs.
* Wait until the next pointer is set.
*/
- while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)
- rte_pause();
+#ifdef RTE_ARCH_32
+ rte_wait_event_32((uint32_t *)&me->next, UINT32_MAX, 0, ==,
+ __ATOMIC_RELAXED);
+#else
+ rte_wait_event_64((uint64_t *)&me->next, UINT64_MAX, 0, ==,
+ __ATOMIC_RELAXED);
+#endif
}
/* Pass lock to next waiter. */