@@ -31,20 +31,12 @@ static inline void rte_pause(void)
/* Put processor into low power WFE(Wait For Event) state. */
#define __WFE() { asm volatile("wfe" : : : "memory"); }
-static __rte_always_inline void
-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
- int memorder)
-{
- uint16_t value;
-
- assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
- /*
- * Atomic exclusive load from addr, it returns the 16-bit content of
- * *addr while making it 'monitored',when it is written by someone
- * else, the 'monitored' state is cleared and a event is generated
- * implicitly to exit WFE.
- */
+/*
+ * Atomic exclusive load from addr. It returns the 16-bit content of
+ * *addr while making it 'monitored'; when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
#define __LOAD_EXC_16(src, dst, memorder) { \
if (memorder == __ATOMIC_RELAXED) { \
asm volatile("ldxrh %w[tmp], [%x[addr]]" \
@@ -58,6 +50,52 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
: "memory"); \
} }
+/*
+ * Atomic exclusive load from addr. It returns the 32-bit content of
+ * *addr while making it 'monitored'; when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_32(src, dst, memorder) { \
+ if (memorder == __ATOMIC_RELAXED) { \
+ asm volatile("ldxr %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } else { \
+ asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } }
+
+/*
+ * Atomic exclusive load from addr. It returns the 64-bit content of
+ * *addr while making it 'monitored'; when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_64(src, dst, memorder) { \
+ if (memorder == __ATOMIC_RELAXED) { \
+ asm volatile("ldxr %x[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } else { \
+ asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } }
+
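+/*
+ * The wait loops below all follow the same SEVL/WFE idiom: the exclusive
+ * load arms the monitor on the address, SEVL sets a pending local event so
+ * the first WFE falls through immediately, and each later WFE sleeps until
+ * a write to the monitored location (or another event) wakes the core to
+ * reload and re-check.
+ */
+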
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+ int memorder)
+{
+ uint16_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
__LOAD_EXC_16(addr, value, memorder)
if (value != expected) {
__SEVL()
@@ -66,7 +104,6 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
__LOAD_EXC_16(addr, value, memorder)
} while (value != expected);
}
-#undef __LOAD_EXC_16
}
static __rte_always_inline void
@@ -77,25 +114,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
- /*
- * Atomic exclusive load from addr, it returns the 32-bit content of
- * *addr while making it 'monitored',when it is written by someone
- * else, the 'monitored' state is cleared and a event is generated
- * implicitly to exit WFE.
- */
-#define __LOAD_EXC_32(src, dst, memorder) { \
- if (memorder == __ATOMIC_RELAXED) { \
- asm volatile("ldxr %w[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } else { \
- asm volatile("ldaxr %w[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } }
-
__LOAD_EXC_32(addr, value, memorder)
if (value != expected) {
__SEVL()
@@ -104,7 +122,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
__LOAD_EXC_32(addr, value, memorder)
} while (value != expected);
}
-#undef __LOAD_EXC_32
}
static __rte_always_inline void
@@ -115,25 +132,6 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
- /*
- * Atomic exclusive load from addr, it returns the 64-bit content of
- * *addr while making it 'monitored',when it is written by someone
- * else, the 'monitored' state is cleared and a event is generated
- * implicitly to exit WFE.
- */
-#define __LOAD_EXC_64(src, dst, memorder) { \
- if (memorder == __ATOMIC_RELAXED) { \
- asm volatile("ldxr %x[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } else { \
- asm volatile("ldaxr %x[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } }
-
__LOAD_EXC_64(addr, value, memorder)
if (value != expected) {
__SEVL()
@@ -143,6 +141,171 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
} while (value != expected);
}
}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_16(volatile uint16_t *addr, uint16_t mask,
+ uint16_t expected, int memorder)
+{
+ uint16_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_16(addr, value, memorder)
+ if ((value & mask) != expected) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_16(addr, value, memorder)
+ } while ((value & mask) != expected);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_32(volatile uint32_t *addr, uint32_t mask,
+ uint32_t expected, int memorder)
+{
+ uint32_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_32(addr, value, memorder)
+ if ((value & mask) != expected) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_32(addr, value, memorder)
+ } while ((value & mask) != expected);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_64(volatile uint64_t *addr, uint64_t mask,
+ uint64_t expected, int memorder)
+{
+ uint64_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_64(addr, value, memorder)
+ if ((value & mask) != expected) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_64(addr, value, memorder)
+ } while ((value & mask) != expected);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_16(volatile uint16_t *addr, uint16_t original,
+ int memorder)
+{
+ uint16_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_16(addr, value, memorder)
+ if (value == original) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_16(addr, value, memorder)
+ } while (value == original);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_32(volatile uint32_t *addr, uint32_t original,
+ int memorder)
+{
+ uint32_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_32(addr, value, memorder)
+ if (value == original) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_32(addr, value, memorder)
+ } while (value == original);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_64(volatile uint64_t *addr, uint64_t original,
+ int memorder)
+{
+ uint64_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_64(addr, value, memorder)
+ if (value == original) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_64(addr, value, memorder)
+ } while (value == original);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_16(volatile uint16_t *addr, uint16_t mask,
+ uint16_t original, int memorder)
+{
+ uint16_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_16(addr, value, memorder)
+ if ((value & mask) == original) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_16(addr, value, memorder)
+ } while ((value & mask) == original);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_32(volatile uint32_t *addr, uint32_t mask,
+ uint32_t original, int memorder)
+{
+ uint32_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_32(addr, value, memorder)
+ if ((value & mask) == original) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_32(addr, value, memorder)
+ } while ((value & mask) == original);
+ }
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_64(volatile uint64_t *addr, uint64_t mask,
+ uint64_t original, int memorder)
+{
+ uint64_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __LOAD_EXC_64(addr, value, memorder)
+ if ((value & mask) == original) {
+ __SEVL()
+ do {
+ __WFE()
+ __LOAD_EXC_64(addr, value, memorder)
+ } while ((value & mask) == original);
+ }
+}
+
+#undef __LOAD_EXC_16
+#undef __LOAD_EXC_32
#undef __LOAD_EXC_64
#undef __SEVL
@@ -81,6 +81,222 @@ static __rte_always_inline void
rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
int memorder);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the bits of *addr selected by mask to become equal to a 16-bit
+ * expected value, using the memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param mask
+ *  A mask selecting the bits of *addr to compare.
+ * @param expected
+ *  A 16-bit expected value of the masked bits in the memory location.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
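+ *
+ * An illustrative (hypothetical) use: wait for the low byte of a device
+ * handshake word to be set to 1 by another agent, ignoring the upper byte.
+ *
+ * @code
+ * volatile uint16_t handshake; // hypothetical status word, updated elsewhere
+ * // Blocks until (handshake & 0x00ff) == 0x0001.
+ * rte_wait_until_part_equal_16(&handshake, 0x00ff, 0x0001, __ATOMIC_ACQUIRE);
+ * @endcode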
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_equal_16(volatile uint16_t *addr, uint16_t mask,
+ uint16_t expected, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the bits of *addr selected by mask to become equal to a 32-bit
+ * expected value, using the memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param mask
+ *  A mask selecting the bits of *addr to compare.
+ * @param expected
+ *  A 32-bit expected value of the masked bits in the memory location.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_equal_32(volatile uint32_t *addr, uint32_t mask,
+ uint32_t expected, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the bits of *addr selected by mask to become equal to a 64-bit
+ * expected value, using the memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param mask
+ *  A mask selecting the bits of *addr to compare.
+ * @param expected
+ *  A 64-bit expected value of the masked bits in the memory location.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_equal_64(volatile uint64_t *addr, uint64_t mask,
+ uint64_t expected, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for *addr to become different from a 16-bit original value, using the
+ * memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param original
+ *  The 16-bit value the memory location is expected to change from.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
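+ *
+ * An illustrative (hypothetical) use: wait for a sequence counter to be
+ * bumped away from the value last observed.
+ *
+ * @code
+ * volatile uint16_t seq;  // hypothetical counter, written by another agent
+ * uint16_t seen = seq;
+ * // Returns once any value different from 'seen' is stored to seq.
+ * rte_wait_until_unequal_16(&seq, seen, __ATOMIC_RELAXED);
+ * @endcode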
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_unequal_16(volatile uint16_t *addr, uint16_t original,
+ int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for *addr to become different from a 32-bit original value, using the
+ * memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param original
+ *  The 32-bit value the memory location is expected to change from.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_unequal_32(volatile uint32_t *addr, uint32_t original,
+ int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for *addr to become different from a 64-bit original value, using the
+ * memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param original
+ *  The 64-bit value the memory location is expected to change from.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_unequal_64(volatile uint64_t *addr, uint64_t original,
+ int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the bits of *addr selected by mask to become different from a
+ * 16-bit original value, using the memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param mask
+ *  A mask selecting the bits of *addr to compare.
+ * @param original
+ *  The 16-bit value the masked bits are expected to change from.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
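+ *
+ * An illustrative (hypothetical) use: spin while a BUSY flag is still set,
+ * i.e. until the masked bit stops matching the busy pattern.
+ *
+ * @code
+ * volatile uint16_t status;  // hypothetical status word
+ * uint16_t busy = 0x0001;    // hypothetical BUSY flag bit
+ * // Returns once (status & busy) != busy, i.e. the BUSY bit is cleared.
+ * rte_wait_until_part_unequal_16(&status, busy, busy, __ATOMIC_ACQUIRE);
+ * @endcode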
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_unequal_16(volatile uint16_t *addr, uint16_t mask,
+ uint16_t original, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the bits of *addr selected by mask to become different from a
+ * 32-bit original value, using the memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param mask
+ *  A mask selecting the bits of *addr to compare.
+ * @param original
+ *  The 32-bit value the masked bits are expected to change from.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_unequal_32(volatile uint32_t *addr, uint32_t mask,
+ uint32_t original, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the bits of *addr selected by mask to become different from a
+ * 64-bit original value, using the memory ordering model given by memorder.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param mask
+ *  A mask selecting the bits of *addr to compare.
+ * @param original
+ *  The 64-bit value the masked bits are expected to change from.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_unequal_64(volatile uint64_t *addr, uint64_t mask,
+ uint64_t original, int memorder);
+
#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
static __rte_always_inline void
rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
@@ -111,6 +327,99 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
while (__atomic_load_n(addr, memorder) != expected)
rte_pause();
}
+
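+/*
+ * Generic fallbacks: with no architecture-specific wait primitive defined,
+ * these helpers simply poll the location, calling rte_pause() between loads.
+ */
+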
+static __rte_always_inline void
+rte_wait_until_part_equal_16(volatile uint16_t *addr, uint16_t mask,
+ uint16_t expected, int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while ((__atomic_load_n(addr, memorder) & mask) != expected)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_32(volatile uint32_t *addr, uint32_t mask,
+ uint32_t expected, int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while ((__atomic_load_n(addr, memorder) & mask) != expected)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_64(volatile uint64_t *addr, uint64_t mask,
+ uint64_t expected, int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while ((__atomic_load_n(addr, memorder) & mask) != expected)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_16(volatile uint16_t *addr, uint16_t original,
+ int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while (__atomic_load_n(addr, memorder) == original)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_32(volatile uint32_t *addr, uint32_t original,
+ int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while (__atomic_load_n(addr, memorder) == original)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_64(volatile uint64_t *addr, uint64_t original,
+ int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while (__atomic_load_n(addr, memorder) == original)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_16(volatile uint16_t *addr, uint16_t mask,
+ uint16_t original, int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while ((__atomic_load_n(addr, memorder) & mask) == original)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_32(volatile uint32_t *addr, uint32_t mask,
+ uint32_t original, int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while ((__atomic_load_n(addr, memorder) & mask) == original)
+ rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_64(volatile uint64_t *addr, uint64_t mask,
+ uint64_t original, int memorder)
+{
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ while ((__atomic_load_n(addr, memorder) & mask) == original)
+ rte_pause();
+}
#endif
#endif /* _RTE_PAUSE_H_ */