[v9,1/5] eal: add a new generic helper for wait scheme

Message ID 20211101060007.2632418-2-feifei.wang2@arm.com (mailing list archive)
State Accepted, archived
Delegated to: David Marchand
Series: add new helper for wait scheme

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Feifei Wang Nov. 1, 2021, 6 a.m. UTC
Add a new generic helper, implemented as a macro, for the wait scheme.

Furthermore, to prevent the following compilation warning on Arm:
----------------------------------------------
'warning: implicit declaration of function ...'
----------------------------------------------
delete the 'undef' constructions for '__LOAD_EXC_xx', '__SEVL' and '__WFE',
and prefix these macros with '__RTE_ARM' to fix their namespace.
This is needed because the original macros were undefined at the end of
the file; if the new macro 'RTE_WAIT_UNTIL_MASKED' called them from other
files, they would be seen as 'not defined'.

Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
---
 lib/eal/arm/include/rte_pause_64.h  | 202 +++++++++++++++++-----------
 lib/eal/include/generic/rte_pause.h |  29 ++++
 2 files changed, 155 insertions(+), 76 deletions(-)
  
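For illustration, a minimal sketch (not part of this patch) of how a caller
could use the new helper; the status word and READY bit below are hypothetical:

#include <stdint.h>
#include <rte_pause.h>

#define DEV_STATUS_READY 0x4u             /* hypothetical status bit */

static volatile uint32_t dev_status;      /* hypothetical device status word */

static void
wait_for_device_ready(void)
{
	/* Block until the READY bit is set; only the bits selected by the
	 * mask take part in the comparison. On arm64 this maps to the
	 * LDXR/WFE path, elsewhere to a rte_pause() polling loop.
	 */
	RTE_WAIT_UNTIL_MASKED(&dev_status, DEV_STATUS_READY, ==,
			DEV_STATUS_READY, __ATOMIC_ACQUIRE);
}
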

Patch

diff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h
index e87d10b8cc..0ca03c6130 100644
--- a/lib/eal/arm/include/rte_pause_64.h
+++ b/lib/eal/arm/include/rte_pause_64.h
@@ -26,47 +26,120 @@  static inline void rte_pause(void)
 #ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
 
 /* Send an event to quit WFE. */
-#define __SEVL() { asm volatile("sevl" : : : "memory"); }
+#define __RTE_ARM_SEVL() { asm volatile("sevl" : : : "memory"); }
 
 /* Put processor into low power WFE(Wait For Event) state. */
-#define __WFE() { asm volatile("wfe" : : : "memory"); }
+#define __RTE_ARM_WFE() { asm volatile("wfe" : : : "memory"); }
 
-static __rte_always_inline void
-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
-		int memorder)
-{
-	uint16_t value;
-
-	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-	/*
-	 * Atomic exclusive load from addr, it returns the 16-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
-#define __LOAD_EXC_16(src, dst, memorder) {               \
+/*
+ * Atomic exclusive load from addr, it returns the 16-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_16(src, dst, memorder) {       \
 	if (memorder == __ATOMIC_RELAXED) {               \
 		asm volatile("ldxrh %w[tmp], [%x[addr]]"  \
 			: [tmp] "=&r" (dst)               \
-			: [addr] "r"(src)                 \
+			: [addr] "r" (src)                \
 			: "memory");                      \
 	} else {                                          \
 		asm volatile("ldaxrh %w[tmp], [%x[addr]]" \
 			: [tmp] "=&r" (dst)               \
-			: [addr] "r"(src)                 \
+			: [addr] "r" (src)                \
 			: "memory");                      \
 	} }
 
-	__LOAD_EXC_16(addr, value, memorder)
+/*
+ * Atomic exclusive load from addr, it returns the 32-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_32(src, dst, memorder) {      \
+	if (memorder == __ATOMIC_RELAXED) {              \
+		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r" (src)               \
+			: "memory");                     \
+	} else {                                         \
+		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r" (src)               \
+			: "memory");                     \
+	} }
+
+/*
+ * Atomic exclusive load from addr, it returns the 64-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_64(src, dst, memorder) {      \
+	if (memorder == __ATOMIC_RELAXED) {              \
+		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r" (src)               \
+			: "memory");                     \
+	} else {                                         \
+		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r" (src)               \
+			: "memory");                     \
+	} }
+
+/*
+ * Atomic exclusive load from addr, it returns the 128-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_128(src, dst, memorder) {                    \
+	volatile rte_int128_t *dst_128 = (volatile rte_int128_t *)&dst; \
+	if (memorder == __ATOMIC_RELAXED) {                             \
+		asm volatile("ldxp %x[tmp0], %x[tmp1], [%x[addr]]"      \
+			: [tmp0] "=&r" (dst_128->val[0]),               \
+			  [tmp1] "=&r" (dst_128->val[1])                \
+			: [addr] "r" (src)                              \
+			: "memory");                                    \
+	} else {                                                        \
+		asm volatile("ldaxp %x[tmp0], %x[tmp1], [%x[addr]]"     \
+			: [tmp0] "=&r" (dst_128->val[0]),               \
+			  [tmp1] "=&r" (dst_128->val[1])                \
+			: [addr] "r" (src)                              \
+			: "memory");                                    \
+	} }                                                             \
+
+#define __RTE_ARM_LOAD_EXC(src, dst, memorder, size) {          \
+	RTE_BUILD_BUG_ON(size != 16 && size != 32 && size != 64 \
+		&& size != 128);                                \
+	if (size == 16)                                         \
+		__RTE_ARM_LOAD_EXC_16(src, dst, memorder)       \
+	else if (size == 32)                                    \
+		__RTE_ARM_LOAD_EXC_32(src, dst, memorder)       \
+	else if (size == 64)                                    \
+		__RTE_ARM_LOAD_EXC_64(src, dst, memorder)       \
+	else if (size == 128)                                   \
+		__RTE_ARM_LOAD_EXC_128(src, dst, memorder)      \
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+		int memorder)
+{
+	uint16_t value;
+
+	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&
+		memorder != __ATOMIC_RELAXED);
+
+	__RTE_ARM_LOAD_EXC_16(addr, value, memorder)
 	if (value != expected) {
-		__SEVL()
+		__RTE_ARM_SEVL()
 		do {
-			__WFE()
-			__LOAD_EXC_16(addr, value, memorder)
+			__RTE_ARM_WFE()
+			__RTE_ARM_LOAD_EXC_16(addr, value, memorder)
 		} while (value != expected);
 	}
-#undef __LOAD_EXC_16
 }
 
 static __rte_always_inline void
@@ -75,36 +148,17 @@  rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
 {
 	uint32_t value;
 
-	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-	/*
-	 * Atomic exclusive load from addr, it returns the 32-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
-#define __LOAD_EXC_32(src, dst, memorder) {              \
-	if (memorder == __ATOMIC_RELAXED) {              \
-		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} else {                                         \
-		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} }
+	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&
+		memorder != __ATOMIC_RELAXED);
 
-	__LOAD_EXC_32(addr, value, memorder)
+	__RTE_ARM_LOAD_EXC_32(addr, value, memorder)
 	if (value != expected) {
-		__SEVL()
+		__RTE_ARM_SEVL()
 		do {
-			__WFE()
-			__LOAD_EXC_32(addr, value, memorder)
+			__RTE_ARM_WFE()
+			__RTE_ARM_LOAD_EXC_32(addr, value, memorder)
 		} while (value != expected);
 	}
-#undef __LOAD_EXC_32
 }
 
 static __rte_always_inline void
@@ -113,40 +167,36 @@  rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 {
 	uint64_t value;
 
-	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-	/*
-	 * Atomic exclusive load from addr, it returns the 64-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
-#define __LOAD_EXC_64(src, dst, memorder) {              \
-	if (memorder == __ATOMIC_RELAXED) {              \
-		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} else {                                         \
-		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} }
+	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&
+		memorder != __ATOMIC_RELAXED);
 
-	__LOAD_EXC_64(addr, value, memorder)
+	__RTE_ARM_LOAD_EXC_64(addr, value, memorder)
 	if (value != expected) {
-		__SEVL()
+		__RTE_ARM_SEVL()
 		do {
-			__WFE()
-			__LOAD_EXC_64(addr, value, memorder)
+			__RTE_ARM_WFE()
+			__RTE_ARM_LOAD_EXC_64(addr, value, memorder)
 		} while (value != expected);
 	}
 }
-#undef __LOAD_EXC_64
 
-#undef __SEVL
-#undef __WFE
+#define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder)       \
+do {                                                                      \
+	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));                \
+	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&                  \
+		memorder != __ATOMIC_RELAXED);                            \
+	const uint32_t size = sizeof(*(addr)) << 3;                       \
+	typeof(*(addr)) expected_value = (expected);                      \
+	typeof(*(addr)) value;                                            \
+	__RTE_ARM_LOAD_EXC((addr), value, memorder, size)                 \
+	if (!((value & (mask)) cond expected_value)) {                    \
+		__RTE_ARM_SEVL()                                          \
+		do {                                                      \
+			__RTE_ARM_WFE()                                   \
+			__RTE_ARM_LOAD_EXC((addr), value, memorder, size) \
+		} while (!((value & (mask)) cond expected_value));        \
+	}                                                                 \
+} while (0)
 
 #endif
 
diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
index 668ee4a184..5894a0ad94 100644
--- a/lib/eal/include/generic/rte_pause.h
+++ b/lib/eal/include/generic/rte_pause.h
@@ -111,6 +111,35 @@  rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 	while (__atomic_load_n(addr, memorder) != expected)
 		rte_pause();
 }
+
+/*
+ * Wait until *addr & mask makes the condition true. With a relaxed memory
+ * ordering model, the loads around this helper can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of value bits in interest.
+ * @param cond
+ *  A symbol representing the condition.
+ * @param expected
+ *  An expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+#define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder)  \
+do {                                                                 \
+	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));           \
+	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&             \
+		memorder != __ATOMIC_RELAXED);                       \
+	typeof(*(addr)) expected_value = (expected);                 \
+	while (!((__atomic_load_n((addr), (memorder)) & (mask)) cond \
+		expected_value))                                     \
+		rte_pause();                                         \
+} while (0)
 #endif
 
 #endif /* _RTE_PAUSE_H_ */
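
For reference, on targets without the arm64 WFE path the helper reduces to a
polling loop around __atomic_load_n(). A hand-written equivalent of the call
shown above (again with a hypothetical status word) would look roughly like:

#include <stdint.h>
#include <rte_pause.h>

static volatile uint32_t dev_status;	/* hypothetical status word */

/* Roughly what RTE_WAIT_UNTIL_MASKED(&dev_status, 0x4u, ==, 0x4u,
 * __ATOMIC_ACQUIRE) expands to on the generic path.
 */
static void
wait_for_bit_generic(void)
{
	uint32_t expected_value = 0x4u;

	while (!((__atomic_load_n(&dev_status, __ATOMIC_ACQUIRE) & 0x4u) ==
			expected_value))
		rte_pause();
}

Note that 'cond' is pasted into the expression as-is, so it must be a plain
comparison operator such as '==' or '!='.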