[v5,1/5] eal: add new definitions for wait scheme

Message ID 20211026080204.149550-2-feifei.wang2@arm.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Series: add new definitions for wait scheme

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Feifei Wang Oct. 26, 2021, 8:02 a.m. UTC
  Introduce macros as a generic interface for address monitoring.
For the different sizes, encapsulate '__LOAD_EXC_16', '__LOAD_EXC_32'
and '__LOAD_EXC_64' into a new macro '__LOAD_EXC'.

Furthermore, to prevent a compilation warning on Arm:
----------------------------------------------
'warning: implicit declaration of function ...'
----------------------------------------------
delete the 'undef' constructs for '__LOAD_EXC_xx', '__SEVL' and '__WFE'.

This is because the original macros were undefined at the end of the
file; if the new macro 'rte_wait_event' expanded them in other files,
they would be treated as 'not defined'.

Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 lib/eal/arm/include/rte_pause_64.h  | 135 ++++++++++++++++------------
 lib/eal/include/generic/rte_pause.h |  27 ++++++
 2 files changed, 105 insertions(+), 57 deletions(-)
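
The deleted '#undef's are the key detail: function-like macros expand
at the use site, not where they are written. A minimal sketch of the
failure mode the commit message describes (illustrative only, not code
from the patch):

	/* rte_pause_64.h before this patch, in effect: */
	#define __WFE() { asm volatile("wfe" : : : "memory"); }
	/* ... inline functions in this header use __WFE() ... */
	#undef __WFE	/* removed by this patch */

	/*
	 * A macro such as 'rte_wait_event' defined in this header is
	 * expanded at its caller's location. In another file that
	 * expansion happens after the '#undef' above, so __WFE() is no
	 * longer a macro there; the compiler parses it as a call to an
	 * undeclared function and emits:
	 *     warning: implicit declaration of function '__WFE'
	 */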
  

Comments

Feifei Wang Oct. 26, 2021, 8:08 a.m. UTC | #1
> -----Original Message-----
> From: Feifei Wang <feifei.wang2@arm.com>
> Sent: Tuesday, October 26, 2021 4:02 PM
> To: Ruifeng Wang <Ruifeng.Wang@arm.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; Feifei Wang
> <Feifei.Wang2@arm.com>
> Subject: [PATCH v5 1/5] eal: add new definitions for wait scheme
> 
> [... full v5 patch quoted; snipped to the rte_wait_event hunk ...]
> +#define rte_wait_event(addr, mask, cond, expected, memorder)      \
> +do {                                                              \
> +	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));        \
> +	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&          \
> +				memorder != __ATOMIC_RELAXED);    \
> +	uint32_t size = sizeof(*(addr)) << 3;                     \
> +	typeof(*(addr)) value = 0;                                \
> +	__LOAD_EXC((addr), value, memorder, size)                 \
> +	if ((value & (mask)) cond expected) {                     \
> +		__SEVL()                                          \
> +		do {                                              \
> +			__WFE()                                   \
> +			__LOAD_EXC((addr), value, memorder, size) \
> +		} while ((value & (mask)) cond expected);         \

Hi, Konstantin

For this patch, I cannot add '()' around 'expected' because the patch style check reports:
-------------------------------------------------------------------------------------------------------------------
WARNING:SPACING: space prohibited between function name and open parenthesis '('
#203: FILE: lib/eal/arm/include/rte_pause_64.h:163:
+       if ((value & (mask)) cond (expected)) {                   \

WARNING:SPACING: space prohibited between function name and open parenthesis '('
#208: FILE: lib/eal/arm/include/rte_pause_64.h:168:
+               } while ((value & (mask)) cond (expected));       \

WARNING:SPACING: space prohibited between function name and open parenthesis '('
#246: FILE: lib/eal/include/generic/rte_pause.h:138:
+       while ((__atomic_load_n((addr), (memorder)) & mask) cond (expected)) \

total: 1 errors, 3 warnings, 211 lines checked
-------------------------------------------------------------------------------------------------------------------
So I just added '()' around 'addr' and 'mask'.
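
For background, the parentheses matter because macro arguments are
substituted textually; a classic illustration (made-up names, not code
from the patch):

	#define SCALE_BAD(x)  (x * 8)
	#define SCALE_GOOD(x) ((x) * 8)

	/* SCALE_BAD(n + 1)  expands to (n + 1 * 8) == n + 8  -- wrong */
	/* SCALE_GOOD(n + 1) expands to ((n + 1) * 8)         -- right */

The review request for '(expected)' guards against the same hazard.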
  
Ananyev, Konstantin Oct. 26, 2021, 9:46 a.m. UTC | #2
> > [... full v5 patch quoted; snipped to the rte_wait_event hunk ...]
> > +#define rte_wait_event(addr, mask, cond, expected, memorder)      \
> > +do {                                                              \
> > +	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));        \
> > +	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&          \
> > +				memorder != __ATOMIC_RELAXED);    \
> > +	uint32_t size = sizeof(*(addr)) << 3;                     \
> > +	typeof(*(addr)) value = 0;                                \
> > +	__LOAD_EXC((addr), value, memorder, size)                 \
> > +	if ((value & (mask)) cond expected) {                     \
> > +		__SEVL()                                          \
> > +		do {                                              \
> > +			__WFE()                                   \
> > +			__LOAD_EXC((addr), value, memorder, size) \
> > +		} while ((value & (mask)) cond expected);         \
> 
> Hi, Konstantin
> 
> > For this patch, I cannot add '()' around 'expected' because the patch style check reports:
> -------------------------------------------------------------------------------------------------------------------
> WARNING:SPACING: space prohibited between function name and open parenthesis '('
> #203: FILE: lib/eal/arm/include/rte_pause_64.h:163:
> +       if ((value & (mask)) cond (expected)) {                   \
> 
> WARNING:SPACING: space prohibited between function name and open parenthesis '('
> #208: FILE: lib/eal/arm/include/rte_pause_64.h:168:
> +               } while ((value & (mask)) cond (expected));       \
> 
> WARNING:SPACING: space prohibited between function name and open parenthesis '('
> #246: FILE: lib/eal/include/generic/rte_pause.h:138:
> +       while ((__atomic_load_n((addr), (memorder)) & mask) cond (expected)) \
> 
> total: 1 errors, 3 warnings, 211 lines checked

These are just checkpatch warnings.
Personally, I'd rather live with checkpatch complaints than with a problematic macro.

> -------------------------------------------------------------------------------------------------------------------
> > So I just added '()' around 'addr' and 'mask'.
  
Ananyev, Konstantin Oct. 26, 2021, 9:59 a.m. UTC | #3
> > > [... full v5 patch quoted; snipped to the rte_wait_event hunk ...]
> > > +#define rte_wait_event(addr, mask, cond, expected, memorder)      \
> > > +do {                                                              \
> > > +	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));        \
> > > +	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&          \
> > > +				memorder != __ATOMIC_RELAXED);    \
> > > +	uint32_t size = sizeof(*(addr)) << 3;                     \
> > > +	typeof(*(addr)) value = 0;                                \
> > > +	__LOAD_EXC((addr), value, memorder, size)                 \
> > > +	if ((value & (mask)) cond expected) {                     \
> > > +		__SEVL()                                          \
> > > +		do {                                              \
> > > +			__WFE()                                   \
> > > +			__LOAD_EXC((addr), value, memorder, size) \
> > > +		} while ((value & (mask)) cond expected);         \
> >
> > Hi, Konstantin
> >
> > For this patch, I cannot add '()' around 'expected' because the patch style check reports:
> > -------------------------------------------------------------------------------------------------------------------
> > WARNING:SPACING: space prohibited between function name and open parenthesis '('
> > #203: FILE: lib/eal/arm/include/rte_pause_64.h:163:
> > +       if ((value & (mask)) cond (expected)) {                   \
> >
> > WARNING:SPACING: space prohibited between function name and open parenthesis '('
> > #208: FILE: lib/eal/arm/include/rte_pause_64.h:168:
> > +               } while ((value & (mask)) cond (expected));       \
> >
> > WARNING:SPACING: space prohibited between function name and open parenthesis '('
> > #246: FILE: lib/eal/include/generic/rte_pause.h:138:
> > +       while ((__atomic_load_n((addr), (memorder)) & mask) cond (expected)) \
> >
> > total: 1 errors, 3 warnings, 211 lines checked
> 
> These are just checkpatch warnings.
> Personally, I'd rather live with checkpatch complaints than with a problematic macro.
> 
> > -------------------------------------------------------------------------------------------------------------------
> > So I just added '()' around 'addr' and 'mask'.

I wonder whether we can overcome it by:
typeof(*(addr)) expected_value = (expected); \
...
if ((value & (mask)) cond expected_value) \
  ...
?
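
Applied to the generic rte_pause.h variant, the suggestion would look
roughly like this (a sketch only -- 'expected_value' is the proposed
local, everything else follows the patch below):

	#define rte_wait_event(addr, mask, cond, expected, memorder)     \
	do {                                                              \
		RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));        \
		RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&          \
					memorder != __ATOMIC_RELAXED);    \
		typeof(*(addr)) expected_value = (expected);              \
		while ((__atomic_load_n((addr), (memorder)) & (mask))     \
				cond expected_value)                      \
			rte_pause();                                      \
	} while (0)

Because 'expected' is evaluated once into a typed local, no 'cond
(expected)' sequence appears in the source, so checkpatch stays quiet.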
  
Feifei Wang Oct. 27, 2021, 6:56 a.m. UTC | #4
> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Tuesday, October 26, 2021 5:59 PM
> To: Feifei Wang <Feifei.Wang2@arm.com>
> Cc: dev@dpdk.org; nd <nd@arm.com>; nd <nd@arm.com>
> Subject: RE: [PATCH v5 1/5] eal: add new definitions for wait scheme
> 
> 
> > > > [... full v5 patch quoted; snipped to the rte_wait_event hunk ...]
> > > > +#define rte_wait_event(addr, mask, cond, expected, memorder)      \
> > > > +do {                                                              \
> > > > +	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));        \
> > > > +	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&          \
> > > > +				memorder != __ATOMIC_RELAXED);    \
> > > > +	uint32_t size = sizeof(*(addr)) << 3;                     \
> > > > +	typeof(*(addr)) value = 0;                                \
> > > > +	__LOAD_EXC((addr), value, memorder, size)                 \
> > > > +	if ((value & (mask)) cond expected) {                     \
> > > > +		__SEVL()                                          \
> > > > +		do {                                              \
> > > > +			__WFE()                                   \
> > > > +			__LOAD_EXC((addr), value, memorder, size) \
> > > > +		} while ((value & (mask)) cond expected);         \
> > >
> > > Hi, Konstantin
> > >
> > > For this patch, I cannot add '()' around 'expected' because the
> > > patch style check reports:
> > > -------------------------------------------------------------------------------------------------------------------
> > > WARNING:SPACING: space prohibited between function name and open parenthesis '('
> > > #203: FILE: lib/eal/arm/include/rte_pause_64.h:163:
> > > +       if ((value & (mask)) cond (expected)) {                   \
> > >
> > > WARNING:SPACING: space prohibited between function name and open parenthesis '('
> > > #208: FILE: lib/eal/arm/include/rte_pause_64.h:168:
> > > +               } while ((value & (mask)) cond (expected));       \
> > >
> > > WARNING:SPACING: space prohibited between function name and open parenthesis '('
> > > #246: FILE: lib/eal/include/generic/rte_pause.h:138:
> > > +       while ((__atomic_load_n((addr), (memorder)) & mask) cond (expected)) \
> > >
> > > total: 1 errors, 3 warnings, 211 lines checked
> >
> > These are just checkpatch warnings.
> > Personally, I'd rather live with checkpatch complaints than with a
> > problematic macro.
> >
> > > -------------------------------------------------------------------------------------------------------------------
> > > So I just added '()' around 'addr' and 'mask'.
> 
> I wonder whether we can overcome it by:
> typeof(*(addr)) expected_value = (expected); \
> ...
> if ((value & (mask)) cond expected_value) \
>   ...
> ?
That's a good comment; I tried it and no checkpatch warning appears.
Thanks.
  

Patch

diff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h
index e87d10b8cc..1fea0dec63 100644
--- a/lib/eal/arm/include/rte_pause_64.h
+++ b/lib/eal/arm/include/rte_pause_64.h
@@ -31,20 +31,12 @@  static inline void rte_pause(void)
 /* Put processor into low power WFE(Wait For Event) state. */
 #define __WFE() { asm volatile("wfe" : : : "memory"); }
 
-static __rte_always_inline void
-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
-		int memorder)
-{
-	uint16_t value;
-
-	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-	/*
-	 * Atomic exclusive load from addr, it returns the 16-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
+/*
+ * Atomic exclusive load from addr, it returns the 16-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
 #define __LOAD_EXC_16(src, dst, memorder) {               \
 	if (memorder == __ATOMIC_RELAXED) {               \
 		asm volatile("ldxrh %w[tmp], [%x[addr]]"  \
@@ -58,6 +50,62 @@  rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
 			: "memory");                      \
 	} }
 
+/*
+ * Atomic exclusive load from addr, it returns the 32-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_32(src, dst, memorder) {              \
+	if (memorder == __ATOMIC_RELAXED) {              \
+		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} else {                                         \
+		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} }
+
+/*
+ * Atomic exclusive load from addr, it returns the 64-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_64(src, dst, memorder) {              \
+	if (memorder == __ATOMIC_RELAXED) {              \
+		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} else {                                         \
+		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} }
+
+#define __LOAD_EXC(src, dst, memorder, size) {          \
+	assert(size == 16 || size == 32 || size == 64); \
+	if (size == 16)                                 \
+		__LOAD_EXC_16(src, dst, memorder)       \
+	else if (size == 32)                            \
+		__LOAD_EXC_32(src, dst, memorder)       \
+	else if (size == 64)                            \
+		__LOAD_EXC_64(src, dst, memorder)       \
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+		int memorder)
+{
+	uint16_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
 	__LOAD_EXC_16(addr, value, memorder)
 	if (value != expected) {
 		__SEVL()
@@ -66,7 +114,6 @@  rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
 			__LOAD_EXC_16(addr, value, memorder)
 		} while (value != expected);
 	}
-#undef __LOAD_EXC_16
 }
 
 static __rte_always_inline void
@@ -77,25 +124,6 @@  rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
 
 	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
 
-	/*
-	 * Atomic exclusive load from addr, it returns the 32-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
-#define __LOAD_EXC_32(src, dst, memorder) {              \
-	if (memorder == __ATOMIC_RELAXED) {              \
-		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} else {                                         \
-		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} }
-
 	__LOAD_EXC_32(addr, value, memorder)
 	if (value != expected) {
 		__SEVL()
@@ -104,7 +132,6 @@  rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
 			__LOAD_EXC_32(addr, value, memorder)
 		} while (value != expected);
 	}
-#undef __LOAD_EXC_32
 }
 
 static __rte_always_inline void
@@ -115,25 +142,6 @@  rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 
 	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
 
-	/*
-	 * Atomic exclusive load from addr, it returns the 64-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
-#define __LOAD_EXC_64(src, dst, memorder) {              \
-	if (memorder == __ATOMIC_RELAXED) {              \
-		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} else {                                         \
-		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} }
-
 	__LOAD_EXC_64(addr, value, memorder)
 	if (value != expected) {
 		__SEVL()
@@ -143,10 +151,23 @@  rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 		} while (value != expected);
 	}
 }
-#undef __LOAD_EXC_64
 
-#undef __SEVL
-#undef __WFE
+#define rte_wait_event(addr, mask, cond, expected, memorder)      \
+do {                                                              \
+	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));        \
+	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&          \
+				memorder != __ATOMIC_RELAXED);    \
+	uint32_t size = sizeof(*(addr)) << 3;                     \
+	typeof(*(addr)) value = 0;                                \
+	__LOAD_EXC((addr), value, memorder, size)                 \
+	if ((value & (mask)) cond expected) {                     \
+		__SEVL()                                          \
+		do {                                              \
+			__WFE()                                   \
+			__LOAD_EXC((addr), value, memorder, size) \
+		} while ((value & (mask)) cond expected);         \
+	}                                                         \
+} while (0)
 
 #endif
 
diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
index 668ee4a184..e31a006844 100644
--- a/lib/eal/include/generic/rte_pause.h
+++ b/lib/eal/include/generic/rte_pause.h
@@ -111,6 +111,33 @@  rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 	while (__atomic_load_n(addr, memorder) != expected)
 		rte_pause();
 }
+
+/*
+ * Wait until *addr breaks the condition, with a relaxed memory
+ * ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of value bits in interest.
+ * @param cond
+ *  A symbol representing the condition.
+ * @param expected
+ *  An expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+#define rte_wait_event(addr, mask, cond, expected, memorder)                   \
+do {                                                                           \
+	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));                     \
+	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&                       \
+				memorder != __ATOMIC_RELAXED);                 \
+	while ((__atomic_load_n((addr), (memorder)) & (mask)) cond expected)   \
+		rte_pause();                                                   \
+} while (0)
 #endif
 
 #endif /* _RTE_PAUSE_H_ */
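
As a usage illustration (hypothetical caller, not part of the patch),
waiting until another thread sets any bit of a 32-bit flag could be
written as:

	#include <stdint.h>
	#include <rte_pause.h>

	static inline void
	wait_for_flag(volatile uint32_t *flag)
	{
		/* Loops while (*flag & UINT32_MAX) == 0, i.e. returns
		 * once any bit of *flag becomes non-zero. */
		rte_wait_event(flag, UINT32_MAX, ==, 0, __ATOMIC_RELAXED);
	}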