[v9,2/5] eal: add the APIs to wait until equal

Message ID: 1571913748-51735-3-git-send-email-gavin.hu@arm.com (mailing list archive)
State: Superseded, archived
Delegated to: David Marchand
Series: use WFE for aarch64

Checks

Context              Check    Description
ci/checkpatch        success  coding style OK
ci/Intel-compilation success  Compilation OK

Commit Message

Gavin Hu Oct. 24, 2019, 10:42 a.m. UTC
  The rte_wait_until_equal_xx APIs abstract the functionality of
'polling for a memory location to become equal to a given value'.

Add the RTE_ARM_USE_WFE configuration entry for aarch64, disabled
by default. When it is enabled, the above APIs will use the WFE
instruction to save CPU cycles and power.

When calling this API on aarch64 from a VM, WFE may trap into and out of
the hypervisor to release the vCPU, which causes high exit latency. Since
kernel 4.18.20, an adaptive trapping mechanism has been introduced to
balance the latency and workload.

Signed-off-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
Acked-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
---
 config/arm/meson.build                             |   1 +
 config/common_base                                 |   5 +
 .../common/include/arch/arm/rte_pause_64.h         |  70 +++++++
 lib/librte_eal/common/include/generic/rte_pause.h  | 217 +++++++++++++++++++++
 4 files changed, 293 insertions(+)
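
For illustration, a minimal usage sketch of the proposed API (the test
scaffolding around it is hypothetical, not part of the patch): one thread
blocks in rte_wait_until_equal_32() until another thread stores the
expected value.

#include <stdint.h>
#include <pthread.h>
#include <rte_pause.h>

static volatile uint32_t flag; /* memory location being polled */

static void *waiter(void *arg)
{
	(void)arg;
	/* Spins with rte_pause() on the generic path; on aarch64 with
	 * RTE_ARM_USE_WFE enabled it sleeps in WFE instead. */
	rte_wait_until_equal_32(&flag, 1, __ATOMIC_ACQUIRE);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	__atomic_store_n(&flag, 1, __ATOMIC_RELEASE); /* wake the waiter */
	pthread_join(t, NULL);
	return 0;
}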
  

Comments

Ananyev, Konstantin Oct. 24, 2019, 1:52 p.m. UTC | #1
Hi Gavin,

> The rte_wait_until_equal_xx APIs abstract the functionality of
> 'polling for a memory location to become equal to a given value'.
> 
> Add the RTE_ARM_USE_WFE configuration entry for aarch64, disabled
> by default. When it is enabled, the above APIs will use the WFE
> instruction to save CPU cycles and power.
> 
> When calling this API on aarch64 from a VM, WFE may trap into and out of
> the hypervisor to release the vCPU, which causes high exit latency. Since
> kernel 4.18.20, an adaptive trapping mechanism has been introduced to
> balance the latency and workload.
> 
> Signed-off-by: Gavin Hu <gavin.hu@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> Reviewed-by: Steve Capper <steve.capper@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Phil Yang <phil.yang@arm.com>
> Acked-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Jerin Jacob <jerinj@marvell.com>
> ---
>  config/arm/meson.build                             |   1 +
>  config/common_base                                 |   5 +
>  .../common/include/arch/arm/rte_pause_64.h         |  70 +++++++
>  lib/librte_eal/common/include/generic/rte_pause.h  | 217 +++++++++++++++++++++
>  4 files changed, 293 insertions(+)
> 
> diff --git a/config/arm/meson.build b/config/arm/meson.build
> index 979018e..b4b4cac 100644
> --- a/config/arm/meson.build
> +++ b/config/arm/meson.build
> @@ -26,6 +26,7 @@ flags_common_default = [
>  	['RTE_LIBRTE_AVP_PMD', false],
> 
>  	['RTE_SCHED_VECTOR', false],
> +	['RTE_ARM_USE_WFE', false],
>  ]
> 
>  flags_generic = [
> diff --git a/config/common_base b/config/common_base
> index e843a21..c812156 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -111,6 +111,11 @@ CONFIG_RTE_MAX_VFIO_CONTAINERS=64
>  CONFIG_RTE_MALLOC_DEBUG=n
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>  CONFIG_RTE_USE_LIBBSD=n
> +# Use WFE instructions to implement the rte_wait_until_equal_xxx APIs;
> +# calling these APIs puts the core in a low power state while waiting
> +# for the memory address to become equal to the expected value.
> +# This is supported only by aarch64.
> +CONFIG_RTE_ARM_USE_WFE=n
> 
>  #
>  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
> diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> index 93895d3..7bc8efb 100644
> --- a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> +++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> @@ -1,5 +1,6 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
>   * Copyright(c) 2017 Cavium, Inc
> + * Copyright(c) 2019 Arm Limited
>   */
> 
>  #ifndef _RTE_PAUSE_ARM64_H_
> @@ -17,6 +18,75 @@ static inline void rte_pause(void)
>  	asm volatile("yield" ::: "memory");
>  }
> 
> +#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> +static inline void rte_sevl(void)
> +{
> +	asm volatile("sevl" : : : "memory");
> +}
> +
> +static inline void rte_wfe(void)
> +{
> +	asm volatile("wfe" : : : "memory");
> +}
> +
> +static __rte_always_inline uint16_t
> +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> +{
> +	uint16_t tmp;
> +	assert((memorder == __ATOMIC_ACQUIRE)
> +			|| (memorder == __ATOMIC_RELAXED));
> +	if (memorder == __ATOMIC_ACQUIRE)
> +		asm volatile("ldaxrh %w[tmp], [%x[addr]]"
> +			: [tmp] "=&r" (tmp)
> +			: [addr] "r"(addr)
> +			: "memory");
> +	else if (memorder == __ATOMIC_RELAXED)
> +		asm volatile("ldxrh %w[tmp], [%x[addr]]"
> +			: [tmp] "=&r" (tmp)
> +			: [addr] "r"(addr)
> +			: "memory");
> +	return tmp;
> +}
> +
> +static __rte_always_inline uint32_t
> +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> +{
> +	uint32_t tmp;
> +	assert((memorder == __ATOMIC_ACQUIRE)
> +			|| (memorder == __ATOMIC_RELAXED));
> +	if (memorder == __ATOMIC_ACQUIRE)
> +		asm volatile("ldaxr %w[tmp], [%x[addr]]"
> +			: [tmp] "=&r" (tmp)
> +			: [addr] "r"(addr)
> +			: "memory");
> +	else if (memorder == __ATOMIC_RELAXED)
> +		asm volatile("ldxr %w[tmp], [%x[addr]]"
> +			: [tmp] "=&r" (tmp)
> +			: [addr] "r"(addr)
> +			: "memory");
> +	return tmp;
> +}
> +
> +static __rte_always_inline uint64_t
> +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> +{
> +	uint64_t tmp;
> +	assert((memorder == __ATOMIC_ACQUIRE)
> +			|| (memorder == __ATOMIC_RELAXED));
> +	if (memorder == __ATOMIC_ACQUIRE)
> +		asm volatile("ldaxr %x[tmp], [%x[addr]]"
> +			: [tmp] "=&r" (tmp)
> +			: [addr] "r"(addr)
> +			: "memory");
> +	else if (memorder == __ATOMIC_RELAXED)
> +		asm volatile("ldxr %x[tmp], [%x[addr]]"
> +			: [tmp] "=&r" (tmp)
> +			: [addr] "r"(addr)
> +			: "memory");
> +	return tmp;
> +}
> +#endif
> +

The functions themselves seem good to me...
But I think there was some misunderstanding about code layout/placement.
I think arm-specific functions and defines need to be defined in arm-specific headers only.
But we can still have one instance of rte_wait_until_equal_* for arm.

To be more specific, I am talking about something like this:

lib/librte_eal/common/include/generic/rte_pause.h:
...
#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
static __rte_always_inline void
rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int memorder)
{
	while (__atomic_load_n(addr, memorder) != expected)
		rte_pause();
}
....
#endif
...

lib/librte_eal/common/include/arch/arm/rte_pause_64.h:

...
#ifdef RTE_ARM_USE_WFE 
#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
#endif
#include "generic/rte_pause.h"

...
#ifdef RTE_ARM_USE_WFE
static inline void rte_sevl(void)
{
	asm volatile("sevl" : : : "memory");
}
static inline void rte_wfe(void)
{
	asm volatile("wfe" : : : "memory");
}
#else
static inline void rte_sevl(void)
{
}
static inline void rte_wfe(void)
{
	rte_pause();
}
...

static __rte_always_inline void
rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int memorder)
{
	if (__atomic_load_ex_32(addr, memorder) != expected) {
		rte_sevl();
		do {
			rte_wfe();
		} while (__atomic_load_ex_32(addr, memorder) != expected);
	}
}

#endif
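
As an aside on why this idiom works (a sketch of the standard Armv8
spin-wait pattern, using the helper names from the snippet above; not
taken verbatim from the patch): the exclusive load arms the CPU's
exclusive monitor on the address, a remote store to it clears the monitor
and generates a wakeup event, and sevl pre-sets the local event register
so the first wfe cannot sleep through a store that lands between the
initial load and the wait.

static __rte_always_inline void
wait_until_equal_32_sketch(volatile uint32_t *addr, uint32_t expected)
{
	/* Arm the exclusive monitor; a remote store to *addr will
	 * clear it and raise an event. */
	if (__atomic_load_ex_32(addr, __ATOMIC_RELAXED) != expected) {
		/* Set the local event register so the first wfe returns
		 * immediately, closing the load-to-wait race window. */
		rte_sevl();
		do {
			rte_wfe(); /* low-power wait for the next event */
		} while (__atomic_load_ex_32(addr, __ATOMIC_RELAXED)
				!= expected);
	}
}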


>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/librte_eal/common/include/generic/rte_pause.h b/lib/librte_eal/common/include/generic/rte_pause.h
> index 52bd4db..4db44f9 100644
> --- a/lib/librte_eal/common/include/generic/rte_pause.h
> +++ b/lib/librte_eal/common/include/generic/rte_pause.h
> @@ -1,5 +1,6 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
>   * Copyright(c) 2017 Cavium, Inc
> + * Copyright(c) 2019 Arm Limited
>   */
> 
>  #ifndef _RTE_PAUSE_H_
> @@ -12,6 +13,12 @@
>   *
>   */
> 
> +#include <stdint.h>
> +#include <rte_common.h>
> +#include <rte_atomic.h>
> +#include <rte_compat.h>
> +#include <assert.h>
> +
>  /**
>   * Pause CPU execution for a short while
>   *
> @@ -20,4 +27,214 @@
>   */
>  static inline void rte_pause(void);
> 
> +static inline void rte_sevl(void);
> +static inline void rte_wfe(void);
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + *
> + * Atomic load from addr, it returns the 16-bit content of *addr.
> + *
> + * @param addr
> + *  A pointer to the memory location.
> + * @param memorder
> + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> + *  the GCC wiki on atomic synchronization for detailed definitions.
> + */
> +static __rte_always_inline uint16_t
> +__atomic_load_ex_16(volatile uint16_t *addr, int memorder);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + *
> + * Atomic load from addr, it returns the 32-bit content of *addr.
> + *
> + * @param addr
> + *  A pointer to the memory location.
> + * @param memorder
> + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> + *  the GCC wiki on atomic synchronization for detailed definitions.
> + */
> +static __rte_always_inline uint32_t
> +__atomic_load_ex_32(volatile uint32_t *addr, int memorder);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + *
> + * Atomic load from addr, it returns the 64-bit content of *addr.
> + *
> + * @param addr
> + *  A pointer to the memory location.
> + * @param memorder
> + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> + *  the GCC wiki on atomic synchronization for detailed definitions.
> + */
> +static __rte_always_inline uint64_t
> +__atomic_load_ex_64(volatile uint64_t *addr, int memorder);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + *
> + * Wait for *addr to be updated with a 16-bit expected value, with a relaxed
> + * memory ordering model meaning the loads around this API can be reordered.
> + *
> + * @param addr
> + *  A pointer to the memory location.
> + * @param expected
> + *  A 16-bit expected value to be in the memory location.
> + * @param memorder
> + *  Two different memory orders that can be specified:
> + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> + *  C++11 memory orders with the same names, see the C++11 standard or
> + *  the GCC wiki on atomic synchronization for detailed definition.
> + */
> +__rte_experimental
> +static __rte_always_inline void
> +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> +int memorder);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + *
> + * Wait for *addr to be updated with a 32-bit expected value, with a relaxed
> + * memory ordering model meaning the loads around this API can be reordered.
> + *
> + * @param addr
> + *  A pointer to the memory location.
> + * @param expected
> + *  A 32-bit expected value to be in the memory location.
> + * @param memorder
> + *  Two different memory orders that can be specified:
> + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> + *  C++11 memory orders with the same names, see the C++11 standard or
> + *  the GCC wiki on atomic synchronization for detailed definition.
> + */
> +__rte_experimental
> +static __rte_always_inline void
> +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> +int memorder);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + *
> + * Wait for *addr to be updated with a 64-bit expected value, with a relaxed
> + * memory ordering model meaning the loads around this API can be reordered.
> + *
> + * @param addr
> + *  A pointer to the memory location.
> + * @param expected
> + *  A 64-bit expected value to be in the memory location.
> + * @param memorder
> + *  Two different memory orders that can be specified:
> + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> + *  C++11 memory orders with the same names, see the C++11 standard or
> + *  the GCC wiki on atomic synchronization for detailed definition.
> + */
> +__rte_experimental
> +static __rte_always_inline void
> +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> +int memorder);
> +
> +#ifdef RTE_ARM_USE_WFE
> +#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> +#endif
> +
> +#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> +static inline void rte_sevl(void)
> +{
> +}
> +
> +static inline void rte_wfe(void)
> +{
> +	rte_pause();
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + *
> + * Atomic load from addr, it returns the 16-bit content of *addr.
> + *
> + * @param addr
> + *  A pointer to the memory location.
> + * @param memorder
> + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> + *  the GCC wiki on atomic synchronization for detailed definitions.
> + */
> +static __rte_always_inline uint16_t
> +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> +{
> +	uint16_t tmp;
> +	assert((memorder == __ATOMIC_ACQUIRE)
> +			|| (memorder == __ATOMIC_RELAXED));
> +	tmp = __atomic_load_n(addr, memorder);
> +	return tmp;
> +}
> +
> +static __rte_always_inline uint32_t
> +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> +{
> +	uint32_t tmp;
> +	assert((memorder == __ATOMIC_ACQUIRE)
> +			|| (memorder == __ATOMIC_RELAXED));
> +	tmp = __atomic_load_n(addr, memorder);
> +	return tmp;
> +}
> +
> +static __rte_always_inline uint64_t
> +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> +{
> +	uint64_t tmp;
> +	assert((memorder == __ATOMIC_ACQUIRE)
> +			|| (memorder == __ATOMIC_RELAXED));
> +	tmp = __atomic_load_n(addr, memorder);
> +	return tmp;
> +}
> +
> +static __rte_always_inline void
> +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> +int memorder)
> +{
> +	if (__atomic_load_n(addr, memorder) != expected) {
> +		rte_sevl();
> +		do {
> +			rte_wfe();
> +		} while (__atomic_load_ex_16(addr, memorder) != expected);
> +	}
> +}
> +
> +static __rte_always_inline void
> +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> +int memorder)
> +{
> +	if (__atomic_load_ex_32(addr, memorder) != expected) {
> +		rte_sevl();
> +		do {
> +			rte_wfe();
> +		} while (__atomic_load_ex_32(addr, memorder) != expected);
> +	}
> +}
> +
> +static __rte_always_inline void
> +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> +int memorder)
> +{
> +	if (__atomic_load_ex_64(addr, memorder) != expected) {
> +		rte_sevl();
> +		do {
> +			rte_wfe();
> +		} while (__atomic_load_ex_64(addr, memorder) != expected);
> +	}
> +}
> +#endif
> +
>  #endif /* _RTE_PAUSE_H_ */
> --
> 2.7.4
  
Ananyev, Konstantin Oct. 24, 2019, 1:57 p.m. UTC | #2
> 
> Hi Gavin,
> 
> > The rte_wait_until_equal_xx APIs abstract the functionality of
> > 'polling for a memory location to become equal to a given value'.
> >
> > Add the RTE_ARM_USE_WFE configuration entry for aarch64, disabled
> > by default. When it is enabled, the above APIs will use the WFE
> > instruction to save CPU cycles and power.
> >
> > When calling this API on aarch64 from a VM, WFE may trap into and out of
> > the hypervisor to release the vCPU, which causes high exit latency. Since
> > kernel 4.18.20, an adaptive trapping mechanism has been introduced to
> > balance the latency and workload.
> >
> > Signed-off-by: Gavin Hu <gavin.hu@arm.com>
> > Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > Reviewed-by: Steve Capper <steve.capper@arm.com>
> > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Reviewed-by: Phil Yang <phil.yang@arm.com>
> > Acked-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> > Acked-by: Jerin Jacob <jerinj@marvell.com>
> > ---
> >  config/arm/meson.build                             |   1 +
> >  config/common_base                                 |   5 +
> >  .../common/include/arch/arm/rte_pause_64.h         |  70 +++++++
> >  lib/librte_eal/common/include/generic/rte_pause.h  | 217 +++++++++++++++++++++
> >  4 files changed, 293 insertions(+)
> >
> > diff --git a/config/arm/meson.build b/config/arm/meson.build
> > index 979018e..b4b4cac 100644
> > --- a/config/arm/meson.build
> > +++ b/config/arm/meson.build
> > @@ -26,6 +26,7 @@ flags_common_default = [
> >  	['RTE_LIBRTE_AVP_PMD', false],
> >
> >  	['RTE_SCHED_VECTOR', false],
> > +	['RTE_ARM_USE_WFE', false],
> >  ]
> >
> >  flags_generic = [
> > diff --git a/config/common_base b/config/common_base
> > index e843a21..c812156 100644
> > --- a/config/common_base
> > +++ b/config/common_base
> > @@ -111,6 +111,11 @@ CONFIG_RTE_MAX_VFIO_CONTAINERS=64
> >  CONFIG_RTE_MALLOC_DEBUG=n
> >  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> >  CONFIG_RTE_USE_LIBBSD=n
> > +# Use WFE instructions to implement the rte_wait_until_equal_xxx APIs;
> > +# calling these APIs puts the core in a low power state while waiting
> > +# for the memory address to become equal to the expected value.
> > +# This is supported only by aarch64.
> > +CONFIG_RTE_ARM_USE_WFE=n
> >
> >  #
> >  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
> > diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > index 93895d3..7bc8efb 100644
> > --- a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > +++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > @@ -1,5 +1,6 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> >   * Copyright(c) 2017 Cavium, Inc
> > + * Copyright(c) 2019 Arm Limited
> >   */
> >
> >  #ifndef _RTE_PAUSE_ARM64_H_
> > @@ -17,6 +18,75 @@ static inline void rte_pause(void)
> >  	asm volatile("yield" ::: "memory");
> >  }
> >
> > +#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +static inline void rte_sevl(void)
> > +{
> > +	asm volatile("sevl" : : : "memory");
> > +}
> > +
> > +static inline void rte_wfe(void)
> > +{
> > +	asm volatile("wfe" : : : "memory");
> > +}
> > +
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> > +{
> > +	uint16_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxrh %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxrh %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> > +{
> > +	uint32_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxr %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxr %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> > +{
> > +	uint64_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxr %x[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxr %x[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +#endif
> > +
> 
> The functions themselves seem good to me...
> But I think there was some misunderstanding about code layout/placement.
> I think arm-specific functions and defines need to be defined in arm-specific headers only.
> But we can still have one instance of rte_wait_until_equal_* for arm.
> 
> To be more specific, I am talking about something like this:
> 
> lib/librte_eal/common/include/generic/rte_pause.h:
> ...
> #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> static __rte_always_inline void
> rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int memorder)
> {
> 	while (__atomic_load_n(addr, memorder) != expected)
> 		rte_pause();
> }
> ....
> #endif
> ...
> 
> lib/librte_eal/common/include/arch/arm/rte_pause_64.h:
> 
> ...
> #ifdef RTE_ARM_USE_WFE
> #define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> #endif
> #include "generic/rte_pause.h"
> 
> ...
> #ifdef RTE_ARM_USE_WFE
> static inline void rte_sevl(void)
> {
> 	asm volatile("sevl" : : : "memory");
> }
> static inline void rte_wfe(void)
> {
> 	asm volatile("wfe" : : : "memory");
> }
> #else
> static inline void rte_sevl(void)
> {
> }
> static inline void rte_wfe(void)
> {
> 	rte_pause();
> }
> ...
> 
> static __rte_always_inline void
> rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int memorder)
> {
> 	if (__atomic_load_ex_32(addr, memorder) != expected) {
> 		rte_sevl();
> 		do {
> 			rte_wfe();
> 		} while (__atomic_load_ex_32(addr, memorder) != expected);
> 	}
> }

One more nit (nearly forgot): I think it is better to have an rte_ (or __rte_) prefix for all
functions defined in public files, so: __rte_atomic_load_ex_32() or just rte_atomic_load_ex_32().

> 
> #endif
> 
> 
> >  #ifdef __cplusplus
> >  }
> >  #endif
> > diff --git a/lib/librte_eal/common/include/generic/rte_pause.h b/lib/librte_eal/common/include/generic/rte_pause.h
> > index 52bd4db..4db44f9 100644
> > --- a/lib/librte_eal/common/include/generic/rte_pause.h
> > +++ b/lib/librte_eal/common/include/generic/rte_pause.h
> > @@ -1,5 +1,6 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> >   * Copyright(c) 2017 Cavium, Inc
> > + * Copyright(c) 2019 Arm Limited
> >   */
> >
> >  #ifndef _RTE_PAUSE_H_
> > @@ -12,6 +13,12 @@
> >   *
> >   */
> >
> > +#include <stdint.h>
> > +#include <rte_common.h>
> > +#include <rte_atomic.h>
> > +#include <rte_compat.h>
> > +#include <assert.h>
> > +
> >  /**
> >   * Pause CPU execution for a short while
> >   *
> > @@ -20,4 +27,214 @@
> >   */
> >  static inline void rte_pause(void);
> >
> > +static inline void rte_sevl(void);
> > +static inline void rte_wfe(void);
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr, it returns the 16-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr, it returns the 32-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr, it returns the 64-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Wait for *addr to be updated with a 16-bit expected value, with a relaxed
> > + * memory ordering model meaning the loads around this API can be reordered.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 16-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definition.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > +int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Wait for *addr to be updated with a 32-bit expected value, with a relaxed
> > + * memory ordering model meaning the loads around this API can be reordered.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 32-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definition.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > +int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Wait for *addr to be updated with a 64-bit expected value, with a relaxed
> > + * memory ordering model meaning the loads around this API can be reordered.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 64-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definition.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > +int memorder);
> > +
> > +#ifdef RTE_ARM_USE_WFE
> > +#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +#endif
> > +
> > +#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +static inline void rte_sevl(void)
> > +{
> > +}
> > +
> > +static inline void rte_wfe(void)
> > +{
> > +	rte_pause();
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr, it returns the 16-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> > +{
> > +	uint16_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> > +{
> > +	uint32_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> > +{
> > +	uint64_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_n(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_16(addr, memorder) != expected);
> > +	}
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_ex_32(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_32(addr, memorder) != expected);
> > +	}
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_ex_64(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_64(addr, memorder) != expected);
> > +	}
> > +}
> > +#endif
> > +
> >  #endif /* _RTE_PAUSE_H_ */
> > --
> > 2.7.4
  
Gavin Hu Oct. 24, 2019, 5 p.m. UTC | #3
Hi Konstantin,

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Thursday, October 24, 2019 9:52 PM
> To: Gavin Hu (Arm Technology China) <Gavin.Hu@arm.com>;
> dev@dpdk.org
> Cc: nd <nd@arm.com>; david.marchand@redhat.com;
> thomas@monjalon.net; stephen@networkplumber.org;
> hemant.agrawal@nxp.com; jerinj@marvell.com;
> pbhagavatula@marvell.com; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>; Ruifeng Wang (Arm Technology China)
> <Ruifeng.Wang@arm.com>; Phil Yang (Arm Technology China)
> <Phil.Yang@arm.com>; Steve Capper <Steve.Capper@arm.com>
> Subject: RE: [PATCH v9 2/5] eal: add the APIs to wait until equal
> 
> Hi Gavin,
> 
> > The rte_wait_until_equal_xx APIs abstract the functionality of
> > 'polling for a memory location to become equal to a given value'.
> >
> > Add the RTE_ARM_USE_WFE configuration entry for aarch64, disabled
> > by default. When it is enabled, the above APIs will use the WFE
> > instruction to save CPU cycles and power.
> >
> > When calling this API on aarch64 from a VM, WFE may trap into and out of
> > the hypervisor to release the vCPU, which causes high exit latency. Since
> > kernel 4.18.20, an adaptive trapping mechanism has been introduced to
> > balance the latency and workload.
> >
> > Signed-off-by: Gavin Hu <gavin.hu@arm.com>
> > Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > Reviewed-by: Steve Capper <steve.capper@arm.com>
> > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Reviewed-by: Phil Yang <phil.yang@arm.com>
> > Acked-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> > Acked-by: Jerin Jacob <jerinj@marvell.com>
> > ---
> >  config/arm/meson.build                             |   1 +
> >  config/common_base                                 |   5 +
> >  .../common/include/arch/arm/rte_pause_64.h         |  70 +++++++
> >  lib/librte_eal/common/include/generic/rte_pause.h  | 217
> +++++++++++++++++++++
> >  4 files changed, 293 insertions(+)
> >
> > diff --git a/config/arm/meson.build b/config/arm/meson.build
> > index 979018e..b4b4cac 100644
> > --- a/config/arm/meson.build
> > +++ b/config/arm/meson.build
> > @@ -26,6 +26,7 @@ flags_common_default = [
> >  	['RTE_LIBRTE_AVP_PMD', false],
> >
> >  	['RTE_SCHED_VECTOR', false],
> > +	['RTE_ARM_USE_WFE', false],
> >  ]
> >
> >  flags_generic = [
> > diff --git a/config/common_base b/config/common_base
> > index e843a21..c812156 100644
> > --- a/config/common_base
> > +++ b/config/common_base
> > @@ -111,6 +111,11 @@ CONFIG_RTE_MAX_VFIO_CONTAINERS=64
> >  CONFIG_RTE_MALLOC_DEBUG=n
> >  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> >  CONFIG_RTE_USE_LIBBSD=n
> > +# Use WFE instructions to implement the rte_wait_until_equal_xxx APIs;
> > +# calling these APIs puts the core in a low power state while waiting
> > +# for the memory address to become equal to the expected value.
> > +# This is supported only by aarch64.
> > +CONFIG_RTE_ARM_USE_WFE=n
> >
> >  #
> >  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power
> testing.
> > diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > index 93895d3..7bc8efb 100644
> > --- a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > +++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > @@ -1,5 +1,6 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> >   * Copyright(c) 2017 Cavium, Inc
> > + * Copyright(c) 2019 Arm Limited
> >   */
> >
> >  #ifndef _RTE_PAUSE_ARM64_H_
> > @@ -17,6 +18,75 @@ static inline void rte_pause(void)
> >  	asm volatile("yield" ::: "memory");
> >  }
> >
> > +#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +static inline void rte_sevl(void)
> > +{
> > +	asm volatile("sevl" : : : "memory");
> > +}
> > +
> > +static inline void rte_wfe(void)
> > +{
> > +	asm volatile("wfe" : : : "memory");
> > +}
> > +
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> > +{
> > +	uint16_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxrh %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxrh %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> > +{
> > +	uint32_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxr %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxr %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> > +{
> > +	uint64_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxr %x[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxr %x[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +#endif
> > +
> 
> The functions themselves seem good to me...
> But I think there was some misunderstanding about code layout/placement.
> I think arm-specific functions and defines need to be defined in arm-specific
> headers only.
> But we can still have one instance of rte_wait_until_equal_* for arm.
I will move that part to the arm-specific headers.
/Gavin
> 
> To be more specific, I am talking about something like this:
> 
> lib/librte_eal/common/include/generic/rte_pause.h:
> ...
> #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> static __rte_always_inline void
> rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> 		int memorder)
> {
> 	while (__atomic_load_n(addr, memorder) != expected)
> 		rte_pause();
> }
> ....
> #endif
> ...
> 
> lib/librte_eal/common/include/arch/arm/rte_pause_64.h:
> 
> ...
> #ifdef RTE_ARM_USE_WFE
> #define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> #endif
> #include "generic/rte_pause.h"
> 
> ...
> #ifdef RTE_ARM_USE_WFE
> static inline void rte_sevl(void)
> {
> 	asm volatile("sevl" : : : "memory");
> }
> static inline void rte_wfe(void)
> {
> 	asm volatile("wfe" : : : "memory");
> }
> #else
> static inline void rte_sevl(void)
> {
> }
> static inline void rte_wfe(void)
> {
> 	rte_pause();
> }
Should these arm-specific APIs, including the rte_load_ex_xxx APIs, have doxygen comments added?
These APIs are arm specific and not intended to be exposed, but they are in public files (are arm-specific headers considered public?)
/Gavin
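
For instance, a possible block (illustrative wording only), using
doxygen's @internal tag to keep these helpers out of the public API docs:

/**
 * @internal
 * Set the local event register (SEVL) so that the next wfe on this
 * core returns immediately instead of sleeping. aarch64 only.
 */
static inline void rte_sevl(void);
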
> ...
> 
> static __rte_always_inline void
> rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int
> memorder)
> {
> 	if (__atomic_load_ex_32(addr, memorder) != expected) {
> 		rte_sevl();
> 		do {
> 			rte_wfe();
> 		} while (__atomic_load_ex_32(addr, memorder) !=
> expected);
> 	}
> }
> 
> #endif
> 
> 
> >  #ifdef __cplusplus
> >  }
> >  #endif
> > diff --git a/lib/librte_eal/common/include/generic/rte_pause.h
> b/lib/librte_eal/common/include/generic/rte_pause.h
> > index 52bd4db..4db44f9 100644
> > --- a/lib/librte_eal/common/include/generic/rte_pause.h
> > +++ b/lib/librte_eal/common/include/generic/rte_pause.h
> > @@ -1,5 +1,6 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> >   * Copyright(c) 2017 Cavium, Inc
> > + * Copyright(c) 2019 Arm Limited
> >   */
> >
> >  #ifndef _RTE_PAUSE_H_
> > @@ -12,6 +13,12 @@
> >   *
> >   */
> >
> > +#include <stdint.h>
> > +#include <rte_common.h>
> > +#include <rte_atomic.h>
> > +#include <rte_compat.h>
> > +#include <assert.h>
> > +
> >  /**
> >   * Pause CPU execution for a short while
> >   *
> > @@ -20,4 +27,214 @@
> >   */
> >  static inline void rte_pause(void);
> >
> > +static inline void rte_sevl(void);
> > +static inline void rte_wfe(void);
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior
> notice
> > + *
> > + * Atomic load from addr, it returns the 16-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and
> __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the
> C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior
> notice
> > + *
> > + * Atomic load from addr, it returns the 32-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and
> __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the
> C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior
> notice
> > + *
> > + * Atomic load from addr, it returns the 64-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and
> __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the
> C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior
> notice
> > + *
> > + * Wait for *addr to be updated with a 16-bit expected value, with a
> relaxed
> > + * memory ordering model meaning the loads around this API can be
> reordered.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 16-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard
> or
> > + *  the GCC wiki on atomic synchronization for detailed definition.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > +int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior
> notice
> > + *
> > + * Wait for *addr to be updated with a 32-bit expected value, with a
> relaxed
> > + * memory ordering model meaning the loads around this API can be
> reordered.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 32-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard
> or
> > + *  the GCC wiki on atomic synchronization for detailed definition.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > +int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior
> notice
> > + *
> > + * Wait for *addr to be updated with a 64-bit expected value, with a
> relaxed
> > + * memory ordering model meaning the loads around this API can be
> reordered.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 64-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard
> or
> > + *  the GCC wiki on atomic synchronization for detailed definition.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > +int memorder);
> > +
> > +#ifdef RTE_ARM_USE_WFE
> > +#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +#endif
> > +
> > +#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +static inline void rte_sevl(void)
> > +{
> > +}
> > +
> > +static inline void rte_wfe(void)
> > +{
> > +	rte_pause();
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior
> notice
> > + *
> > + * Atomic load from addr, it returns the 16-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and
> __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the
> C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> > +{
> > +	uint16_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> > +{
> > +	uint32_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> > +{
> > +	uint64_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_n(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_16(addr, memorder) !=
> expected);
> > +	}
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_ex_32(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_32(addr, memorder) !=
> expected);
> > +	}
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_ex_64(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_64(addr, memorder) !=
> expected);
> > +	}
> > +}
> > +#endif
> > +
> >  #endif /* _RTE_PAUSE_H_ */
> > --
> > 2.7.4
  

Patch

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 979018e..b4b4cac 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -26,6 +26,7 @@  flags_common_default = [
 	['RTE_LIBRTE_AVP_PMD', false],
 
 	['RTE_SCHED_VECTOR', false],
+	['RTE_ARM_USE_WFE', false],
 ]
 
 flags_generic = [
diff --git a/config/common_base b/config/common_base
index e843a21..c812156 100644
--- a/config/common_base
+++ b/config/common_base
@@ -111,6 +111,11 @@  CONFIG_RTE_MAX_VFIO_CONTAINERS=64
 CONFIG_RTE_MALLOC_DEBUG=n
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
 CONFIG_RTE_USE_LIBBSD=n
+# Use WFE instructions to implement the rte_wait_until_equal_xxx APIs;
+# calling these APIs puts the core in a low power state while waiting
+# for the memory address to become equal to the expected value.
+# This is supported only by aarch64.
+CONFIG_RTE_ARM_USE_WFE=n
 
 #
 # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
index 93895d3..7bc8efb 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
@@ -1,5 +1,6 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2017 Cavium, Inc
+ * Copyright(c) 2019 Arm Limited
  */
 
 #ifndef _RTE_PAUSE_ARM64_H_
@@ -17,6 +18,75 @@  static inline void rte_pause(void)
 	asm volatile("yield" ::: "memory");
 }
 
+#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+static inline void rte_sevl(void)
+{
+	asm volatile("sevl" : : : "memory");
+}
+
+static inline void rte_wfe(void)
+{
+	asm volatile("wfe" : : : "memory");
+}
+
+static __rte_always_inline uint16_t
+__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
+{
+	uint16_t tmp;
+	assert((memorder == __ATOMIC_ACQUIRE)
+			|| (memorder == __ATOMIC_RELAXED));
+	if (memorder == __ATOMIC_ACQUIRE)
+		asm volatile("ldaxrh %w[tmp], [%x[addr]]"
+			: [tmp] "=&r" (tmp)
+			: [addr] "r"(addr)
+			: "memory");
+	else if (memorder == __ATOMIC_RELAXED)
+		asm volatile("ldxrh %w[tmp], [%x[addr]]"
+			: [tmp] "=&r" (tmp)
+			: [addr] "r"(addr)
+			: "memory");
+	return tmp;
+}
+
+static __rte_always_inline uint32_t
+__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
+{
+	uint32_t tmp;
+	assert((memorder == __ATOMIC_ACQUIRE)
+			|| (memorder == __ATOMIC_RELAXED));
+	if (memorder == __ATOMIC_ACQUIRE)
+		asm volatile("ldaxr %w[tmp], [%x[addr]]"
+			: [tmp] "=&r" (tmp)
+			: [addr] "r"(addr)
+			: "memory");
+	else if (memorder == __ATOMIC_RELAXED)
+		asm volatile("ldxr %w[tmp], [%x[addr]]"
+			: [tmp] "=&r" (tmp)
+			: [addr] "r"(addr)
+			: "memory");
+	return tmp;
+}
+
+static __rte_always_inline uint64_t
+__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
+{
+	uint64_t tmp;
+	assert((memorder == __ATOMIC_ACQUIRE)
+			|| (memorder == __ATOMIC_RELAXED));
+	if (memorder == __ATOMIC_ACQUIRE)
+		asm volatile("ldaxr %x[tmp], [%x[addr]]"
+			: [tmp] "=&r" (tmp)
+			: [addr] "r"(addr)
+			: "memory");
+	else if (memorder == __ATOMIC_RELAXED)
+		asm volatile("ldxr %x[tmp], [%x[addr]]"
+			: [tmp] "=&r" (tmp)
+			: [addr] "r"(addr)
+			: "memory");
+	return tmp;
+}
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_eal/common/include/generic/rte_pause.h b/lib/librte_eal/common/include/generic/rte_pause.h
index 52bd4db..4db44f9 100644
--- a/lib/librte_eal/common/include/generic/rte_pause.h
+++ b/lib/librte_eal/common/include/generic/rte_pause.h
@@ -1,5 +1,6 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2017 Cavium, Inc
+ * Copyright(c) 2019 Arm Limited
  */
 
 #ifndef _RTE_PAUSE_H_
@@ -12,6 +13,12 @@ 
  *
  */
 
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_atomic.h>
+#include <rte_compat.h>
+#include <assert.h>
+
 /**
  * Pause CPU execution for a short while
  *
@@ -20,4 +27,214 @@ 
  */
 static inline void rte_pause(void);
 
+static inline void rte_sevl(void);
+static inline void rte_wfe(void);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Atomic load from addr, it returns the 16-bit content of *addr.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param memorder
+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
+ *  These map to C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definitions.
+ */
+static __rte_always_inline uint16_t
+__atomic_load_ex_16(volatile uint16_t *addr, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Atomic load from addr, it returns the 32-bit content of *addr.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param memorder
+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
+ *  These map to C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definitions.
+ */
+static __rte_always_inline uint32_t
+__atomic_load_ex_32(volatile uint32_t *addr, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Atomic load from addr, it returns the 64-bit content of *addr.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param memorder
+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
+ *  These map to C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definitions.
+ */
+static __rte_always_inline uint64_t
+__atomic_load_ex_64(volatile uint64_t *addr, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Wait for *addr to be updated with a 16-bit expected value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param expected
+ *  A 16-bit expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Wait for *addr to be updated with a 32-bit expected value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param expected
+ *  A 32-bit expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
+int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Wait for *addr to be updated with a 64-bit expected value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param expected
+ *  A 64-bit expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
+int memorder);
+
+#ifdef RTE_ARM_USE_WFE
+#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+#endif
+
+#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+static inline void rte_sevl(void)
+{
+}
+
+static inline void rte_wfe(void)
+{
+	rte_pause();
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Atomic load from addr, it returns the 16-bit content of *addr.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param memorder
+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
+ *  These map to C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definitions.
+ */
+static __rte_always_inline uint16_t
+__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
+{
+	uint16_t tmp;
+	assert((memorder == __ATOMIC_ACQUIRE)
+			|| (memorder == __ATOMIC_RELAXED));
+	tmp = __atomic_load_n(addr, memorder);
+	return tmp;
+}
+
+static __rte_always_inline uint32_t
+__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
+{
+	uint32_t tmp;
+	assert((memorder == __ATOMIC_ACQUIRE)
+			|| (memorder == __ATOMIC_RELAXED));
+	tmp = __atomic_load_n(addr, memorder);
+	return tmp;
+}
+
+static __rte_always_inline uint64_t
+__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
+{
+	uint64_t tmp;
+	assert((memorder == __ATOMIC_ACQUIRE)
+			|| (memorder == __ATOMIC_RELAXED));
+	tmp = __atomic_load_n(addr, memorder);
+	return tmp;
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+int memorder)
+{
+	if (__atomic_load_n(addr, memorder) != expected) {
+		rte_sevl();
+		do {
+			rte_wfe();
+		} while (__atomic_load_ex_16(addr, memorder) != expected);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
+int memorder)
+{
+	if (__atomic_load_ex_32(addr, memorder) != expected) {
+		rte_sevl();
+		do {
+			rte_wfe();
+		} while (__atomic_load_ex_32(addr, memorder) != expected);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
+int memorder)
+{
+	if (__atomic_load_ex_64(addr, memorder) != expected) {
+		rte_sevl();
+		do {
+			rte_wfe();
+		} while (__atomic_load_ex_64(addr, memorder) != expected);
+	}
+}
+#endif
+
 #endif /* _RTE_PAUSE_H_ */