[v8,3/3] spinlock: reimplement with atomic one-way barrier builtins
Commit Message
The __sync builtin based implementation generates full memory barriers
('dmb ish') on Arm platforms. Use the C11 atomic builtins instead to
generate one-way barriers.
Here is the assembly code of __sync_compare_and_swap builtin.
__sync_bool_compare_and_swap(dst, exp, src);
0x000000000090f1b0 <+16>: e0 07 40 f9 ldr x0, [sp, #8]
0x000000000090f1b4 <+20>: e1 0f 40 79 ldrh w1, [sp, #6]
0x000000000090f1b8 <+24>: e2 0b 40 79 ldrh w2, [sp, #4]
0x000000000090f1bc <+28>: 21 3c 00 12 and w1, w1, #0xffff
0x000000000090f1c0 <+32>: 03 7c 5f 48 ldxrh w3, [x0]
0x000000000090f1c4 <+36>: 7f 00 01 6b cmp w3, w1
0x000000000090f1c8 <+40>: 61 00 00 54 b.ne 0x90f1d4 <rte_atomic16_cmpset+52> // b.any
0x000000000090f1cc <+44>: 02 fc 04 48 stlxrh w4, w2, [x0]
0x000000000090f1d0 <+48>: 84 ff ff 35 cbnz w4, 0x90f1c0 <rte_atomic16_cmpset+32>
0x000000000090f1d4 <+52>: bf 3b 03 d5 dmb ish
0x000000000090f1d8 <+56>: e0 17 9f 1a cset w0, eq // eq = none
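For comparison, here is a minimal sketch of the equivalent C11 builtin
call (illustrative only; the function name is hypothetical). With
__ATOMIC_ACQUIRE on the success path, GCC can emit a load-acquire
exclusive (ldaxrh) on AArch64 and drop the trailing 'dmb ish' shown
above:

    #include <stdint.h>

    /* Hypothetical C11 counterpart of rte_atomic16_cmpset(). */
    static inline int
    atomic16_cmpset_c11(uint16_t *dst, uint16_t exp, uint16_t src)
    {
            /* Acquire on success, relaxed on failure: a one-way
             * barrier instead of the full 'dmb ish'. */
            return __atomic_compare_exchange_n(dst, &exp, src, 0,
                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    }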
The benchmarking results showed consistent improvements on all available
platforms:
1. Cavium ThunderX2: 126% performance improvement;
2. Hisilicon 1616: 30%;
3. Qualcomm Falkor: 13%;
4. Marvell ARMADA 8040 with A72 cores on macchiatobin: 3.7%.
Here is the example test result on TX2:
$ sudo ./build/app/test -l 16-27 -- -i
RTE>>spinlock_autotest
*** spinlock_autotest without this patch ***
Test with lock on 12 cores...
Core [16] Cost Time = 53886 us
Core [17] Cost Time = 53605 us
Core [18] Cost Time = 53163 us
Core [19] Cost Time = 49419 us
Core [20] Cost Time = 34317 us
Core [21] Cost Time = 53408 us
Core [22] Cost Time = 53970 us
Core [23] Cost Time = 53930 us
Core [24] Cost Time = 53283 us
Core [25] Cost Time = 51504 us
Core [26] Cost Time = 50718 us
Core [27] Cost Time = 51730 us
Total Cost Time = 612933 us
*** spinlock_autotest with this patch ***
Test with lock on 12 cores...
Core [16] Cost Time = 18808 us
Core [17] Cost Time = 29497 us
Core [18] Cost Time = 29132 us
Core [19] Cost Time = 26150 us
Core [20] Cost Time = 21892 us
Core [21] Cost Time = 24377 us
Core [22] Cost Time = 27211 us
Core [23] Cost Time = 11070 us
Core [24] Cost Time = 29802 us
Core [25] Cost Time = 15793 us
Core [26] Cost Time = 7474 us
Core [27] Cost Time = 29550 us
Total Cost Time = 270756 us
In the tests on ThunderX2, with more cores contending, the performance gain
was even higher, indicating that the __atomic implementation scales better
than __sync.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <Ola.Liljedahl@arm.com>
Reviewed-by: Steve Capper <Steve.Capper@arm.com>
---
lib/librte_eal/common/include/generic/rte_spinlock.h | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
Comments
On Fri, 2019-03-08 at 15:56 +0800, Gavin Hu wrote:
> ----------------------------------------------------------------------
> The __sync builtin based implementation generates full memory barriers
> ('dmb ish') on Arm platforms. Use the C11 atomic builtins instead to
> generate one-way barriers.
>
>
> lib/librte_eal/common/include/generic/rte_spinlock.h | 18
> +++++++++++++-----
> 1 file changed, 13 insertions(+), 5 deletions(-)
>
> diff --git a/lib/librte_eal/common/include/generic/rte_spinlock.h
> b/lib/librte_eal/common/include/generic/rte_spinlock.h
> index c4c3fc3..87ae7a4 100644
> --- a/lib/librte_eal/common/include/generic/rte_spinlock.h
> +++ b/lib/librte_eal/common/include/generic/rte_spinlock.h
> @@ -61,9 +61,14 @@ rte_spinlock_lock(rte_spinlock_t *sl);
> static inline void
> rte_spinlock_lock(rte_spinlock_t *sl)
> {
> - while (__sync_lock_test_and_set(&sl->locked, 1))
> - while(sl->locked)
> + int exp = 0;
> +
> + while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
> + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
Would it be cleaner to use __atomic_test_and_set() to avoid the explicit
exp = 0? (a sketch of this variant follows this comment)
> + while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
> rte_pause();
> + exp = 0;
> + }
> }
> #endif
>
> @@ -80,7 +85,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl);
> static inline void
> rte_spinlock_unlock (rte_spinlock_t *sl)
> {
> - __sync_lock_release(&sl->locked);
> + __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
__atomic_clear(.., __ATOMIC_RELEASE) looks cleaner to me.
> }
> #endif
>
> @@ -99,7 +104,10 @@ rte_spinlock_trylock (rte_spinlock_t *sl);
> static inline int
> rte_spinlock_trylock (rte_spinlock_t *sl)
> {
> - return __sync_lock_test_and_set(&sl->locked,1) == 0;
> + int exp = 0;
> + return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
> + 0, /* disallow spurious failure */
> + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
return (__atomic_test_and_set(.., __ATOMIC_ACQUIRE) == 0) would be a
cleaner version.
> }
> #endif
>
> @@ -113,7 +121,7 @@ rte_spinlock_trylock (rte_spinlock_t *sl)
> */
> static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
> {
> - return sl->locked;
> + return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
Would __ATOMIC_RELAXED be sufficient?
> }
>
> /**
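For reference, a minimal sketch of the __atomic_test_and_set() variant
suggested above (hypothetical code, not the merged patch; as the
follow-up below explains, this form measured much slower on ThunderX2):

    #include <stdbool.h>

    /* Illustrative only: rte_pause() is DPDK's busy-wait hint. */
    static inline void
    spinlock_lock_tas(bool *locked)
    {
            /* Returns the previous value and stores 'true', so no
             * explicit 'exp = 0' bookkeeping is needed. */
            while (__atomic_test_and_set(locked, __ATOMIC_ACQUIRE)) {
                    while (__atomic_load_n(locked, __ATOMIC_RELAXED))
                            rte_pause();
            }
    }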
> > ----------------------------------------------------------------------
> > The __sync builtin based implementation generates full memory barriers
> > ('dmb ish') on Arm platforms. Use the C11 atomic builtins instead to
> > generate one-way barriers.
> >
> >
> > lib/librte_eal/common/include/generic/rte_spinlock.h | 18
> > +++++++++++++-----
> > 1 file changed, 13 insertions(+), 5 deletions(-)
> >
> > diff --git a/lib/librte_eal/common/include/generic/rte_spinlock.h
> > b/lib/librte_eal/common/include/generic/rte_spinlock.h
> > index c4c3fc3..87ae7a4 100644
> > --- a/lib/librte_eal/common/include/generic/rte_spinlock.h
> > +++ b/lib/librte_eal/common/include/generic/rte_spinlock.h
> > @@ -61,9 +61,14 @@ rte_spinlock_lock(rte_spinlock_t *sl);
> > static inline void
> > rte_spinlock_lock(rte_spinlock_t *sl)
> > {
> > - while (__sync_lock_test_and_set(&sl->locked, 1))
> > - while(sl->locked)
> > + int exp = 0;
> > +
> > + while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
> > + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
>
> Would it be cleaner to use __atomic_test_and_set() to avoid the explicit exp = 0?
We addressed it here: http://mails.dpdk.org/archives/dev/2019-January/122363.html
>
>
> > + while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
> > rte_pause();
> > + exp = 0;
> > + }
> > }
> > #endif
> >
> > @@ -80,7 +85,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl);
> > static inline void
> > rte_spinlock_unlock (rte_spinlock_t *sl)
> > {
> > - __sync_lock_release(&sl->locked);
> > + __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
>
> __atomic_clear(.., __ATOMIC_RELEASE) looks cleaner to me.
This needs the operand to be of type bool.
>
> > }
> > #endif
> >
> > @@ -99,7 +104,10 @@ rte_spinlock_trylock (rte_spinlock_t *sl);
> > static inline int
> > rte_spinlock_trylock (rte_spinlock_t *sl)
> > {
> > - return __sync_lock_test_and_set(&sl->locked,1) == 0;
> > + int exp = 0;
> > + return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
> > + 0, /* disallow spurious failure */
> > + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
>
> return (__atomic_test_and_set(.., __ATOMIC_ACQUIRE) == 0) would be a
> cleaner version.
>
> > }
> > #endif
> >
> > @@ -113,7 +121,7 @@ rte_spinlock_trylock (rte_spinlock_t *sl)
> > */
> > static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
> > {
> > - return sl->locked;
> > + return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
>
> Would __ATOMIC_RELAXED be sufficient?
This is also addressed here: http://mails.dpdk.org/archives/dev/2019-January/122363.html
I think you approved the patch here: http://mails.dpdk.org/archives/dev/2019-January/123238.html
I think this patch just needs your reviewed-by tag :)
>
>
> > }
> >
> > /**
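To illustrate the type constraint mentioned above, here is a hedged
sketch (all names are illustrative): GCC defines __atomic_clear() only
for bool (or char) operands, while rte_spinlock_t holds an int, so the
patch spells the release out with __atomic_store_n() instead.

    #include <stdbool.h>

    static bool flag;   /* acceptable operand for __atomic_clear() */
    static int locked;  /* rte_spinlock_t uses 'volatile int locked' */

    void release_sketch(void)
    {
            __atomic_clear(&flag, __ATOMIC_RELEASE);  /* OK: bool */
            /* __atomic_clear(&locked, ...) would be rejected for an
             * int, hence the plain release store in the patch: */
            __atomic_store_n(&locked, 0, __ATOMIC_RELEASE);
    }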
> -----Original Message-----
> From: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
> Sent: Thursday, March 14, 2019 8:31 AM
> To: jerinj@marvell.com; Gavin Hu (Arm Technology China)
> <Gavin.Hu@arm.com>; dev@dpdk.org
> Cc: i.maximets@samsung.com; chaozhu@linux.vnet.ibm.com; nd
> <nd@arm.com>; Nipun.gupta@nxp.com; thomas@monjalon.net;
> hemant.agrawal@nxp.com; stable@dpdk.org; nd <nd@arm.com>
> Subject: RE: [EXT] [PATCH v8 3/3] spinlock: reimplement with atomic one-
> way barrier builtins
>
> > > ----------------------------------------------------------------------
> > > The __sync builtin based implementation generates full memory barriers
> > > ('dmb ish') on Arm platforms. Use the C11 atomic builtins instead to
> > > generate one-way barriers.
> > >
> > >
> > > lib/librte_eal/common/include/generic/rte_spinlock.h | 18
> > > +++++++++++++-----
> > > 1 file changed, 13 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/lib/librte_eal/common/include/generic/rte_spinlock.h
> > > b/lib/librte_eal/common/include/generic/rte_spinlock.h
> > > index c4c3fc3..87ae7a4 100644
> > > --- a/lib/librte_eal/common/include/generic/rte_spinlock.h
> > > +++ b/lib/librte_eal/common/include/generic/rte_spinlock.h
> > > @@ -61,9 +61,14 @@ rte_spinlock_lock(rte_spinlock_t *sl);
> > > static inline void
> > > rte_spinlock_lock(rte_spinlock_t *sl)
> > > {
> > > - while (__sync_lock_test_and_set(&sl->locked, 1))
> > > - while(sl->locked)
> > > + int exp = 0;
> > > +
> > > + while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
> > > + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
> >
> > Would it be cleaner to use __atomic_test_and_set() to avoid the explicit exp = 0?
> We addressed it here: http://mails.dpdk.org/archives/dev/2019-January/122363.html
__atomic_test_and_set caused a 10x performance degradation in our
micro-benchmarking on ThunderX2. Here it is explained why:
http://mails.dpdk.org/archives/dev/2019-January/123340.html
(see also the short illustrative sketch after this comment)
>
> >
> >
> > > + while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
> > > rte_pause();
> > > + exp = 0;
> > > + }
> > > }
> > > #endif
> > >
> > > @@ -80,7 +85,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl);
> > > static inline void
> > > rte_spinlock_unlock (rte_spinlock_t *sl)
> > > {
> > > - __sync_lock_release(&sl->locked);
> > > + __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
> >
> > __atomic_clear(.., __ATOMIC_RELEASE) looks cleaner to me.
> This needs the operand to be of type bool.
>
> >
> > > }
> > > #endif
> > >
> > > @@ -99,7 +104,10 @@ rte_spinlock_trylock (rte_spinlock_t *sl);
> > > static inline int
> > > rte_spinlock_trylock (rte_spinlock_t *sl)
> > > {
> > > - return __sync_lock_test_and_set(&sl->locked,1) == 0;
> > > + int exp = 0;
> > > + return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
> > > + 0, /* disallow spurious failure */
> > > + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
> >
> > return (__atomic_test_and_set(.., __ATOMIC_ACQUIRE) == 0) would be a
> > cleaner version.
> >
> > > }
> > > #endif
> > >
> > > @@ -113,7 +121,7 @@ rte_spinlock_trylock (rte_spinlock_t *sl)
> > > */
> > > static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
> > > {
> > > - return sl->locked;
> > > + return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
> >
> > Would __ATOMIC_RELAXED be sufficient?
> This is also addressed here: http://mails.dpdk.org/archives/dev/2019-January/122363.html
>
> I think you approved the patch here:
> http://mails.dpdk.org/archives/dev/2019-January/123238.html
> I think this patch just needs your reviewed-by tag :)
>
> >
> >
> > > }
> > >
> > > /**
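A hedged sketch of the contention point above (a general illustration
under the assumption that unconditional stores are the culprit, not the
measured ThunderX2 analysis from the linked thread; all names are
illustrative):

    #include <stdbool.h>

    static bool locked_flag;
    static int locked_word;

    void contention_sketch(void)
    {
            /* __atomic_test_and_set() is an unconditional swap: it
             * stores 'true' on every attempt, even when the lock is
             * already taken, so each spinning core claims the cache
             * line for writing. */
            (void)__atomic_test_and_set(&locked_flag, __ATOMIC_ACQUIRE);

            /* The merged compare-exchange stores only when the lock
             * word still reads 0; a failed attempt performs no store
             * at all. */
            int exp = 0;
            (void)__atomic_compare_exchange_n(&locked_word, &exp, 1, 0,
                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    }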
On Fri, Mar 08, 2019 at 03:56:37PM +0800, Gavin Hu wrote:
>
> ----------------------------------------------------------------------
> The __sync builtin based implementation generates full memory barriers
> ('dmb ish') on Arm platforms. Use the C11 atomic builtins instead to
> generate one-way barriers.
>
> Here is the assembly code of __sync_compare_and_swap builtin.
> __sync_bool_compare_and_swap(dst, exp, src);
> 0x000000000090f1b0 <+16>: e0 07 40 f9 ldr x0, [sp, #8]
> 0x000000000090f1b4 <+20>: e1 0f 40 79 ldrh w1, [sp, #6]
> 0x000000000090f1b8 <+24>: e2 0b 40 79 ldrh w2, [sp, #4]
> 0x000000000090f1bc <+28>: 21 3c 00 12 and w1, w1, #0xffff
> 0x000000000090f1c0 <+32>: 03 7c 5f 48 ldxrh w3, [x0]
> 0x000000000090f1c4 <+36>: 7f 00 01 6b cmp w3, w1
> 0x000000000090f1c8 <+40>: 61 00 00 54 b.ne 0x90f1d4 <rte_atomic16_cmpset+52> // b.any
> 0x000000000090f1cc <+44>: 02 fc 04 48 stlxrh w4, w2, [x0]
> 0x000000000090f1d0 <+48>: 84 ff ff 35 cbnz w4, 0x90f1c0 <rte_atomic16_cmpset+32>
> 0x000000000090f1d4 <+52>: bf 3b 03 d5 dmb ish
> 0x000000000090f1d8 <+56>: e0 17 9f 1a cset w0, eq // eq = none
>
> The benchmarking results showed consistent improvements on all available
> platforms:
> 1. Cavium ThunderX2: 126% performance improvement;
> 2. Hisilicon 1616: 30%;
> 3. Qualcomm Falkor: 13%;
> 4. Marvell ARMADA 8040 with A72 cores on macchiatobin: 3.7%.
>
> Here is the example test result on TX2:
> $ sudo ./build/app/test -l 16-27 -- -i
> RTE>>spinlock_autotest
>
> *** spinlock_autotest without this patch ***
> Test with lock on 12 cores...
> Core [16] Cost Time = 53886 us
> Core [17] Cost Time = 53605 us
> Core [18] Cost Time = 53163 us
> Core [19] Cost Time = 49419 us
> Core [20] Cost Time = 34317 us
> Core [21] Cost Time = 53408 us
> Core [22] Cost Time = 53970 us
> Core [23] Cost Time = 53930 us
> Core [24] Cost Time = 53283 us
> Core [25] Cost Time = 51504 us
> Core [26] Cost Time = 50718 us
> Core [27] Cost Time = 51730 us
> Total Cost Time = 612933 us
>
> *** spinlock_autotest with this patch ***
> Test with lock on 12 cores...
> Core [16] Cost Time = 18808 us
> Core [17] Cost Time = 29497 us
> Core [18] Cost Time = 29132 us
> Core [19] Cost Time = 26150 us
> Core [20] Cost Time = 21892 us
> Core [21] Cost Time = 24377 us
> Core [22] Cost Time = 27211 us
> Core [23] Cost Time = 11070 us
> Core [24] Cost Time = 29802 us
> Core [25] Cost Time = 15793 us
> Core [26] Cost Time = 7474 us
> Core [27] Cost Time = 29550 us
> Total Cost Time = 270756 us
>
> In the tests on ThunderX2, with more cores contending, the performance gain
> was even higher, indicating that the __atomic implementation scales better
> than __sync.
>
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
>
> Signed-off-by: Gavin Hu <gavin.hu@arm.com>
> Reviewed-by: Phil Yang <phil.yang@arm.com>
> Reviewed-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
> Reviewed-by: Ola Liljedahl <Ola.Liljedahl@arm.com>
> Reviewed-by: Steve Capper <Steve.Capper@arm.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
@@ -61,9 +61,14 @@ rte_spinlock_lock(rte_spinlock_t *sl);
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
- while (__sync_lock_test_and_set(&sl->locked, 1))
- while(sl->locked)
+ int exp = 0;
+
+ while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
rte_pause();
+ exp = 0;
+ }
}
#endif
@@ -80,7 +85,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl);
static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
{
- __sync_lock_release(&sl->locked);
+ __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
}
#endif
@@ -99,7 +104,10 @@ rte_spinlock_trylock (rte_spinlock_t *sl);
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
{
- return __sync_lock_test_and_set(&sl->locked,1) == 0;
+ int exp = 0;
+ return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
+ 0, /* disallow spurious failure */
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
#endif
@@ -113,7 +121,7 @@ rte_spinlock_trylock (rte_spinlock_t *sl)
*/
static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
{
- return sl->locked;
+ return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
}
/**
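For completeness, a short usage sketch of the (unchanged) public
spinlock API that this patch reimplements; thread setup and error
handling are omitted:

    #include <rte_spinlock.h>

    static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;

    static void
    critical_section(void)
    {
            rte_spinlock_lock(&sl);     /* acquire: one-way barrier */
            /* ... update shared state ... */
            rte_spinlock_unlock(&sl);   /* release: one-way barrier */

            if (rte_spinlock_trylock(&sl)) {  /* non-blocking attempt */
                    /* ... */
                    rte_spinlock_unlock(&sl);
            }
    }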