[v1,02/12] test/ring_perf: use compiler atomic builtins for lcores sync
Checks
Commit Message
Convert rte_atomic usages to compiler atomic built-ins
for lcores sync in ring_perf test cases.
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
app/test/test_ring_perf.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
Comments
<snip>
>
> Convert rte_atomic usages to compiler atomic built-ins for lcores sync in
> ring_perf test cases.
>
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Looks good.
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> ---
> app/test/test_ring_perf.c | 9 ++++-----
> 1 file changed, 4 insertions(+), 5 deletions(-)
>
> diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c index
> fd82e20412..2d8bb675a3 100644
> --- a/app/test/test_ring_perf.c
> +++ b/app/test/test_ring_perf.c
> @@ -320,7 +320,7 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r, const int esize)
> return 0;
> }
>
> -static rte_atomic32_t synchro;
> +static uint32_t synchro;
> static uint64_t queue_count[RTE_MAX_LCORE];
>
> #define TIME_MS 100
> @@ -342,8 +342,7 @@ load_loop_fn_helper(struct thread_params *p, const int esize)
>
> /* wait synchro for workers */
> if (lcore != rte_get_main_lcore())
> - while (rte_atomic32_read(&synchro) == 0)
> - rte_pause();
> + rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
>
> begin = rte_get_timer_cycles();
> while (time_diff < hz * TIME_MS / 1000) {
> @@ -398,12 +397,12 @@ run_on_all_cores(struct rte_ring *r, const int esize)
> param.r = r;
>
> /* clear synchro and start workers */
> - rte_atomic32_set(&synchro, 0);
> + __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
> if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
> return -1;
>
> /* start synchro and launch test on main */
> - rte_atomic32_set(&synchro, 1);
> + __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
> lcore_f(&param);
>
> rte_eal_mp_wait_lcore();
> --
> 2.17.1
@@ -320,7 +320,7 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r, const int esize)
return 0;
}
-static rte_atomic32_t synchro;
+static uint32_t synchro;
static uint64_t queue_count[RTE_MAX_LCORE];
#define TIME_MS 100
@@ -342,8 +342,7 @@ load_loop_fn_helper(struct thread_params *p, const int esize)
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- while (rte_atomic32_read(&synchro) == 0)
- rte_pause();
+ rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
begin = rte_get_timer_cycles();
while (time_diff < hz * TIME_MS / 1000) {
@@ -398,12 +397,12 @@ run_on_all_cores(struct rte_ring *r, const int esize)
param.r = r;
/* clear synchro and start workers */
- rte_atomic32_set(&synchro, 0);
+ __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- rte_atomic32_set(&synchro, 1);
+ __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
lcore_f(&param);
rte_eal_mp_wait_lcore();