[v5,11/15] test/distributor: replace delays with spin locks

Message ID 20201008052323.11547-12-l.wojciechow@partner.samsung.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Series: fix distributor synchronization issues

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Lukasz Wojciechowski Oct. 8, 2020, 5:23 a.m. UTC
  Instead of inserting delays in the test code and hoping
that the worker reaches the proper state,
synchronize the worker shutdown test cases with a spin lock
on an atomic variable.

Fixes: c0de0eb82e40 ("distributor: switch over to new API")
Cc: david.hunt@intel.com
Cc: stable@dpdk.org

Signed-off-by: Lukasz Wojciechowski <l.wojciechow@partner.samsung.com>
---
 app/test/test_distributor.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)
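
For illustration only (not part of the patch): a minimal, self-contained sketch of the release/acquire handshake the patch relies on. The thread setup and names below are assumptions made for the example; only the __atomic_store_n()/__atomic_load_n() spin pattern mirrors what the test code does.

/*
 * Standalone illustration of the handshake: the worker publishes
 * "I am parked" with a release store, and the controller spins on
 * acquire loads instead of sleeping for a fixed, hopeful delay.
 * All identifiers here are illustrative, not DPDK test symbols.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile int zero_quit = 1;  /* controller asks the worker to park */
static volatile int zero_sleep;     /* worker reports that it is parked   */

static void *
worker_zero(void *arg)
{
	(void)arg;
	/* Tell the controller we reached the sleep loop. */
	__atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
	while (zero_quit)
		usleep(100);
	/* Tell the controller we left the sleep loop. */
	__atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker_zero, NULL);

	/* Spin until the worker confirms it is sleeping. */
	while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
		usleep(100);

	/* Release the worker and wait for it to confirm it woke up. */
	zero_quit = 0;
	while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
		usleep(100);

	pthread_join(t, NULL);
	printf("handshake complete\n");
	return 0;
}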
  

Comments

Hunt, David Oct. 9, 2020, 12:23 p.m. UTC | #1
On 8/10/2020 6:23 AM, Lukasz Wojciechowski wrote:
> Instead of inserting delays in the test code and hoping
> that the worker reaches the proper state,
> synchronize the worker shutdown test cases with a spin lock
> on an atomic variable.
>
> Fixes: c0de0eb82e40 ("distributor: switch over to new API")
> Cc: david.hunt@intel.com
> Cc: stable@dpdk.org
>
> Signed-off-by: Lukasz Wojciechowski <l.wojciechow@partner.samsung.com>
> ---
>   app/test/test_distributor.c | 19 +++++++++++++++++--
>   1 file changed, 17 insertions(+), 2 deletions(-)
>
> diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
> index 838a67515..1e0a079ff 100644
> --- a/app/test/test_distributor.c
> +++ b/app/test/test_distributor.c
> @@ -27,6 +27,7 @@ struct worker_params worker_params;
>   /* statics - all zero-initialized by default */
>   static volatile int quit;      /**< general quit variable for all threads */
>   static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
> +static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
>   static volatile unsigned worker_idx;
>   static volatile unsigned zero_idx;
>   
> @@ -376,8 +377,10 @@ handle_work_for_shutdown_test(void *arg)
>   		/* for worker zero, allow it to restart to pick up last packet
>   		 * when all workers are shutting down.
>   		 */
> +		__atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
>   		while (zero_quit)
>   			usleep(100);
> +		__atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
>   
>   		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
>   
> @@ -445,7 +448,12 @@ sanity_test_with_worker_shutdown(struct worker_params *wp,
>   
>   	/* flush the distributor */
>   	rte_distributor_flush(d);
> -	rte_delay_us(10000);
> +	while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
> +		rte_distributor_flush(d);
> +
> +	zero_quit = 0;
> +	while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
> +		rte_delay_us(100);
>   
>   	for (i = 0; i < rte_lcore_count() - 1; i++)
>   		printf("Worker %u handled %u packets\n", i,
> @@ -505,9 +513,14 @@ test_flush_with_worker_shutdown(struct worker_params *wp,
>   	/* flush the distributor */
>   	rte_distributor_flush(d);
>   
> -	rte_delay_us(10000);
> +	while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
> +		rte_distributor_flush(d);
>   
>   	zero_quit = 0;
> +
> +	while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
> +		rte_delay_us(100);
> +
>   	for (i = 0; i < rte_lcore_count() - 1; i++)
>   		printf("Worker %u handled %u packets\n", i,
>   			__atomic_load_n(&worker_stats[i].handled_packets,
> @@ -615,6 +628,8 @@ quit_workers(struct worker_params *wp, struct rte_mempool *p)
>   	quit = 0;
>   	worker_idx = 0;
>   	zero_idx = RTE_MAX_LCORE;
> +	zero_quit = 0;
> +	zero_sleep = 0;
>   }
>   
>   static int

Acked-by: David Hunt <david.hunt@intel.com>
  

Patch

diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index 838a67515..1e0a079ff 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -27,6 +27,7 @@  struct worker_params worker_params;
 /* statics - all zero-initialized by default */
 static volatile int quit;      /**< general quit variable for all threads */
 static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
 static volatile unsigned worker_idx;
 static volatile unsigned zero_idx;
 
@@ -376,8 +377,10 @@  handle_work_for_shutdown_test(void *arg)
 		/* for worker zero, allow it to restart to pick up last packet
 		 * when all workers are shutting down.
 		 */
+		__atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
 		while (zero_quit)
 			usleep(100);
+		__atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
 
 		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
 
@@ -445,7 +448,12 @@  sanity_test_with_worker_shutdown(struct worker_params *wp,
 
 	/* flush the distributor */
 	rte_distributor_flush(d);
-	rte_delay_us(10000);
+	while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_distributor_flush(d);
+
+	zero_quit = 0;
+	while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_delay_us(100);
 
 	for (i = 0; i < rte_lcore_count() - 1; i++)
 		printf("Worker %u handled %u packets\n", i,
@@ -505,9 +513,14 @@  test_flush_with_worker_shutdown(struct worker_params *wp,
 	/* flush the distributor */
 	rte_distributor_flush(d);
 
-	rte_delay_us(10000);
+	while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_distributor_flush(d);
 
 	zero_quit = 0;
+
+	while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_delay_us(100);
+
 	for (i = 0; i < rte_lcore_count() - 1; i++)
 		printf("Worker %u handled %u packets\n", i,
 			__atomic_load_n(&worker_stats[i].handled_packets,
@@ -615,6 +628,8 @@  quit_workers(struct worker_params *wp, struct rte_mempool *p)
 	quit = 0;
 	worker_idx = 0;
 	zero_idx = RTE_MAX_LCORE;
+	zero_quit = 0;
+	zero_sleep = 0;
 }
 
 static int