[v1,3/9] test/bbdev: fix interrupt tests

Message ID 20240422190800.123027-4-hernan.vargas@intel.com (mailing list archive)
State New
Delegated to: Maxime Coquelin
Series test-bbdev fixes and improvements for 24.07

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Hernan Vargas April 22, 2024, 7:07 p.m. UTC
  Fix a possible race when setting the burst size from the enqueue
thread: the burst size must be stored before the operations are
enqueued, otherwise the dequeue callback triggered by the completed
batch can read a stale value and dequeue the wrong number of
descriptors. Move the __atomic_store_n() of tp->burst_sz ahead of the
enqueue loop in all interrupt test functions.
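
The reordering applied in each function boils down to the following
self-contained C sketch (illustrative only; burst_sz and enqueue_ops()
are hypothetical stand-ins for tp->burst_sz and the
rte_bbdev_enqueue_*_ops() calls):

	#include <stdint.h>

	static uint16_t burst_sz;  /* read by the dequeue interrupt callback */

	/* Hypothetical stub standing in for rte_bbdev_enqueue_*_ops(). */
	static uint16_t enqueue_ops(uint16_t num) { return num; }

	static void
	enqueue_batch(uint16_t num_to_enq)
	{
		uint16_t enq = 0;

		/* Publish the batch size first: the completion interrupt
		 * can fire any time after the first enqueue call and reads
		 * burst_sz to size its dequeue. Storing it after the
		 * enqueue loop (the old order) races with that callback.
		 */
		__atomic_store_n(&burst_sz, num_to_enq, __ATOMIC_RELAXED);

		do {
			enq += enqueue_ops(num_to_enq - enq);
		} while (enq != num_to_enq);
	}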

Fixes: b2e2aec3239e ("app/bbdev: enhance interrupt test")
Cc: stable@dpdk.org

Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
---
 app/test-bbdev/test_bbdev_perf.c | 98 ++++++++++++++++----------------
 1 file changed, 49 insertions(+), 49 deletions(-)
  

Patch

diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 9ed0c4648d24..28d78e73a9c1 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -3406,15 +3406,6 @@  throughput_intr_lcore_ldpc_dec(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_ldpc_dec_ops(
-						tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(num_to_enq != enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3424,6 +3415,15 @@  throughput_intr_lcore_ldpc_dec(void *arg)
 			 */
 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_ldpc_dec_ops(
+						tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(num_to_enq != enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3498,14 +3498,6 @@  throughput_intr_lcore_dec(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(num_to_enq != enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3515,6 +3507,14 @@  throughput_intr_lcore_dec(void *arg)
 			 */
 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(num_to_enq != enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3584,14 +3584,6 @@  throughput_intr_lcore_enc(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3601,6 +3593,14 @@  throughput_intr_lcore_enc(void *arg)
 			 */
 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3672,15 +3672,6 @@  throughput_intr_lcore_ldpc_enc(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_ldpc_enc_ops(
-						tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3690,6 +3681,15 @@  throughput_intr_lcore_ldpc_enc(void *arg)
 			 */
 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_ldpc_enc_ops(
+						tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3761,14 +3761,6 @@  throughput_intr_lcore_fft(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3778,6 +3770,14 @@  throughput_intr_lcore_fft(void *arg)
 			 */
 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3844,13 +3844,6 @@  throughput_intr_lcore_mldts(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
-						queue_id, &ops[enqueued], num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3860,6 +3853,13 @@  throughput_intr_lcore_mldts(void *arg)
 			 */
 			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
+						queue_id, &ops[enqueued], num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
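
Continuing the sketch above, the consumer side that makes the ordering
matter looks roughly like this (again with hypothetical names; the real
reader is the interrupt callback registered by the test, which is not
part of this diff):

	/* Hypothetical stub standing in for rte_bbdev_dequeue_*_ops(). */
	static uint16_t dequeue_ops(uint16_t num) { return num; }

	/* Runs once the device signals completion of the enqueued batch. */
	static void
	on_batch_done(void)
	{
		uint16_t num = __atomic_load_n(&burst_sz, __ATOMIC_RELAXED);

		/* With the fixed ordering this matches the batch that was
		 * just enqueued; with the old ordering it could still hold
		 * the previous batch size.
		 */
		dequeue_ops(num);
	}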