[v17,5/8] random: keep PRNG state in lcore variable

Message ID 20241025084149.873037-6-mattias.ronnblom@ericsson.com (mailing list archive)
State Accepted, archived
Delegated to: Thomas Monjalon
Series: Lcore variables

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Mattias Rönnblom Oct. 25, 2024, 8:41 a.m. UTC
Replace the RTE_MAX_LCORE-sized static array of cache-aligned and
RTE_CACHE_GUARDed struct instances used to hold PRNG state with an
equivalent, more cache-friendly lcore variable.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
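
[Editor's sketch, not part of the patch.] For readers not following the
rest of the series: the patch below relies on the lcore variable API
added earlier in this patch set. The following is a minimal sketch of the
pattern, using only the macros that appear in the diff; the struct and
function names are hypothetical, and the exact semantics are defined by
rte_lcore_var.h.

#include <stdint.h>

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_lcore_var.h>

struct foo_state {
	uint64_t counter;
};

/* Handle only; the per-lcore instances are allocated at init time. */
RTE_LCORE_VAR_HANDLE(struct foo_state, foo);

RTE_INIT(foo_init)
{
	/* Allocate one instance per possible lcore id. */
	RTE_LCORE_VAR_ALLOC(foo);
}

static void
foo_bump(void)
{
	/* Access the calling registered thread's own instance. */
	struct foo_state *state = RTE_LCORE_VAR(foo);

	state->counter++;
}

static uint64_t
foo_sum(void)
{
	uint64_t sum = 0;
	unsigned int lcore_id;

	/* Any lcore's instance may be reached by id, e.g., for
	 * aggregation here, or for seeding as rte_srand() does below.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		sum += RTE_LCORE_VAR_LCORE(lcore_id, foo)->counter;

	return sum;
}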

--

RFC v3:
 * Remove cache alignment on unregistered threads' rte_rand_state.
   (Morten Brørup)
---
 lib/eal/common/rte_random.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
  

Patch

diff --git a/lib/eal/common/rte_random.c b/lib/eal/common/rte_random.c
index 90e91b3c4f..cf0756f26a 100644
--- a/lib/eal/common/rte_random.c
+++ b/lib/eal/common/rte_random.c
@@ -11,6 +11,7 @@ 
 #include <rte_branch_prediction.h>
 #include <rte_cycles.h>
 #include <rte_lcore.h>
+#include <rte_lcore_var.h>
 #include <rte_random.h>
 
 struct __rte_cache_aligned rte_rand_state {
@@ -19,14 +20,12 @@  struct __rte_cache_aligned rte_rand_state {
 	uint64_t z3;
 	uint64_t z4;
 	uint64_t z5;
-	RTE_CACHE_GUARD;
 };
 
-/* One instance each for every lcore id-equipped thread, and one
- * additional instance to be shared by all others threads (i.e., all
- * unregistered non-EAL threads).
- */
-static struct rte_rand_state rand_states[RTE_MAX_LCORE + 1];
+RTE_LCORE_VAR_HANDLE(struct rte_rand_state, rand_state);
+
+/* instance to be shared by all unregistered non-EAL threads */
+static struct rte_rand_state unregistered_rand_state;
 
 static uint32_t
 __rte_rand_lcg32(uint32_t *seed)
@@ -85,8 +84,14 @@  rte_srand(uint64_t seed)
 	unsigned int lcore_id;
 
 	/* add lcore_id to seed to avoid having the same sequence */
-	for (lcore_id = 0; lcore_id < RTE_DIM(rand_states); lcore_id++)
-		__rte_srand_lfsr258(seed + lcore_id, &rand_states[lcore_id]);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		struct rte_rand_state *lcore_state =
+			RTE_LCORE_VAR_LCORE(lcore_id, rand_state);
+
+		__rte_srand_lfsr258(seed + lcore_id, lcore_state);
+	}
+
+	__rte_srand_lfsr258(seed + lcore_id, &unregistered_rand_state);
 }
 
 static __rte_always_inline uint64_t
@@ -124,11 +129,10 @@  struct rte_rand_state *__rte_rand_get_state(void)
 
 	idx = rte_lcore_id();
 
-	/* last instance reserved for unregistered non-EAL threads */
 	if (unlikely(idx == LCORE_ID_ANY))
-		idx = RTE_MAX_LCORE;
+		return &unregistered_rand_state;
 
-	return &rand_states[idx];
+	return RTE_LCORE_VAR(rand_state);
 }
 
 uint64_t
@@ -228,6 +232,8 @@  RTE_INIT(rte_rand_init)
 {
 	uint64_t seed;
 
+	RTE_LCORE_VAR_ALLOC(rand_state);
+
 	seed = __rte_random_initial_seed();
 
 	rte_srand(seed);
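
[Editor's note.] In the rte_srand() hunk above, the final call that seeds
unregistered_rand_state relies on lcore_id holding the value
RTE_MAX_LCORE once the loop exits, so the shared instance gets the seed
seed + RTE_MAX_LCORE, distinct from every per-lcore seed. The sketch
below is a hypothetical caller exercising the registered-thread path in
__rte_rand_get_state(); an unregistered non-EAL thread (rte_lcore_id() ==
LCORE_ID_ANY) would instead fall back to the shared
unregistered_rand_state instance.

#include <inttypes.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_random.h>

static int
lcore_fn(void *arg)
{
	(void)arg;

	/* Registered EAL thread: rte_rand() resolves to this lcore's
	 * rand_state instance via RTE_LCORE_VAR().
	 */
	printf("lcore %u: %" PRIu64 "\n", rte_lcore_id(), rte_rand());

	return 0;
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Reseed all per-lcore states plus the shared unregistered one. */
	rte_srand(42);

	rte_eal_mp_remote_launch(lcore_fn, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();

	return rte_eal_cleanup();
}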