The spinlock implementation is unfair: some threads may take locks
aggressively while leaving other threads starving for a long time.
This patch introduces a ticketlock, which gives each waiting thread a
ticket so threads take the lock one by one - first come, first served.
This avoids prolonged starvation and makes lock acquisition more predictable.
Suggested-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
MAINTAINERS | 4 +
doc/api/doxy-api-index.md | 1 +
lib/librte_eal/common/Makefile | 2 +-
.../common/include/generic/rte_ticketlock.h | 201 +++++++++++++++++++++
lib/librte_eal/common/meson.build | 1 +
5 files changed, 208 insertions(+), 1 deletion(-)
create mode 100644 lib/librte_eal/common/include/generic/rte_ticketlock.h
@@ -212,6 +212,10 @@ M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: lib/librte_eal/common/include/rte_bitmap.h
F: test/test/test_bitmap.c
+Ticketlock
+M: Joyce Kong <joyce.kong@arm.com>
+F: lib/librte_eal/common/include/generic/rte_ticketlock.h
+
ARM v7
M: Jan Viktorin <viktorin@rehivetech.com>
M: Gavin Hu <gavin.hu@arm.com>
@@ -65,6 +65,7 @@ The public API headers are grouped by topics:
[atomic] (@ref rte_atomic.h),
[rwlock] (@ref rte_rwlock.h),
-[spinlock] (@ref rte_spinlock.h)
+[spinlock] (@ref rte_spinlock.h),
+ [ticketlock] (@ref rte_ticketlock.h)
- **CPU arch**:
[branch prediction] (@ref rte_branch_prediction.h),
@@ -20,7 +20,7 @@ INC += rte_bitmap.h rte_vfio.h rte_hypervisor.h rte_test.h
INC += rte_reciprocal.h rte_fbarray.h rte_uuid.h
GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
-GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
+GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h rte_ticketlock.h
GENERIC_INC += rte_vect.h rte_pause.h rte_io.h
# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
new file mode 100644
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Arm Limited
+ */
+
+#ifndef _RTE_TICKETLOCK_H_
+#define _RTE_TICKETLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE ticket locks
+ *
+ * This file defines an API for ticket locks, which give each waiting
+ * thread a ticket and grant the lock in ticket order: first come,
+ * first served.
+ *
+ * All locks must be initialised before use, and only initialised once.
+ *
+ */
+
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_pause.h>
+
+/**
+ * The rte_ticketlock_t type.
+ */
+typedef struct {
+ unsigned int current; /**< ticket now being served (owner's ticket) */
+ unsigned int next; /**< next ticket handed to an arriving locker */
+} rte_ticketlock_t;
+
+/**
+ * A static ticketlock initializer: {0} zero-fills both tickets (unlocked).
+ */
+#define RTE_TICKETLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the ticketlock to an unlocked state (both tickets zero).
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ */
+static inline __rte_experimental void
+rte_ticketlock_init(rte_ticketlock_t *tl)
+{
+ __atomic_store_n(&tl->current, 0, __ATOMIC_RELAXED); /* relaxed: lock not yet shared */
+ __atomic_store_n(&tl->next, 0, __ATOMIC_RELAXED);
+}
+
+/**
+ * Take the ticketlock (blocking; waiters are served in ticket order).
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ */
+static inline __rte_experimental void
+rte_ticketlock_lock(rte_ticketlock_t *tl)
+{
+ unsigned int me = __atomic_fetch_add(&tl->next, 1, __ATOMIC_RELAXED); /* take a ticket; ordering comes from the acquire load below */
+ while (__atomic_load_n(&tl->current, __ATOMIC_ACQUIRE) != me) /* acquire pairs with unlock's release store */
+ rte_pause();
+}
+
+/**
+ * Release the ticketlock, handing it to the next waiting ticket holder.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ */
+static inline __rte_experimental void
+rte_ticketlock_unlock(rte_ticketlock_t *tl)
+{
+ unsigned int i = __atomic_load_n(&tl->current, __ATOMIC_RELAXED); /* relaxed: only the lock holder writes current */
+ i++;
+ __atomic_store_n(&tl->current, i, __ATOMIC_RELEASE); /* release pairs with lock's acquire load */
+}
+
+/**
+ * Try to take the lock without blocking.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline __rte_experimental int
+rte_ticketlock_trylock(rte_ticketlock_t *tl)
+{
+ unsigned int next = __atomic_load_n(&tl->next, __ATOMIC_RELAXED);
+ unsigned int cur = __atomic_load_n(&tl->current, __ATOMIC_RELAXED);
+ if (next == cur) { /* no ticket outstanding: lock appears free */
+ if (__atomic_compare_exchange_n(&tl->next, &next, next+1,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) /* CAS fails if another thread took a ticket meanwhile */
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Test if the lock is taken.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ * @return
+ *   1 if the lock is currently taken; 0 otherwise.
+ */
+static inline __rte_experimental int
+rte_ticketlock_is_locked(rte_ticketlock_t *tl)
+{
+ return (__atomic_load_n(&tl->current, __ATOMIC_ACQUIRE) != /* tickets outstanding => locked */
+ __atomic_load_n(&tl->next, __ATOMIC_ACQUIRE));
+}
+
+/**
+ * The rte_ticketlock_recursive_t type: a ticketlock plus owner tracking.
+ */
+#define INVALID_THREAD_ID -1 /* no owner; NOTE(review): unprefixed macro name in a public header */
+
+typedef struct {
+ rte_ticketlock_t tl; /**< the actual ticketlock */
+ int user; /**< thread id (rte_gettid()) of owner, INVALID_THREAD_ID if unused */
+ unsigned int count; /**< nesting depth: times the owner has taken the lock */
+} rte_ticketlock_recursive_t;
+
+/**
+ * A static recursive ticketlock initializer (unlocked, no owner, depth 0).
+ */
+#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, -1, 0}
+
+/**
+ * Initialize the recursive ticketlock to an unlocked state.
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ */
+static inline __rte_experimental void rte_ticketlock_recursive_init(
+ rte_ticketlock_recursive_t *tlr)
+{
+ rte_ticketlock_init(&tlr->tl);
+ __atomic_store_n(&tlr->user, INVALID_THREAD_ID, __ATOMIC_RELAXED); /* no owner yet */
+ __atomic_store_n(&tlr->count, 0, __ATOMIC_RELAXED); /* nesting depth 0 */
+}
+
+/**
+ * Take the recursive ticketlock (re-entrant for the owning thread).
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ */
+static inline __rte_experimental void rte_ticketlock_recursive_lock(
+ rte_ticketlock_recursive_t *tlr)
+{
+ int id = rte_gettid();
+
+ if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
+ rte_ticketlock_lock(&tlr->tl);
+ __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); /* atomic store: racing threads read user above */
+ }
+ tlr->count++; /* owner-only access; no atomics needed */
+}
+
+/**
+ * Release the recursive ticketlock; the lock is freed only when the
+ * nesting count drops back to zero.
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ */
+static inline __rte_experimental void rte_ticketlock_recursive_unlock(
+ rte_ticketlock_recursive_t *tlr)
+{
+ if (--(tlr->count) == 0) {
+ __atomic_store_n(&tlr->user, INVALID_THREAD_ID, __ATOMIC_RELAXED); /* atomic: other threads read user concurrently */
+ rte_ticketlock_unlock(&tlr->tl);
+ }
+}
+
+/**
+ * Try to take the recursive lock without blocking; re-entrant for the
+ * owning thread.
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline __rte_experimental int rte_ticketlock_recursive_trylock(
+ rte_ticketlock_recursive_t *tlr)
+{
+ int id = rte_gettid();
+
+ if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
+ if (rte_ticketlock_trylock(&tlr->tl) == 0)
+ return 0; /* lock held by another thread */
+ __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); /* atomic store: racing threads read user above */
+ }
+ tlr->count++; /* owner-only access; no atomics needed */
+ return 1;
+}
+
+#endif /* _RTE_TICKETLOCK_H_ */
@@ -98,6 +98,7 @@ generic_headers = files(
'include/generic/rte_prefetch.h',
'include/generic/rte_rwlock.h',
'include/generic/rte_spinlock.h',
+ 'include/generic/rte_ticketlock.h',
'include/generic/rte_vect.h')
install_headers(generic_headers, subdir: 'generic')