[v2,4/9] eal: introduce thread uninit helper
Checks
Commit Message
This is a preparation step for dynamically unregistering threads.
Since we explicitly allocate a per-thread trace buffer in
rte_thread_init, add an internal helper to free this buffer.
Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Note: I preferred renaming the current internal function, which frees
the trace buffers of all threads (new name: trace_mem_free()), and
reusing the previous name (trace_mem_per_thread_free()) for the new
function that frees the buffer of a single given thread.
Changes since v1:
- rebased on master; removed the Windows workaround regarding traces support.
---
lib/librte_eal/common/eal_common_thread.c | 9 +++++
lib/librte_eal/common/eal_common_trace.c | 49 +++++++++++++++++++----
lib/librte_eal/common/eal_thread.h | 5 +++
lib/librte_eal/common/eal_trace.h | 1 +
4 files changed, 57 insertions(+), 7 deletions(-)
@@ -20,6 +20,7 @@
#include "eal_internal_cfg.h"
#include "eal_private.h"
#include "eal_thread.h"
+#include "eal_trace.h"
RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
@@ -161,6 +162,14 @@ rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
__rte_trace_mem_per_thread_alloc();
}
+void
+rte_thread_uninit(void)
+{
+ trace_mem_per_thread_free();
+
+ RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY;
+}
+
struct rte_thread_ctrl_params {
void *(*start_routine)(void *);
void *arg;
@@ -101,7 +101,7 @@ eal_trace_fini(void)
{
if (!rte_trace_is_enabled())
return;
- trace_mem_per_thread_free();
+ trace_mem_free();
trace_metadata_destroy();
eal_trace_args_free();
}
@@ -370,24 +370,59 @@ __rte_trace_mem_per_thread_alloc(void)
rte_spinlock_unlock(&trace->lock);
}
+static void
+trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
+{
+ if (meta->area == TRACE_AREA_HUGEPAGE)
+ eal_free_no_trace(meta->mem);
+ else if (meta->area == TRACE_AREA_HEAP)
+ free(meta->mem);
+}
+
void
trace_mem_per_thread_free(void)
+{
+ struct trace *trace = trace_obj_get();
+ struct __rte_trace_header *header;
+ uint32_t count;
+
+ if (RTE_PER_LCORE(trace_mem) == NULL)
+ return;
+
+ header = RTE_PER_LCORE(trace_mem);
+ rte_spinlock_lock(&trace->lock);
+ for (count = 0; count < trace->nb_trace_mem_list; count++) {
+ if (trace->lcore_meta[count].mem == header)
+ break;
+ }
+ if (count != trace->nb_trace_mem_list) {
+ struct thread_mem_meta *meta = &trace->lcore_meta[count];
+
+ trace_mem_per_thread_free_unlocked(meta);
+ if (count != trace->nb_trace_mem_list - 1) {
+ memmove(meta, meta + 1,
+ sizeof(*meta) *
+ (trace->nb_trace_mem_list - count - 1));
+ }
+ trace->nb_trace_mem_list--;
+ }
+ rte_spinlock_unlock(&trace->lock);
+}
+
+void
+trace_mem_free(void)
{
struct trace *trace = trace_obj_get();
uint32_t count;
- void *mem;
if (!rte_trace_is_enabled())
return;
rte_spinlock_lock(&trace->lock);
for (count = 0; count < trace->nb_trace_mem_list; count++) {
- mem = trace->lcore_meta[count].mem;
- if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
- eal_free_no_trace(mem);
- else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
- free(mem);
+ trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
}
+ trace->nb_trace_mem_list = 0;
rte_spinlock_unlock(&trace->lock);
}
@@ -25,6 +25,11 @@ __rte_noreturn void *eal_thread_loop(void *arg);
*/
void rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset);
+/**
+ * Uninitialize per-lcore info for current thread.
+ */
+void rte_thread_uninit(void);
+
/**
* Get the NUMA socket id from cpu id.
* This function is private to EAL.
@@ -106,6 +106,7 @@ int trace_metadata_create(void);
void trace_metadata_destroy(void);
int trace_mkdir(void);
int trace_epoch_time_save(void);
+void trace_mem_free(void);
void trace_mem_per_thread_free(void);
/* EAL interface */