From patchwork Thu Dec 11 02:04:44 2014
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Cunming Liang
X-Patchwork-Id: 1941
From: Cunming Liang
To: dev@dpdk.org
Date: Thu, 11 Dec 2014 10:04:44 +0800
Message-Id: <1418263490-21088-2-git-send-email-cunming.liang@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1418263490-21088-1-git-send-email-cunming.liang@intel.com>
References: <1418263490-21088-1-git-send-email-cunming.liang@intel.com>
Subject: [dpdk-dev] [RFC PATCH 1/7] eal: add linear thread id as pthread-local variable

Signed-off-by: Cunming Liang
---
 lib/librte_eal/common/include/rte_eal.h   |   5 ++
 lib/librte_eal/common/include/rte_lcore.h |  12 ++++
 lib/librte_eal/linuxapp/eal/eal_thread.c  | 115 ++++++++++++++++++++++++++++--
 3 files changed, 126 insertions(+), 6 deletions(-)

diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index f4ecd2e..2640167 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -262,6 +262,11 @@ rte_set_application_usage_hook( rte_usage_hook_t usage_func );
  */
 int rte_eal_has_hugepages(void);
 
+#ifndef RTE_MAX_THREAD
+#define RTE_MAX_THREAD RTE_MAX_LCORE
+#endif
+
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
index 49b2c03..cd83d47 100644
--- a/lib/librte_eal/common/include/rte_lcore.h
+++ b/lib/librte_eal/common/include/rte_lcore.h
@@ -73,6 +73,7 @@ struct lcore_config {
 extern struct lcore_config lcore_config[RTE_MAX_LCORE];
 
 RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per core "core id". */
+RTE_DECLARE_PER_LCORE(unsigned, _thread_id); /**< Per thread "linear tid". */
 
 /**
  * Return the ID of the execution unit we are running on.
@@ -86,6 +87,17 @@ rte_lcore_id(void)
 }
 
 /**
+ * Return the linear thread ID of the execution unit we are running on.
+ * @return
+ *   Linear thread ID
+ */
+static inline unsigned long
+rte_linear_thread_id(void)
+{
+	return RTE_PER_LCORE(_thread_id);
+}
+
+/**
  * Get the id of the master lcore
  *
  * @return
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index 80a985f..52478d6 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -39,6 +39,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -51,12 +52,19 @@
 #include
 #include
 #include
+#include
+#include
 
 #include "eal_private.h"
 #include "eal_thread.h"
 
+#define LINEAR_THREAD_ID_POOL	"THREAD_ID_POOL"
+
 RTE_DEFINE_PER_LCORE(unsigned, _lcore_id);
 
+/* define linear thread id as thread-local variable */
+RTE_DEFINE_PER_LCORE(unsigned, _thread_id);
+
 /*
  * Send a message to a slave lcore identified by slave_id to call a
  * function f with argument arg. Once the execution is done, the
@@ -94,12 +102,13 @@ rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
 	return 0;
 }
 
+
 /* set affinity for current thread */
 static int
-eal_thread_set_affinity(void)
+__eal_thread_set_affinity(pthread_t thread, unsigned lcore)
 {
+
 	int s;
-	pthread_t thread;
 
 	/*
 	 * According to the section VERSIONS of the CPU_ALLOC man page:
@@ -126,9 +135,8 @@ eal_thread_set_affinity(void)
 	size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
 	CPU_ZERO_S(size, cpusetp);
-	CPU_SET_S(rte_lcore_id(), size, cpusetp);
+	CPU_SET_S(lcore, size, cpusetp);
 
-	thread = pthread_self();
 	s = pthread_setaffinity_np(thread, size, cpusetp);
 	if (s != 0) {
 		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
@@ -140,9 +148,8 @@
 #else /* CPU_ALLOC */
 	cpu_set_t cpuset;
 	CPU_ZERO( &cpuset );
-	CPU_SET( rte_lcore_id(), &cpuset );
+	CPU_SET(lcore, &cpuset );
 
-	thread = pthread_self();
 	s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
 	if (s != 0) {
 		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
@@ -152,6 +159,15 @@
 	return 0;
 }
 
+/* set affinity for current thread */
+static int
+eal_thread_set_affinity(void)
+{
+	pthread_t thread = pthread_self();
+
+	return __eal_thread_set_affinity(thread, rte_lcore_id());
+}
+
 void eal_thread_init_master(unsigned lcore_id)
 {
 	/* set the lcore ID in per-lcore memory area */
@@ -162,6 +178,87 @@ void eal_thread_init_master(unsigned lcore_id)
 		rte_panic("cannot set affinity\n");
 }
 
+/* linear thread id control block */
+struct eal_thread_cb {
+	rte_spinlock_t lock;
+	uint64_t nb_bucket;
+	uint64_t bitmap[0];
+};
+
+static struct eal_thread_cb *
+__create_tid_pool(void)
+{
+	const struct rte_memzone *mz;
+	struct eal_thread_cb *pcb;
+	uint64_t sz;
+	uint64_t nb_bucket;
+
+	nb_bucket = RTE_ALIGN_CEIL(RTE_MAX_THREAD, 64) / 64;
+	sz = sizeof(*pcb) + nb_bucket * sizeof(uint64_t);
+	mz = rte_memzone_reserve(LINEAR_THREAD_ID_POOL,
+				 sz, rte_socket_id(), 0);
+	if (mz == NULL)
+		rte_panic("Cannot allocate linear thread ID pool\n");
+
+	pcb = mz->addr;
+	rte_spinlock_init(&pcb->lock);
+	pcb->nb_bucket = nb_bucket;
+	memset(pcb->bitmap, 0, nb_bucket * sizeof(uint64_t));
+
+	return pcb;
+}
+
+static int
+__get_linear_tid(uint64_t *tid)
+{
+	const struct rte_memzone *mz;
+	struct eal_thread_cb *pcb;
+	uint64_t i;
+	uint8_t shift = 0;
+
+	mz = rte_memzone_lookup(LINEAR_THREAD_ID_POOL);
+	if (mz != NULL)
+		pcb = mz->addr;
+	else
+		pcb = __create_tid_pool();
+
+	rte_spinlock_lock(&pcb->lock);
+	for (i = 0; i < pcb->nb_bucket; i++) {
+		if (pcb->bitmap[i] == (uint64_t)-1)
+			continue;
+		shift = 0;
+		while (pcb->bitmap[i] & (1UL << shift))
+			shift++;
+		pcb->bitmap[i] |= (1UL << shift);
+		break;
+	}
+	rte_spinlock_unlock(&pcb->lock);
+
+	if (i == pcb->nb_bucket)
+		return -1;
+
+	*tid = i * 64 + shift;
+	return 0;
+}
+
+static void __rte_unused
+__put_linear_tid(uint64_t tid)
+{
+	const struct rte_memzone *mz;
+	struct eal_thread_cb *pcb;
+	uint8_t shift;
+
+	mz = rte_memzone_lookup(LINEAR_THREAD_ID_POOL);
+	if (!mz)
+		return;
+
+	pcb = mz->addr;
+	rte_spinlock_lock(&pcb->lock);
+	shift = tid & 0x3F;
+	pcb->bitmap[tid / 64] &= ~(1UL << shift);
+	rte_spinlock_unlock(&pcb->lock);
+}
+
 /* main loop of threads */
 __attribute__((noreturn)) void *
 eal_thread_loop(__attribute__((unused)) void *arg)
@@ -169,6 +266,7 @@ eal_thread_loop(__attribute__((unused)) void *arg)
 	char c;
 	int n, ret;
 	unsigned lcore_id;
+	unsigned long ltid = 0;
 	pthread_t thread_id;
 	int m2s, s2m;
 
@@ -191,6 +289,11 @@ eal_thread_loop(__attribute__((unused)) void *arg)
 	/* set the lcore ID in per-lcore memory area */
 	RTE_PER_LCORE(_lcore_id) = lcore_id;
 
+	/* set the linear thread ID in per-lcore memory area */
+	if (__get_linear_tid(&ltid) < 0)
+		rte_panic("cannot get linear thread id\n");
+	RTE_PER_LCORE(_thread_id) = ltid;
+
 	/* set CPU affinity */
 	if (eal_thread_set_affinity() < 0)
 		rte_panic("cannot set affinity\n");
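
Editorial note on the allocator above: __get_linear_tid() scans the bitmap buckets for the first clear bit (bucket = tid / 64, bit = tid & 0x3F) and __put_linear_tid() clears that bit again, so a released tid becomes the lowest candidate for reuse. The following standalone sketch (plain C, hypothetical names, no memzone or spinlock; it is an illustration of the same bookkeeping, not the EAL code itself) shows the alloc/free behaviour:

#include <inttypes.h>
#include <stdio.h>

#define NB_BUCKET 2	/* 2 * 64 = 128 ids, standing in for RTE_MAX_THREAD / 64 */

static uint64_t bitmap[NB_BUCKET];

/* take the first clear bit; mirrors __get_linear_tid() without the lock */
static int
tid_alloc(uint64_t *tid)
{
	uint64_t i;
	unsigned shift;

	for (i = 0; i < NB_BUCKET; i++) {
		if (bitmap[i] == (uint64_t)-1)		/* bucket full */
			continue;
		shift = 0;
		while (bitmap[i] & (1ULL << shift))	/* find first clear bit */
			shift++;
		bitmap[i] |= (1ULL << shift);
		*tid = i * 64 + shift;
		return 0;
	}
	return -1;					/* pool exhausted */
}

/* clear the bit again; mirrors __put_linear_tid() without the lock */
static void
tid_free(uint64_t tid)
{
	bitmap[tid / 64] &= ~(1ULL << (tid & 0x3F));
}

int
main(void)
{
	uint64_t a = 0, b = 0, c = 0;

	tid_alloc(&a);		/* a == 0 */
	tid_alloc(&b);		/* b == 1 */
	tid_free(a);		/* slot 0 returns to the pool */
	tid_alloc(&c);		/* c == 0: the lowest free slot is reused */
	printf("a=%" PRIu64 " b=%" PRIu64 " c=%" PRIu64 "\n", a, b, c);
	return 0;
}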
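
The only public addition in this patch is rte_linear_thread_id() plus the RTE_MAX_THREAD bound. A minimal application-side sketch (hypothetical, not part of the patch) would size per-thread state by RTE_MAX_THREAD and index it with the linear tid, which remains distinct per pthread rather than per lcore:

#include <stdint.h>

#include <rte_eal.h>	/* RTE_MAX_THREAD (falls back to RTE_MAX_LCORE) */
#include <rte_lcore.h>	/* rte_linear_thread_id() */

/* one slot per linear thread id instead of per lcore id */
static uint64_t pkt_count[RTE_MAX_THREAD];

/* called from any EAL-launched thread; its tid was assigned in eal_thread_loop() */
static inline void
count_packet(void)
{
	pkt_count[rte_linear_thread_id()]++;
}

Whether later patches in this RFC series use the tid exactly this way is not shown in this mail; the sketch only illustrates the accessor added above.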