From patchwork Tue Jun 1 20:55:43 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93730 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5A31EA0524; Tue, 1 Jun 2021 22:56:11 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2BAC040E6E; Tue, 1 Jun 2021 22:56:06 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 365DA40E50 for ; Tue, 1 Jun 2021 22:56:04 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id 92FED20B7188; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com 92FED20B7188 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=biX4NpJbMOwOvGIdvoTHikgth+9J0g3ptPizxvdWlpc=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=YTTr9kQPHstBH6E9Ct0eVJfnrQyqidCEN24BKmtupR8P2PtSJJIBgZa+lBqtGYL3D PQ+ktIexv3AmzeP1W195yGdLPnhlS517S3vBKvvUheYJnDgfF/9K4p8U6JkaT1H0cn uu9//86iqICySJ4Qe0anFY4qBqYXiIfE0etJ5RFU= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:43 -0700 Message-Id: <1622580952-25169-2-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 01/10] eal: add thread id and simple thread functions X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Use a portable, type-safe representation for the thread identifier. Add functions for comparing thread ids and obtaining the thread id for the current thread. 
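For illustration only (not part of the patch), a minimal usage sketch of the identifier API introduced here; it assumes nothing beyond rte_thread_self() and rte_thread_equal() as declared below:

#include <stdio.h>
#include <rte_thread.h>

static rte_thread_t main_thread_id;

/* Report whether the calling thread is the thread whose id was recorded. */
static void
report_thread(void)
{
	rte_thread_t self = rte_thread_self();

	if (rte_thread_equal(self, main_thread_id))
		printf("Called from the recorded thread\n");
	else
		printf("Called from another thread\n");
}

int
main(void)
{
	main_thread_id = rte_thread_self();
	report_thread();
	return 0;
}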
--- lib/eal/common/rte_thread.c | 105 ++++++++++++++++++ lib/eal/include/rte_thread.h | 53 +++++++-- lib/eal/include/rte_thread_types.h | 10 ++ .../include/rte_windows_thread_types.h | 10 ++ lib/eal/windows/rte_thread.c | 17 +++ 5 files changed, 186 insertions(+), 9 deletions(-) create mode 100644 lib/eal/common/rte_thread.c create mode 100644 lib/eal/include/rte_thread_types.h create mode 100644 lib/eal/windows/include/rte_windows_thread_types.h diff --git a/lib/eal/common/rte_thread.c b/lib/eal/common/rte_thread.c new file mode 100644 index 0000000000..1292f7a8f8 --- /dev/null +++ b/lib/eal/common/rte_thread.c @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2021 Mellanox Technologies, Ltd + * Copyright(c) 2021 Microsoft Corporation + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +struct eal_tls_key { + pthread_key_t thread_index; +}; + +rte_thread_t +rte_thread_self(void) +{ + rte_thread_t thread_id = { 0 }; + + thread_id.opaque_id = pthread_self(); + + return thread_id; +} + +int +rte_thread_equal(rte_thread_t t1, rte_thread_t t2) +{ + return pthread_equal(t1.opaque_id, t2.opaque_id); +} + +int +rte_thread_key_create(rte_thread_key *key, void (*destructor)(void *)) +{ + int err; + rte_thread_key k; + + k = malloc(sizeof(*k)); + if (k == NULL) { + RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n"); + return EINVAL; + } + err = pthread_key_create(&(k->thread_index), destructor); + if (err != 0) { + RTE_LOG(DEBUG, EAL, "pthread_key_create failed: %s\n", + strerror(err)); + free(k); + return err; + } + *key = k; + return 0; +} + +int +rte_thread_key_delete(rte_thread_key key) +{ + int err; + + if (key == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); + return EINVAL; + } + err = pthread_key_delete(key->thread_index); + if (err != 0) { + RTE_LOG(DEBUG, EAL, "pthread_key_delete failed: %s\n", + strerror(err)); + free(key); + return err; + } + free(key); + return 0; +} + +int +rte_thread_value_set(rte_thread_key key, const void *value) +{ + int err; + + if (key == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); + return EINVAL; + } + err = pthread_setspecific(key->thread_index, value); + if (err != 0) { + RTE_LOG(DEBUG, EAL, "pthread_setspecific failed: %s\n", + strerror(err)); + return err; + } + return 0; +} + +void * +rte_thread_value_get(rte_thread_key key) +{ + if (key == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); + rte_errno = EINVAL; + return NULL; + } + return pthread_getspecific(key->thread_index); +} diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index 8be8ed8f36..347df1a6ae 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2021 Mellanox Technologies, Ltd + * Copyright(c) 2021 Microsoft Corporation */ +#include #include #include @@ -20,11 +22,50 @@ extern "C" { #endif +#include +#if defined(RTE_USE_WINDOWS_THREAD_TYPES) +#include +#else +#include +#endif + +/** + * Thread id descriptor. + */ +typedef struct rte_thread_tag { + uintptr_t opaque_id; /**< thread identifier */ +} rte_thread_t; + /** * TLS key type, an opaque pointer. */ typedef struct eal_tls_key *rte_thread_key; +/** + * Get the id of the calling thread. + * + * @return + * Return the thread id of the calling thread. + */ +__rte_experimental +rte_thread_t rte_thread_self(void); + +/** + * Check if 2 thread ids are equal. + * + * @param t1 + * First thread id. + * + * @param t2 + * Second thread id. 
+ * + * @return + * If the ids are equal, return nonzero. + * Otherwise, return 0. + */ +__rte_experimental +int rte_thread_equal(rte_thread_t t1, rte_thread_t t2); + #ifdef RTE_HAS_CPUSET /** @@ -63,9 +104,7 @@ void rte_thread_get_affinity(rte_cpuset_t *cpusetp); * * @return * On success, zero. - * On failure, a negative number and an error number is set in rte_errno. - * rte_errno can be: ENOMEM - Memory allocation error. - * ENOEXEC - Specific OS error. + * On failure, return a positive errno-style error number. */ __rte_experimental @@ -80,9 +119,7 @@ int rte_thread_key_create(rte_thread_key *key, * * @return * On success, zero. - * On failure, a negative number and an error number is set in rte_errno. - * rte_errno can be: EINVAL - Invalid parameter passed. - * ENOEXEC - Specific OS error. + * On failure, return a positive errno-style error number. */ __rte_experimental int rte_thread_key_delete(rte_thread_key key); @@ -97,9 +134,7 @@ int rte_thread_key_delete(rte_thread_key key); * * @return * On success, zero. - * On failure, a negative number and an error number is set in rte_errno. - * rte_errno can be: EINVAL - Invalid parameter passed. - * ENOEXEC - Specific OS error. + * On failure, return a positive errno-style error number. */ __rte_experimental int rte_thread_value_set(rte_thread_key key, const void *value); diff --git a/lib/eal/include/rte_thread_types.h b/lib/eal/include/rte_thread_types.h new file mode 100644 index 0000000000..d67b24a563 --- /dev/null +++ b/lib/eal/include/rte_thread_types.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021 Microsoft Corporation + */ + +#ifndef _RTE_THREAD_TYPES_H_ +#define _RTE_THREAD_TYPES_H_ + +#include + +#endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/include/rte_windows_thread_types.h b/lib/eal/windows/include/rte_windows_thread_types.h new file mode 100644 index 0000000000..60e6d94553 --- /dev/null +++ b/lib/eal/windows/include/rte_windows_thread_types.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021 Microsoft Corporation + */ + +#ifndef _RTE_THREAD_TYPES_H_ +#define _RTE_THREAD_TYPES_H_ + +#include + +#endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index 667287c387..3f3ebba21f 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2021 Mellanox Technologies, Ltd + * Copyright(c) 2021 Microsoft Corporation */ #include @@ -11,6 +12,22 @@ struct eal_tls_key { DWORD thread_index; }; +rte_thread_t +rte_thread_self(void) +{ + rte_thread_t thread_id = { 0 }; + + thread_id.opaque_id = GetCurrentThreadId(); + + return thread_id; +} + +int +rte_thread_equal(rte_thread_t t1, rte_thread_t t2) +{ + return t1.opaque_id == t2.opaque_id; +} + int rte_thread_key_create(rte_thread_key *key, __rte_unused void (*destructor)(void *)) From patchwork Tue Jun 1 20:55:44 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93732 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E32D7A0524; Tue, 1 Jun 2021 22:56:25 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 
69792410EE; Tue, 1 Jun 2021 22:56:08 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 5555240E6E for ; Tue, 1 Jun 2021 22:56:04 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id 9ED7E20B8006; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com 9ED7E20B8006 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=c+v35NpJL9jEubNuj4nUXwvNEP4jhfkIhKrnMfgRAfQ=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=pkbkk5QPI2Ag6CjNRSmFBsbeIJZerEM83Lq9513IpVTA8aS0kMsb2XvvWjbcKmRnb s/fQjDNYhVqeVetdJkB+7L60TGm9aK326JKYNd4ujnMMkYW92MGEnftrulW6fQ6Dw/ N5KRFFJbO1/RZmqSX3MIgmggo3Ni8tqHqUjECnQY= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:44 -0700 Message-Id: <1622580952-25169-3-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v7 02/10] eal: add thread attributes X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Implement thread attributes for: * thread affinity * thread priority Implement functions for managing thread attributes. Priority is represented through an enum that allows for two levels: - RTE_THREAD_PRIORITY_NORMAL - RTE_THREAD_PRIORITY_REALTIME_CRITICAL Affinity is described by the already known “rte_cpuset_t” type. An rte_thread_attr_t object can be set to the default values by calling *rte_thread_attr_init()*. 
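For illustration only (not part of the patch), a minimal sketch of preparing an attributes object with the rte_thread_attr_* functions declared below; the CPU index 2 is an arbitrary example chosen by the caller:

#include <rte_thread.h>

/* Build attributes for a worker pinned to CPU 2 at normal priority. */
static int
prepare_worker_attr(rte_thread_attr_t *attr)
{
	rte_cpuset_t cpuset;
	int ret;

	ret = rte_thread_attr_init(attr);
	if (ret != 0)
		return ret;

	CPU_ZERO(&cpuset);
	CPU_SET(2, &cpuset);

	ret = rte_thread_attr_set_affinity(attr, &cpuset);
	if (ret != 0)
		return ret;

	/* All calls return 0 on success or a positive errno-style value. */
	return rte_thread_attr_set_priority(attr, RTE_THREAD_PRIORITY_NORMAL);
}

The resulting object is intended to be consumed by rte_thread_create(), which is added later in this series.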
--- lib/eal/common/rte_thread.c | 51 +++++++++++ lib/eal/include/rte_thread.h | 89 +++++++++++++++++++ lib/eal/include/rte_thread_types.h | 3 + .../include/rte_windows_thread_types.h | 3 + lib/eal/windows/rte_thread.c | 53 +++++++++++ 5 files changed, 199 insertions(+) diff --git a/lib/eal/common/rte_thread.c b/lib/eal/common/rte_thread.c index 1292f7a8f8..4b1e8f995e 100644 --- a/lib/eal/common/rte_thread.c +++ b/lib/eal/common/rte_thread.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -33,6 +34,56 @@ rte_thread_equal(rte_thread_t t1, rte_thread_t t2) return pthread_equal(t1.opaque_id, t2.opaque_id); } +int +rte_thread_attr_init(rte_thread_attr_t *attr) +{ + RTE_ASSERT(attr != NULL); + + CPU_ZERO(&attr->cpuset); + attr->priority = RTE_THREAD_PRIORITY_NORMAL; + + return 0; +} + +int +rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, + rte_cpuset_t *cpuset) +{ + if (thread_attr == NULL || cpuset == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid thread attributes parameter\n"); + return EINVAL; + } + thread_attr->cpuset = *cpuset; + return 0; +} + +int +rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, + rte_cpuset_t *cpuset) +{ + if ((thread_attr == NULL) || (cpuset == NULL)) { + RTE_LOG(DEBUG, EAL, "Invalid thread attributes parameter\n"); + return EINVAL; + } + + *cpuset = thread_attr->cpuset; + return 0; +} + +int +rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, + enum rte_thread_priority priority) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set priority attribute, invalid parameter\n"); + return EINVAL; + } + + thread_attr->priority = priority; + return 0; +} + int rte_thread_key_create(rte_thread_key *key, void (*destructor)(void *)) { diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index 347df1a6ae..eff00023d7 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -36,6 +36,26 @@ typedef struct rte_thread_tag { uintptr_t opaque_id; /**< thread identifier */ } rte_thread_t; +/** + * Thread priority values. + */ +enum rte_thread_priority { + RTE_THREAD_PRIORITY_UNDEFINED = 0, + /**< priority hasn't been defined */ + RTE_THREAD_PRIORITY_NORMAL = 1, + /**< normal thread priority, the default */ + RTE_THREAD_PRIORITY_REALTIME_CRITICAL = 2, + /**< highest thread priority allowed */ +}; + +/** + * Representation for thread attributes. + */ +typedef struct { + enum rte_thread_priority priority; /**< thread priority */ + rte_cpuset_t cpuset; /**< thread affinity */ +} rte_thread_attr_t; + /** * TLS key type, an opaque pointer. */ @@ -66,6 +86,75 @@ rte_thread_t rte_thread_self(void); __rte_experimental int rte_thread_equal(rte_thread_t t1, rte_thread_t t2); +/** + * Initialize the attributes of a thread. + * These attributes can be passed to the rte_thread_create() function + * that will create a new thread and set its attributes according to attr. + * + * @param attr + * Thread attributes to initialize. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_attr_init(rte_thread_attr_t *attr); + +/** + * Set the CPU affinity value in the thread attributes pointed to + * by 'thread_attr'. + * + * @param thread_attr + * Points to the thread attributes in which affinity will be updated. + * + * @param cpuset + * Points to the value of the affinity to be set. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. 
+ */ +__rte_experimental +int rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, + rte_cpuset_t *cpuset); + +/** + * Get the value of CPU affinity that is set in the thread attributes pointed + * to by 'thread_attr'. + * + * @param thread_attr + * Points to the thread attributes from which affinity will be retrieved. + * + * @param cpuset + * Pointer to the memory that will store the affinity. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, + rte_cpuset_t *cpuset); + +/** + * Set the thread priority value in the thread attributes pointed to + * by 'thread_attr'. + * + * @param thread_attr + * Points to the thread attributes in which priority will be updated. + * + * @param priority + * Points to the value of the priority to be set. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, + enum rte_thread_priority priority); + #ifdef RTE_HAS_CPUSET /** diff --git a/lib/eal/include/rte_thread_types.h b/lib/eal/include/rte_thread_types.h index d67b24a563..996232c636 100644 --- a/lib/eal/include/rte_thread_types.h +++ b/lib/eal/include/rte_thread_types.h @@ -7,4 +7,7 @@ #include +#define EAL_THREAD_PRIORITY_NORMAL 0 +#define EAL_THREAD_PRIORITY_REALTIME_CIRTICAL 99 + #endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/include/rte_windows_thread_types.h b/lib/eal/windows/include/rte_windows_thread_types.h index 60e6d94553..5bdeaad3d4 100644 --- a/lib/eal/windows/include/rte_windows_thread_types.h +++ b/lib/eal/windows/include/rte_windows_thread_types.h @@ -7,4 +7,7 @@ #include +#define EAL_THREAD_PRIORITY_NORMAL THREAD_PRIORITY_NORMAL +#define EAL_THREAD_PRIORITY_REALTIME_CIRTICAL THREAD_PRIORITY_TIME_CRITICAL + #endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index 3f3ebba21f..cc319d3628 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -28,6 +29,58 @@ rte_thread_equal(rte_thread_t t1, rte_thread_t t2) return t1.opaque_id == t2.opaque_id; } +int +rte_thread_attr_init(rte_thread_attr_t *attr) +{ + RTE_ASSERT(attr != NULL); + + attr->priority = RTE_THREAD_PRIORITY_NORMAL; + CPU_ZERO(&attr->cpuset); + return 0; +} + +int +rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, + rte_cpuset_t *cpuset) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set affinity attribute, invalid parameter\n"); + return EINVAL; + } + + thread_attr->cpuset = *cpuset; + return 0; +} + +int +rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, + rte_cpuset_t *cpuset) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set affinity attribute, invalid parameter\n"); + return EINVAL; + } + + *cpuset = thread_attr->cpuset; + return 0; +} + +int +rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, + enum rte_thread_priority priority) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set priority attribute, invalid parameter\n"); + return EINVAL; + } + + thread_attr->priority = priority; + return 0; +} + int rte_thread_key_create(rte_thread_key *key, __rte_unused void (*destructor)(void *)) From patchwork Tue Jun 1 20:55:45 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 
Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93731 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 164E5A0524; Tue, 1 Jun 2021 22:56:18 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4F4D5410E2; Tue, 1 Jun 2021 22:56:07 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 487B840E64 for ; Tue, 1 Jun 2021 22:56:04 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id AA4E320B8008; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com AA4E320B8008 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=hsiMU0MVocxBXIF+7iw9aUB8IydDuodKSS+/3eRyt7Q=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=rLRNCA/RbHzkzzF1mQsUpzGTl3MD2ivnBABLofG/mKErdb8tZ72+5h6lXeUhRO+Hq v6Ek8Qliz9ToqvRVfBSzE9LurjxhdXQgr++uIKnNHU2hs/jnH5dFaKOljPT0rfSFYQ RnbNv8kBfxnYPsywku/2KdjShXIqD19GbJcb4p/g= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:45 -0700 Message-Id: <1622580952-25169-4-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 03/10] eal/windows: translate Windows errors to errno-style errors X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Add function to translate Windows error codes to errno-style error codes. The possible return values are chosen so that we have as much semantical compatibility between platforms as possible. --- lib/eal/include/rte_thread.h | 5 +- lib/eal/windows/rte_thread.c | 90 +++++++++++++++++++++++++++--------- 2 files changed, 71 insertions(+), 24 deletions(-) diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index eff00023d7..f3eeb28753 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -236,9 +236,8 @@ int rte_thread_value_set(rte_thread_key key, const void *value); * * @return * On success, value data pointer (can also be NULL). - * On failure, NULL and an error number is set in rte_errno. - * rte_errno can be: EINVAL - Invalid parameter passed. - * ENOEXEC - Specific OS error. + * On failure, NULL and a positive error number is set in rte_errno. 
+ * */ __rte_experimental void *rte_thread_value_get(rte_thread_key key); diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index cc319d3628..6ea1dc2a05 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -13,6 +13,54 @@ struct eal_tls_key { DWORD thread_index; }; +/* Translates the most common error codes related to threads */ +static int +thread_translate_win32_error(DWORD error) +{ + switch (error) { + case ERROR_SUCCESS: + return 0; + + case ERROR_INVALID_PARAMETER: + return EINVAL; + + case ERROR_INVALID_HANDLE: + return EFAULT; + + case ERROR_NOT_ENOUGH_MEMORY: + /* FALLTHROUGH */ + case ERROR_NO_SYSTEM_RESOURCES: + return ENOMEM; + + case ERROR_PRIVILEGE_NOT_HELD: + /* FALLTHROUGH */ + case ERROR_ACCESS_DENIED: + return EACCES; + + case ERROR_ALREADY_EXISTS: + return EEXIST; + + case ERROR_POSSIBLE_DEADLOCK: + return EDEADLK; + + case ERROR_INVALID_FUNCTION: + /* FALLTHROUGH */ + case ERROR_CALL_NOT_IMPLEMENTED: + return ENOSYS; + } + + return EINVAL; +} + +static int +thread_log_last_error(const char* message) +{ + DWORD error = GetLastError(); + RTE_LOG(DEBUG, EAL, "GetLastError()=%lu: %s\n", error, message); + + return thread_translate_win32_error(error); +} + rte_thread_t rte_thread_self(void) { @@ -85,18 +133,18 @@ int rte_thread_key_create(rte_thread_key *key, __rte_unused void (*destructor)(void *)) { + int ret; + *key = malloc(sizeof(**key)); if ((*key) == NULL) { RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n"); - rte_errno = ENOMEM; - return -1; + return ENOMEM; } (*key)->thread_index = TlsAlloc(); if ((*key)->thread_index == TLS_OUT_OF_INDEXES) { - RTE_LOG_WIN32_ERR("TlsAlloc()"); + ret = thread_log_last_error("TlsAlloc()"); free(*key); - rte_errno = ENOEXEC; - return -1; + return ret; } return 0; } @@ -104,16 +152,16 @@ rte_thread_key_create(rte_thread_key *key, int rte_thread_key_delete(rte_thread_key key) { - if (!key) { + int ret; + + if (key == NULL) { RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); - rte_errno = EINVAL; - return -1; + return EINVAL; } if (!TlsFree(key->thread_index)) { - RTE_LOG_WIN32_ERR("TlsFree()"); + ret = thread_log_last_error("TlsFree()"); free(key); - rte_errno = ENOEXEC; - return -1; + return ret; } free(key); return 0; @@ -122,19 +170,17 @@ rte_thread_key_delete(rte_thread_key key) int rte_thread_value_set(rte_thread_key key, const void *value) { + int ret; char *p; - if (!key) { + if (key == NULL) { RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); - rte_errno = EINVAL; - return -1; + return EINVAL; } /* discard const qualifier */ p = (char *) (uintptr_t) value; if (!TlsSetValue(key->thread_index, p)) { - RTE_LOG_WIN32_ERR("TlsSetValue()"); - rte_errno = ENOEXEC; - return -1; + return thread_log_last_error("TlsSetValue()"); } return 0; } @@ -143,16 +189,18 @@ void * rte_thread_value_get(rte_thread_key key) { void *output; + DWORD ret = 0; - if (!key) { + if (key == NULL) { RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); rte_errno = EINVAL; return NULL; } output = TlsGetValue(key->thread_index); - if (GetLastError() != ERROR_SUCCESS) { - RTE_LOG_WIN32_ERR("TlsGetValue()"); - rte_errno = ENOEXEC; + ret = GetLastError(); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "GetLastError()=%lu: TlsGetValue()\n", ret); + rte_errno = thread_translate_win32_error(ret); return NULL; } return output; From patchwork Tue Jun 1 20:55:46 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93733 X-Patchwork-Delegate: 
david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 00785A0524; Tue, 1 Jun 2021 22:56:33 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7CC5F410F7; Tue, 1 Jun 2021 22:56:09 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 617F540FDF for ; Tue, 1 Jun 2021 22:56:04 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id B613F20B800A; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com B613F20B800A DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=6W9rdvCriE0F6hGTQSnaAn6CohxTGOAOdL1aOx+uWT4=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Xi7CFX8EuKRdLGULfhBgZ9XT1cPLvZfQFa229Y/+jjJ0P5sUVYz99xBO59RShcRnh G3B9n/gvc7EYugg3PNt4Nsragk4TC1c4TsohEcJ2SmLLrmE0OnPGXpOesVGeOk3Nnj R8V/KYBaqd0GhBIEN+8fmuQK1im+KGmQE2yCTHMo= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:46 -0700 Message-Id: <1622580952-25169-5-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 04/10] eal: implement functions for thread affinity management X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Implement functions for getting/setting thread affinity. Threads can be pinned to specific cores by setting their affinity attribute. 
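For illustration only (not part of the patch), a minimal sketch of the per-thread affinity calls added here; pinning to CPU 0 is an arbitrary example:

#include <rte_thread.h>

/* Pin the calling thread to CPU 0, then read the affinity back. */
static int
pin_self_to_cpu0(void)
{
	rte_cpuset_t cpuset;
	int ret;

	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);

	ret = rte_thread_set_affinity_by_id(rte_thread_self(), &cpuset);
	if (ret != 0)
		return ret; /* positive errno-style value */

	CPU_ZERO(&cpuset);
	return rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset);
}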
--- lib/eal/common/rte_thread.c | 14 +++ lib/eal/include/rte_thread.h | 36 ++++++++ lib/eal/windows/eal_lcore.c | 169 +++++++++++++++++++++++++--------- lib/eal/windows/eal_windows.h | 10 ++ lib/eal/windows/rte_thread.c | 127 ++++++++++++++++++++++++- 5 files changed, 310 insertions(+), 46 deletions(-) diff --git a/lib/eal/common/rte_thread.c b/lib/eal/common/rte_thread.c index 4b1e8f995e..ceb27feaa7 100644 --- a/lib/eal/common/rte_thread.c +++ b/lib/eal/common/rte_thread.c @@ -34,6 +34,20 @@ rte_thread_equal(rte_thread_t t1, rte_thread_t t2) return pthread_equal(t1.opaque_id, t2.opaque_id); } +int +rte_thread_set_affinity_by_id(rte_thread_t thread_id, + const rte_cpuset_t *cpuset) +{ + return pthread_setaffinity_np(thread_id.opaque_id, sizeof(*cpuset), cpuset); +} + +int +rte_thread_get_affinity_by_id(rte_thread_t thread_id, + rte_cpuset_t *cpuset) +{ + return pthread_getaffinity_np(thread_id.opaque_id, sizeof(*cpuset), cpuset); +} + int rte_thread_attr_init(rte_thread_attr_t *attr) { diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index f3eeb28753..1f02962146 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -86,6 +86,42 @@ rte_thread_t rte_thread_self(void); __rte_experimental int rte_thread_equal(rte_thread_t t1, rte_thread_t t2); +/** + * Set the affinity of thread 'thread_id' to the cpu set + * specified by 'cpuset'. + * + * @param thread_id + * Id of the thread for which to set the affinity. + * + * @param cpuset + * Pointer to CPU affinity to set. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_set_affinity_by_id(rte_thread_t thread_id, + const rte_cpuset_t *cpuset); + +/** + * Get the affinity of thread 'thread_id' and store it + * in 'cpuset'. + * + * @param thread_id + * Id of the thread for which to get the affinity. + * + * @param cpuset + * Pointer for storing the affinity value. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_get_affinity_by_id(rte_thread_t thread_id, + rte_cpuset_t *cpuset); + /** * Initialize the attributes of a thread. * These attributes can be passed to the rte_thread_create() function diff --git a/lib/eal/windows/eal_lcore.c b/lib/eal/windows/eal_lcore.c index 476c2d2bdf..519a62b96d 100644 --- a/lib/eal/windows/eal_lcore.c +++ b/lib/eal/windows/eal_lcore.c @@ -2,7 +2,6 @@ * Copyright(c) 2019 Intel Corporation */ -#include #include #include @@ -27,13 +26,15 @@ struct socket_map { }; struct cpu_map { - unsigned int socket_count; unsigned int lcore_count; + unsigned int socket_count; + unsigned int cpu_count; struct lcore_map lcores[RTE_MAX_LCORE]; struct socket_map sockets[RTE_MAX_NUMA_NODES]; + GROUP_AFFINITY cpus[CPU_SETSIZE]; }; -static struct cpu_map cpu_map = { 0 }; +static struct cpu_map cpu_map; /* eal_create_cpu_map() is called before logging is initialized */ static void @@ -47,13 +48,111 @@ log_early(const char *format, ...) 
va_end(va); } +static int +eal_query_group_affinity(void) +{ + SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos = NULL; + DWORD infos_size = 0; + int ret = 0; + + if (!GetLogicalProcessorInformationEx(RelationGroup, NULL, + &infos_size)) { + DWORD error = GetLastError(); + if (error != ERROR_INSUFFICIENT_BUFFER) { + log_early("Cannot get group information size, " + "error %lu\n", error); + rte_errno = EINVAL; + ret = -1; + goto cleanup; + } + } + + infos = malloc(infos_size); + if (infos == NULL) { + log_early("Cannot allocate memory for NUMA node information\n"); + rte_errno = ENOMEM; + ret = -1; + goto cleanup; + } + + if (!GetLogicalProcessorInformationEx(RelationGroup, infos, + &infos_size)) { + log_early("Cannot get group information, error %lu\n", + GetLastError()); + rte_errno = EINVAL; + ret = -1; + goto cleanup; + } + + cpu_map.cpu_count = 0; + USHORT group_count = infos->Group.ActiveGroupCount; + for (USHORT group_number = 0; group_number < group_count; group_number++) { + KAFFINITY affinity = infos->Group.GroupInfo[group_number].ActiveProcessorMask; + + for (unsigned int i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) { + if ((affinity & ((KAFFINITY)1 << i)) == 0) + continue; + cpu_map.cpus[cpu_map.cpu_count].Group = group_number; + cpu_map.cpus[cpu_map.cpu_count].Mask = (KAFFINITY)1 << i; + cpu_map.cpu_count++; + } + } + +cleanup: + free(infos); + return ret; +} + +static bool +eal_create_lcore_map(const SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *info) +{ + const unsigned int node_id = info->NumaNode.NodeNumber; + const GROUP_AFFINITY *cores = &info->NumaNode.GroupMask; + struct lcore_map *lcore; + unsigned int socket_id; + + /* NUMA node may be reported multiple times if it includes + * cores from different processor groups, e. g. 80 cores + * of a physical processor comprise one NUMA node, but two + * processor groups, because group size is limited by 32/64. + */ + for (socket_id = 0; socket_id < cpu_map.socket_count; socket_id++) { + if (cpu_map.sockets[socket_id].node_id == node_id) + break; + } + + if (socket_id == cpu_map.socket_count) { + if (socket_id == RTE_DIM(cpu_map.sockets)) + return true; + + cpu_map.sockets[socket_id].node_id = node_id; + cpu_map.socket_count++; + } + + for (unsigned int i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) { + if ((cores->Mask & ((KAFFINITY)1 << i)) == 0) + continue; + + if (cpu_map.lcore_count == RTE_DIM(cpu_map.lcores)) + return true; + + lcore = &cpu_map.lcores[cpu_map.lcore_count]; + lcore->socket_id = socket_id; + lcore->core_id = cores->Group * EAL_PROCESSOR_GROUP_SIZE + i; + cpu_map.lcore_count++; + } + return false; +} + int eal_create_cpu_map(void) { SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos, *info; DWORD infos_size; bool full = false; + int ret = 0; + infos = NULL; infos_size = 0; if (!GetLogicalProcessorInformationEx( RelationNumaNode, NULL, &infos_size)) { @@ -78,57 +177,29 @@ eal_create_cpu_map(void) log_early("Cannot get NUMA node information, error %lu\n", GetLastError()); rte_errno = EINVAL; - return -1; + ret = -1; + goto exit; } info = infos; while ((uint8_t *)info - (uint8_t *)infos < infos_size) { - unsigned int node_id = info->NumaNode.NodeNumber; - GROUP_AFFINITY *cores = &info->NumaNode.GroupMask; - struct lcore_map *lcore; - unsigned int i, socket_id; - - /* NUMA node may be reported multiple times if it includes - * cores from different processor groups, e. g. 80 cores - * of a physical processor comprise one NUMA node, but two - * processor groups, because group size is limited by 32/64. 
- */ - for (socket_id = 0; socket_id < cpu_map.socket_count; - socket_id++) { - if (cpu_map.sockets[socket_id].node_id == node_id) - break; - } - - if (socket_id == cpu_map.socket_count) { - if (socket_id == RTE_DIM(cpu_map.sockets)) { - full = true; - goto exit; - } - - cpu_map.sockets[socket_id].node_id = node_id; - cpu_map.socket_count++; - } - - for (i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) { - if ((cores->Mask & ((KAFFINITY)1 << i)) == 0) - continue; - - if (cpu_map.lcore_count == RTE_DIM(cpu_map.lcores)) { - full = true; - goto exit; - } - - lcore = &cpu_map.lcores[cpu_map.lcore_count]; - lcore->socket_id = socket_id; - lcore->core_id = - cores->Group * EAL_PROCESSOR_GROUP_SIZE + i; - cpu_map.lcore_count++; + if (eal_create_lcore_map(info)) { + full = true; + break; } info = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)( (uint8_t *)info + info->Size); } + if (eal_query_group_affinity()) { + /* + * No need to set rte_errno here. + * It is set by eal_query_group_affinity(). + */ + ret = -1; + goto exit; + } exit: if (full) { /* Not a fatal error, but important for troubleshooting. */ @@ -138,7 +209,7 @@ eal_create_cpu_map(void) free(infos); - return 0; + return ret; } int @@ -164,3 +235,11 @@ eal_socket_numa_node(unsigned int socket_id) { return cpu_map.sockets[socket_id].node_id; } + +PGROUP_AFFINITY +eal_get_cpu_affinity(size_t cpu_index) +{ + RTE_VERIFY(cpu_index < CPU_SETSIZE); + + return &cpu_map.cpus[cpu_index]; +} diff --git a/lib/eal/windows/eal_windows.h b/lib/eal/windows/eal_windows.h index 478accc1b9..dc5dc8240a 100644 --- a/lib/eal/windows/eal_windows.h +++ b/lib/eal/windows/eal_windows.h @@ -55,6 +55,16 @@ int eal_thread_create(pthread_t *thread); */ unsigned int eal_socket_numa_node(unsigned int socket_id); +/** + * Get pointer to the group affinity for the cpu. + * + * @param cpu_index + * Index of the cpu, as it comes from rte_cpuset_t. + * @return + * Pointer to the group affinity for the cpu. + */ +PGROUP_AFFINITY eal_get_cpu_affinity(size_t cpu_index); + /** * Schedule code for execution in the interrupt thread. * diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index 6ea1dc2a05..9e74a538c2 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -7,7 +7,8 @@ #include #include #include -#include + +#include "eal_windows.h" struct eal_tls_key { DWORD thread_index; @@ -77,6 +78,130 @@ rte_thread_equal(rte_thread_t t1, rte_thread_t t2) return t1.opaque_id == t2.opaque_id; } +static int +rte_convert_cpuset_to_affinity(const rte_cpuset_t *cpuset, + PGROUP_AFFINITY affinity) +{ + int ret = 0; + PGROUP_AFFINITY cpu_affinity = NULL; + + memset(affinity, 0, sizeof(GROUP_AFFINITY)); + affinity->Group = (USHORT)-1; + + /* Check that all cpus of the set belong to the same processor group and + * accumulate thread affinity to be applied. 
+ */ + for (unsigned int cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) { + if (!CPU_ISSET(cpu_idx, cpuset)) + continue; + + cpu_affinity = eal_get_cpu_affinity(cpu_idx); + + if (affinity->Group == (USHORT)-1) { + affinity->Group = cpu_affinity->Group; + } else if (affinity->Group != cpu_affinity->Group) { + ret = EINVAL; + goto cleanup; + } + + affinity->Mask |= cpu_affinity->Mask; + } + + if (affinity->Mask == 0) { + ret = EINVAL; + goto cleanup; + } + +cleanup: + return ret; +} + +int +rte_thread_set_affinity_by_id(rte_thread_t thread_id, + const rte_cpuset_t *cpuset) +{ + int ret = 0; + GROUP_AFFINITY thread_affinity; + HANDLE thread_handle = NULL; + + if (cpuset == NULL) { + ret = EINVAL; + goto cleanup; + } + + ret = rte_convert_cpuset_to_affinity(cpuset, &thread_affinity); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n"); + goto cleanup; + } + + thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, thread_id.opaque_id); + if (thread_handle == NULL) { + ret = thread_log_last_error("OpenThread()"); + goto cleanup; + } + + if (!SetThreadGroupAffinity(thread_handle, &thread_affinity, NULL)) { + ret = thread_log_last_error("SetThreadGroupAffinity()"); + goto cleanup; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + + return ret; +} + +int +rte_thread_get_affinity_by_id(rte_thread_t thread_id, + rte_cpuset_t *cpuset) +{ + HANDLE thread_handle = NULL; + PGROUP_AFFINITY cpu_affinity; + GROUP_AFFINITY thread_affinity; + int ret = 0; + + if (cpuset == NULL) { + ret = EINVAL; + goto cleanup; + } + + thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, thread_id.opaque_id); + if (thread_handle == NULL) { + ret = thread_log_last_error("OpenThread()"); + goto cleanup; + } + + /* obtain previous thread affinity */ + if (!GetThreadGroupAffinity(thread_handle, &thread_affinity)) { + ret = thread_log_last_error("GetThreadGroupAffinity()"); + goto cleanup; + } + + CPU_ZERO(cpuset); + + /* Convert affinity to DPDK cpu set */ + for (unsigned int cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) { + + cpu_affinity = eal_get_cpu_affinity(cpu_idx); + + if ((cpu_affinity->Group == thread_affinity.Group) && + ((cpu_affinity->Mask & thread_affinity.Mask) != 0)) { + CPU_SET(cpu_idx, cpuset); + } + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + return ret; +} + int rte_thread_attr_init(rte_thread_attr_t *attr) { From patchwork Tue Jun 1 20:55:47 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93734 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5BABEA0524; Tue, 1 Jun 2021 22:56:41 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A4E1C410FE; Tue, 1 Jun 2021 22:56:10 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 5D3B940E78 for ; Tue, 1 Jun 2021 22:56:04 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id C1C1320B7178; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com C1C1320B7178 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; 
d=linux.microsoft.com; s=default; t=1622580963; bh=PhKyHKifR6jXFpXDN7BgBz2g2ulkCUHevGivHG8xSuk=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=WDi/fgIgEbnZoCueMB4xfki/19wkq3gW9KxGSb0sddfZPLpfrXEfOpcm/n+GjJEMF QUymXiwNv9fnKk1xZUgHpN24vGZ8LU7fkQi/reFQJQpOtJk7tctGKbW0tXDd6gVLVh tbNkpdeBiVWFG0HSXl3HWVqFnI6R7M+gNa2PMDBE= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:47 -0700 Message-Id: <1622580952-25169-6-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 05/10] eal: implement thread priority management functions X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Add function for setting the priority for a thread. Priorities on multiple platforms are similarly determined by a priority value and a priority class/policy. On Linux, the following mapping is created: RTE_THREAD_PRIORITY_NORMAL corresponds to * policy SCHED_OTHER * priority value: (sched_get_priority_min(SCHED_OTHER) + sched_get_priority_max(SCHED_OTHER))/2; RTE_THREAD_PRIORITY_REALTIME_CRITICAL corresponds to * policy SCHED_RR * priority value: sched_get_priority_max(SCHED_RR); On Windows, the following mapping is created: RTE_THREAD_PRIORITY_NORMAL corresponds to * class NORMAL_PRIORITY_CLASS * priority THREAD_PRIORITY_NORMAL RTE_THREAD_PRIORITY_REALTIME_CRITICAL corresponds to * class REALTIME_PRIORITY_CLASS * priority THREAD_PRIORITY_TIME_CRITICAL --- lib/eal/common/rte_thread.c | 51 ++++++++++ lib/eal/include/rte_thread.h | 17 ++++ lib/eal/include/rte_thread_types.h | 3 - .../include/rte_windows_thread_types.h | 3 - lib/eal/windows/rte_thread.c | 92 +++++++++++++++++++ 5 files changed, 160 insertions(+), 6 deletions(-) diff --git a/lib/eal/common/rte_thread.c b/lib/eal/common/rte_thread.c index ceb27feaa7..5cee19bb7d 100644 --- a/lib/eal/common/rte_thread.c +++ b/lib/eal/common/rte_thread.c @@ -48,6 +48,57 @@ rte_thread_get_affinity_by_id(rte_thread_t thread_id, return pthread_getaffinity_np(thread_id.opaque_id, sizeof(*cpuset), cpuset); } +static int +thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri, int *pol) +{ + RTE_VERIFY(os_pri != NULL); + RTE_VERIFY(pol != NULL); + + /* Clear the output parameters */ + *os_pri = sched_get_priority_min(SCHED_OTHER) - 1; + *pol = -1; + + switch (eal_pri) + { + case RTE_THREAD_PRIORITY_NORMAL: + *pol = SCHED_OTHER; + + /* + * Choose the middle of the range to represent + * the priority 'normal'. + * On Linux, this should be 0, since both + * sched_get_priority_min/_max return 0 for SCHED_OTHER. 
+ */ + *os_pri = (sched_get_priority_min(SCHED_OTHER) + + sched_get_priority_max(SCHED_OTHER))/2; + break; + case RTE_THREAD_PRIORITY_REALTIME_CRITICAL: + *pol = SCHED_RR; + *os_pri = sched_get_priority_max(SCHED_RR); + break; + default: + RTE_LOG(DEBUG, EAL, "The requested priority value is invalid.\n"); + return EINVAL; + } + return 0; +} + +int +rte_thread_set_priority(rte_thread_t thread_id, + enum rte_thread_priority priority) +{ + int ret; + int policy; + struct sched_param param; + + ret = thread_map_priority_to_os_value(priority, ¶m.sched_priority, &policy); + if (ret != 0) { + return ret; + } + + return pthread_setschedparam(thread_id.opaque_id, policy, ¶m); +} + int rte_thread_attr_init(rte_thread_attr_t *attr) { diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index 1f02962146..5c54cd9d67 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -122,6 +122,23 @@ __rte_experimental int rte_thread_get_affinity_by_id(rte_thread_t thread_id, rte_cpuset_t *cpuset); +/** + * Set the priority of a thread. + * + * @param thread_id + * Id of the thread for which to set priority. + * + * @param priority + * Priority value to be set. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_set_priority(rte_thread_t thread_id, + enum rte_thread_priority priority); + /** * Initialize the attributes of a thread. * These attributes can be passed to the rte_thread_create() function diff --git a/lib/eal/include/rte_thread_types.h b/lib/eal/include/rte_thread_types.h index 996232c636..d67b24a563 100644 --- a/lib/eal/include/rte_thread_types.h +++ b/lib/eal/include/rte_thread_types.h @@ -7,7 +7,4 @@ #include -#define EAL_THREAD_PRIORITY_NORMAL 0 -#define EAL_THREAD_PRIORITY_REALTIME_CIRTICAL 99 - #endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/include/rte_windows_thread_types.h b/lib/eal/windows/include/rte_windows_thread_types.h index 5bdeaad3d4..60e6d94553 100644 --- a/lib/eal/windows/include/rte_windows_thread_types.h +++ b/lib/eal/windows/include/rte_windows_thread_types.h @@ -7,7 +7,4 @@ #include -#define EAL_THREAD_PRIORITY_NORMAL THREAD_PRIORITY_NORMAL -#define EAL_THREAD_PRIORITY_REALTIME_CIRTICAL THREAD_PRIORITY_TIME_CRITICAL - #endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index 9e74a538c2..6dc3d575c0 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -202,6 +202,98 @@ rte_thread_get_affinity_by_id(rte_thread_t thread_id, return ret; } +static HANDLE +get_process_handle_from_thread_handle(HANDLE thread_handle) +{ + DWORD process_id = 0; + + process_id = GetProcessIdOfThread(thread_handle); + if (process_id == 0) { + RTE_LOG_WIN32_ERR("GetProcessIdOfThread()"); + return NULL; + } + + return OpenProcess(PROCESS_SET_INFORMATION, FALSE, process_id); +} + +static int +thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, + int *os_pri, int *pri_class) +{ + RTE_VERIFY(os_pri != NULL); + RTE_VERIFY(pri_class != NULL); + + /* Clear the output parameters */ + *os_pri = -1; + *pri_class = -1; + + switch (eal_pri) + { + case RTE_THREAD_PRIORITY_NORMAL: + *pri_class = NORMAL_PRIORITY_CLASS; + *os_pri = THREAD_PRIORITY_NORMAL; + break; + case RTE_THREAD_PRIORITY_REALTIME_CRITICAL: + *pri_class = REALTIME_PRIORITY_CLASS; + *os_pri = THREAD_PRIORITY_TIME_CRITICAL; + break; + default: + RTE_LOG(DEBUG, EAL, "The requested priority value is invalid.\n"); + 
return EINVAL; + } + + return 0; +} + +int +rte_thread_set_priority(rte_thread_t thread_id, + enum rte_thread_priority priority) +{ + HANDLE thread_handle = NULL; + HANDLE process_handle = NULL; + int priority_class = NORMAL_PRIORITY_CLASS; + int os_priority = THREAD_PRIORITY_NORMAL; + int ret = 0; + + thread_handle = OpenThread(THREAD_SET_INFORMATION | + THREAD_QUERY_INFORMATION, FALSE, thread_id.opaque_id); + if (thread_handle == NULL) { + ret = thread_log_last_error("OpenThread()"); + goto cleanup; + } + + ret = thread_map_priority_to_os_value(priority, &os_priority, &priority_class); + if (ret != 0) { + return ret; + } + process_handle = get_process_handle_from_thread_handle(thread_handle); + if (process_handle == NULL) { + ret = thread_log_last_error("OpenProcess()"); + goto cleanup; + } + + if (!SetPriorityClass(process_handle, priority_class)) { + ret = thread_log_last_error("SetPriorityClass()"); + goto cleanup; + } + + if (!SetThreadPriority(thread_handle, os_priority)) { + ret = thread_log_last_error("SetThreadPriority()"); + goto cleanup; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + if (process_handle != NULL) { + CloseHandle(process_handle); + process_handle = NULL; + } + return ret; +} + int rte_thread_attr_init(rte_thread_attr_t *attr) { From patchwork Tue Jun 1 20:55:48 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93735 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 240A7A0524; Tue, 1 Jun 2021 22:56:50 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 22A754111D; Tue, 1 Jun 2021 22:56:12 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 13CD040E64 for ; Tue, 1 Jun 2021 22:56:05 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id CD7F320B800D; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com CD7F320B800D DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=IKk/Prv7BfhaYSAyP5fyk++fi2QJsOs1718xLLKMEQI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=ieQ/+K1UblbEQOjLuzEjyWLtibZ3yTfuOx2zFJQUaX/ASLah0oBgI0Fbqaux2lYTM noYEERl70JtdZzcaWTokjTH+40aLJorIyQ3cGByFCHyW4R6rddKhqcMn0JEm/qITAh kGbl4mji09oKsNRhTE6FqiZ+ReHHg70OvMKy9fZM= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:48 -0700 Message-Id: <1622580952-25169-7-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 06/10] eal: add thread lifetime management X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK 
patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Add functions for thread creation, join, and cancellation. The *rte_thread_create()* function can optionally receive an rte_thread_attr_t object that will cause the thread to be created with the affinity and priority described by the attributes object. If no rte_thread_attr_t is passed (parameter is NULL), the default affinity and priority are used. On Windows, the function executed by a thread when the thread starts is represented by a function pointer of type DWORD (*func) (void*). On other platforms, the function pointer is a void* (*func) (void*). Performing a cast between these two types of function pointers to unify the API on all platforms may result in undefined behavior. To fix this issue, a wrapper that respects the signature required by CreateThread() has been created on Windows. --- lib/eal/common/rte_thread.c | 110 +++++++++++++++++++++ lib/eal/include/rte_thread.h | 53 ++++++++++++ lib/eal/windows/rte_thread.c | 155 +++++++++++++++++++++++++++++++++++ 3 files changed, 318 insertions(+) diff --git a/lib/eal/common/rte_thread.c b/lib/eal/common/rte_thread.c index 5cee19bb7d..2e06f16a69 100644 --- a/lib/eal/common/rte_thread.c +++ b/lib/eal/common/rte_thread.c @@ -149,6 +149,116 @@ rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, return 0; } +int +rte_thread_create(rte_thread_t *thread_id, + const rte_thread_attr_t *thread_attr, + void *(*thread_func)(void *), void *args) +{ + int ret = 0; + pthread_attr_t attr; + pthread_attr_t *attrp = NULL; + struct sched_param param = { + .sched_priority = 0, + }; + int policy = SCHED_OTHER; + + if (thread_attr != NULL) { + ret = pthread_attr_init(&attr); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_init failed\n"); + goto cleanup; + } + + attrp = &attr; + + /* + * Set the inherit scheduler parameter to explicit, + * otherwise the priority attribute is ignored. + */ + ret = pthread_attr_setinheritsched(attrp, + PTHREAD_EXPLICIT_SCHED); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setinheritsched failed\n"); + goto cleanup; + } + + /* + * In case a realtime scheduling policy is requested, + * the sched_priority parameter is set to the value stored in + * thread_attr. Otherwise, for the default scheduling policy + * (SCHED_OTHER) sched_priority needs to be initialized to 0.
+ */ + if (thread_attr->priority == RTE_THREAD_PRIORITY_REALTIME_CRITICAL) { + policy = SCHED_RR; + param.sched_priority = thread_attr->priority; + } + + ret = pthread_attr_setschedpolicy(attrp, policy); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setschedpolicy failed\n"); + goto cleanup; + } + + ret = pthread_attr_setschedparam(attrp, ¶m); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setschedparam failed\n"); + goto cleanup; + } + + ret = pthread_attr_setaffinity_np(attrp, + sizeof(thread_attr->cpuset), + &thread_attr->cpuset); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setaffinity_np failed\n"); + goto cleanup; + } + } + + ret = pthread_create(&thread_id->opaque_id, attrp, thread_func, args); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_create failed\n"); + goto cleanup; + } + +cleanup: + if (attrp != NULL) + pthread_attr_destroy(&attr); + + return ret; +} + +int +rte_thread_join(rte_thread_t thread_id, int *value_ptr) +{ + int ret = 0; + void *res = NULL; + void **pres = NULL; + + if (value_ptr != NULL) + pres = &res; + + ret = pthread_join(thread_id.opaque_id, pres); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_join failed\n"); + return ret; + } + + if (pres != NULL) + *value_ptr = *(int *)(*pres); + + return 0; +} + +int rte_thread_cancel(rte_thread_t thread_id) +{ + /* + * TODO: Behavior is different between POSIX and Windows threads. + * POSIX threads wait for a cancellation point. + * Current Windows emulation kills thread at any point. + */ + return pthread_cancel(thread_id.opaque_id); +} + int rte_thread_key_create(rte_thread_key *key, void (*destructor)(void *)) { diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index 5c54cd9d67..2ff207f8bb 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -208,6 +208,59 @@ __rte_experimental int rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, enum rte_thread_priority priority); +/** + * Create a new thread that will invoke the 'thread_func' routine. + * + * @param thread_id + * A pointer that will store the id of the newly created thread. + * + * @param thread_attr + * Attributes that are used at the creation of the new thread. + * + * @param thread_func + * The routine that the new thread will invoke when starting execution. + * + * @param args + * Arguments to be passed to the 'thread_func' routine. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_create(rte_thread_t *thread_id, + const rte_thread_attr_t *thread_attr, + void *(*thread_func)(void *), void *args); + +/** + * Waits for the thread identified by 'thread_id' to terminate + * + * @param thread_id + * The identifier of the thread. + * + * @param value_ptr + * Stores the exit status of the thread. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_join(rte_thread_t thread_id, int *value_ptr); + +/** + * Terminates a thread. + * + * @param thread_id + * The id of the thread to be cancelled. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. 
+ */ +__rte_experimental +int rte_thread_cancel(rte_thread_t thread_id); + #ifdef RTE_HAS_CPUSET /** diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index 6dc3d575c0..321b44caf6 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -14,6 +14,11 @@ struct eal_tls_key { DWORD thread_index; }; +struct thread_routine_ctx { + void* (*start_routine) (void*); + void *routine_args; +}; + /* Translates the most common error codes related to threads */ static int thread_translate_win32_error(DWORD error) @@ -346,6 +351,156 @@ rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, return 0; } +static DWORD +thread_func_wrapper(void *args) +{ + struct thread_routine_ctx *pctx = args; + intptr_t func_ret = 0; + struct thread_routine_ctx ctx = { NULL, NULL }; + + ctx.start_routine = pctx->start_routine; + ctx.routine_args = pctx->routine_args; + + free(pctx); + + func_ret = (intptr_t)ctx.start_routine(ctx.routine_args); + return (DWORD)func_ret; +} + +int +rte_thread_create(rte_thread_t *thread_id, + const rte_thread_attr_t *thread_attr, + void *(*thread_func)(void *), void *args) +{ + int ret = 0; + DWORD tid = 0; + HANDLE thread_handle = NULL; + GROUP_AFFINITY thread_affinity; + struct thread_routine_ctx *ctx = NULL; + + ctx = calloc(1, sizeof(*ctx)); + if (ctx == NULL) { + RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n"); + ret = ENOMEM; + goto cleanup; + } + ctx->routine_args = args; + ctx->start_routine = thread_func; + + thread_handle = CreateThread(NULL, 0, thread_func_wrapper, ctx, + CREATE_SUSPENDED, &tid); + if (thread_handle == NULL) { + ret = thread_log_last_error("CreateThread()"); + goto cleanup; + } + thread_id->opaque_id = tid; + + if (thread_attr != NULL) { + if (CPU_COUNT(&thread_attr->cpuset) > 0) { + ret = rte_convert_cpuset_to_affinity(&thread_attr->cpuset, &thread_affinity); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n"); + goto cleanup; + } + + if (!SetThreadGroupAffinity(thread_handle, &thread_affinity, NULL)) { + ret = thread_log_last_error("SetThreadGroupAffinity()"); + goto cleanup; + } + } + ret = rte_thread_set_priority(*thread_id, thread_attr->priority); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to set thread priority\n"); + goto cleanup; + } + } + + if (ResumeThread(thread_handle) == (DWORD)-1) { + ret = thread_log_last_error("ResumeThread()"); + goto cleanup; + } + + return 0; + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + return ret; +} + +int +rte_thread_join(rte_thread_t thread_id, int *value_ptr) +{ + HANDLE thread_handle = NULL; + DWORD result = 0; + DWORD exit_code = 0; + BOOL err = 0; + int ret = 0; + + thread_handle = OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION, + FALSE, thread_id.opaque_id); + if (thread_handle == NULL) { + ret = thread_log_last_error("OpenThread()"); + goto cleanup; + } + + result = WaitForSingleObject(thread_handle, INFINITE); + if (result != WAIT_OBJECT_0) { + ret = thread_log_last_error("WaitForSingleObject()"); + goto cleanup; + } + + if (value_ptr != NULL) { + err = GetExitCodeThread(thread_handle, &exit_code); + if (err == 0) { + ret = thread_log_last_error("GetExitCodeThread()"); + goto cleanup; + } + *value_ptr = exit_code; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + + return ret; +} + +int +rte_thread_cancel(rte_thread_t thread_id) +{ + int ret = 0; + HANDLE thread_handle = NULL; + + 
thread_handle = OpenThread(THREAD_TERMINATE, FALSE, thread_id.opaque_id); + if (thread_handle == NULL) { + ret = thread_log_last_error("OpenThread()"); + goto cleanup; + } + + /* + * TODO: Behavior is different between POSIX and Windows threads. + * POSIX threads wait for a cancellation point. + * Current Windows emulation kills thread at any point. + */ + ret = TerminateThread(thread_handle, 0); + if (ret != 0) { + ret = thread_log_last_error("TerminateThread()"); + goto cleanup; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + return ret; +} + int rte_thread_key_create(rte_thread_key *key, __rte_unused void (*destructor)(void *)) From patchwork Tue Jun 1 20:55:49 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93736 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2F9C0A0524; Tue, 1 Jun 2021 22:56:58 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6811241135; Tue, 1 Jun 2021 22:56:13 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 198C340E6E for ; Tue, 1 Jun 2021 22:56:05 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id D909D20B8010; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com D909D20B8010 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=e4JBdbYUKINTjp0M6dQGbeAJIsjHx4yhmejNT9j0mYw=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=M6Wdre7dYvaxVr7WHD1S+4N5tP3bTsHWTeTajJ+cRK3hJ9DFenDEYH7GxltNkiNtU 1WhLtZDyFjit2fq/1EAtJMG7DzllORoxhZnPjGEVmtFoTS1ONV8fMkfMsX4SUmNZpq I94VzFnHilcDTOn3SxG79bHZ441cuCjRH2sHKi9g= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:49 -0700 Message-Id: <1622580952-25169-8-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 07/10] eal: implement functions for mutex management X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Add functions for mutex init, destroy, lock, unlock. On Linux, static initialization of a mutex is possible through PTHREAD_MUTEX_INITIALIZER. Windows does not have a static initializer. Initialization is only done through InitializeCriticalSection(). 
To simulate static initialization, a fake initializer has been added: the rte_thread_mutex_lock() function will verify if the mutex has been initialized using this fake initializer and if so, it will perform additional initialization. --- lib/eal/common/rte_thread.c | 24 ++++++ lib/eal/include/rte_thread.h | 53 ++++++++++++ lib/eal/include/rte_thread_types.h | 4 + .../include/rte_windows_thread_types.h | 9 ++ lib/eal/windows/rte_thread.c | 83 ++++++++++++++++++- 5 files changed, 172 insertions(+), 1 deletion(-) diff --git a/lib/eal/common/rte_thread.c b/lib/eal/common/rte_thread.c index 2e06f16a69..e8e4af0451 100644 --- a/lib/eal/common/rte_thread.c +++ b/lib/eal/common/rte_thread.c @@ -249,6 +249,30 @@ rte_thread_join(rte_thread_t thread_id, int *value_ptr) return 0; } +int +rte_thread_mutex_init(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_init(mutex, NULL); +} + +int +rte_thread_mutex_lock(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_lock(mutex); +} + +int +rte_thread_mutex_unlock(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_unlock(mutex); +} + +int +rte_thread_mutex_destroy(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_destroy(mutex); +} + int rte_thread_cancel(rte_thread_t thread_id) { /* diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index 2ff207f8bb..2fca662616 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -248,6 +248,58 @@ int rte_thread_create(rte_thread_t *thread_id, __rte_experimental int rte_thread_join(rte_thread_t thread_id, int *value_ptr); +/** + * Initializes a mutex. + * + * @param mutex + * The mutex to be initialized. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_mutex_init(rte_thread_mutex_t *mutex); + +/** + * Locks a mutex. + * + * @param mutex + * The mutex to be locked. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_mutex_lock(rte_thread_mutex_t *mutex); + +/** + * Unlocks a mutex. + * + * @param mutex + * The mutex to be unlocked. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_mutex_unlock(rte_thread_mutex_t *mutex); + +/** + * Releases all resources associated with a mutex. + * + * @param mutex + * The mutex to be uninitialized. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_mutex_destroy(rte_thread_mutex_t *mutex); + /** * Terminates a thread. * @@ -269,6 +321,7 @@ int rte_thread_cancel(rte_thread_t thread_id); * * @param cpusetp * Pointer to CPU affinity to set.
+ * * @return * On success, return 0; otherwise return -1; */ diff --git a/lib/eal/include/rte_thread_types.h b/lib/eal/include/rte_thread_types.h index d67b24a563..7bb0d2948c 100644 --- a/lib/eal/include/rte_thread_types.h +++ b/lib/eal/include/rte_thread_types.h @@ -7,4 +7,8 @@ #include +#define RTE_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER + +typedef pthread_mutex_t rte_thread_mutex_t; + #endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/include/rte_windows_thread_types.h b/lib/eal/windows/include/rte_windows_thread_types.h index 60e6d94553..c6c8502bfb 100644 --- a/lib/eal/windows/include/rte_windows_thread_types.h +++ b/lib/eal/windows/include/rte_windows_thread_types.h @@ -7,4 +7,13 @@ #include +#define WINDOWS_MUTEX_INITIALIZER (void*)-1 +#define RTE_THREAD_MUTEX_INITIALIZER {WINDOWS_MUTEX_INITIALIZER} + +struct thread_mutex_t { + void* mutex_id; +}; + +typedef struct thread_mutex_t rte_thread_mutex_t; + #endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index 321b44caf6..f81876f4f2 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -470,6 +470,88 @@ rte_thread_join(rte_thread_t thread_id, int *value_ptr) return ret; } +int +rte_thread_mutex_init(rte_thread_mutex_t *mutex) +{ + int ret = 0; + CRITICAL_SECTION *m = NULL; + + RTE_VERIFY(mutex != NULL); + + m = calloc(1, sizeof(*m)); + if (m == NULL) { + RTE_LOG(DEBUG, EAL, "Unable to initialize mutex. Insufficient memory!\n"); + ret = ENOMEM; + goto cleanup; + } + + InitializeCriticalSection(m); + mutex->mutex_id = m; + m = NULL; + +cleanup: + return ret; +} + +int +rte_thread_mutex_lock(rte_thread_mutex_t *mutex) +{ + int ret = 0; + void* id = 0; + rte_thread_mutex_t m; + + RTE_VERIFY(mutex != NULL); + + /* Check if mutex has been statically initialized */ + id = InterlockedCompareExchangePointer(&mutex->mutex_id, mutex->mutex_id, WINDOWS_MUTEX_INITIALIZER); + /* If mutex has been statically initialized */ + if (id == WINDOWS_MUTEX_INITIALIZER) { + ret = rte_thread_mutex_init(&m); + if (ret != 0) { + return ret; + } + + id = InterlockedCompareExchangePointer(&mutex->mutex_id, m.mutex_id, WINDOWS_MUTEX_INITIALIZER); + /* If meanwhile the mutex was initialized by a different thread, + * destroy the local initialization. 
+ */ + if (id != WINDOWS_MUTEX_INITIALIZER) { + rte_thread_mutex_destroy(&m); + } + } + + EnterCriticalSection(mutex->mutex_id); + + return 0; +} + +int +rte_thread_mutex_unlock(rte_thread_mutex_t *mutex) +{ + RTE_VERIFY(mutex != NULL); + + LeaveCriticalSection(mutex->mutex_id); + return 0; +} + +int +rte_thread_mutex_destroy(rte_thread_mutex_t *mutex) +{ + RTE_VERIFY(mutex != NULL); + + if (mutex->mutex_id == WINDOWS_MUTEX_INITIALIZER) { + goto cleanup; + } + + DeleteCriticalSection(mutex->mutex_id); + free(mutex->mutex_id); + +cleanup: + mutex->mutex_id = NULL; + + return 0; +} + int rte_thread_cancel(rte_thread_t thread_id) { @@ -542,7 +624,6 @@ rte_thread_key_delete(rte_thread_key key) int rte_thread_value_set(rte_thread_key key, const void *value) { - int ret; char *p; if (key == NULL) { From patchwork Tue Jun 1 20:55:50 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93738 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2EE07A0524; Tue, 1 Jun 2021 22:57:11 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9111541140; Tue, 1 Jun 2021 22:56:15 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 279FA40FDF for ; Tue, 1 Jun 2021 22:56:05 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id E49BA20B8013; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com E49BA20B8013 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=B78+d0jaWOOf4w7Tx+dXoA1OK4F+EjrOe2dL863Q6O0=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=biWmy++wUjI68uJDUK93snvGT2gbBYm4F2/E8lfwgxbMwKvPknG5l8PFjLxCIOzg4 b91Uv3K0dJzKfJAUKWB0xt/d4H83D8Q/oeSXXl2HijhkYbS0Avh8j6CAmQIDXITwzI 7bhqvgh7X/ZjVlKaAHuIH8CJ5h5VIWmVnUam4jtk= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:50 -0700 Message-Id: <1622580952-25169-9-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 08/10] eal: implement functions for thread barrier management X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Add functions for barrier init, destroy, wait. A portable type is used to represent a barrier identifier. The rte_thread_barrier_wait() function returns the same value on all platforms. 
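For reference, a minimal usage sketch of the barrier API added below (illustrative only, not part of the patch; it ignores EAL initialization, trims error handling, and the worker body and names are hypothetical):

#include <stdio.h>
#include <rte_thread.h>

#define WORKERS 4

static rte_thread_barrier_t barrier;

static void *
worker(void *arg)
{
	(void)arg;
	/* Rendezvous with the other workers and the main thread. */
	if (rte_thread_barrier_wait(&barrier) == RTE_THREAD_BARRIER_SERIAL_THREAD)
		printf("last thread through the barrier\n");
	return NULL;
}

int
main(void)
{
	rte_thread_t ids[WORKERS];
	int i;

	/* WORKERS worker threads plus the main thread must arrive. */
	if (rte_thread_barrier_init(&barrier, WORKERS + 1) != 0)
		return 1;
	for (i = 0; i < WORKERS; i++)
		if (rte_thread_create(&ids[i], NULL, worker, NULL) != 0)
			return 1;
	rte_thread_barrier_wait(&barrier);
	for (i = 0; i < WORKERS; i++)
		rte_thread_join(ids[i], NULL);
	return rte_thread_barrier_destroy(&barrier);
}

The count passed to rte_thread_barrier_init() includes every participant that will call rte_thread_barrier_wait(), hence WORKERS + 1 here.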
--- lib/eal/common/rte_thread.c | 61 ++++++++++++++++++++++++++++++++++++ lib/eal/include/rte_thread.h | 58 ++++++++++++++++++++++++++++++++++ lib/eal/windows/rte_thread.c | 56 +++++++++++++++++++++++++++++++++ 3 files changed, 175 insertions(+) diff --git a/lib/eal/common/rte_thread.c b/lib/eal/common/rte_thread.c index e8e4af0451..7560585784 100644 --- a/lib/eal/common/rte_thread.c +++ b/lib/eal/common/rte_thread.c @@ -273,6 +273,67 @@ rte_thread_mutex_destroy(rte_thread_mutex_t *mutex) return pthread_mutex_destroy(mutex); } +int +rte_thread_barrier_init(rte_thread_barrier_t *barrier, int count) +{ + int ret = 0; + pthread_barrier_t *pthread_barrier = NULL; + + RTE_VERIFY(barrier != NULL); + RTE_VERIFY(count > 0); + + pthread_barrier = calloc(1, sizeof(*pthread_barrier)); + if (pthread_barrier == NULL) { + RTE_LOG(DEBUG, EAL, "Unable to initialize barrier. Insufficient memory!\n"); + ret = ENOMEM; + goto cleanup; + } + ret = pthread_barrier_init(pthread_barrier, NULL, count); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to initialize barrier, ret = %d\n", ret); + goto cleanup; + } + + barrier->barrier_id = pthread_barrier; + pthread_barrier = NULL; + +cleanup: + free(pthread_barrier); + return ret; +} + +int rte_thread_barrier_wait(rte_thread_barrier_t *barrier) +{ + int ret = 0; + + RTE_VERIFY(barrier != NULL); + RTE_VERIFY(barrier->barrier_id != NULL); + + ret = pthread_barrier_wait(barrier->barrier_id); + if (ret == PTHREAD_BARRIER_SERIAL_THREAD) { + ret = RTE_THREAD_BARRIER_SERIAL_THREAD; + } + + return ret; +} + +int rte_thread_barrier_destroy(rte_thread_barrier_t *barrier) +{ + int ret = 0; + + RTE_VERIFY(barrier != NULL); + + ret = pthread_barrier_destroy(barrier->barrier_id); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to destroy barrier, ret = %d\n", ret); + } + + free(barrier->barrier_id); + barrier->barrier_id = NULL; + + return ret; +} + int rte_thread_cancel(rte_thread_t thread_id) { /* diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h index 2fca662616..06b23571a1 100644 --- a/lib/eal/include/rte_thread.h +++ b/lib/eal/include/rte_thread.h @@ -29,6 +29,11 @@ extern "C" { #include #endif +/** + * Returned by rte_thread_barrier_wait() when call is successful. + */ +#define RTE_THREAD_BARRIER_SERIAL_THREAD -1 + /** * Thread id descriptor. */ @@ -56,6 +61,13 @@ typedef struct { rte_cpuset_t cpuset; /**< thread affinity */ } rte_thread_attr_t; +/** + * Thread barrier representation. + */ +typedef struct rte_thread_barrier_tag { + void* barrier_id; /**< barrrier identifier */ +} rte_thread_barrier_t; + /** * TLS key type, an opaque pointer. */ @@ -300,6 +312,52 @@ int rte_thread_mutex_unlock(rte_thread_mutex_t *mutex); __rte_experimental int rte_thread_mutex_destroy(rte_thread_mutex_t *mutex); +/** + * Initializes a synchronization barrier. + * + * @param barrier + * A pointer that references the newly created 'barrier' object. + * + * @param count + * The number of threads that must enter the barrier before + * the threads can continue execution. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_barrier_init(rte_thread_barrier_t *barrier, int count); + +/** + * Causes the calling thread to wait at the synchronization barrier 'barrier'. + * + * @param barrier + * The barrier used for synchronizing the threads. + * + * @return + * Return RTE_THREAD_BARRIER_SERIAL_THREAD for the thread synchronized + * at the barrier. + * Return 0 for all other threads. 
+ * Return a positive errno-style error number, in case of failure. + */ +__rte_experimental +int rte_thread_barrier_wait(rte_thread_barrier_t *barrier); + +/** + * Releases all resources used by a synchronization barrier + * and uninitializes it. + * + * @param barrier + * The barrier to be destroyed. + * + * @return + * On success, return 0. + * On failure, return a positive errno-style error number. + */ +__rte_experimental +int rte_thread_barrier_destroy(rte_thread_barrier_t *barrier); + /** * Terminates a thread. * diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c index f81876f4f2..e1778b603e 100644 --- a/lib/eal/windows/rte_thread.c +++ b/lib/eal/windows/rte_thread.c @@ -552,6 +552,62 @@ rte_thread_mutex_destroy(rte_thread_mutex_t *mutex) return 0; } +int +rte_thread_barrier_init(rte_thread_barrier_t *barrier, int count) +{ + int ret = 0; + SYNCHRONIZATION_BARRIER *sync_barrier = NULL; + + RTE_VERIFY(barrier != NULL); + RTE_VERIFY(count > 0); + + sync_barrier = calloc(1, sizeof(*sync_barrier)); + if (sync_barrier == NULL) { + RTE_LOG(DEBUG, EAL, "Unable to initialize barrier. Insufficient memory!\n"); + ret = ENOMEM; + goto cleanup; + } + if (!InitializeSynchronizationBarrier(sync_barrier, count, -1)) { + ret = thread_log_last_error("InitializeSynchronizationBarrier()"); + goto cleanup; + } + + barrier->barrier_id = sync_barrier; + sync_barrier = NULL; + +cleanup: + free(sync_barrier); + return ret; +} + +int +rte_thread_barrier_wait(rte_thread_barrier_t *barrier) +{ + RTE_VERIFY(barrier != NULL); + RTE_VERIFY(barrier->barrier_id != NULL); + + if (EnterSynchronizationBarrier(barrier->barrier_id, + SYNCHRONIZATION_BARRIER_FLAGS_BLOCK_ONLY)) { + + return RTE_THREAD_BARRIER_SERIAL_THREAD; + } + + return 0; +} + +int +rte_thread_barrier_destroy(rte_thread_barrier_t *barrier) +{ + RTE_VERIFY(barrier != NULL); + + DeleteSynchronizationBarrier(barrier->barrier_id); + + free(barrier->barrier_id); + barrier->barrier_id = NULL; + + return 0; +} + int rte_thread_cancel(rte_thread_t thread_id) { From patchwork Tue Jun 1 20:55:51 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93737 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8F902A0524; Tue, 1 Jun 2021 22:57:04 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 87E9C41139; Tue, 1 Jun 2021 22:56:14 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 278CB40E78 for ; Tue, 1 Jun 2021 22:56:05 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id EFEB920B8016; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com EFEB920B8016 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580963; bh=j63JMy33j21PYhAYkgTA71KdO4pTrZ0ChEgqLAk+NQk=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=BZCH+89Q2n1le8IesCP2mwFuFj7R/qtsA0whCJ9rYjhyY4QfEvif7QNmQdnAP9SBb cLnH2f47eR8NkeFY0ko6Nzsi3RuLtOucQ8RnXbWkMo0eYnGuvWRs2aevHSuad40y/y P1jOnG2avU37IPdxXvUixCvfCNTKsZ2iF8FOx1rI= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, 
navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:51 -0700 Message-Id: <1622580952-25169-10-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v7 09/10] eal: add EAL argument for setting thread priority X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Allow the user to choose the thread priority through an EAL command line parameter given when starting the application. If the EAL parameter is not used, the per-platform default value for thread priority is used. Otherwise, the administrator can set one of the available options: --thread-prio normal --thread-prio realtime Example: ./dpdk-l2fwd -l 0-3 -n 4 --thread-prio normal -- -q 8 -p ffff --- lib/eal/common/eal_common_options.c | 28 +++++++++++++++++++++++++++- lib/eal/common/eal_internal_cfg.h | 2 ++ lib/eal/common/eal_options.h | 2 ++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c index 66f9114715..773cefdff7 100644 --- a/lib/eal/common/eal_common_options.c +++ b/lib/eal/common/eal_common_options.c @@ -107,6 +107,7 @@ eal_long_options[] = { {OPT_TELEMETRY, 0, NULL, OPT_TELEMETRY_NUM }, {OPT_NO_TELEMETRY, 0, NULL, OPT_NO_TELEMETRY_NUM }, {OPT_FORCE_MAX_SIMD_BITWIDTH, 1, NULL, OPT_FORCE_MAX_SIMD_BITWIDTH_NUM}, + {OPT_THREAD_PRIORITY, 1, NULL, OPT_THREAD_PRIORITY_NUM}, /* legacy options that will be removed in future */ {OPT_PCI_BLACKLIST, 1, NULL, OPT_PCI_BLACKLIST_NUM }, @@ -1406,6 +1407,24 @@ eal_parse_simd_bitwidth(const char *arg) return 0; } +static int +eal_parse_thread_priority(const char *arg) +{ + struct internal_config *internal_conf = + eal_get_internal_configuration(); + enum rte_thread_priority priority; + + if (!strncmp("normal", arg, sizeof("normal"))) + priority = RTE_THREAD_PRIORITY_NORMAL; + else if (!strncmp("realtime", arg, sizeof("realtime"))) + priority = RTE_THREAD_PRIORITY_REALTIME_CRITICAL; + else + return -1; + + internal_conf->thread_priority = priority; + return 0; +} + static int eal_parse_base_virtaddr(const char *arg) { @@ -1819,7 +1838,13 @@ eal_parse_common_option(int opt, const char *optarg, return -1; } break; - + case OPT_THREAD_PRIORITY_NUM: + if (eal_parse_thread_priority(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid parameter for --" + OPT_THREAD_PRIORITY "\n"); + return -1; + } + break; /* don't know what to do, leave this to caller */ default: return 1; @@ -2082,6 +2107,7 @@ eal_common_usage(void) " (can be used multiple times)\n" " --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n" " --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n" + " --"OPT_THREAD_PRIORITY" Set threads priority (normal|realtime)\n" #ifndef RTE_EXEC_ENV_WINDOWS " --"OPT_SYSLOG" Set syslog facility\n" #endif diff --git a/lib/eal/common/eal_internal_cfg.h b/lib/eal/common/eal_internal_cfg.h index
d6c0470eb8..b2996cd65b 100644 --- a/lib/eal/common/eal_internal_cfg.h +++ b/lib/eal/common/eal_internal_cfg.h @@ -94,6 +94,8 @@ struct internal_config { unsigned int no_telemetry; /**< true to disable Telemetry */ struct simd_bitwidth max_simd_bitwidth; /**< max simd bitwidth path to use */ + enum rte_thread_priority thread_priority; + /**< thread priority to configure */ }; void eal_reset_internal_config(struct internal_config *internal_cfg); diff --git a/lib/eal/common/eal_options.h b/lib/eal/common/eal_options.h index 7b348e707f..9f5b209f64 100644 --- a/lib/eal/common/eal_options.h +++ b/lib/eal/common/eal_options.h @@ -93,6 +93,8 @@ enum { OPT_NO_TELEMETRY_NUM, #define OPT_FORCE_MAX_SIMD_BITWIDTH "force-max-simd-bitwidth" OPT_FORCE_MAX_SIMD_BITWIDTH_NUM, +#define OPT_THREAD_PRIORITY "thread-prio" + OPT_THREAD_PRIORITY_NUM, /* legacy option that will be removed in future */ #define OPT_PCI_BLACKLIST "pci-blacklist" From patchwork Tue Jun 1 20:55:52 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Narcisa Ana Maria Vasile X-Patchwork-Id: 93739 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 27893A0524; Tue, 1 Jun 2021 22:57:18 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E70754114E; Tue, 1 Jun 2021 22:56:16 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 33764410DE for ; Tue, 1 Jun 2021 22:56:05 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1059) id 075D020B8019; Tue, 1 Jun 2021 13:56:03 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com 075D020B8019 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1622580964; bh=myDjHBwMSN8CqWRAwM1phzfSO1x5xv1SX8+wTvKzAwA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Qiw+mRLdqKSN12Qk2B8pAH7aNTzWBvodXHNZpkVVIqcJEY/+7kWUWZPid/yUZYfyi BcTZ6ys7uEbZxL+ee+01bne3eQ10zR9o3ABvlj0zHEeK+d06oxcttRSe1w/is90uhW sSgR/jmn/JWt+DgBlY0Vm8L7D+gR8t38JqsIbbYU= From: Narcisa Ana Maria Vasile To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, talshn@nvidia.com, ocardona@microsoft.com Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com Date: Tue, 1 Jun 2021 13:55:52 -0700 Message-Id: <1622580952-25169-11-git-send-email-navasile@linux.microsoft.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> References: <1617413948-10504-1-git-send-email-navasile@linux.microsoft.com> <1622580952-25169-1-git-send-email-navasile@linux.microsoft.com> Subject: [dpdk-dev] [PATCH v7 10/10] Enable the new EAL thread API X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Narcisa Vasile Rename pthread_* occurrences with the new rte_thread_* API. Enable the new API in the build system. 
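To make the mechanical conversion concrete, here is a small sketch of the before/after pattern the hunks below apply (illustrative only, not taken from the diff; the names list_lock, worker and run_worker are hypothetical):

/*
 * pthread names map one-to-one onto the EAL thread API introduced
 * earlier in this series.
 */
#include <rte_thread.h>

/* PTHREAD_MUTEX_INITIALIZER -> RTE_THREAD_MUTEX_INITIALIZER */
static rte_thread_mutex_t list_lock = RTE_THREAD_MUTEX_INITIALIZER;

static void *
worker(void *arg)
{
	/* pthread_mutex_lock()/unlock() -> rte_thread_mutex_lock()/unlock() */
	rte_thread_mutex_lock(&list_lock);
	/* ... critical section ... */
	rte_thread_mutex_unlock(&list_lock);
	return arg;
}

static int
run_worker(void)
{
	/* pthread_t -> rte_thread_t, pthread_create() -> rte_thread_create() */
	rte_thread_t id;
	int ret;

	ret = rte_thread_create(&id, NULL, worker, NULL);
	if (ret != 0)
		return ret;

	/* pthread_join() -> rte_thread_join(); the result pointer may be NULL. */
	return rte_thread_join(id, NULL);
}
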
--- app/test/process.h | 8 +- app/test/test_lcores.c | 16 +- app/test/test_link_bonding.c | 10 +- app/test/test_lpm_perf.c | 12 +- config/meson.build | 4 + drivers/bus/dpaa/base/qbman/bman_driver.c | 5 +- drivers/bus/dpaa/base/qbman/dpaa_sys.c | 14 +- drivers/bus/dpaa/base/qbman/process.c | 6 +- drivers/bus/dpaa/dpaa_bus.c | 14 +- drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 19 +- drivers/compress/mlx5/mlx5_compress.c | 10 +- drivers/event/dlb2/pf/base/dlb2_osdep.h | 4 +- drivers/net/af_xdp/rte_eth_af_xdp.c | 18 +- drivers/net/ark/ark_ethdev.c | 2 +- drivers/net/atlantic/atl_ethdev.c | 4 +- drivers/net/atlantic/atl_types.h | 5 +- .../net/atlantic/hw_atl/hw_atl_utils_fw2x.c | 26 +-- drivers/net/axgbe/axgbe_common.h | 2 +- drivers/net/axgbe/axgbe_dev.c | 8 +- drivers/net/axgbe/axgbe_ethdev.c | 8 +- drivers/net/axgbe/axgbe_ethdev.h | 8 +- drivers/net/axgbe/axgbe_i2c.c | 4 +- drivers/net/axgbe/axgbe_mdio.c | 8 +- drivers/net/axgbe/axgbe_phy_impl.c | 6 +- drivers/net/bnxt/bnxt.h | 16 +- drivers/net/bnxt/bnxt_cpr.c | 4 +- drivers/net/bnxt/bnxt_ethdev.c | 52 ++--- drivers/net/bnxt/bnxt_irq.c | 8 +- drivers/net/bnxt/bnxt_reps.c | 10 +- drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 34 ++-- drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 4 +- drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c | 24 +-- drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h | 2 +- drivers/net/ena/base/ena_plat_dpdk.h | 8 +- drivers/net/enic/enic.h | 2 +- drivers/net/ice/ice_dcf_parent.c | 4 +- drivers/net/ipn3ke/ipn3ke_representor.c | 6 +- drivers/net/ixgbe/ixgbe_ethdev.h | 2 +- drivers/net/kni/rte_eth_kni.c | 8 +- drivers/net/mlx5/linux/mlx5_os.c | 2 +- drivers/net/mlx5/mlx5.c | 20 +- drivers/net/mlx5/mlx5.h | 2 +- drivers/net/mlx5/mlx5_txpp.c | 8 +- drivers/net/mlx5/windows/mlx5_flow_os.c | 10 +- drivers/net/mlx5/windows/mlx5_os.c | 2 +- drivers/net/qede/base/bcm_osal.h | 8 +- drivers/net/vhost/rte_eth_vhost.c | 24 +-- .../net/virtio/virtio_user/virtio_user_dev.c | 30 +-- .../net/virtio/virtio_user/virtio_user_dev.h | 2 +- drivers/raw/ifpga/ifpga_rawdev.c | 6 +- drivers/vdpa/ifc/ifcvf_vdpa.c | 46 ++--- drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +-- drivers/vdpa/mlx5/mlx5_vdpa.h | 6 +- drivers/vdpa/mlx5/mlx5_vdpa_event.c | 73 +++---- examples/kni/main.c | 6 +- .../performance-thread/pthread_shim/main.c | 2 +- examples/vhost/main.c | 2 +- examples/vhost_blk/vhost_blk.c | 12 +- lib/eal/common/eal_common_options.c | 6 +- lib/eal/common/eal_common_proc.c | 48 ++--- lib/eal/common/eal_common_thread.c | 31 ++- lib/eal/common/eal_common_trace.c | 2 +- lib/eal/common/eal_private.h | 2 +- lib/eal/common/malloc_mp.c | 32 +-- lib/eal/common/meson.build | 1 + lib/eal/freebsd/eal.c | 40 +++- lib/eal/freebsd/eal_alarm.c | 12 +- lib/eal/freebsd/eal_interrupts.c | 4 +- lib/eal/freebsd/eal_thread.c | 14 +- lib/eal/include/meson.build | 1 + lib/eal/include/rte_lcore.h | 8 +- lib/eal/include/rte_per_lcore.h | 2 - lib/eal/linux/eal.c | 43 ++-- lib/eal/linux/eal_alarm.c | 10 +- lib/eal/linux/eal_interrupts.c | 4 +- lib/eal/linux/eal_thread.c | 18 +- lib/eal/linux/eal_timer.c | 2 +- lib/eal/unix/meson.build | 1 - lib/eal/unix/rte_thread.c | 92 --------- lib/eal/version.map | 21 ++ lib/eal/windows/eal.c | 40 +++- lib/eal/windows/eal_interrupts.c | 10 +- lib/eal/windows/eal_thread.c | 28 +-- lib/eal/windows/eal_windows.h | 10 - lib/eal/windows/include/meson.build | 1 + lib/eal/windows/include/pthread.h | 186 ------------------ lib/eal/windows/include/sched.h | 2 +- lib/eal/windows/meson.build | 7 +- lib/ethdev/rte_ethdev.c | 4 +- lib/ethdev/rte_ethdev_core.h | 5 +- lib/ethdev/rte_flow.c | 4 +- 
lib/eventdev/rte_event_eth_rx_adapter.c | 6 +- lib/vhost/fd_man.c | 40 ++-- lib/vhost/fd_man.h | 6 +- lib/vhost/socket.c | 130 ++++++------ lib/vhost/vhost.c | 10 +- meson_options.txt | 2 + 97 files changed, 683 insertions(+), 892 deletions(-) delete mode 100644 lib/eal/unix/rte_thread.c delete mode 100644 lib/eal/windows/include/pthread.h diff --git a/app/test/process.h b/app/test/process.h index a09a088477..9e4be17bad 100644 --- a/app/test/process.h +++ b/app/test/process.h @@ -26,7 +26,7 @@ #ifdef RTE_LIB_PDUMP #ifdef RTE_NET_RING -#include +#include extern void *send_pkts(void *empty); extern uint16_t flag_for_send_pkts; #endif @@ -47,7 +47,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value) char path[32]; #ifdef RTE_LIB_PDUMP #ifdef RTE_NET_RING - pthread_t thread; + rte_thread_t thread; int rc; #endif #endif @@ -128,7 +128,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value) #ifdef RTE_LIB_PDUMP #ifdef RTE_NET_RING if ((strcmp(env_value, "run_pdump_server_tests") == 0)) { - rc = pthread_create(&thread, NULL, &send_pkts, NULL); + rc = rte_thread_create(&thread, NULL, &send_pkts, NULL); if (rc != 0) { rte_panic("Cannot start send pkts thread: %s\n", strerror(rc)); @@ -143,7 +143,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value) #ifdef RTE_NET_RING if ((strcmp(env_value, "run_pdump_server_tests") == 0)) { flag_for_send_pkts = 0; - pthread_join(thread, NULL); + rte_thread_join(thread, NULL); } #endif #endif diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c index 19a7ab9fce..c507e423e0 100644 --- a/app/test/test_lcores.c +++ b/app/test/test_lcores.c @@ -14,7 +14,7 @@ struct thread_context { enum { INIT, ERROR, DONE } state; bool lcore_id_any; - pthread_t id; + rte_thread_t id; unsigned int *registered_count; }; @@ -77,7 +77,7 @@ test_non_eal_lcores(unsigned int eal_threads_count) t->state = INIT; t->registered_count = ®istered_count; t->lcore_id_any = false; - if (pthread_create(&t->id, NULL, thread_loop, t) != 0) + if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0) break; non_eal_threads_count++; } @@ -96,7 +96,7 @@ test_non_eal_lcores(unsigned int eal_threads_count) t->state = INIT; t->registered_count = ®istered_count; t->lcore_id_any = true; - if (pthread_create(&t->id, NULL, thread_loop, t) == 0) { + if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) { non_eal_threads_count++; printf("non-EAL threads count: %u\n", non_eal_threads_count); while (__atomic_load_n(®istered_count, __ATOMIC_ACQUIRE) != @@ -110,7 +110,7 @@ test_non_eal_lcores(unsigned int eal_threads_count) ret = 0; for (i = 0; i < non_eal_threads_count; i++) { t = &thread_contexts[i]; - pthread_join(t->id, NULL); + rte_thread_join(t->id, NULL); if (t->state != DONE) ret = -1; } @@ -262,7 +262,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count) t->state = INIT; t->registered_count = ®istered_count; t->lcore_id_any = false; - if (pthread_create(&t->id, NULL, thread_loop, t) != 0) + if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0) goto cleanup_threads; non_eal_threads_count++; while (__atomic_load_n(®istered_count, __ATOMIC_ACQUIRE) != @@ -285,7 +285,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count) t->state = INIT; t->registered_count = ®istered_count; t->lcore_id_any = true; - if (pthread_create(&t->id, NULL, thread_loop, t) != 0) + if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0) goto cleanup_threads; non_eal_threads_count++; while (__atomic_load_n(®istered_count, 
__ATOMIC_ACQUIRE) != @@ -309,7 +309,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count) ret = 0; for (i = 0; i < non_eal_threads_count; i++) { t = &thread_contexts[i]; - pthread_join(t->id, NULL); + rte_thread_join(t->id, NULL); if (t->state != DONE) ret = -1; } @@ -330,7 +330,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count) __atomic_store_n(®istered_count, 0, __ATOMIC_RELEASE); for (i = 0; i < non_eal_threads_count; i++) { t = &thread_contexts[i]; - pthread_join(t->id, NULL); + rte_thread_join(t->id, NULL); } error: if (handle[1] != NULL) diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c index 8a5c8310a8..3d9663f5ee 100644 --- a/app/test/test_link_bonding.c +++ b/app/test/test_link_bonding.c @@ -203,7 +203,7 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr) static int slaves_initialized; static int mac_slaves_initialized; -static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t mutex = RTE_THREAD_MUTEX_INITIALIZER; static pthread_cond_t cvar = PTHREAD_COND_INITIALIZER; @@ -1191,11 +1191,11 @@ test_bonding_lsc_event_callback(uint16_t port_id __rte_unused, void *param __rte_unused, void *ret_param __rte_unused) { - pthread_mutex_lock(&mutex); + rte_thread_mutex_lock(&mutex); test_lsc_interrupt_count++; pthread_cond_signal(&cvar); - pthread_mutex_unlock(&mutex); + rte_thread_mutex_unlock(&mutex); return 0; } @@ -1220,11 +1220,11 @@ lsc_timeout(int wait_us) ts.tv_sec += 1; } - pthread_mutex_lock(&mutex); + rte_thread_mutex_lock(&mutex); if (test_lsc_interrupt_count < 1) retval = pthread_cond_timedwait(&cvar, &mutex, &ts); - pthread_mutex_unlock(&mutex); + rte_thread_mutex_unlock(&mutex); if (retval == 0 && test_lsc_interrupt_count < 1) return -1; diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c index 2bed00d064..ce79d9c17a 100644 --- a/app/test/test_lpm_perf.c +++ b/app/test/test_lpm_perf.c @@ -25,7 +25,7 @@ static volatile uint32_t thr_id; static uint64_t gwrite_cycles; static uint32_t num_writers; /* LPM APIs are not thread safe, use mutex to provide thread safety */ -static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t lpm_mutex = RTE_THREAD_MUTEX_INITIALIZER; /* Report quiescent state interval every 1024 lookups. Larger critical * sections in reader will result in writer polling multiple times. 
@@ -443,7 +443,7 @@ test_lpm_rcu_qsbr_writer(void *arg) /* Add all the entries */ for (j = si; j < ei; j++) { if (num_writers > 1) - pthread_mutex_lock(&lpm_mutex); + rte_thread_mutex_lock(&lpm_mutex); if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip, large_ldepth_route_table[j].depth, next_hop_add) != 0) { @@ -452,13 +452,13 @@ test_lpm_rcu_qsbr_writer(void *arg) goto error; } if (num_writers > 1) - pthread_mutex_unlock(&lpm_mutex); + rte_thread_mutex_unlock(&lpm_mutex); } /* Delete all the entries */ for (j = si; j < ei; j++) { if (num_writers > 1) - pthread_mutex_lock(&lpm_mutex); + rte_thread_mutex_lock(&lpm_mutex); if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip, large_ldepth_route_table[j].depth) != 0) { printf("Failed to delete iteration %d, route# %d\n", @@ -466,7 +466,7 @@ test_lpm_rcu_qsbr_writer(void *arg) goto error; } if (num_writers > 1) - pthread_mutex_unlock(&lpm_mutex); + rte_thread_mutex_unlock(&lpm_mutex); } } @@ -478,7 +478,7 @@ test_lpm_rcu_qsbr_writer(void *arg) error: if (num_writers > 1) - pthread_mutex_unlock(&lpm_mutex); + rte_thread_mutex_unlock(&lpm_mutex); return -1; } diff --git a/config/meson.build b/config/meson.build index 017bb2efbb..4070eb200d 100644 --- a/config/meson.build +++ b/config/meson.build @@ -262,6 +262,10 @@ else # for 32-bit we need smaller reserved memory areas dpdk_conf.set('RTE_MAX_MEM_MB', 2048) endif +if is_windows + dpdk_conf.set('RTE_USE_WINDOWS_THREAD_TYPES', not get_option('use_external_thread_lib')) +endif + compile_time_cpuflags = [] subdir(arch_subdir) diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c index ee35e03da1..c66a0946ca 100644 --- a/drivers/bus/dpaa/base/qbman/bman_driver.c +++ b/drivers/bus/dpaa/base/qbman/bman_driver.c @@ -38,11 +38,10 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared) struct dpaa_ioctl_irq_map irq_map; /* Verify the thread's cpu-affinity */ - ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), - &cpuset); + ret = rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset); if (ret) { errno = ret; - err(0, "pthread_getaffinity_np()"); + err(0, "rte_thread_get_affinity_by_id()"); return ret; } pcfg.cpu = -1; diff --git a/drivers/bus/dpaa/base/qbman/dpaa_sys.c b/drivers/bus/dpaa/base/qbman/dpaa_sys.c index 9d6bfd40a2..dc5f02bec1 100644 --- a/drivers/bus/dpaa/base/qbman/dpaa_sys.c +++ b/drivers/bus/dpaa/base/qbman/dpaa_sys.c @@ -18,16 +18,16 @@ struct process_interrupt { }; static COMPAT_LIST_HEAD(process_irq_list); -static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t process_irq_lock = RTE_THREAD_MUTEX_INITIALIZER; static void process_interrupt_install(struct process_interrupt *irq) { int ret; /* Add the irq to the end of the list */ - ret = pthread_mutex_lock(&process_irq_lock); + ret = rte_thread_mutex_lock(&process_irq_lock); assert(!ret); list_add_tail(&irq->node, &process_irq_list); - ret = pthread_mutex_unlock(&process_irq_lock); + ret = rte_thread_mutex_unlock(&process_irq_lock); assert(!ret); } @@ -35,10 +35,10 @@ static void process_interrupt_remove(struct process_interrupt *irq) { int ret; - ret = pthread_mutex_lock(&process_irq_lock); + ret = rte_thread_mutex_lock(&process_irq_lock); assert(!ret); list_del(&irq->node); - ret = pthread_mutex_unlock(&process_irq_lock); + ret = rte_thread_mutex_unlock(&process_irq_lock); assert(!ret); } @@ -47,14 +47,14 @@ static struct process_interrupt *process_interrupt_find(int irq_num) int ret; struct process_interrupt *i = NULL; - 
ret = pthread_mutex_lock(&process_irq_lock); + ret = rte_thread_mutex_lock(&process_irq_lock); assert(!ret); list_for_each_entry(i, &process_irq_list, node) { if (i->irq == irq_num) goto done; } done: - ret = pthread_mutex_unlock(&process_irq_lock); + ret = rte_thread_mutex_unlock(&process_irq_lock); assert(!ret); return i; } diff --git a/drivers/bus/dpaa/base/qbman/process.c b/drivers/bus/dpaa/base/qbman/process.c index 9bc92681cd..6d2d10cdf0 100644 --- a/drivers/bus/dpaa/base/qbman/process.c +++ b/drivers/bus/dpaa/base/qbman/process.c @@ -21,7 +21,7 @@ * what the lock is for. */ static int fd = -1; -static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t fd_init_lock = RTE_THREAD_MUTEX_INITIALIZER; static int check_fd(void) { @@ -29,12 +29,12 @@ static int check_fd(void) if (fd >= 0) return 0; - ret = pthread_mutex_lock(&fd_init_lock); + ret = rte_thread_mutex_lock(&fd_init_lock); assert(!ret); /* check again with the lock held */ if (fd < 0) fd = open(PROCESS_PATH, O_RDWR); - ret = pthread_mutex_unlock(&fd_init_lock); + ret = rte_thread_mutex_unlock(&fd_init_lock); assert(!ret); return (fd >= 0) ? 0 : -ENODEV; } diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c index 173041c026..baa1c7c6dc 100644 --- a/drivers/bus/dpaa/dpaa_bus.c +++ b/drivers/bus/dpaa/dpaa_bus.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -48,7 +48,7 @@ static struct rte_dpaa_bus rte_dpaa_bus; struct netcfg_info *dpaa_netcfg; /* define a variable to hold the portal_key, once created.*/ -static pthread_key_t dpaa_portal_key; +static rte_thread_key dpaa_portal_key; unsigned int dpaa_svr_family; @@ -316,10 +316,10 @@ int rte_dpaa_portal_init(void *arg) DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index(); DPAA_PER_LCORE_PORTAL->tid = syscall(SYS_gettid); - ret = pthread_setspecific(dpaa_portal_key, + ret = rte_thread_value_set(dpaa_portal_key, (void *)DPAA_PER_LCORE_PORTAL); if (ret) { - DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u" + DPAA_BUS_LOG(ERR, "rte_thread_value_set failed on core %u" " (lcore=%u) with ret: %d", cpu, lcore, ret); dpaa_portal_finish(NULL); @@ -377,7 +377,7 @@ dpaa_portal_finish(void *arg) bman_thread_finish(); qman_thread_finish(); - pthread_setspecific(dpaa_portal_key, NULL); + rte_thread_value_set(dpaa_portal_key, NULL); rte_free(dpaa_io_portal); dpaa_io_portal = NULL; @@ -453,9 +453,9 @@ rte_dpaa_bus_scan(void) /* create the key, supplying a function that'll be invoked * when a portal affined thread will be deleted. */ - ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish); + ret = rte_thread_key_create(&dpaa_portal_key, dpaa_portal_finish); if (ret) { - DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret); + DPAA_BUS_LOG(DEBUG, "Unable to create thread key. 
(%d)", ret); dpaa_clean_device_list(); return ret; } diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c index c6f8312a1d..dffbdff666 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -59,7 +59,7 @@ uint8_t dpaa2_dqrr_size; uint8_t dpaa2_eqcr_size; /* Variable to hold the portal_key, once created.*/ -static pthread_key_t dpaa2_portal_key; +static rte_thread_key dpaa2_portal_key; /*Stashing Macros default for LS208x*/ static int dpaa2_core_cluster_base = 0x04; @@ -92,10 +92,9 @@ dpaa2_get_core_id(void) rte_cpuset_t cpuset; int i, ret, cpu_id = -1; - ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), - &cpuset); + ret = rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset); if (ret) { - DPAA2_BUS_ERR("pthread_getaffinity_np() failed"); + DPAA2_BUS_ERR("rte_thread_get_affinity_by_id() failed"); return ret; } @@ -296,9 +295,9 @@ static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void) } } - ret = pthread_setspecific(dpaa2_portal_key, (void *)dpio_dev); + ret = rte_thread_value_set(dpaa2_portal_key, (void *)dpio_dev); if (ret) { - DPAA2_BUS_ERR("pthread_setspecific failed with ret: %d", ret); + DPAA2_BUS_ERR("rte_thread_value_set failed with ret: %d", ret); dpaa2_put_qbman_swp(dpio_dev); return NULL; } @@ -357,7 +356,7 @@ static void dpaa2_portal_finish(void *arg) dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).dpio_dev); dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev); - pthread_setspecific(dpaa2_portal_key, NULL); + rte_thread_value_set(dpaa2_portal_key, NULL); } static int @@ -515,10 +514,10 @@ dpaa2_create_dpio_device(int vdev_fd, /* create the key, supplying a function that'll be invoked * when a portal affined thread will be deleted. 
*/ - ret = pthread_key_create(&dpaa2_portal_key, + ret = rte_thread_key_create(&dpaa2_portal_key, dpaa2_portal_finish); if (ret) { - DPAA2_BUS_DEBUG("Unable to create pthread key (%d)", + DPAA2_BUS_DEBUG("Unable to create thread key (%d)", ret); goto err; } diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c index ec3c237512..d2bda960f8 100644 --- a/drivers/compress/mlx5/mlx5_compress.c +++ b/drivers/compress/mlx5/mlx5_compress.c @@ -73,7 +73,7 @@ struct mlx5_compress_qp { TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list = TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list); -static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t priv_list_lock = RTE_THREAD_MUTEX_INITIALIZER; int mlx5_compress_logtype; @@ -833,9 +833,9 @@ mlx5_compress_pci_probe(struct rte_pci_driver *pci_drv, } priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr; priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr; - pthread_mutex_lock(&priv_list_lock); + rte_thread_mutex_lock(&priv_list_lock); TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next); - pthread_mutex_unlock(&priv_list_lock); + rte_thread_mutex_unlock(&priv_list_lock); return 0; } @@ -855,13 +855,13 @@ mlx5_compress_pci_remove(struct rte_pci_device *pdev) { struct mlx5_compress_priv *priv = NULL; - pthread_mutex_lock(&priv_list_lock); + rte_thread_mutex_lock(&priv_list_lock); TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next) if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) != 0) break; if (priv) TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next); - pthread_mutex_unlock(&priv_list_lock); + rte_thread_mutex_unlock(&priv_list_lock); if (priv) { mlx5_mr_release_cache(&priv->mr_scache); mlx5_compress_hw_global_release(priv); diff --git a/drivers/event/dlb2/pf/base/dlb2_osdep.h b/drivers/event/dlb2/pf/base/dlb2_osdep.h index aa101a49a3..98ff0a26fc 100644 --- a/drivers/event/dlb2/pf/base/dlb2_osdep.h +++ b/drivers/event/dlb2/pf/base/dlb2_osdep.h @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include @@ -192,7 +192,7 @@ static void *dlb2_complete_queue_map_unmap(void *__args) static inline void os_schedule_work(struct dlb2_hw *hw) { struct dlb2_dev *dlb2_dev; - pthread_t complete_queue_map_unmap_thread; + rte_thread_t complete_queue_map_unmap_thread; int ret; dlb2_dev = container_of(hw, struct dlb2_dev, hw); diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c index 0c91a40c4a..20b51e15dd 100644 --- a/drivers/net/af_xdp/rte_eth_af_xdp.c +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c @@ -178,7 +178,7 @@ TAILQ_HEAD(internal_list_head, internal_list); static struct internal_list_head internal_list = TAILQ_HEAD_INITIALIZER(internal_list); -static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER; #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) static inline int @@ -666,7 +666,7 @@ find_internal_resource(struct pmd_internals *port_int) if (port_int == NULL) return NULL; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_FOREACH(list, &internal_list, next) { struct pmd_internals *list_int = @@ -677,7 +677,7 @@ find_internal_resource(struct pmd_internals *port_int) } } - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); if (!found) return NULL; @@ -715,7 +715,7 @@ get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname, if (mb_pool 
== NULL) return ret; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_FOREACH(list, &internal_list, next) { internals = list->eth_dev->data->dev_private; @@ -741,7 +741,7 @@ get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname, } out: - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); return ret; } @@ -770,9 +770,9 @@ eth_dev_configure(struct rte_eth_dev *dev) return -1; list->eth_dev = dev; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_INSERT_TAIL(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); } return 0; @@ -938,9 +938,9 @@ eth_dev_close(struct rte_eth_dev *dev) /* Remove ethdev from list used to track and share UMEMs */ list = find_internal_resource(internals); if (list) { - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_REMOVE(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); rte_free(list); } } diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c index 6372cf7740..0e4a7d0851 100644 --- a/drivers/net/ark/ark_ethdev.c +++ b/drivers/net/ark/ark_ethdev.c @@ -563,7 +563,7 @@ eth_ark_dev_start(struct rte_eth_dev *dev) ark_pktchkr_run(ark->pc); if (ark->start_pg && (dev->data->port_id == 0)) { - pthread_t thread; + rte_thread_t thread; /* Delay packet generatpr start allow the hardware to be ready * This is only used for sanity checking with internal generator diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c index 473f6209f6..96b7da3ac7 100644 --- a/drivers/net/atlantic/atl_ethdev.c +++ b/drivers/net/atlantic/atl_ethdev.c @@ -405,7 +405,7 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev) hw->aq_nic_cfg = &adapter->hw_cfg; - pthread_mutex_init(&hw->mbox_mutex, NULL); + rte_thread_mutex_init(&hw->mbox_mutex); /* disable interrupt */ atl_disable_intr(hw); @@ -712,7 +712,7 @@ atl_dev_close(struct rte_eth_dev *dev) rte_intr_callback_unregister(intr_handle, atl_dev_interrupt_handler, dev); - pthread_mutex_destroy(&hw->mbox_mutex); + rte_thread_mutex_destroy(&hw->mbox_mutex); return ret; } diff --git a/drivers/net/atlantic/atl_types.h b/drivers/net/atlantic/atl_types.h index e813d9f326..93e41aa26a 100644 --- a/drivers/net/atlantic/atl_types.h +++ b/drivers/net/atlantic/atl_types.h @@ -10,7 +10,8 @@ #include #include #include -#include +#include +#include #include @@ -141,7 +142,7 @@ struct aq_hw_s { u32 rpc_tid; struct hw_aq_atl_utils_fw_rpc rpc; - pthread_mutex_t mbox_mutex; + rte_thread_mutex_t mbox_mutex; }; struct aq_fw_ops { diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c index 3a7faf405c..15b80584f8 100644 --- a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include "../atl_hw_regs.h" #include "../atl_types.h" @@ -218,7 +218,7 @@ int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) u32 mac_addr[2] = { 0 }; u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR); - pthread_mutex_lock(&self->mbox_mutex); + rte_thread_mutex_lock(&self->mbox_mutex); if (efuse_addr != 0) { err = hw_atl_utils_fw_downld_dwords(self, @@ -257,7 +257,7 @@ int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) } exit: - 
pthread_mutex_unlock(&self->mbox_mutex); + rte_thread_mutex_unlock(&self->mbox_mutex); return err; } @@ -269,7 +269,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS); - pthread_mutex_lock(&self->mbox_mutex); + rte_thread_mutex_lock(&self->mbox_mutex); /* Toggle statistics bit for FW to update */ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS); @@ -286,7 +286,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) err = hw_atl_utils_update_stats(self); exit: - pthread_mutex_unlock(&self->mbox_mutex); + rte_thread_mutex_unlock(&self->mbox_mutex); return err; @@ -299,7 +299,7 @@ static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp) u32 temp_val = mpi_opts & BIT(CAPS_HI_TEMPERATURE); u32 temp_res; - pthread_mutex_lock(&self->mbox_mutex); + rte_thread_mutex_lock(&self->mbox_mutex); /* Toggle statistics bit for FW to 0x36C.18 (CAPS_HI_TEMPERATURE) */ mpi_opts = mpi_opts ^ BIT(CAPS_HI_TEMPERATURE); @@ -317,7 +317,7 @@ static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp) sizeof(temp_res) / sizeof(u32)); - pthread_mutex_unlock(&self->mbox_mutex); + rte_thread_mutex_unlock(&self->mbox_mutex); if (err) return err; @@ -536,7 +536,7 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr, if ((self->caps_lo & BIT(CAPS_LO_SMBUS_READ)) == 0) return -EOPNOTSUPP; - pthread_mutex_lock(&self->mbox_mutex); + rte_thread_mutex_lock(&self->mbox_mutex); request.msg_id = 0; request.device_id = dev_addr; @@ -605,7 +605,7 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr, } exit: - pthread_mutex_unlock(&self->mbox_mutex); + rte_thread_mutex_unlock(&self->mbox_mutex); return err; } @@ -626,7 +626,7 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr, request.address = offset; request.length = len; - pthread_mutex_lock(&self->mbox_mutex); + rte_thread_mutex_lock(&self->mbox_mutex); /* Write SMBUS request to cfg memory */ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, @@ -694,7 +694,7 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr, } exit: - pthread_mutex_unlock(&self->mbox_mutex); + rte_thread_mutex_unlock(&self->mbox_mutex); return err; } @@ -712,7 +712,7 @@ static int aq_fw2x_send_macsec_request(struct aq_hw_s *self, if ((self->caps_lo & BIT(CAPS_LO_MACSEC)) == 0) return -EOPNOTSUPP; - pthread_mutex_lock(&self->mbox_mutex); + rte_thread_mutex_lock(&self->mbox_mutex); /* Write macsec request to cfg memory */ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, @@ -742,7 +742,7 @@ static int aq_fw2x_send_macsec_request(struct aq_hw_s *self, RTE_ALIGN(sizeof(*response) / sizeof(u32), sizeof(u32))); exit: - pthread_mutex_unlock(&self->mbox_mutex); + rte_thread_mutex_unlock(&self->mbox_mutex); return err; } diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h index df0aa21a9b..a7b892b806 100644 --- a/drivers/net/axgbe/axgbe_common.h +++ b/drivers/net/axgbe/axgbe_common.h @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c index 786288a7b0..197c722901 100644 --- a/drivers/net/axgbe/axgbe_dev.c +++ b/drivers/net/axgbe/axgbe_dev.c @@ -167,12 +167,12 @@ static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata, index = mmd_address & ~pdata->xpcs_window_mask; offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); - pthread_mutex_lock(&pdata->xpcs_mutex); + rte_thread_mutex_lock(&pdata->xpcs_mutex); 
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); mmd_data = XPCS16_IOREAD(pdata, offset); - pthread_mutex_unlock(&pdata->xpcs_mutex); + rte_thread_mutex_unlock(&pdata->xpcs_mutex); return mmd_data; } @@ -201,12 +201,12 @@ static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata, index = mmd_address & ~pdata->xpcs_window_mask; offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); - pthread_mutex_lock(&pdata->xpcs_mutex); + rte_thread_mutex_lock(&pdata->xpcs_mutex); XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); XPCS16_IOWRITE(pdata, offset, mmd_data); - pthread_mutex_unlock(&pdata->xpcs_mutex); + rte_thread_mutex_unlock(&pdata->xpcs_mutex); } static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad, diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c index ebe9a2876d..e6f252e094 100644 --- a/drivers/net/axgbe/axgbe_ethdev.c +++ b/drivers/net/axgbe/axgbe_ethdev.c @@ -2311,10 +2311,10 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) pdata->tx_desc_count = AXGBE_MAX_RING_DESC; pdata->rx_desc_count = AXGBE_MAX_RING_DESC; - pthread_mutex_init(&pdata->xpcs_mutex, NULL); - pthread_mutex_init(&pdata->i2c_mutex, NULL); - pthread_mutex_init(&pdata->an_mutex, NULL); - pthread_mutex_init(&pdata->phy_mutex, NULL); + rte_thread_mutex_init(&pdata->xpcs_mutex); + rte_thread_mutex_init(&pdata->i2c_mutex); + rte_thread_mutex_init(&pdata->an_mutex); + rte_thread_mutex_init(&pdata->phy_mutex); ret = pdata->phy_if.phy_init(pdata); if (ret) { diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h index a6226729fe..cfd436fa63 100644 --- a/drivers/net/axgbe/axgbe_ethdev.h +++ b/drivers/net/axgbe/axgbe_ethdev.h @@ -602,10 +602,10 @@ struct axgbe_port { int phy_link; int phy_speed; - pthread_mutex_t xpcs_mutex; - pthread_mutex_t i2c_mutex; - pthread_mutex_t an_mutex; - pthread_mutex_t phy_mutex; + rte_thread_mutex_t xpcs_mutex; + rte_thread_mutex_t i2c_mutex; + rte_thread_mutex_t an_mutex; + rte_thread_mutex_t phy_mutex; /* Flow control settings */ unsigned int pause_autoneg; diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c index ab3738a12e..c17f9a8b9e 100644 --- a/drivers/net/axgbe/axgbe_i2c.c +++ b/drivers/net/axgbe/axgbe_i2c.c @@ -229,7 +229,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op) int ret; uint64_t timeout; - pthread_mutex_lock(&pdata->i2c_mutex); + rte_thread_mutex_lock(&pdata->i2c_mutex); ret = axgbe_i2c_disable(pdata); if (ret) { PMD_DRV_LOG(ERR, "failed to disable i2c master\n"); @@ -282,7 +282,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op) } unlock: - pthread_mutex_unlock(&pdata->i2c_mutex); + rte_thread_mutex_unlock(&pdata->i2c_mutex); return ret; } diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c index 4f98e695ae..5f3d9b360e 100644 --- a/drivers/net/axgbe/axgbe_mdio.c +++ b/drivers/net/axgbe/axgbe_mdio.c @@ -686,9 +686,9 @@ static void axgbe_an73_isr(struct axgbe_port *pdata) if (pdata->an_int) { /* Clear the interrupt(s) that fired and process them */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); - pthread_mutex_lock(&pdata->an_mutex); + rte_thread_mutex_lock(&pdata->an_mutex); axgbe_an73_state_machine(pdata); - pthread_mutex_unlock(&pdata->an_mutex); + rte_thread_mutex_unlock(&pdata->an_mutex); } else { /* Enable AN interrupts */ axgbe_an73_enable_interrupts(pdata); @@ -977,7 +977,7 @@ static int axgbe_phy_config_aneg(struct axgbe_port *pdata) { int ret; 
- pthread_mutex_lock(&pdata->an_mutex); + rte_thread_mutex_lock(&pdata->an_mutex); ret = __axgbe_phy_config_aneg(pdata); if (ret) @@ -985,7 +985,7 @@ static int axgbe_phy_config_aneg(struct axgbe_port *pdata) else rte_bit_relaxed_clear32(AXGBE_LINK_ERR, &pdata->dev_state); - pthread_mutex_unlock(&pdata->an_mutex); + rte_thread_mutex_unlock(&pdata->an_mutex); return ret; } diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c index 02236ec192..1e1d6358d8 100644 --- a/drivers/net/axgbe/axgbe_phy_impl.c +++ b/drivers/net/axgbe/axgbe_phy_impl.c @@ -403,7 +403,7 @@ static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata) phy_data->comm_owned = 0; - pthread_mutex_unlock(&pdata->phy_mutex); + rte_thread_mutex_unlock(&pdata->phy_mutex); } static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata) @@ -416,7 +416,7 @@ static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata) * the driver needs to take the software mutex and then the hardware * mutexes before being able to use the busses. */ - pthread_mutex_lock(&pdata->phy_mutex); + rte_thread_mutex_lock(&pdata->phy_mutex); if (phy_data->comm_owned) return 0; @@ -447,7 +447,7 @@ static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata) return 0; } - pthread_mutex_unlock(&pdata->phy_mutex); + rte_thread_mutex_unlock(&pdata->phy_mutex); PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n"); diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index dfdfa9f7a0..b6a74753c5 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -533,8 +533,8 @@ struct bnxt_mark_info { struct bnxt_rep_info { struct rte_eth_dev *vfr_eth_dev; - pthread_mutex_t vfr_lock; - pthread_mutex_t vfr_start_lock; + rte_thread_mutex_t vfr_lock; + rte_thread_mutex_t vfr_start_lock; bool conduit_valid; }; @@ -693,7 +693,7 @@ struct bnxt { #define BNXT_FW_CAP_ADV_FLOW_COUNTERS BIT(6) #define BNXT_FW_CAP_LINK_ADMIN BIT(7) - pthread_mutex_t flow_lock; + rte_thread_mutex_t flow_lock; uint32_t vnic_cap_flags; #define BNXT_VNIC_CAP_COS_CLASSIFY BIT(0) @@ -747,18 +747,18 @@ struct bnxt { rte_iova_t hwrm_short_cmd_req_dma_addr; rte_spinlock_t hwrm_lock; /* synchronize between dev_configure_op and int handler */ - pthread_mutex_t def_cp_lock; + rte_thread_mutex_t def_cp_lock; /* synchronize between dev_start_op and async evt handler * Locking sequence in async evt handler will be * def_cp_lock * health_check_lock */ - pthread_mutex_t health_check_lock; + rte_thread_mutex_t health_check_lock; /* synchronize between dev_stop/dev_close_op and * error recovery thread triggered as part of * HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY */ - pthread_mutex_t err_recovery_lock; + rte_thread_mutex_t err_recovery_lock; uint16_t max_req_len; uint16_t max_resp_len; uint16_t hwrm_max_ext_req_len; @@ -957,10 +957,10 @@ uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, extern const struct rte_flow_ops bnxt_flow_ops; #define bnxt_acquire_flow_lock(bp) \ - pthread_mutex_lock(&(bp)->flow_lock) + rte_thread_mutex_lock(&(bp)->flow_lock) #define bnxt_release_flow_lock(bp) \ - pthread_mutex_unlock(&(bp)->flow_lock) + rte_thread_mutex_unlock(&(bp)->flow_lock) #define BNXT_VALID_VNIC_OR_RET(bp, vnic_id) do { \ if ((vnic_id) >= (bp)->max_vnics) { \ diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c index 2c7fd78c3d..a50511047c 100644 --- a/drivers/net/bnxt/bnxt_cpr.c +++ b/drivers/net/bnxt/bnxt_cpr.c @@ -135,7 +135,7 @@ void bnxt_handle_async_event(struct bnxt *bp, return; } - 
pthread_mutex_lock(&bp->err_recovery_lock); + rte_thread_mutex_lock(&bp->err_recovery_lock); event_data = rte_le_to_cpu_32(async_cmp->event_data1); /* timestamp_lo/hi values are in units of 100ms */ bp->fw_reset_max_msecs = async_cmp->timestamp_hi ? @@ -157,7 +157,7 @@ void bnxt_handle_async_event(struct bnxt *bp, } bp->flags |= BNXT_FLAG_FW_RESET; - pthread_mutex_unlock(&bp->err_recovery_lock); + rte_thread_mutex_unlock(&bp->err_recovery_lock); rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, (void *)bp); break; diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index f5d2dc8590..5bb63756aa 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -1049,7 +1049,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) * are calculated correctly. */ - pthread_mutex_lock(&bp->def_cp_lock); + rte_thread_mutex_lock(&bp->def_cp_lock); if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { bnxt_disable_int(bp); @@ -1059,20 +1059,20 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); if (rc) { PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); - pthread_mutex_unlock(&bp->def_cp_lock); + rte_thread_mutex_unlock(&bp->def_cp_lock); return -ENOSPC; } if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { rc = bnxt_alloc_async_cp_ring(bp); if (rc) { - pthread_mutex_unlock(&bp->def_cp_lock); + rte_thread_mutex_unlock(&bp->def_cp_lock); return rc; } bnxt_enable_int(bp); } - pthread_mutex_unlock(&bp->def_cp_lock); + rte_thread_mutex_unlock(&bp->def_cp_lock); } /* Inherit new configurations */ @@ -1456,14 +1456,14 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; - pthread_mutex_lock(&bp->err_recovery_lock); + rte_thread_mutex_lock(&bp->err_recovery_lock); if (bp->flags & BNXT_FLAG_FW_RESET) { PMD_DRV_LOG(ERR, "Adapter recovering from error..Please retry\n"); - pthread_mutex_unlock(&bp->err_recovery_lock); + rte_thread_mutex_unlock(&bp->err_recovery_lock); return -EAGAIN; } - pthread_mutex_unlock(&bp->err_recovery_lock); + rte_thread_mutex_unlock(&bp->err_recovery_lock); return bnxt_dev_stop(eth_dev); } @@ -1545,13 +1545,13 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) static void bnxt_uninit_locks(struct bnxt *bp) { - pthread_mutex_destroy(&bp->flow_lock); - pthread_mutex_destroy(&bp->def_cp_lock); - pthread_mutex_destroy(&bp->health_check_lock); - pthread_mutex_destroy(&bp->err_recovery_lock); + rte_thread_mutex_destroy(&bp->flow_lock); + rte_thread_mutex_destroy(&bp->def_cp_lock); + rte_thread_mutex_destroy(&bp->health_check_lock); + rte_thread_mutex_destroy(&bp->err_recovery_lock); if (bp->rep_info) { - pthread_mutex_destroy(&bp->rep_info->vfr_lock); - pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); + rte_thread_mutex_destroy(&bp->rep_info->vfr_lock); + rte_thread_mutex_destroy(&bp->rep_info->vfr_start_lock); } } @@ -1583,14 +1583,14 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - pthread_mutex_lock(&bp->err_recovery_lock); + rte_thread_mutex_lock(&bp->err_recovery_lock); if (bp->flags & BNXT_FLAG_FW_RESET) { PMD_DRV_LOG(ERR, "Adapter recovering from error...Please retry\n"); - pthread_mutex_unlock(&bp->err_recovery_lock); + rte_thread_mutex_unlock(&bp->err_recovery_lock); return -EAGAIN; } - pthread_mutex_unlock(&bp->err_recovery_lock); + rte_thread_mutex_unlock(&bp->err_recovery_lock); /* cancel the recovery handler before remove dev */ 
rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); @@ -4076,7 +4076,7 @@ static void bnxt_dev_recover(void *arg) goto err_start; PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); - pthread_mutex_unlock(&bp->err_recovery_lock); + rte_thread_mutex_unlock(&bp->err_recovery_lock); return; err_start: @@ -4084,7 +4084,7 @@ static void bnxt_dev_recover(void *arg) err: bp->flags |= BNXT_FLAG_FATAL_ERROR; bnxt_uninit_resources(bp, false); - pthread_mutex_unlock(&bp->err_recovery_lock); + rte_thread_mutex_unlock(&bp->err_recovery_lock); PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); } @@ -4254,7 +4254,7 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp) { uint32_t polling_freq; - pthread_mutex_lock(&bp->health_check_lock); + rte_thread_mutex_lock(&bp->health_check_lock); if (!bnxt_is_recovery_enabled(bp)) goto done; @@ -4269,7 +4269,7 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp) bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; done: - pthread_mutex_unlock(&bp->health_check_lock); + rte_thread_mutex_unlock(&bp->health_check_lock); } static void bnxt_cancel_fw_health_check(struct bnxt *bp) @@ -5055,25 +5055,25 @@ bnxt_init_locks(struct bnxt *bp) { int err; - err = pthread_mutex_init(&bp->flow_lock, NULL); + err = rte_thread_mutex_init(&bp->flow_lock); if (err) { PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); return err; } - err = pthread_mutex_init(&bp->def_cp_lock, NULL); + err = rte_thread_mutex_init(&bp->def_cp_lock); if (err) { PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); return err; } - err = pthread_mutex_init(&bp->health_check_lock, NULL); + err = rte_thread_mutex_init(&bp->health_check_lock); if (err) { PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); return err; } - err = pthread_mutex_init(&bp->err_recovery_lock, NULL); + err = rte_thread_mutex_init(&bp->err_recovery_lock); if (err) PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); @@ -5856,14 +5856,14 @@ static int bnxt_init_rep_info(struct bnxt *bp) for (i = 0; i < BNXT_MAX_CFA_CODE; i++) bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; - rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); + rc = rte_thread_mutex_init(&bp->rep_info->vfr_lock); if (rc) { PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); bnxt_free_rep_info(bp); return rc; } - rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); + rc = rte_thread_mutex_init(&bp->rep_info->vfr_start_lock); if (rc) { PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); bnxt_free_rep_info(bp); diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c index 8abbadb3d1..e1d980cf91 100644 --- a/drivers/net/bnxt/bnxt_irq.c +++ b/drivers/net/bnxt/bnxt_irq.c @@ -30,15 +30,15 @@ void bnxt_int_handler(void *param) return; raw_cons = cpr->cp_raw_cons; - pthread_mutex_lock(&bp->def_cp_lock); + rte_thread_mutex_lock(&bp->def_cp_lock); while (1) { if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) { - pthread_mutex_unlock(&bp->def_cp_lock); + rte_thread_mutex_unlock(&bp->def_cp_lock); return; } if (is_bnxt_in_error(bp)) { - pthread_mutex_unlock(&bp->def_cp_lock); + rte_thread_mutex_unlock(&bp->def_cp_lock); return; } @@ -58,7 +58,7 @@ void bnxt_int_handler(void *param) else B_CP_DB_REARM(cpr, cpr->cp_raw_cons); - pthread_mutex_unlock(&bp->def_cp_lock); + rte_thread_mutex_unlock(&bp->def_cp_lock); } int bnxt_free_int(struct bnxt *bp) diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c index b224a7d2c2..3f22a55110 100644 --- a/drivers/net/bnxt/bnxt_reps.c +++ 
b/drivers/net/bnxt/bnxt_reps.c @@ -120,7 +120,7 @@ bnxt_rep_tx_burst(void *tx_queue, qid = vfr_txq->txq->queue_id; vf_rep_bp = vfr_txq->bp; parent = vf_rep_bp->parent_dev->data->dev_private; - pthread_mutex_lock(&parent->rep_info->vfr_lock); + rte_thread_mutex_lock(&parent->rep_info->vfr_lock); ptxq = parent->tx_queues[qid]; ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action; @@ -132,7 +132,7 @@ bnxt_rep_tx_burst(void *tx_queue, rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); ptxq->vfr_tx_cfa_action = 0; - pthread_mutex_unlock(&parent->rep_info->vfr_lock); + rte_thread_mutex_unlock(&parent->rep_info->vfr_lock); return rc; } @@ -407,15 +407,15 @@ int bnxt_rep_dev_start_op(struct rte_eth_dev *eth_dev) rep_info = &parent_bp->rep_info[rep_bp->vf_id]; BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR start\n", eth_dev->data->port_id); - pthread_mutex_lock(&rep_info->vfr_start_lock); + rte_thread_mutex_lock(&rep_info->vfr_start_lock); if (!rep_info->conduit_valid) { rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp); if (rc || !rep_info->conduit_valid) { - pthread_mutex_unlock(&rep_info->vfr_start_lock); + rte_thread_mutex_unlock(&rep_info->vfr_start_lock); return rc; } } - pthread_mutex_unlock(&rep_info->vfr_start_lock); + rte_thread_mutex_unlock(&rep_info->vfr_start_lock); rc = bnxt_vfr_alloc(eth_dev); if (rc) { diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c index 5c805eef97..455d83bef8 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c @@ -28,7 +28,7 @@ STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list = STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list); /* Mutex to synchronize bnxt_ulp_session_list operations. */ -static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t bnxt_ulp_global_mutex = RTE_THREAD_MUTEX_INITIALIZER; /* * Allow the deletion of context only for the bnxt device that @@ -640,7 +640,7 @@ ulp_ctx_detach(struct bnxt *bp) static void ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init) { - pthread_mutex_lock(&session->bnxt_ulp_mutex); + rte_thread_mutex_lock(&session->bnxt_ulp_mutex); if (!session->bnxt_ulp_init) { session->bnxt_ulp_init = true; @@ -649,7 +649,7 @@ ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init) *init = true; } - pthread_mutex_unlock(&session->bnxt_ulp_mutex); + rte_thread_mutex_unlock(&session->bnxt_ulp_mutex); } /* @@ -690,7 +690,7 @@ ulp_session_init(struct bnxt *bp, pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device); pci_addr = &pci_dev->addr; - pthread_mutex_lock(&bnxt_ulp_global_mutex); + rte_thread_mutex_lock(&bnxt_ulp_global_mutex); session = ulp_get_session(pci_addr); if (!session) { @@ -701,17 +701,17 @@ ulp_session_init(struct bnxt *bp, if (!session) { BNXT_TF_DBG(ERR, "Allocation failed for bnxt_ulp_session\n"); - pthread_mutex_unlock(&bnxt_ulp_global_mutex); + rte_thread_mutex_unlock(&bnxt_ulp_global_mutex); return NULL; } else { /* Add it to the queue */ session->pci_info.domain = pci_addr->domain; session->pci_info.bus = pci_addr->bus; - rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL); + rc = rte_thread_mutex_init(&session->bnxt_ulp_mutex); if (rc) { BNXT_TF_DBG(ERR, "mutex create failed\n"); - pthread_mutex_unlock(&bnxt_ulp_global_mutex); + rte_thread_mutex_unlock(&bnxt_ulp_global_mutex); return NULL; } STAILQ_INSERT_TAIL(&bnxt_ulp_session_list, @@ -719,7 +719,7 @@ ulp_session_init(struct bnxt *bp, } } ulp_context_initialized(session, init); - 
pthread_mutex_unlock(&bnxt_ulp_global_mutex); + rte_thread_mutex_unlock(&bnxt_ulp_global_mutex); return session; } @@ -734,12 +734,12 @@ ulp_session_deinit(struct bnxt_ulp_session_state *session) return; if (!session->cfg_data) { - pthread_mutex_lock(&bnxt_ulp_global_mutex); + rte_thread_mutex_lock(&bnxt_ulp_global_mutex); STAILQ_REMOVE(&bnxt_ulp_session_list, session, bnxt_ulp_session_state, next); - pthread_mutex_destroy(&session->bnxt_ulp_mutex); + rte_thread_mutex_destroy(&session->bnxt_ulp_mutex); rte_free(session); - pthread_mutex_unlock(&bnxt_ulp_global_mutex); + rte_thread_mutex_unlock(&bnxt_ulp_global_mutex); } } @@ -892,7 +892,7 @@ bnxt_ulp_deinit(struct bnxt *bp, BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0); /* free the flow db lock */ - pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock); + rte_thread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock); /* Delete the ulp context and tf session and free the ulp context */ ulp_ctx_deinit(bp, session); @@ -917,7 +917,7 @@ bnxt_ulp_init(struct bnxt *bp, goto jump_to_error; } - rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL); + rc = rte_thread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock); if (rc) { BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n"); goto jump_to_error; @@ -1117,9 +1117,9 @@ bnxt_ulp_port_deinit(struct bnxt *bp) /* Get the session details */ pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device); pci_addr = &pci_dev->addr; - pthread_mutex_lock(&bnxt_ulp_global_mutex); + rte_thread_mutex_lock(&bnxt_ulp_global_mutex); session = ulp_get_session(pci_addr); - pthread_mutex_unlock(&bnxt_ulp_global_mutex); + rte_thread_mutex_unlock(&bnxt_ulp_global_mutex); /* session not found then just exit */ if (!session) { @@ -1451,7 +1451,7 @@ bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx) if (!ulp_ctx || !ulp_ctx->cfg_data) return -1; - if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) { + if (rte_thread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) { BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n"); return -1; } @@ -1465,5 +1465,5 @@ bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx) if (!ulp_ctx || !ulp_ctx->cfg_data) return; - pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + rte_thread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); } diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h index 330965061a..88d64d703f 100644 --- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h +++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h @@ -49,7 +49,7 @@ struct bnxt_ulp_data { uint32_t dev_id; /* Hardware device id */ uint32_t ref_cnt; struct bnxt_ulp_flow_db *flow_db; - pthread_mutex_t flow_db_lock; + rte_thread_mutex_t flow_db_lock; void *mapper_data; struct bnxt_ulp_port_db *port_db; struct bnxt_ulp_fc_info *fc_info; @@ -75,7 +75,7 @@ struct bnxt_ulp_pci_info { struct bnxt_ulp_session_state { STAILQ_ENTRY(bnxt_ulp_session_state) next; bool bnxt_ulp_init; - pthread_mutex_t bnxt_ulp_mutex; + rte_thread_mutex_t bnxt_ulp_mutex; struct bnxt_ulp_pci_info pci_info; struct bnxt_ulp_data *cfg_data; struct tf *g_tfp; diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c index 3eddbd6831..26c3e7551c 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c +++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c @@ -84,7 +84,7 @@ ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt) if (!ulp_fc_info) goto error; - rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL); + rc = rte_thread_mutex_init(&ulp_fc_info->fc_lock); if (rc) { PMD_DRV_LOG(ERR, "Failed to initialize fc 
mutex\n"); goto error; @@ -141,7 +141,7 @@ ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt) ulp_fc_mgr_thread_cancel(ctxt); - pthread_mutex_destroy(&ulp_fc_info->fc_lock); + rte_thread_mutex_destroy(&ulp_fc_info->fc_lock); for (i = 0; i < TF_DIR_MAX; i++) rte_free(ulp_fc_info->sw_acc_tbl[i]); @@ -383,7 +383,7 @@ ulp_fc_mgr_alarm_cb(void *arg) goto out; if (!ulp_fc_info->num_entries) { - pthread_mutex_unlock(&ulp_fc_info->fc_lock); + rte_thread_mutex_unlock(&ulp_fc_info->fc_lock); ulp_fc_mgr_thread_cancel(ctxt); return; } @@ -414,7 +414,7 @@ ulp_fc_mgr_alarm_cb(void *arg) } } - pthread_mutex_unlock(&ulp_fc_info->fc_lock); + rte_thread_mutex_unlock(&ulp_fc_info->fc_lock); /* * If cmd fails once, no need of @@ -503,12 +503,12 @@ int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir, if (!ulp_fc_info) return -EIO; - pthread_mutex_lock(&ulp_fc_info->fc_lock); + rte_thread_mutex_lock(&ulp_fc_info->fc_lock); sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx; ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true; ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id; ulp_fc_info->num_entries++; - pthread_mutex_unlock(&ulp_fc_info->fc_lock); + rte_thread_mutex_unlock(&ulp_fc_info->fc_lock); return 0; } @@ -535,14 +535,14 @@ int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir, if (!ulp_fc_info) return -EIO; - pthread_mutex_lock(&ulp_fc_info->fc_lock); + rte_thread_mutex_lock(&ulp_fc_info->fc_lock); sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx; ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false; ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0; ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0; ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0; ulp_fc_info->num_entries--; - pthread_mutex_unlock(&ulp_fc_info->fc_lock); + rte_thread_mutex_unlock(&ulp_fc_info->fc_lock); return 0; } @@ -607,7 +607,7 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt, hw_cntr_id = params.resource_hndl; if (params.resource_sub_type == BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) { - pthread_mutex_lock(&ulp_fc_info->fc_lock); + rte_thread_mutex_lock(&ulp_fc_info->fc_lock); sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx; sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx]; @@ -621,7 +621,7 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt, sw_acc_tbl_entry->pkt_count = 0; sw_acc_tbl_entry->byte_count = 0; } - pthread_mutex_unlock(&ulp_fc_info->fc_lock); + rte_thread_mutex_unlock(&ulp_fc_info->fc_lock); } else if (params.resource_sub_type == BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT_ACC) { /* Get stats from the parent child table */ @@ -663,7 +663,7 @@ int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt, if (!ulp_fc_info) return -EIO; - pthread_mutex_lock(&ulp_fc_info->fc_lock); + rte_thread_mutex_lock(&ulp_fc_info->fc_lock); sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx; if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) { ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid; @@ -672,7 +672,7 @@ int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt, hw_cntr_id, fid); rc = -ENOENT; } - pthread_mutex_unlock(&ulp_fc_info->fc_lock); + rte_thread_mutex_unlock(&ulp_fc_info->fc_lock); return rc; } diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h index 04cb86bea2..cb6bbcad6c 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h +++ 
b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h @@ -47,7 +47,7 @@ struct bnxt_ulp_fc_info { struct hw_fc_mem_info shadow_hw_tbl[TF_DIR_MAX]; uint32_t flags; uint32_t num_entries; - pthread_mutex_t fc_lock; + rte_thread_mutex_t fc_lock; }; int32_t diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h index a1d749f83f..f06bd2b279 100644 --- a/drivers/net/ena/base/ena_plat_dpdk.h +++ b/drivers/net/ena/base/ena_plat_dpdk.h @@ -129,14 +129,14 @@ do { \ #define q_waitqueue_t \ struct { \ pthread_cond_t cond; \ - pthread_mutex_t mutex; \ + rte_thread_mutex_t mutex; \ } #define ena_wait_queue_t q_waitqueue_t #define ENA_WAIT_EVENT_INIT(waitqueue) \ do { \ - pthread_mutex_init(&(waitqueue).mutex, NULL); \ + rte_thread_mutex_init(&(waitqueue).mutex); \ pthread_cond_init(&(waitqueue).cond, NULL); \ } while (0) @@ -149,10 +149,10 @@ do { \ wait.tv_sec = now.tv_sec + timeout / 1000000UL; \ timeout_us = timeout % 1000000UL; \ wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \ - pthread_mutex_lock(&waitevent.mutex); \ + rte_thread_mutex_lock(&waitevent.mutex); \ pthread_cond_timedwait(&waitevent.cond, \ &waitevent.mutex, &wait); \ - pthread_mutex_unlock(&waitevent.mutex); \ + rte_thread_mutex_unlock(&waitevent.mutex); \ } while (0) #define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond) /* pthread condition doesn't need to be rearmed after usage */ diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index cd66348f2f..1115afa81b 100644 --- a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -107,7 +107,7 @@ struct enic { int iommu_groupid; int eventfd; uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; - pthread_t err_intr_thread; + rte_thread_t err_intr_thread; int promisc; int allmulti; uint8_t ig_vlan_strip_en; diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c index c8e433239b..5f7bd4a3fa 100644 --- a/drivers/net/ice/ice_dcf_parent.c +++ b/drivers/net/ice/ice_dcf_parent.c @@ -3,7 +3,7 @@ */ #include #include -#include +#include #include #include @@ -154,7 +154,7 @@ start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id) #define THREAD_NAME_LEN 16 struct ice_dcf_reset_event_param *param; char name[THREAD_NAME_LEN]; - pthread_t thread; + rte_thread_t thread; int ret; param = malloc(sizeof(*param)); diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c index 589d9fa587..97e36a2e0e 100644 --- a/drivers/net/ipn3ke/ipn3ke_representor.c +++ b/drivers/net/ipn3ke/ipn3ke_representor.c @@ -27,7 +27,7 @@ #include "ipn3ke_ethdev.h" static int ipn3ke_rpst_scan_num; -static pthread_t ipn3ke_rpst_scan_thread; +static rte_thread_t ipn3ke_rpst_scan_thread; /** Double linked list of representor port. 
*/ TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst); @@ -2614,11 +2614,11 @@ ipn3ke_rpst_scan_check(void) return -1; } } else if (ipn3ke_rpst_scan_num == 0) { - ret = pthread_cancel(ipn3ke_rpst_scan_thread); + ret = rte_thread_cancel(ipn3ke_rpst_scan_thread); if (ret) IPN3KE_AFU_PMD_ERR("Can't cancel the thread"); - ret = pthread_join(ipn3ke_rpst_scan_thread, NULL); + ret = rte_thread_join(ipn3ke_rpst_scan_thread, NULL); if (ret) IPN3KE_AFU_PMD_ERR("Can't join the thread"); diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index a0ce18ca24..272fdcfb8d 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -516,7 +516,7 @@ struct ixgbe_adapter { uint8_t pflink_fullchk; uint8_t mac_ctrl_frame_fwd; rte_atomic32_t link_thread_running; - pthread_t link_thread_tid; + rte_thread_t link_thread_tid; }; struct ixgbe_vf_representor { diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c index 9ce74e549b..610ce7daa2 100644 --- a/drivers/net/kni/rte_eth_kni.c +++ b/drivers/net/kni/rte_eth_kni.c @@ -3,7 +3,7 @@ */ #include -#include +#include #include #include @@ -50,7 +50,7 @@ struct pmd_internals { uint16_t port_id; int is_kni_started; - pthread_t thread; + rte_thread_t thread; int stop_thread; int no_request_thread; @@ -186,11 +186,11 @@ eth_kni_dev_stop(struct rte_eth_dev *dev) if (internals->no_request_thread == 0 && internals->stop_thread == 0) { internals->stop_thread = 1; - ret = pthread_cancel(internals->thread); + ret = rte_thread_cancel(internals->thread); if (ret) PMD_LOG(ERR, "Can't cancel the thread"); - ret = pthread_join(internals->thread, NULL); + ret = rte_thread_join(internals->thread, NULL); if (ret) PMD_LOG(ERR, "Can't join the thread"); } diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 41622c1bf7..c540be96a7 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -2534,7 +2534,7 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, int err = 0; sh->numa_node = spawn->pci_dev->device.numa_node; - pthread_mutex_init(&sh->txpp.mutex, NULL); + rte_thread_mutex_init(&sh->txpp.mutex); /* * Configure environment variable "MLX5_BF_SHUT_UP" * before the device creation. The rdma_core library diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 7e13b38ee0..cb4386c36e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -183,7 +183,7 @@ int mlx5_logtype; static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list = LIST_HEAD_INITIALIZER(); -static pthread_mutex_t mlx5_dev_ctx_list_mutex; +static rte_thread_mutex_t mlx5_dev_ctx_list_mutex; static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) [MLX5_IPOOL_DECAP_ENCAP] = { @@ -981,7 +981,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, MLX5_ASSERT(spawn); /* Secondary process should not create the shared context. */ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); - pthread_mutex_lock(&mlx5_dev_ctx_list_mutex); + rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex); /* Search for IB context by device name. 
*/ LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) { if (!strcmp(sh->ibdev_name, @@ -1108,11 +1108,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next); rte_spinlock_init(&sh->geneve_tlv_opt_sl); exit: - pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); + rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex); return sh; error: - pthread_mutex_destroy(&sh->txpp.mutex); - pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); + rte_thread_mutex_destroy(&sh->txpp.mutex); + rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex); MLX5_ASSERT(sh); if (sh->cnt_id_tbl) mlx5_l3t_destroy(sh->cnt_id_tbl); @@ -1144,7 +1144,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) { - pthread_mutex_lock(&mlx5_dev_ctx_list_mutex); + rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex); #ifdef RTE_LIBRTE_MLX5_DEBUG /* Check the object presence in the list. */ struct mlx5_dev_ctx_shared *lctx; @@ -1175,7 +1175,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) /* Release flow workspaces objects on the last device. */ if (LIST_EMPTY(&mlx5_dev_ctx_list)) mlx5_flow_os_release_workspace(); - pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); + rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex); /* * Ensure there is no async event handler installed. * Only primary process handles async device events. @@ -1208,11 +1208,11 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) if (sh->ctx) claim_zero(mlx5_glue->close_device(sh->ctx)); MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL); - pthread_mutex_destroy(&sh->txpp.mutex); + rte_thread_mutex_destroy(&sh->txpp.mutex); mlx5_free(sh); return; exit: - pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); + rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex); } /** @@ -2338,7 +2338,7 @@ RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE) */ RTE_INIT(rte_mlx5_pmd_init) { - pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL); + rte_thread_mutex_init(&mlx5_dev_ctx_list_mutex); mlx5_common_init(); /* Build the static tables for Verbs conversion. */ mlx5_set_ptype_table(); diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index c9bc45cc1e..e8f657e7cc 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -817,7 +817,7 @@ struct mlx5_txpp_ts { /* Tx packet pacing structure. */ struct mlx5_dev_txpp { - pthread_mutex_t mutex; /* Pacing create/destroy mutex. */ + rte_thread_mutex_t mutex; /* Pacing create/destroy mutex. */ uint32_t refcnt; /* Pacing reference counter. */ uint32_t freq; /* Timestamp frequency, Hz. */ uint32_t tick; /* Completion tick duration in nanoseconds. 
*/ diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c index d90399afb5..e308c5e599 100644 --- a/drivers/net/mlx5/mlx5_txpp.c +++ b/drivers/net/mlx5/mlx5_txpp.c @@ -905,7 +905,7 @@ mlx5_txpp_start(struct rte_eth_dev *dev) if (ret < 0) return 0; } - ret = pthread_mutex_lock(&sh->txpp.mutex); + ret = rte_thread_mutex_lock(&sh->txpp.mutex); MLX5_ASSERT(!ret); RTE_SET_USED(ret); if (sh->txpp.refcnt) { @@ -921,7 +921,7 @@ mlx5_txpp_start(struct rte_eth_dev *dev) rte_errno = -err; } } - ret = pthread_mutex_unlock(&sh->txpp.mutex); + ret = rte_thread_mutex_unlock(&sh->txpp.mutex); MLX5_ASSERT(!ret); RTE_SET_USED(ret); return err; @@ -948,7 +948,7 @@ mlx5_txpp_stop(struct rte_eth_dev *dev) return; } priv->txpp_en = 0; - ret = pthread_mutex_lock(&sh->txpp.mutex); + ret = rte_thread_mutex_lock(&sh->txpp.mutex); MLX5_ASSERT(!ret); RTE_SET_USED(ret); MLX5_ASSERT(sh->txpp.refcnt); @@ -956,7 +956,7 @@ mlx5_txpp_stop(struct rte_eth_dev *dev) return; /* No references any more, do actual destroy. */ mlx5_txpp_destroy(sh); - ret = pthread_mutex_unlock(&sh->txpp.mutex); + ret = rte_thread_mutex_unlock(&sh->txpp.mutex); MLX5_ASSERT(!ret); RTE_SET_USED(ret); } diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.c b/drivers/net/mlx5/windows/mlx5_flow_os.c index c4d5790726..229bad3ea7 100644 --- a/drivers/net/mlx5/windows/mlx5_flow_os.c +++ b/drivers/net/mlx5/windows/mlx5_flow_os.c @@ -253,7 +253,7 @@ struct mlx5_workspace_thread { static struct mlx5_workspace_thread *curr; static struct mlx5_workspace_thread *first; rte_thread_key ws_tls_index; -static pthread_mutex_t lock_thread_list; +static rte_thread_mutex_t lock_thread_list; static bool mlx5_is_thread_alive(HANDLE thread_handle) @@ -330,7 +330,7 @@ mlx5_flow_os_release_workspace(void) free(first); } rte_thread_key_delete(ws_tls_index); - pthread_mutex_destroy(&lock_thread_list); + rte_thread_mutex_destroy(&lock_thread_list); } static int @@ -352,7 +352,7 @@ mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data) } temp->mlx5_ws = data; temp->thread_handle = curr_thread; - pthread_mutex_lock(&lock_thread_list); + rte_thread_mutex_lock(&lock_thread_list); mlx5_clear_thread_list(); if (!first) { first = temp; @@ -361,7 +361,7 @@ mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data) curr->next = temp; curr = curr->next; } - pthread_mutex_unlock(&lock_thread_list); + rte_thread_mutex_unlock(&lock_thread_list); return 0; } @@ -374,7 +374,7 @@ mlx5_flow_os_init_workspace_once(void) DRV_LOG(ERR, "Can't create flow workspace data thread key."); return err; } - pthread_mutex_init(&lock_thread_list, NULL); + rte_thread_mutex_init(&lock_thread_list); return 0; } diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c index 814063b5ce..be0169b2b2 100644 --- a/drivers/net/mlx5/windows/mlx5_os.c +++ b/drivers/net/mlx5/windows/mlx5_os.c @@ -228,7 +228,7 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, int err = 0; struct mlx5_context *mlx5_ctx; - pthread_mutex_init(&sh->txpp.mutex, NULL); + rte_thread_mutex_init(&sh->txpp.mutex); /* Set numa node from pci probe */ sh->numa_node = spawn->pci_dev->device.numa_node; diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h index c5b5399282..f0cb71c93d 100644 --- a/drivers/net/qede/base/bcm_osal.h +++ b/drivers/net/qede/base/bcm_osal.h @@ -153,10 +153,10 @@ void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys); /* Mutexes */ -typedef pthread_mutex_t osal_mutex_t; -#define OSAL_MUTEX_RELEASE(lock) 
pthread_mutex_unlock(lock) -#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL) -#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock) +typedef rte_thread_mutex_t osal_mutex_t; +#define OSAL_MUTEX_RELEASE(lock) rte_thread_mutex_unlock(lock) +#define OSAL_MUTEX_INIT(lock) rte_thread_mutex_init(lock) +#define OSAL_MUTEX_ACQUIRE(lock) rte_thread_mutex_lock(lock) #define OSAL_MUTEX_ALLOC(hwfn, lock) nothing #define OSAL_MUTEX_DEALLOC(lock) nothing diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index d198fc8a8e..55f72d9532 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -3,7 +3,7 @@ * Copyright(c) 2016-2018 Intel Corporation */ #include -#include +#include #include #include @@ -121,7 +121,7 @@ TAILQ_HEAD(internal_list_head, internal_list); static struct internal_list_head internal_list = TAILQ_HEAD_INITIALIZER(internal_list); -static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER; static struct rte_eth_link pmd_link = { .link_speed = 10000, @@ -507,7 +507,7 @@ find_internal_resource(char *ifname) if (ifname == NULL) return NULL; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_FOREACH(list, &internal_list, next) { internal = list->eth_dev->data->dev_private; @@ -517,7 +517,7 @@ find_internal_resource(char *ifname) } } - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); if (!found) return NULL; @@ -1001,9 +1001,9 @@ vhost_driver_setup(struct rte_eth_dev *eth_dev) goto free_list; list->eth_dev = eth_dev; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_INSERT_TAIL(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); rte_spinlock_init(&vring_state->lock); vring_states[eth_dev->data->port_id] = vring_state; @@ -1035,9 +1035,9 @@ vhost_driver_setup(struct rte_eth_dev *eth_dev) rte_vhost_driver_unregister(internal->iface_name); list_remove: vring_states[eth_dev->data->port_id] = NULL; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_REMOVE(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); rte_free(vring_state); free_list: rte_free(list); @@ -1093,7 +1093,7 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id) if (!rte_eth_dev_is_valid_port(port_id)) return -1; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_FOREACH(list, &internal_list, next) { eth_dev = list->eth_dev; @@ -1106,7 +1106,7 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id) } } - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); return vid; } @@ -1184,9 +1184,9 @@ eth_dev_close(struct rte_eth_dev *dev) list = find_internal_resource(internal->iface_name); if (list) { rte_vhost_driver_unregister(internal->iface_name); - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_REMOVE(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); rte_free(list); } diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c index 364f43e21c..9231da676b 100644 --- 
a/drivers/net/virtio/virtio_user/virtio_user_dev.c +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -143,7 +143,7 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev) uint64_t features; int ret = -1; - pthread_mutex_lock(&dev->mutex); + rte_thread_mutex_lock(&dev->mutex); /* Step 0: tell vhost to create queues */ if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0) @@ -161,7 +161,7 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev) goto error; PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features); error: - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); return ret; } @@ -185,7 +185,7 @@ virtio_user_start_device(struct virtio_user_dev *dev) * memory subsystem in the future. */ rte_mcfg_mem_read_lock(); - pthread_mutex_lock(&dev->mutex); + rte_thread_mutex_lock(&dev->mutex); /* Step 2: share memory regions */ ret = dev->ops->set_memory_table(dev); @@ -206,12 +206,12 @@ virtio_user_start_device(struct virtio_user_dev *dev) dev->started = true; - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); rte_mcfg_mem_read_unlock(); return 0; error: - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); rte_mcfg_mem_read_unlock(); PMD_INIT_LOG(ERR, "(%s) Failed to start device\n", dev->path); @@ -226,7 +226,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev) uint32_t i; int ret; - pthread_mutex_lock(&dev->mutex); + rte_thread_mutex_lock(&dev->mutex); if (!dev->started) goto out; @@ -249,11 +249,11 @@ int virtio_user_stop_device(struct virtio_user_dev *dev) dev->started = false; out: - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); return 0; err: - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); PMD_INIT_LOG(ERR, "(%s) Failed to stop device\n", dev->path); @@ -380,7 +380,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused, if (msl->external) return; - pthread_mutex_lock(&dev->mutex); + rte_thread_mutex_lock(&dev->mutex); if (dev->started == false) goto exit; @@ -405,7 +405,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused, } exit: - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); if (ret < 0) PMD_DRV_LOG(ERR, "(%s) Failed to update memory table\n", dev->path); @@ -491,7 +491,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, uint64_t backend_features; int i; - pthread_mutex_init(&dev->mutex, NULL); + rte_thread_mutex_init(&dev->mutex); strlcpy(dev->path, path, PATH_MAX); for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) { @@ -796,13 +796,13 @@ virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status) { int ret; - pthread_mutex_lock(&dev->mutex); + rte_thread_mutex_lock(&dev->mutex); dev->status = status; ret = dev->ops->set_status(dev, status); if (ret && ret != -ENOTSUP) PMD_INIT_LOG(ERR, "(%s) Failed to set backend status\n", dev->path); - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); return ret; } @@ -812,7 +812,7 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev) int ret; uint8_t status; - pthread_mutex_lock(&dev->mutex); + rte_thread_mutex_lock(&dev->mutex); ret = dev->ops->get_status(dev, &status); if (!ret) { @@ -837,7 +837,7 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev) PMD_INIT_LOG(ERR, "(%s) Failed to get backend status\n", dev->path); } - pthread_mutex_unlock(&dev->mutex); + rte_thread_mutex_unlock(&dev->mutex); return ret; } diff --git 
a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h index 58ad5198b6..8c2df84d74 100644 --- a/drivers/net/virtio/virtio_user/virtio_user_dev.h +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h @@ -55,7 +55,7 @@ struct virtio_user_dev { bool qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS]; struct virtio_user_backend_ops *ops; - pthread_mutex_t mutex; + rte_thread_mutex_t mutex; bool started; void *backend_data; diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c index f2551be821..ccd9b5a252 100644 --- a/drivers/raw/ifpga/ifpga_rawdev.c +++ b/drivers/raw/ifpga/ifpga_rawdev.c @@ -69,7 +69,7 @@ static const struct rte_pci_id pci_ifpga_map[] = { static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM]; static int ifpga_monitor_start; -static pthread_t ifpga_monitor_start_thread; +static rte_thread_t ifpga_monitor_start_thread; #define IFPGA_MAX_IRQ 12 /* 0 for FME interrupt, others are reserved for AFU irq */ @@ -545,11 +545,11 @@ ifpga_monitor_stop_func(void) int ret; if (ifpga_monitor_start == 1) { - ret = pthread_cancel(ifpga_monitor_start_thread); + ret = rte_thread_cancel(ifpga_monitor_start_thread); if (ret) IFPGA_RAWDEV_PMD_ERR("Can't cancel the thread"); - ret = pthread_join(ifpga_monitor_start_thread, NULL); + ret = rte_thread_join(ifpga_monitor_start_thread, NULL); if (ret) IFPGA_RAWDEV_PMD_ERR("Can't join the thread"); diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c index 1dc813d0a3..7a90237ba2 100644 --- a/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -3,7 +3,7 @@ */ #include -#include +#include #include #include #include @@ -52,7 +52,7 @@ struct ifcvf_internal { int vfio_container_fd; int vfio_group_fd; int vfio_dev_fd; - pthread_t tid; /* thread for notify relay */ + rte_thread_t tid; /* thread for notify relay */ int epfd; int vid; struct rte_vdpa_device *vdev; @@ -79,7 +79,7 @@ TAILQ_HEAD(internal_list_head, internal_list); static struct internal_list_head internal_list = TAILQ_HEAD_INITIALIZER(internal_list); -static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER; static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid); @@ -89,7 +89,7 @@ find_internal_resource_by_vdev(struct rte_vdpa_device *vdev) int found = 0; struct internal_list *list; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_FOREACH(list, &internal_list, next) { if (vdev == list->internal->vdev) { @@ -98,7 +98,7 @@ find_internal_resource_by_vdev(struct rte_vdpa_device *vdev) } } - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); if (!found) return NULL; @@ -112,7 +112,7 @@ find_internal_resource_by_dev(struct rte_pci_device *pdev) int found = 0; struct internal_list *list; - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_FOREACH(list, &internal_list, next) { if (!rte_pci_addr_cmp(&pdev->addr, @@ -122,7 +122,7 @@ find_internal_resource_by_dev(struct rte_pci_device *pdev) } } - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); if (!found) return NULL; @@ -503,7 +503,7 @@ setup_notify_relay(struct ifcvf_internal *internal) ret = rte_ctrl_thread_create(&internal->tid, name, NULL, notify_relay, (void *)internal); if (ret != 0) { - DRV_LOG(ERR, "failed to create notify relay pthread."); + DRV_LOG(ERR, "failed 
to create notify relay thread."); return -1; } @@ -513,13 +513,11 @@ setup_notify_relay(struct ifcvf_internal *internal) static int unset_notify_relay(struct ifcvf_internal *internal) { - void *status; - - if (internal->tid) { - pthread_cancel(internal->tid); - pthread_join(internal->tid, &status); + if (internal->tid.opaque_id) { + rte_thread_cancel(internal->tid); + rte_thread_join(internal->tid, NULL); } - internal->tid = 0; + internal->tid.opaque_id = 0; if (internal->epfd >= 0) close(internal->epfd); @@ -809,7 +807,7 @@ setup_vring_relay(struct ifcvf_internal *internal) ret = rte_ctrl_thread_create(&internal->tid, name, NULL, vring_relay, (void *)internal); if (ret != 0) { - DRV_LOG(ERR, "failed to create ring relay pthread."); + DRV_LOG(ERR, "failed to create ring relay thread."); return -1; } @@ -819,13 +817,11 @@ setup_vring_relay(struct ifcvf_internal *internal) static int unset_vring_relay(struct ifcvf_internal *internal) { - void *status; - - if (internal->tid) { - pthread_cancel(internal->tid); - pthread_join(internal->tid, &status); + if (internal->tid.opaque_id) { + rte_thread_cancel(internal->tid); + rte_thread_join(internal->tid, NULL); } - internal->tid = 0; + internal->tid.opaque_id = 0; if (internal->epfd >= 0) close(internal->epfd); @@ -1253,9 +1249,9 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, goto error; } - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_INSERT_TAIL(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); rte_atomic32_set(&internal->started, 1); update_datapath(internal); @@ -1293,9 +1289,9 @@ ifcvf_pci_remove(struct rte_pci_device *pci_dev) rte_vfio_container_destroy(internal->vfio_container_fd); rte_vdpa_unregister_device(internal->vdev); - pthread_mutex_lock(&internal_list_lock); + rte_thread_mutex_lock(&internal_list_lock); TAILQ_REMOVE(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + rte_thread_mutex_unlock(&internal_list_lock); rte_free(list); rte_free(internal); diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c index 898e50f807..bcb744f5b6 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c @@ -48,7 +48,7 @@ TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list = TAILQ_HEAD_INITIALIZER(priv_list); -static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t priv_list_lock = RTE_THREAD_MUTEX_INITIALIZER; static struct mlx5_vdpa_priv * mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev) @@ -56,14 +56,14 @@ mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev) struct mlx5_vdpa_priv *priv; int found = 0; - pthread_mutex_lock(&priv_list_lock); + rte_thread_mutex_lock(&priv_list_lock); TAILQ_FOREACH(priv, &priv_list, next) { if (vdev == priv->vdev) { found = 1; break; } } - pthread_mutex_unlock(&priv_list_lock); + rte_thread_mutex_unlock(&priv_list_lock); if (!found) { DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name); rte_errno = EINVAL; @@ -143,9 +143,9 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state) DRV_LOG(ERR, "Too big vring id: %d.", vring); return -E2BIG; } - pthread_mutex_lock(&priv->vq_config_lock); + rte_thread_mutex_lock(&priv->vq_config_lock); ret = mlx5_vdpa_virtq_enable(priv, vring, state); - pthread_mutex_unlock(&priv->vq_config_lock); + rte_thread_mutex_unlock(&priv->vq_config_lock); return ret; } @@ -296,7 +296,7 @@ mlx5_vdpa_dev_close(int vid) 
priv->configured = 0; priv->vid = 0; /* The mutex may stay locked after event thread cancel - initiate it. */ - pthread_mutex_init(&priv->vq_config_lock, NULL); + rte_thread_mutex_init(&priv->vq_config_lock); DRV_LOG(INFO, "vDPA device %d was closed.", vid); return ret; } @@ -764,10 +764,10 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } mlx5_vdpa_config_get(pci_dev->device.devargs, priv); SLIST_INIT(&priv->mr_list); - pthread_mutex_init(&priv->vq_config_lock, NULL); - pthread_mutex_lock(&priv_list_lock); + rte_thread_mutex_init(&priv->vq_config_lock); + rte_thread_mutex_lock(&priv_list_lock); TAILQ_INSERT_TAIL(&priv_list, priv, next); - pthread_mutex_unlock(&priv_list_lock); + rte_thread_mutex_unlock(&priv_list_lock); return 0; error: @@ -798,7 +798,7 @@ mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev) struct mlx5_vdpa_priv *priv = NULL; int found = 0; - pthread_mutex_lock(&priv_list_lock); + rte_thread_mutex_lock(&priv_list_lock); TAILQ_FOREACH(priv, &priv_list, next) { if (!rte_pci_addr_cmp(&priv->pci_dev->addr, &pci_dev->addr)) { found = 1; @@ -807,7 +807,7 @@ mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev) } if (found) TAILQ_REMOVE(&priv_list, priv, next); - pthread_mutex_unlock(&priv_list_lock); + rte_thread_mutex_unlock(&priv_list_lock); if (found) { if (priv->configured) mlx5_vdpa_dev_close(priv->vid); @@ -816,7 +816,7 @@ mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev) priv->var = NULL; } mlx5_glue->close_device(priv->ctx); - pthread_mutex_destroy(&priv->vq_config_lock); + rte_thread_mutex_destroy(&priv->vq_config_lock); rte_free(priv); } return 0; diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h index d93b430c97..568b84c3ff 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h @@ -119,10 +119,10 @@ enum { struct mlx5_vdpa_priv { TAILQ_ENTRY(mlx5_vdpa_priv) next; uint8_t configured; - pthread_mutex_t vq_config_lock; + rte_thread_mutex_t vq_config_lock; uint64_t last_traffic_tic; - pthread_t timer_tid; - pthread_mutex_t timer_lock; + rte_thread_t timer_tid; + rte_thread_mutex_t timer_lock; pthread_cond_t timer_cond; volatile uint8_t timer_on; int event_mode; diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c index 404e135d5c..233436a97b 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c @@ -247,17 +247,17 @@ mlx5_vdpa_poll_handle(void *arg) uint32_t max; uint64_t current_tic; - pthread_mutex_lock(&priv->timer_lock); + rte_thread_mutex_lock(&priv->timer_lock); while (!priv->timer_on) pthread_cond_wait(&priv->timer_cond, &priv->timer_lock); - pthread_mutex_unlock(&priv->timer_lock); + rte_thread_mutex_unlock(&priv->timer_lock); priv->timer_delay_us = priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ? 
MLX5_VDPA_DEFAULT_TIMER_DELAY_US : priv->event_us; while (1) { max = 0; - pthread_mutex_lock(&priv->vq_config_lock); + rte_thread_mutex_lock(&priv->vq_config_lock); for (i = 0; i < priv->nr_virtqs; i++) { cq = &priv->virtqs[i].eqp.cq; if (cq->cq_obj.cq && !cq->armed) { @@ -281,13 +281,13 @@ mlx5_vdpa_poll_handle(void *arg) DRV_LOG(DEBUG, "Device %s traffic was stopped.", priv->vdev->device->name); mlx5_vdpa_arm_all_cqs(priv); - pthread_mutex_unlock(&priv->vq_config_lock); - pthread_mutex_lock(&priv->timer_lock); + rte_thread_mutex_unlock(&priv->vq_config_lock); + rte_thread_mutex_lock(&priv->timer_lock); priv->timer_on = 0; while (!priv->timer_on) pthread_cond_wait(&priv->timer_cond, &priv->timer_lock); - pthread_mutex_unlock(&priv->timer_lock); + rte_thread_mutex_unlock(&priv->timer_lock); priv->timer_delay_us = priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ? MLX5_VDPA_DEFAULT_TIMER_DELAY_US : @@ -297,7 +297,7 @@ mlx5_vdpa_poll_handle(void *arg) } else { priv->last_traffic_tic = current_tic; } - pthread_mutex_unlock(&priv->vq_config_lock); + rte_thread_mutex_unlock(&priv->vq_config_lock); mlx5_vdpa_timer_sleep(priv, max); } return NULL; @@ -313,7 +313,7 @@ mlx5_vdpa_interrupt_handler(void *cb_arg) uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128]; } out; - pthread_mutex_lock(&priv->vq_config_lock); + rte_thread_mutex_lock(&priv->vq_config_lock); while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp, sizeof(out.buf)) >= (ssize_t)sizeof(out.event_resp.cookie)) { @@ -332,7 +332,7 @@ mlx5_vdpa_interrupt_handler(void *cb_arg) eventfd_write(cq->callfd, (eventfd_t)1); if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) { mlx5_vdpa_cq_arm(priv, cq); - pthread_mutex_unlock(&priv->vq_config_lock); + rte_thread_mutex_unlock(&priv->vq_config_lock); return; } /* Don't arm again - timer will take control. */ @@ -347,13 +347,13 @@ mlx5_vdpa_interrupt_handler(void *cb_arg) /* Traffic detected: make sure timer is on. */ priv->last_traffic_tic = rte_rdtsc(); - pthread_mutex_lock(&priv->timer_lock); + rte_thread_mutex_lock(&priv->timer_lock); if (!priv->timer_on) { priv->timer_on = 1; pthread_cond_signal(&priv->timer_cond); } - pthread_mutex_unlock(&priv->timer_lock); - pthread_mutex_unlock(&priv->vq_config_lock); + rte_thread_mutex_unlock(&priv->timer_lock); + rte_thread_mutex_unlock(&priv->vq_config_lock); } static void @@ -369,7 +369,7 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused) struct mlx5_vdpa_virtq *virtq; uint64_t sec; - pthread_mutex_lock(&priv->vq_config_lock); + rte_thread_mutex_lock(&priv->vq_config_lock); while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp, sizeof(out.buf)) >= (ssize_t)sizeof(out.event_resp.cookie)) { @@ -415,7 +415,7 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused) virtq->err_time[i - 1] = virtq->err_time[i]; virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc(); } - pthread_mutex_unlock(&priv->vq_config_lock); + rte_thread_mutex_unlock(&priv->vq_config_lock); #endif } @@ -502,49 +502,41 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv) { int ret; rte_cpuset_t cpuset; - pthread_attr_t attr; + rte_thread_attr_t attr; char name[16]; - const struct sched_param sp = { - .sched_priority = sched_get_priority_max(SCHED_RR), - }; if (!priv->eventc) /* All virtqs are in poll mode. 
*/ return 0; if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) { - pthread_mutex_init(&priv->timer_lock, NULL); + rte_thread_mutex_init(&priv->timer_lock); pthread_cond_init(&priv->timer_cond, NULL); priv->timer_on = 0; - pthread_attr_init(&attr); - ret = pthread_attr_setschedpolicy(&attr, SCHED_RR); + rte_thread_attr_init(&attr); + CPU_ZERO(&cpuset); + if (priv->event_core != -1) + CPU_SET(priv->event_core, &cpuset); + else + cpuset = rte_lcore_cpuset(rte_get_main_lcore()); + ret = rte_thread_attr_set_affinity(&attr, &cpuset); if (ret) { - DRV_LOG(ERR, "Failed to set thread sched policy = RR."); + DRV_LOG(ERR, "Failed to set thread affinity."); return -1; } - ret = pthread_attr_setschedparam(&attr, &sp); + ret = rte_thread_attr_set_priority(&attr, + RTE_THREAD_PRIORITY_REALTIME_CRITICAL); if (ret) { DRV_LOG(ERR, "Failed to set thread priority."); return -1; } - ret = pthread_create(&priv->timer_tid, &attr, + ret = rte_thread_create(&priv->timer_tid, &attr, mlx5_vdpa_poll_handle, (void *)priv); if (ret) { DRV_LOG(ERR, "Failed to create timer thread."); return -1; } - CPU_ZERO(&cpuset); - if (priv->event_core != -1) - CPU_SET(priv->event_core, &cpuset); - else - cpuset = rte_lcore_cpuset(rte_get_main_lcore()); - ret = pthread_setaffinity_np(priv->timer_tid, - sizeof(cpuset), &cpuset); - if (ret) { - DRV_LOG(ERR, "Failed to set thread affinity."); - goto error; - } snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid); - ret = pthread_setname_np(priv->timer_tid, name); + ret = pthread_setname_np(priv->timer_tid.opaque_id, name); if (ret) { DRV_LOG(ERR, "Failed to set timer thread name."); return -1; @@ -569,7 +561,6 @@ mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv) { int retries = MLX5_VDPA_INTR_RETRIES; int ret = -EAGAIN; - void *status; if (priv->intr_handle.fd) { while (retries-- && ret == -EAGAIN) { @@ -585,11 +576,11 @@ mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv) } memset(&priv->intr_handle, 0, sizeof(priv->intr_handle)); } - if (priv->timer_tid) { - pthread_cancel(priv->timer_tid); - pthread_join(priv->timer_tid, &status); + if (priv->timer_tid.opaque_id) { + rte_thread_cancel(priv->timer_tid); + rte_thread_join(priv->timer_tid, NULL); } - priv->timer_tid = 0; + priv->timer_tid.opaque_id = 0; } void diff --git a/examples/kni/main.c b/examples/kni/main.c index beabb3c848..e4741f85ba 100644 --- a/examples/kni/main.c +++ b/examples/kni/main.c @@ -1042,8 +1042,8 @@ main(int argc, char** argv) int ret; uint16_t nb_sys_ports, port; unsigned i; - void *retval; - pthread_t kni_link_tid; + int retval; + rte_thread_t kni_link_tid; int pid; /* Associate signal_hanlder function with USR signals */ @@ -1126,7 +1126,7 @@ main(int argc, char** argv) return -1; } monitor_links = 0; - pthread_join(kni_link_tid, &retval); + rte_thread_join(kni_link_tid, &retval); /* Release resources */ RTE_ETH_FOREACH_DEV(port) { diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c index 257de50692..bef7b59842 100644 --- a/examples/performance-thread/pthread_shim/main.c +++ b/examples/performance-thread/pthread_shim/main.c @@ -164,7 +164,7 @@ static void *initial_lthread(void *args __rte_unused) rte_exit(EXIT_FAILURE, "Cannot create helloworld thread\n"); snprintf(name, sizeof(name), "helloworld-%u", (uint32_t)i); - rte_thread_setname(tid[i], name); + pthread_setname_np(tid[i], name); } /* wait for 1s to allow threads diff --git a/examples/vhost/main.c b/examples/vhost/main.c index ff48ba270d..da45b0d41f 100644 --- 
a/examples/vhost/main.c +++ b/examples/vhost/main.c @@ -1635,7 +1635,7 @@ main(int argc, char *argv[]) unsigned nb_ports, valid_num_ports; int ret, i; uint16_t portid; - static pthread_t tid; + static rte_thread_t tid; uint64_t flags = 0; signal(SIGINT, sigint_handler); diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c index fe2b4e4803..6d96277eb3 100644 --- a/examples/vhost_blk/vhost_blk.c +++ b/examples/vhost_blk/vhost_blk.c @@ -5,7 +5,7 @@ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif -#include +#include #include #include @@ -533,7 +533,7 @@ ctrlr_worker(void *arg) { struct vhost_blk_ctrlr *ctrlr = (struct vhost_blk_ctrlr *)arg; cpu_set_t cpuset; - pthread_t thread; + rte_thread_t thread; int i; fprintf(stdout, "Ctrlr Worker Thread start\n"); @@ -545,10 +545,10 @@ ctrlr_worker(void *arg) exit(0); } - thread = pthread_self(); + thread = rte_thread_self(); CPU_ZERO(&cpuset); CPU_SET(0, &cpuset); - pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset); + rte_thread_set_affinity_by_id(thread, &cpuset); for (i = 0; i < NUM_OF_BLK_QUEUES; i++) submit_inflight_vq(&ctrlr->queues[i]); @@ -604,7 +604,7 @@ new_device(int vid) struct vhost_blk_queue *vq; char path[PATH_MAX]; uint64_t features, protocol_features; - pthread_t tid; + rte_thread_t tid; int i, ret; bool packed_ring, inflight_shmfd; @@ -693,7 +693,7 @@ new_device(int vid) /* device has been started */ ctrlr->started = 1; - pthread_detach(tid); + pthread_detach(tid.opaque_id); return 0; } diff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c index 773cefdff7..fb53b40840 100644 --- a/lib/eal/common/eal_common_options.c +++ b/lib/eal/common/eal_common_options.c @@ -1866,8 +1866,7 @@ eal_auto_detect_cores(struct rte_config *cfg) unsigned int removed = 0; rte_cpuset_t affinity_set; - if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), - &affinity_set)) + if (rte_thread_get_affinity_by_id(rte_thread_self(), &affinity_set)) CPU_ZERO(&affinity_set); for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { @@ -1895,8 +1894,7 @@ compute_ctrl_threads_cpuset(struct internal_config *internal_cfg) } RTE_CPU_NOT(cpuset, cpuset); - if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), - &default_set)) + if (rte_thread_get_affinity_by_id(rte_thread_self(), &default_set)) CPU_ZERO(&default_set); RTE_CPU_AND(cpuset, cpuset, &default_set); diff --git a/lib/eal/common/eal_common_proc.c b/lib/eal/common/eal_common_proc.c index 6d1af3c0e7..6eb2bbb3a0 100644 --- a/lib/eal/common/eal_common_proc.c +++ b/lib/eal/common/eal_common_proc.c @@ -37,7 +37,7 @@ static int mp_fd = -1; static char mp_filter[PATH_MAX]; /* Filter for secondary process sockets */ static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */ -static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER; +static rte_thread_mutex_t mp_mutex_action = RTE_THREAD_MUTEX_INITIALIZER; static char peer_name[PATH_MAX]; struct action_entry { @@ -96,10 +96,10 @@ TAILQ_HEAD(pending_request_list, pending_request); static struct { struct pending_request_list requests; - pthread_mutex_t lock; + rte_thread_mutex_t lock; } pending_requests = { .requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests), - .lock = PTHREAD_MUTEX_INITIALIZER, + .lock = RTE_THREAD_MUTEX_INITIALIZER, /**< used in async requests only */ }; @@ -222,15 +222,15 @@ rte_mp_action_register(const char *name, rte_mp_t action) strlcpy(entry->action_name, name, sizeof(entry->action_name)); entry->action = action; - 
pthread_mutex_lock(&mp_mutex_action); + rte_thread_mutex_lock(&mp_mutex_action); if (find_action_entry_by_name(name) != NULL) { - pthread_mutex_unlock(&mp_mutex_action); + rte_thread_mutex_unlock(&mp_mutex_action); rte_errno = EEXIST; free(entry); return -1; } TAILQ_INSERT_TAIL(&action_entry_list, entry, next); - pthread_mutex_unlock(&mp_mutex_action); + rte_thread_mutex_unlock(&mp_mutex_action); return 0; } @@ -249,14 +249,14 @@ rte_mp_action_unregister(const char *name) return; } - pthread_mutex_lock(&mp_mutex_action); + rte_thread_mutex_lock(&mp_mutex_action); entry = find_action_entry_by_name(name); if (entry == NULL) { - pthread_mutex_unlock(&mp_mutex_action); + rte_thread_mutex_unlock(&mp_mutex_action); return; } TAILQ_REMOVE(&action_entry_list, entry, next); - pthread_mutex_unlock(&mp_mutex_action); + rte_thread_mutex_unlock(&mp_mutex_action); free(entry); } @@ -328,7 +328,7 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) if (m->type == MP_REP || m->type == MP_IGN) { struct pending_request *req = NULL; - pthread_mutex_lock(&pending_requests.lock); + rte_thread_mutex_lock(&pending_requests.lock); pending_req = find_pending_request(s->sun_path, msg->name); if (pending_req) { memcpy(pending_req->reply, msg, sizeof(*msg)); @@ -343,18 +343,18 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) pending_req); } else RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name); - pthread_mutex_unlock(&pending_requests.lock); + rte_thread_mutex_unlock(&pending_requests.lock); if (req != NULL) trigger_async_action(req); return; } - pthread_mutex_lock(&mp_mutex_action); + rte_thread_mutex_lock(&mp_mutex_action); entry = find_action_entry_by_name(msg->name); if (entry != NULL) action = entry->action; - pthread_mutex_unlock(&mp_mutex_action); + rte_thread_mutex_unlock(&mp_mutex_action); if (!action) { if (m->type == MP_REQ && !internal_conf->init_complete) { @@ -527,9 +527,9 @@ async_reply_handle(void *arg) { struct pending_request *req; - pthread_mutex_lock(&pending_requests.lock); + rte_thread_mutex_lock(&pending_requests.lock); req = async_reply_handle_thread_unsafe(arg); - pthread_mutex_unlock(&pending_requests.lock); + rte_thread_mutex_unlock(&pending_requests.lock); if (req != NULL) trigger_async_action(req); @@ -587,7 +587,7 @@ rte_mp_channel_init(void) { char path[PATH_MAX]; int dir_fd; - pthread_t mp_handle_tid; + rte_thread_t mp_handle_tid; const struct internal_config *internal_conf = eal_get_internal_configuration(); @@ -999,9 +999,9 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply, /* for secondary process, send request to the primary process only */ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - pthread_mutex_lock(&pending_requests.lock); + rte_thread_mutex_lock(&pending_requests.lock); ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end); - pthread_mutex_unlock(&pending_requests.lock); + rte_thread_mutex_unlock(&pending_requests.lock); goto end; } @@ -1022,7 +1022,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply, goto close_end; } - pthread_mutex_lock(&pending_requests.lock); + rte_thread_mutex_lock(&pending_requests.lock); while ((ent = readdir(mp_dir))) { char path[PATH_MAX]; @@ -1041,7 +1041,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply, ret = 0; unlock_end: - pthread_mutex_unlock(&pending_requests.lock); + rte_thread_mutex_unlock(&pending_requests.lock); /* unlock the directory */ flock(dir_fd, LOCK_UN); @@ -1119,7 +1119,7 @@ rte_mp_request_async(struct 
rte_mp_msg *req, const struct timespec *ts, * of requests to the queue at once, and some of the replies may arrive * before we add all of the requests to the queue. */ - pthread_mutex_lock(&pending_requests.lock); + rte_thread_mutex_lock(&pending_requests.lock); /* we have to ensure that callback gets triggered even if we don't send * anything, therefore earlier we have allocated a dummy request. fill @@ -1142,7 +1142,7 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts, dummy_used = true; } - pthread_mutex_unlock(&pending_requests.lock); + rte_thread_mutex_unlock(&pending_requests.lock); /* if we couldn't send anything, clean up */ if (ret != 0) @@ -1186,7 +1186,7 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts, } /* finally, unlock the queue */ - pthread_mutex_unlock(&pending_requests.lock); + rte_thread_mutex_unlock(&pending_requests.lock); /* unlock the directory */ flock(dir_fd, LOCK_UN); @@ -1202,7 +1202,7 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts, closedir_fail: closedir(mp_dir); unlock_fail: - pthread_mutex_unlock(&pending_requests.lock); + rte_thread_mutex_unlock(&pending_requests.lock); fail: free(dummy); free(param); diff --git a/lib/eal/common/eal_common_thread.c b/lib/eal/common/eal_common_thread.c index 1a52f42a2b..f2dbc19191 100644 --- a/lib/eal/common/eal_common_thread.c +++ b/lib/eal/common/eal_common_thread.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -86,9 +85,8 @@ thread_update_affinity(rte_cpuset_t *cpusetp) int rte_thread_set_affinity(rte_cpuset_t *cpusetp) { - if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t), - cpusetp) != 0) { - RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n"); + if (rte_thread_set_affinity_by_id(rte_thread_self(), cpusetp) != 0) { + RTE_LOG(ERR, EAL, "rte_thread_set_affinity failed\n"); return -1; } @@ -169,14 +167,14 @@ __rte_thread_uninit(void) struct rte_thread_ctrl_params { void *(*start_routine)(void *); void *arg; - pthread_barrier_t configured; + rte_thread_barrier_t configured; unsigned int refcnt; }; static void ctrl_params_free(struct rte_thread_ctrl_params *params) { if (__atomic_sub_fetch(¶ms->refcnt, 1, __ATOMIC_ACQ_REL) == 0) { - (void)pthread_barrier_destroy(¶ms->configured); + (void)rte_thread_barrier_destroy(¶ms->configured); free(params); } } @@ -192,7 +190,7 @@ static void *ctrl_thread_init(void *arg) __rte_thread_init(rte_lcore_id(), cpuset); - pthread_barrier_wait(¶ms->configured); + rte_thread_barrier_wait(¶ms->configured); start_routine = params->start_routine; ctrl_params_free(params); @@ -203,8 +201,8 @@ static void *ctrl_thread_init(void *arg) } int -rte_ctrl_thread_create(pthread_t *thread, const char *name, - const pthread_attr_t *attr, +rte_ctrl_thread_create(rte_thread_t *thread, const char *name, + const rte_thread_attr_t *attr, void *(*start_routine)(void *), void *arg) { struct internal_config *internal_conf = @@ -221,11 +219,11 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name, params->arg = arg; params->refcnt = 2; - ret = pthread_barrier_init(¶ms->configured, NULL, 2); + ret = rte_thread_barrier_init(¶ms->configured, 2); if (ret != 0) goto fail_no_barrier; - ret = pthread_create(thread, attr, ctrl_thread_init, (void *)params); + ret = rte_thread_create(thread, attr, ctrl_thread_init, (void *)params); if (ret != 0) goto fail_with_barrier; @@ -236,22 +234,22 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name, "Cannot set name for ctrl thread\n"); } - ret 
= pthread_setaffinity_np(*thread, sizeof(*cpuset), cpuset); + ret = rte_thread_set_affinity_by_id(*thread, cpuset); if (ret != 0) params->start_routine = NULL; - pthread_barrier_wait(¶ms->configured); + rte_thread_barrier_wait(¶ms->configured); ctrl_params_free(params); if (ret != 0) /* start_routine has been set to NULL above; */ /* ctrl thread will exit immediately */ - pthread_join(*thread, NULL); + rte_thread_join(*thread, NULL); return -ret; fail_with_barrier: - (void)pthread_barrier_destroy(¶ms->configured); + (void)rte_thread_barrier_destroy(¶ms->configured); fail_no_barrier: free(params); @@ -276,8 +274,7 @@ rte_thread_register(void) rte_errno = EINVAL; return -1; } - if (pthread_getaffinity_np(pthread_self(), sizeof(cpuset), - &cpuset) != 0) + if (rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset) != 0) CPU_ZERO(&cpuset); lcore_id = eal_lcore_non_eal_allocate(); if (lcore_id >= RTE_MAX_LCORE) diff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c index 24e27387b1..6df4a01277 100644 --- a/lib/eal/common/eal_common_trace.c +++ b/lib/eal/common/eal_common_trace.c @@ -359,7 +359,7 @@ __rte_trace_mem_per_thread_alloc(void) /* Store the thread name */ char *name = header->stream_header.thread_name; memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX); - rte_thread_getname(pthread_self(), name, + rte_thread_getname(rte_thread_self(), name, __RTE_TRACE_EMIT_STRING_LEN_MAX); trace->lcore_meta[count].mem = header; diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h index 64cf4e81c8..4b95001d7d 100644 --- a/lib/eal/common/eal_private.h +++ b/lib/eal/common/eal_private.h @@ -19,7 +19,7 @@ * Structure storing internal configuration (per-lcore) */ struct lcore_config { - pthread_t thread_id; /**< pthread identifier */ + rte_thread_t thread_id; /**< pthread identifier */ int pipe_main2worker[2]; /**< communication pipe with main */ int pipe_worker2main[2]; /**< communication pipe with main */ diff --git a/lib/eal/common/malloc_mp.c b/lib/eal/common/malloc_mp.c index c7101b32d3..fdc3a2cfe6 100644 --- a/lib/eal/common/malloc_mp.c +++ b/lib/eal/common/malloc_mp.c @@ -75,10 +75,10 @@ struct mp_request { TAILQ_HEAD(mp_request_list, mp_request); static struct { struct mp_request_list list; - pthread_mutex_t lock; + rte_thread_mutex_t lock; } mp_request_list = { .list = TAILQ_HEAD_INITIALIZER(mp_request_list.list), - .lock = PTHREAD_MUTEX_INITIALIZER + .lock = RTE_THREAD_MUTEX_INITIALIZER }; /** @@ -303,7 +303,7 @@ handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused) int ret; /* lock access to request */ - pthread_mutex_lock(&mp_request_list.lock); + rte_thread_mutex_lock(&mp_request_list.lock); /* make sure it's not a dupe */ entry = find_request_by_id(m->id); @@ -389,10 +389,10 @@ handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused) TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next); } - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); return 0; fail: - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); free(entry); return -1; } @@ -411,7 +411,7 @@ handle_sync_response(const struct rte_mp_msg *request, int i; /* lock the request */ - pthread_mutex_lock(&mp_request_list.lock); + rte_thread_mutex_lock(&mp_request_list.lock); entry = find_request_by_id(mpreq->id); if (entry == NULL) { @@ -541,10 +541,10 @@ handle_sync_response(const struct rte_mp_msg *request, goto fail; } - 
pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); return 0; fail: - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); return -1; } @@ -559,7 +559,7 @@ handle_rollback_response(const struct rte_mp_msg *request, struct mp_request *entry; /* lock the request */ - pthread_mutex_lock(&mp_request_list.lock); + rte_thread_mutex_lock(&mp_request_list.lock); memset(&msg, 0, sizeof(msg)); @@ -590,10 +590,10 @@ handle_rollback_response(const struct rte_mp_msg *request, free(entry->alloc_state.ms); free(entry); - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); return 0; fail: - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); return -1; } @@ -605,7 +605,7 @@ handle_response(const struct rte_mp_msg *msg, const void *peer __rte_unused) (const struct malloc_mp_req *)msg->param; struct mp_request *entry; - pthread_mutex_lock(&mp_request_list.lock); + rte_thread_mutex_lock(&mp_request_list.lock); entry = find_request_by_id(m->id); if (entry != NULL) { @@ -618,7 +618,7 @@ handle_response(const struct rte_mp_msg *msg, const void *peer __rte_unused) pthread_cond_signal(&entry->cond); } - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); return 0; } @@ -708,7 +708,7 @@ request_to_primary(struct malloc_mp_req *user_req) memset(&msg, 0, sizeof(msg)); memset(&ts, 0, sizeof(ts)); - pthread_mutex_lock(&mp_request_list.lock); + rte_thread_mutex_lock(&mp_request_list.lock); entry = malloc(sizeof(*entry)); if (entry == NULL) { @@ -769,10 +769,10 @@ request_to_primary(struct malloc_mp_req *user_req) TAILQ_REMOVE(&mp_request_list.list, entry, next); free(entry); - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); return ret; fail: - pthread_mutex_unlock(&mp_request_list.lock); + rte_thread_mutex_unlock(&mp_request_list.lock); free(entry); return -1; } diff --git a/lib/eal/common/meson.build b/lib/eal/common/meson.build index edfca77779..eda250247b 100644 --- a/lib/eal/common/meson.build +++ b/lib/eal/common/meson.build @@ -80,6 +80,7 @@ sources += files( 'rte_random.c', 'rte_reciprocal.c', 'rte_service.c', + 'rte_thread.c', 'rte_version.c', ) diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c index f4d1676754..1c862aa1cb 100644 --- a/lib/eal/freebsd/eal.c +++ b/lib/eal/freebsd/eal.c @@ -667,7 +667,7 @@ int rte_eal_init(int argc, char **argv) { int i, fctret, ret; - pthread_t thread_id; + rte_thread_t thread_id; static uint32_t run_once; uint32_t has_run = 0; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; @@ -690,7 +690,7 @@ rte_eal_init(int argc, char **argv) return -1; } - thread_id = pthread_self(); + thread_id = rte_thread_self(); eal_reset_internal_config(internal_conf); @@ -854,7 +854,14 @@ rte_eal_init(int argc, char **argv) eal_check_mem_on_local_socket(); - if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t), + ret = rte_thread_set_priority(rte_thread_self(), + internal_conf->thread_priority); + if (ret != 0) { + rte_eal_init_alert("Cannot set thread priority"); + rte_errno = ret; + return -1; + } + if (rte_thread_set_affinity_by_id(rte_thread_self(), &lcore_config[config->main_lcore].cpuset) != 0) { rte_eal_init_alert("Cannot set affinity"); rte_errno = EINVAL; @@ -865,10 +872,25 @@ rte_eal_init(int argc, char **argv) ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset)); - RTE_LOG(DEBUG, EAL, "Main lcore %u is ready 
(tid=%p;cpuset=[%s%s])\n", - config->main_lcore, thread_id, cpuset, + RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (cpuset=[%s%s])\n", + config->main_lcore, cpuset, ret == 0 ? "" : "..."); + rte_thread_attr_t thread_attr; + ret = rte_thread_attr_init(&thread_attr); + if (ret != 0) { + rte_eal_init_alert("Cannot initialize thread attributes"); + rte_errno = ret; + return -1; + } + ret = rte_thread_attr_set_priority(&thread_attr, + internal_conf->thread_priority); + if (ret != 0) { + rte_eal_init_alert("Cannot set thread priority attribute"); + rte_errno = ret; + return -1; + } + RTE_LCORE_FOREACH_WORKER(i) { /* @@ -882,8 +904,10 @@ rte_eal_init(int argc, char **argv) lcore_config[i].state = WAIT; + rte_thread_attr_set_affinity(&thread_attr, + &lcore_config[i].cpuset); /* create a thread for each lcore */ - ret = pthread_create(&lcore_config[i].thread_id, NULL, + ret = rte_thread_create(&lcore_config[i].thread_id, &thread_attr, eal_thread_loop, NULL); if (ret != 0) rte_panic("Cannot create thread\n"); @@ -893,10 +917,6 @@ rte_eal_init(int argc, char **argv) "lcore-worker-%d", i); rte_thread_setname(lcore_config[i].thread_id, thread_name); - ret = pthread_setaffinity_np(lcore_config[i].thread_id, - sizeof(rte_cpuset_t), &lcore_config[i].cpuset); - if (ret != 0) - rte_panic("Cannot set affinity\n"); } /* diff --git a/lib/eal/freebsd/eal_alarm.c b/lib/eal/freebsd/eal_alarm.c index c38b2e04f8..e5d7b130b1 100644 --- a/lib/eal/freebsd/eal_alarm.c +++ b/lib/eal/freebsd/eal_alarm.c @@ -37,7 +37,7 @@ struct alarm_entry { rte_eal_alarm_callback cb_fn; void *cb_arg; volatile uint8_t executing; - volatile pthread_t executing_id; + volatile rte_thread_t executing_id; }; static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER(); @@ -156,7 +156,7 @@ eal_alarm_callback(void *arg __rte_unused) while (ap != NULL && timespec_cmp(&now, &ap->time) >= 0) { ap->executing = 1; - ap->executing_id = pthread_self(); + ap->executing_id = rte_thread_self(); rte_spinlock_unlock(&alarm_list_lk); ap->cb_fn(ap->cb_arg); @@ -263,8 +263,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg) * finish. Otherwise we are trying to cancel * ourselves - mark it by EINPROGRESS. 
*/ - if (pthread_equal(ap->executing_id, - pthread_self()) == 0) + if (rte_thread_equal(ap->executing_id, + rte_thread_self()) == 0) executing++; else err = EINPROGRESS; @@ -285,8 +285,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg) free(ap); count++; ap = ap_prev; - } else if (pthread_equal(ap->executing_id, - pthread_self()) == 0) { + } else if (rte_thread_equal(ap->executing_id, + rte_thread_self()) == 0) { executing++; } else { err = EINPROGRESS; diff --git a/lib/eal/freebsd/eal_interrupts.c b/lib/eal/freebsd/eal_interrupts.c index 86810845fe..c0bf6c882a 100644 --- a/lib/eal/freebsd/eal_interrupts.c +++ b/lib/eal/freebsd/eal_interrupts.c @@ -52,7 +52,7 @@ static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER; static struct rte_intr_source_list intr_sources; /* interrupt handling thread */ -static pthread_t intr_thread; +static rte_thread_t intr_thread; static volatile int kq = -1; @@ -737,5 +737,5 @@ rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle) int rte_thread_is_intr(void) { - return pthread_equal(intr_thread, pthread_self()); + return rte_thread_equal(intr_thread, rte_thread_self()); } diff --git a/lib/eal/freebsd/eal_thread.c b/lib/eal/freebsd/eal_thread.c index 1dce9b04f2..cb8563cb7a 100644 --- a/lib/eal/freebsd/eal_thread.c +++ b/lib/eal/freebsd/eal_thread.c @@ -73,15 +73,15 @@ eal_thread_loop(__rte_unused void *arg) char c; int n, ret; unsigned lcore_id; - pthread_t thread_id; + rte_thread_t thread_id; int m2w, w2m; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; - thread_id = pthread_self(); + thread_id = rte_thread_self(); /* retrieve our lcore_id from the configuration structure */ RTE_LCORE_FOREACH_WORKER(lcore_id) { - if (thread_id == lcore_config[lcore_id].thread_id) + if (rte_thread_equal(thread_id, lcore_config[lcore_id].thread_id)) break; } if (lcore_id == RTE_MAX_LCORE) @@ -93,8 +93,8 @@ eal_thread_loop(__rte_unused void *arg) __rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset); ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset)); - RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%p;cpuset=[%s%s])\n", - lcore_id, thread_id, cpuset, ret == 0 ? "" : "..."); + RTE_LOG(DEBUG, EAL, "lcore %u is ready (cpuset=[%s%s])\n", + lcore_id, cpuset, ret == 0 ? "" : "..."); rte_eal_trace_thread_lcore_ready(lcore_id, cpuset); @@ -143,14 +143,14 @@ int rte_sys_gettid(void) return (int)lwpid; } -int rte_thread_setname(pthread_t id, const char *name) +int rte_thread_setname(rte_thread_t id, const char *name) { /* this BSD function returns no error */ pthread_set_name_np(id, name); return 0; } -int rte_thread_getname(pthread_t id, char *name, size_t len) +int rte_thread_getname(rte_thread_t id, char *name, size_t len) { RTE_SET_USED(id); RTE_SET_USED(name); diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build index 88a9eba12f..3b872f228f 100644 --- a/lib/eal/include/meson.build +++ b/lib/eal/include/meson.build @@ -40,6 +40,7 @@ headers += files( 'rte_string_fns.h', 'rte_tailq.h', 'rte_thread.h', + 'rte_thread_types.h', 'rte_time.h', 'rte_trace.h', 'rte_trace_point.h', diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h index 1550b75da0..d5e004105d 100644 --- a/lib/eal/include/rte_lcore.h +++ b/lib/eal/include/rte_lcore.h @@ -374,7 +374,7 @@ rte_lcore_dump(FILE *f); * @return * On success, return 0; otherwise return a negative value. */ -int rte_thread_setname(pthread_t id, const char *name); +int rte_thread_setname(rte_thread_t id, const char *name); /** * Get thread name. 
@@ -391,7 +391,7 @@ int rte_thread_setname(pthread_t id, const char *name); * On success, return 0; otherwise return a negative value. */ __rte_experimental -int rte_thread_getname(pthread_t id, char *name, size_t len); +int rte_thread_getname(rte_thread_t id, char *name, size_t len); /** * Register current non-EAL thread as a lcore. @@ -440,8 +440,8 @@ rte_thread_unregister(void); * corresponding to the error number. */ int -rte_ctrl_thread_create(pthread_t *thread, const char *name, - const pthread_attr_t *attr, +rte_ctrl_thread_create(rte_thread_t *thread, const char *name, + const rte_thread_attr_t *attr, void *(*start_routine)(void *), void *arg); #ifdef __cplusplus diff --git a/lib/eal/include/rte_per_lcore.h b/lib/eal/include/rte_per_lcore.h index eaedf0cb37..025d97f962 100644 --- a/lib/eal/include/rte_per_lcore.h +++ b/lib/eal/include/rte_per_lcore.h @@ -22,8 +22,6 @@ extern "C" { #endif -#include - /** * Macro to define a per lcore variable "var" of type "type", don't * use keywords like "static" or "volatile" in type, just prefix the diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c index ba19fc6347..8ac2c99114 100644 --- a/lib/eal/linux/eal.c +++ b/lib/eal/linux/eal.c @@ -963,7 +963,6 @@ int rte_eal_init(int argc, char **argv) { int i, fctret, ret; - pthread_t thread_id; static uint32_t run_once; uint32_t has_run = 0; const char *p; @@ -991,7 +990,6 @@ rte_eal_init(int argc, char **argv) p = strrchr(argv[0], '/'); strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid)); - thread_id = pthread_self(); eal_reset_internal_config(internal_conf); @@ -1219,7 +1217,14 @@ rte_eal_init(int argc, char **argv) eal_check_mem_on_local_socket(); - if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t), + ret = rte_thread_set_priority(rte_thread_self(), + internal_conf->thread_priority); + if (ret != 0) { + rte_eal_init_alert("Cannot set thread priority"); + rte_errno = ret; + return -1; + } + if (rte_thread_set_affinity_by_id(rte_thread_self(), &lcore_config[config->main_lcore].cpuset) != 0) { rte_eal_init_alert("Cannot set affinity"); rte_errno = EINVAL; @@ -1229,10 +1234,27 @@ rte_eal_init(int argc, char **argv) &lcore_config[config->main_lcore].cpuset); ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset)); - RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])\n", - config->main_lcore, (uintptr_t)thread_id, cpuset, + RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (cpuset=[%s%s])\n", + config->main_lcore, cpuset, ret == 0 ? 
"" : "..."); + rte_thread_attr_t thread_attr; + ret = rte_thread_attr_init(&thread_attr); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Cannot initialize thread attributes," + "ret = %d\n", ret); + rte_errno = ret; + return -1; + } + ret = rte_thread_attr_set_priority(&thread_attr, + internal_conf->thread_priority); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Cannot set thread priority attribute," + "ret = %d\n", ret); + rte_errno = ret; + return -1; + } + RTE_LCORE_FOREACH_WORKER(i) { /* @@ -1246,9 +1268,11 @@ rte_eal_init(int argc, char **argv) lcore_config[i].state = WAIT; + rte_thread_attr_set_affinity(&thread_attr, + &lcore_config[i].cpuset); /* create a thread for each lcore */ - ret = pthread_create(&lcore_config[i].thread_id, NULL, - eal_thread_loop, NULL); + ret = rte_thread_create(&lcore_config[i].thread_id, + &thread_attr, eal_thread_loop, NULL); if (ret != 0) rte_panic("Cannot create thread\n"); @@ -1260,11 +1284,6 @@ rte_eal_init(int argc, char **argv) if (ret != 0) RTE_LOG(DEBUG, EAL, "Cannot set name for lcore thread\n"); - - ret = pthread_setaffinity_np(lcore_config[i].thread_id, - sizeof(rte_cpuset_t), &lcore_config[i].cpuset); - if (ret != 0) - rte_panic("Cannot set affinity\n"); } /* diff --git a/lib/eal/linux/eal_alarm.c b/lib/eal/linux/eal_alarm.c index 3252c6fa59..fef22a347c 100644 --- a/lib/eal/linux/eal_alarm.c +++ b/lib/eal/linux/eal_alarm.c @@ -48,7 +48,7 @@ struct alarm_entry { rte_eal_alarm_callback cb_fn; void *cb_arg; volatile uint8_t executing; - volatile pthread_t executing_id; + volatile rte_thread_t executing_id; }; static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER(); @@ -86,7 +86,7 @@ eal_alarm_callback(void *arg __rte_unused) (ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec && (ap->time.tv_usec * NS_PER_US) <= now.tv_nsec))) { ap->executing = 1; - ap->executing_id = pthread_self(); + ap->executing_id = rte_thread_self(); rte_spinlock_unlock(&alarm_list_lk); ap->cb_fn(ap->cb_arg); @@ -207,7 +207,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg) /* If calling from other context, mark that alarm is executing * so loop can spin till it finish. 
Otherwise we are trying to * cancel our self - mark it by EINPROGRESS */ - if (pthread_equal(ap->executing_id, pthread_self()) == 0) + if (rte_thread_equal(ap->executing_id, + rte_thread_self()) == 0) executing++; else err = EINPROGRESS; @@ -228,7 +229,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg) free(ap); count++; ap = ap_prev; - } else if (pthread_equal(ap->executing_id, pthread_self()) == 0) + } else if (rte_thread_equal(ap->executing_id, + rte_thread_self()) == 0) executing++; else err = EINPROGRESS; diff --git a/lib/eal/linux/eal_interrupts.c b/lib/eal/linux/eal_interrupts.c index 22b3b7bcd9..e106fa186c 100644 --- a/lib/eal/linux/eal_interrupts.c +++ b/lib/eal/linux/eal_interrupts.c @@ -97,7 +97,7 @@ static union intr_pipefds intr_pipe; static struct rte_intr_source_list intr_sources; /* interrupt handling thread */ -static pthread_t intr_thread; +static rte_thread_t intr_thread; /* VFIO interrupts */ #ifdef VFIO_PRESENT @@ -1570,5 +1570,5 @@ rte_intr_cap_multiple(struct rte_intr_handle *intr_handle) int rte_thread_is_intr(void) { - return pthread_equal(intr_thread, pthread_self()); + return rte_thread_equal(intr_thread, rte_thread_self()); } diff --git a/lib/eal/linux/eal_thread.c b/lib/eal/linux/eal_thread.c index 83c2034b93..fe6b02788f 100644 --- a/lib/eal/linux/eal_thread.c +++ b/lib/eal/linux/eal_thread.c @@ -73,15 +73,15 @@ eal_thread_loop(__rte_unused void *arg) char c; int n, ret; unsigned lcore_id; - pthread_t thread_id; + rte_thread_t thread_id; int m2w, w2m; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; - thread_id = pthread_self(); + thread_id = rte_thread_self(); /* retrieve our lcore_id from the configuration structure */ RTE_LCORE_FOREACH_WORKER(lcore_id) { - if (thread_id == lcore_config[lcore_id].thread_id) + if (rte_thread_equal(thread_id,lcore_config[lcore_id].thread_id)) break; } if (lcore_id == RTE_MAX_LCORE) @@ -93,8 +93,8 @@ eal_thread_loop(__rte_unused void *arg) __rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset); ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset)); - RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n", - lcore_id, (uintptr_t)thread_id, cpuset, ret == 0 ? "" : "..."); + RTE_LOG(DEBUG, EAL, "lcore %u is ready (cpuset=[%s%s])\n", + lcore_id, cpuset, ret == 0 ? 
"" : "..."); rte_eal_trace_thread_lcore_ready(lcore_id, cpuset); @@ -148,7 +148,7 @@ int rte_sys_gettid(void) return (int)syscall(SYS_gettid); } -int rte_thread_setname(pthread_t id, const char *name) +int rte_thread_setname(rte_thread_t id, const char *name) { int ret = ENOSYS; #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) @@ -156,7 +156,7 @@ int rte_thread_setname(pthread_t id, const char *name) char truncated[16]; strlcpy(truncated, name, sizeof(truncated)); - ret = pthread_setname_np(id, truncated); + ret = pthread_setname_np(id.opaque_id, truncated); #endif #endif RTE_SET_USED(id); @@ -164,12 +164,12 @@ int rte_thread_setname(pthread_t id, const char *name) return -ret; } -int rte_thread_getname(pthread_t id, char *name, size_t len) +int rte_thread_getname(rte_thread_t id, char *name, size_t len) { int ret = ENOSYS; #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) #if __GLIBC_PREREQ(2, 12) - ret = pthread_getname_np(id, name, len); + ret = pthread_getname_np(id.opaque_id, name, len); #endif #endif RTE_SET_USED(id); diff --git a/lib/eal/linux/eal_timer.c b/lib/eal/linux/eal_timer.c index 7cf15cabac..b4099a0aa2 100644 --- a/lib/eal/linux/eal_timer.c +++ b/lib/eal/linux/eal_timer.c @@ -80,7 +80,7 @@ static uint64_t eal_hpet_resolution_hz = 0; /* Incremented 4 times during one 32bits hpet full count */ static uint32_t eal_hpet_msb; -static pthread_t msb_inc_thread_id; +static rte_thread_t msb_inc_thread_id; /* * This function runs on a specific thread to update a global variable diff --git a/lib/eal/unix/meson.build b/lib/eal/unix/meson.build index dc711b4240..f2b8063760 100644 --- a/lib/eal/unix/meson.build +++ b/lib/eal/unix/meson.build @@ -5,5 +5,4 @@ sources += files( 'eal_file.c', 'eal_unix_memory.c', 'eal_unix_timer.c', - 'rte_thread.c', ) diff --git a/lib/eal/unix/rte_thread.c b/lib/eal/unix/rte_thread.c deleted file mode 100644 index c72d619ec1..0000000000 --- a/lib/eal/unix/rte_thread.c +++ /dev/null @@ -1,92 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2021 Mellanox Technologies, Ltd - */ - -#include -#include -#include -#include - -#include -#include -#include -#include - -struct eal_tls_key { - pthread_key_t thread_index; -}; - -int -rte_thread_key_create(rte_thread_key *key, void (*destructor)(void *)) -{ - int err; - - *key = malloc(sizeof(**key)); - if ((*key) == NULL) { - RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n"); - rte_errno = ENOMEM; - return -1; - } - err = pthread_key_create(&((*key)->thread_index), destructor); - if (err) { - RTE_LOG(DEBUG, EAL, "pthread_key_create failed: %s\n", - strerror(err)); - free(*key); - rte_errno = ENOEXEC; - return -1; - } - return 0; -} - -int -rte_thread_key_delete(rte_thread_key key) -{ - int err; - - if (!key) { - RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); - rte_errno = EINVAL; - return -1; - } - err = pthread_key_delete(key->thread_index); - if (err) { - RTE_LOG(DEBUG, EAL, "pthread_key_delete failed: %s\n", - strerror(err)); - free(key); - rte_errno = ENOEXEC; - return -1; - } - free(key); - return 0; -} - -int -rte_thread_value_set(rte_thread_key key, const void *value) -{ - int err; - - if (!key) { - RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); - rte_errno = EINVAL; - return -1; - } - err = pthread_setspecific(key->thread_index, value); - if (err) { - RTE_LOG(DEBUG, EAL, "pthread_setspecific failed: %s\n", - strerror(err)); - rte_errno = ENOEXEC; - return -1; - } - return 0; -} - -void * -rte_thread_value_get(rte_thread_key key) -{ - if (!key) { - RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); - rte_errno = 
EINVAL; - return NULL; - } - return pthread_getspecific(key->thread_index); -} diff --git a/lib/eal/version.map b/lib/eal/version.map index fe5c3dac98..902ad1ab65 100644 --- a/lib/eal/version.map +++ b/lib/eal/version.map @@ -423,6 +423,27 @@ EXPERIMENTAL { rte_version_release; # WINDOWS_NO_EXPORT rte_version_suffix; # WINDOWS_NO_EXPORT rte_version_year; # WINDOWS_NO_EXPORT + + rte_thread_mutex_lock; + rte_thread_mutex_unlock; + rte_thread_mutex_init; + rte_thread_mutex_destroy; + rte_thread_create; + rte_thread_set_affinity_by_id; + rte_thread_get_affinity_by_id; + rte_thread_set_priority; + rte_thread_attr_init; + rte_thread_attr_set_affinity; + rte_thread_attr_get_affinity; + rte_thread_join; + rte_thread_self; + rte_thread_equal; + rte_thread_barrier_init; + rte_thread_barrier_wait; + rte_thread_barrier_destroy; + rte_thread_cancel; # WINDOWS_NO_EXPORT + rte_thread_attr_set_priority; + }; INTERNAL { diff --git a/lib/eal/windows/eal.c b/lib/eal/windows/eal.c index 28c787c0b0..533707ec72 100644 --- a/lib/eal/windows/eal.c +++ b/lib/eal/windows/eal.c @@ -375,6 +375,35 @@ rte_eal_init(int argc, char **argv) return -1; } + if (rte_thread_set_affinity_by_id(rte_thread_self(), + &lcore_config[config->main_lcore].cpuset) != 0) { + rte_eal_init_alert("Cannot set affinity"); + rte_errno = EINVAL; + return -1; + } + + ret = rte_thread_set_priority(rte_thread_self(), + internal_conf->thread_priority); + if (ret != 0) { + rte_eal_init_alert("Cannot set thread priority"); + rte_errno = ret; + return -1; + } + rte_thread_attr_t thread_attr; + ret = rte_thread_attr_init(&thread_attr); + if (ret != 0) { + rte_eal_init_alert("Cannot initialize thread attributes"); + rte_errno = ret; + return -1; + } + ret = rte_thread_attr_set_priority(&thread_attr, + internal_conf->thread_priority); + if (ret != 0) { + rte_eal_init_alert("Cannot set thread priority attribute"); + rte_errno = ret; + return -1; + } + RTE_LCORE_FOREACH_WORKER(i) { /* @@ -390,8 +419,17 @@ rte_eal_init(int argc, char **argv) lcore_config[i].state = WAIT; + ret = rte_thread_attr_set_affinity(&thread_attr, &lcore_config[i].cpuset); + if (ret != 0) { + rte_eal_init_alert("Cannot set thread affinity attribute"); + rte_errno = ret; + return -1; + } + /* create a thread for each lcore */ - if (eal_thread_create(&lcore_config[i].thread_id) != 0) + ret = rte_thread_create(&lcore_config[i].thread_id, + &thread_attr, eal_thread_loop, NULL); + if (ret != 0) rte_panic("Cannot create thread\n"); } diff --git a/lib/eal/windows/eal_interrupts.c b/lib/eal/windows/eal_interrupts.c index 1d4cf794df..a529ef95e3 100644 --- a/lib/eal/windows/eal_interrupts.c +++ b/lib/eal/windows/eal_interrupts.c @@ -2,12 +2,14 @@ * Copyright 2020 Mellanox Technologies, Ltd */ +#include + #include #include "eal_private.h" #include "eal_windows.h" -static pthread_t intr_thread; +static rte_thread_t intr_thread; static HANDLE intr_iocp; @@ -76,7 +78,7 @@ rte_eal_intr_init(void) int rte_thread_is_intr(void) { - return pthread_equal(intr_thread, pthread_self()); + return rte_thread_equal(intr_thread, rte_thread_self()); } int @@ -92,9 +94,9 @@ eal_intr_thread_schedule(void (*func)(void *arg), void *arg) { HANDLE handle; - handle = OpenThread(THREAD_ALL_ACCESS, FALSE, intr_thread); + handle = OpenThread(THREAD_ALL_ACCESS, FALSE, intr_thread.opaque_id); if (handle == NULL) { - RTE_LOG_WIN32_ERR("OpenThread(%llu)", intr_thread); + RTE_LOG_WIN32_ERR("OpenThread (%" PRIuPTR ")", intr_thread.opaque_id); return -ENOENT; } diff --git a/lib/eal/windows/eal_thread.c 
b/lib/eal/windows/eal_thread.c index 9c3f6d69fd..0c6bca24cf 100644 --- a/lib/eal/windows/eal_thread.c +++ b/lib/eal/windows/eal_thread.c @@ -60,15 +60,15 @@ eal_thread_loop(void *arg __rte_unused) char c; int n, ret; unsigned int lcore_id; - pthread_t thread_id; + rte_thread_t thread_id; int m2w, w2m; char cpuset[RTE_CPU_AFFINITY_STR_LEN]; - thread_id = pthread_self(); + thread_id = rte_thread_self(); /* retrieve our lcore_id from the configuration structure */ RTE_LCORE_FOREACH_WORKER(lcore_id) { - if (thread_id == lcore_config[lcore_id].thread_id) + if (rte_thread_equal(thread_id, lcore_config[lcore_id].thread_id)) break; } if (lcore_id == RTE_MAX_LCORE) @@ -80,7 +80,7 @@ eal_thread_loop(void *arg __rte_unused) __rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset); RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s])\n", - lcore_id, (uintptr_t)thread_id, cpuset); + lcore_id, thread_id.opaque_id, cpuset); /* read on our pipe to get commands */ while (1) { @@ -122,24 +122,6 @@ eal_thread_loop(void *arg __rte_unused) } } -/* function to create threads */ -int -eal_thread_create(pthread_t *thread) -{ - HANDLE th; - - th = CreateThread(NULL, 0, - (LPTHREAD_START_ROUTINE)(ULONG_PTR)eal_thread_loop, - NULL, 0, (LPDWORD)thread); - if (!th) - return -1; - - SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS); - SetThreadPriority(th, THREAD_PRIORITY_NORMAL); - - return 0; -} - /* get current thread ID */ int rte_sys_gettid(void) @@ -148,7 +130,7 @@ rte_sys_gettid(void) } int -rte_thread_setname(__rte_unused pthread_t id, __rte_unused const char *name) +rte_thread_setname(__rte_unused rte_thread_t id, __rte_unused const char *name) { /* TODO */ /* This is a stub, not the expected result */ diff --git a/lib/eal/windows/eal_windows.h b/lib/eal/windows/eal_windows.h index dc5dc8240a..4b92b198c7 100644 --- a/lib/eal/windows/eal_windows.h +++ b/lib/eal/windows/eal_windows.h @@ -35,16 +35,6 @@ */ int eal_create_cpu_map(void); -/** - * Create a thread. - * - * @param thread - * The location to store the thread id if successful. - * @return - * 0 for success, -1 if the thread is not created. - */ -int eal_thread_create(pthread_t *thread); - /** * Get system NUMA node number for a socket ID. * diff --git a/lib/eal/windows/include/meson.build b/lib/eal/windows/include/meson.build index b3534b025f..7d9b3393e4 100644 --- a/lib/eal/windows/include/meson.build +++ b/lib/eal/windows/include/meson.build @@ -7,4 +7,5 @@ headers += files( 'rte_os.h', 'rte_virt2phys.h', 'rte_windows.h', + 'rte_windows_thread_types.h', ) diff --git a/lib/eal/windows/include/pthread.h b/lib/eal/windows/include/pthread.h deleted file mode 100644 index 1939b0121c..0000000000 --- a/lib/eal/windows/include/pthread.h +++ /dev/null @@ -1,186 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation - */ - -#ifndef _PTHREAD_H_ -#define _PTHREAD_H_ - -#include -#include - -/** - * This file is required to support the common code in eal_common_proc.c, - * eal_common_thread.c and common\include\rte_per_lcore.h as Microsoft libc - * does not contain pthread.h. This may be removed in future releases. 
- */ -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#define PTHREAD_BARRIER_SERIAL_THREAD TRUE - -/* defining pthread_t type on Windows since there is no in Microsoft libc*/ -typedef uintptr_t pthread_t; - -/* defining pthread_attr_t type on Windows since there is no in Microsoft libc*/ -typedef void *pthread_attr_t; - -typedef void *pthread_mutexattr_t; - -typedef CRITICAL_SECTION pthread_mutex_t; - -typedef SYNCHRONIZATION_BARRIER pthread_barrier_t; - -#define pthread_barrier_init(barrier, attr, count) \ - !InitializeSynchronizationBarrier(barrier, count, -1) -#define pthread_barrier_wait(barrier) EnterSynchronizationBarrier(barrier, \ - SYNCHRONIZATION_BARRIER_FLAGS_BLOCK_ONLY) -#define pthread_barrier_destroy(barrier) \ - !DeleteSynchronizationBarrier(barrier) -#define pthread_cancel(thread) !TerminateThread((HANDLE) thread, 0) - -/* pthread function overrides */ -#define pthread_self() \ - ((pthread_t)GetCurrentThreadId()) - - -static inline int -pthread_equal(pthread_t t1, pthread_t t2) -{ - return t1 == t2; -} - -static inline int -pthread_setaffinity_np(pthread_t threadid, size_t cpuset_size, - rte_cpuset_t *cpuset) -{ - DWORD_PTR ret = 0; - HANDLE thread_handle; - - if (cpuset == NULL || cpuset_size == 0) - return -1; - - thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, threadid); - if (thread_handle == NULL) { - RTE_LOG_WIN32_ERR("OpenThread()"); - return -1; - } - - ret = SetThreadAffinityMask(thread_handle, *cpuset->_bits); - if (ret == 0) { - RTE_LOG_WIN32_ERR("SetThreadAffinityMask()"); - goto close_handle; - } - -close_handle: - if (CloseHandle(thread_handle) == 0) { - RTE_LOG_WIN32_ERR("CloseHandle()"); - return -1; - } - return (ret == 0) ? -1 : 0; -} - -static inline int -pthread_getaffinity_np(pthread_t threadid, size_t cpuset_size, - rte_cpuset_t *cpuset) -{ - /* Workaround for the lack of a GetThreadAffinityMask() - *API in Windows - */ - DWORD_PTR prev_affinity_mask; - HANDLE thread_handle; - DWORD_PTR ret = 0; - - if (cpuset == NULL || cpuset_size == 0) - return -1; - - thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, threadid); - if (thread_handle == NULL) { - RTE_LOG_WIN32_ERR("OpenThread()"); - return -1; - } - - /* obtain previous mask by setting dummy mask */ - prev_affinity_mask = SetThreadAffinityMask(thread_handle, 0x1); - if (prev_affinity_mask == 0) { - RTE_LOG_WIN32_ERR("SetThreadAffinityMask()"); - goto close_handle; - } - - /* set it back! */ - ret = SetThreadAffinityMask(thread_handle, prev_affinity_mask); - if (ret == 0) { - RTE_LOG_WIN32_ERR("SetThreadAffinityMask()"); - goto close_handle; - } - - memset(cpuset, 0, cpuset_size); - *cpuset->_bits = prev_affinity_mask; - -close_handle: - if (CloseHandle(thread_handle) == 0) { - RTE_LOG_WIN32_ERR("SetThreadAffinityMask()"); - return -1; - } - return (ret == 0) ? -1 : 0; -} - -static inline int -pthread_create(void *threadid, const void *threadattr, void *threadfunc, - void *args) -{ - RTE_SET_USED(threadattr); - HANDLE hThread; - hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)threadfunc, - args, 0, (LPDWORD)threadid); - if (hThread) { - SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS); - SetThreadPriority(hThread, THREAD_PRIORITY_NORMAL); - } - return ((hThread != NULL) ? 
0 : E_FAIL); -} - -static inline int -pthread_join(__rte_unused pthread_t thread, - __rte_unused void **value_ptr) -{ - return 0; -} - -static inline int -pthread_mutex_init(pthread_mutex_t *mutex, - __rte_unused pthread_mutexattr_t *attr) -{ - InitializeCriticalSection(mutex); - return 0; -} - -static inline int -pthread_mutex_lock(pthread_mutex_t *mutex) -{ - EnterCriticalSection(mutex); - return 0; -} - -static inline int -pthread_mutex_unlock(pthread_mutex_t *mutex) -{ - LeaveCriticalSection(mutex); - return 0; -} - -static inline int -pthread_mutex_destroy(pthread_mutex_t *mutex) -{ - DeleteCriticalSection(mutex); - return 0; -} - -#ifdef __cplusplus -} -#endif - -#endif /* _PTHREAD_H_ */ diff --git a/lib/eal/windows/include/sched.h b/lib/eal/windows/include/sched.h index ff572b5dcb..8f0b3cb71e 100644 --- a/lib/eal/windows/include/sched.h +++ b/lib/eal/windows/include/sched.h @@ -44,7 +44,7 @@ typedef struct _rte_cpuset_s { (1LL << _WHICH_BIT(b))) != 0LL) static inline int -count_cpu(rte_cpuset_t *s) +count_cpu(const rte_cpuset_t *s) { unsigned int _i; int count = 0; diff --git a/lib/eal/windows/meson.build b/lib/eal/windows/meson.build index ff9cbec417..4b7db4754b 100644 --- a/lib/eal/windows/meson.build +++ b/lib/eal/windows/meson.build @@ -19,7 +19,12 @@ sources += files( 'eal_timer.c', 'fnmatch.c', 'getopt.c', - 'rte_thread.c', ) +if get_option('use_external_thread_lib') + sources += 'eal/common/rte_thread.c' +else + sources += 'eal/windows/rte_thread.c' +endif + dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true) diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index a1879765e8..823d02b529 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -524,7 +524,7 @@ rte_eth_dev_allocate(const char *name) strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name)); eth_dev->data->port_id = port_id; eth_dev->data->mtu = RTE_ETHER_MTU; - pthread_mutex_init(ð_dev->data->flow_ops_mutex, NULL); + rte_thread_mutex_init(ð_dev->data->flow_ops_mutex); unlock: rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); @@ -598,7 +598,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) rte_free(eth_dev->data->mac_addrs); rte_free(eth_dev->data->hash_mac_addrs); rte_free(eth_dev->data->dev_private); - pthread_mutex_destroy(ð_dev->data->flow_ops_mutex); + rte_thread_mutex_destroy(ð_dev->data->flow_ops_mutex); memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); } diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index 4679d948fa..ad1053b561 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -5,7 +5,8 @@ #ifndef _RTE_ETHDEV_CORE_H_ #define _RTE_ETHDEV_CORE_H_ -#include +#include +#include /** * @file @@ -182,7 +183,7 @@ struct rte_eth_dev_data { * Valid if RTE_ETH_DEV_REPRESENTOR in dev_flags. */ - pthread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */ + rte_thread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. 
*/ uint64_t reserved_64s[4]; /**< Reserved for future fields */ void *reserved_ptrs[4]; /**< Reserved for future fields */ } __rte_cache_aligned; diff --git a/lib/ethdev/rte_flow.c b/lib/ethdev/rte_flow.c index c7c7108933..2c0a8c0470 100644 --- a/lib/ethdev/rte_flow.c +++ b/lib/ethdev/rte_flow.c @@ -225,14 +225,14 @@ static inline void fts_enter(struct rte_eth_dev *dev) { if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE)) - pthread_mutex_lock(&dev->data->flow_ops_mutex); + rte_thread_mutex_lock(&dev->data->flow_ops_mutex); } static inline void fts_exit(struct rte_eth_dev *dev) { if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE)) - pthread_mutex_unlock(&dev->data->flow_ops_mutex); + rte_thread_mutex_unlock(&dev->data->flow_ops_mutex); } static int diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index ca166a65f2..d3c49dac77 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -147,7 +147,7 @@ struct rte_event_eth_rx_adapter { /* Count of interrupt vectors in use */ uint32_t num_intr_vec; /* Thread blocked on Rx interrupts */ - pthread_t rx_intr_thread; + rte_thread_t rx_intr_thread; /* Configuration callback for rte_service configuration */ rte_event_eth_rx_adapter_conf_cb conf_cb; /* Configuration callback argument */ @@ -1450,12 +1450,12 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter) { int err; - err = pthread_cancel(rx_adapter->rx_intr_thread); + err = rte_thread_cancel(rx_adapter->rx_intr_thread); if (err) RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n", err); - err = pthread_join(rx_adapter->rx_intr_thread, NULL); + err = rte_thread_join(rx_adapter->rx_intr_thread, NULL); if (err) RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err); diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c index 55d4856f9e..b97774ccd4 100644 --- a/lib/vhost/fd_man.c +++ b/lib/vhost/fd_man.c @@ -61,9 +61,9 @@ fdset_shrink_nolock(struct fdset *pfdset) static void fdset_shrink(struct fdset *pfdset) { - pthread_mutex_lock(&pfdset->fd_mutex); + rte_thread_mutex_lock(&pfdset->fd_mutex); fdset_shrink_nolock(pfdset); - pthread_mutex_unlock(&pfdset->fd_mutex); + rte_thread_mutex_unlock(&pfdset->fd_mutex); } /** @@ -126,21 +126,21 @@ fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat) if (pfdset == NULL || fd == -1) return -1; - pthread_mutex_lock(&pfdset->fd_mutex); + rte_thread_mutex_lock(&pfdset->fd_mutex); i = pfdset->num < MAX_FDS ? pfdset->num++ : -1; if (i == -1) { - pthread_mutex_lock(&pfdset->fd_pooling_mutex); + rte_thread_mutex_lock(&pfdset->fd_pooling_mutex); fdset_shrink_nolock(pfdset); - pthread_mutex_unlock(&pfdset->fd_pooling_mutex); + rte_thread_mutex_unlock(&pfdset->fd_pooling_mutex); i = pfdset->num < MAX_FDS ? 
pfdset->num++ : -1;
 	if (i == -1) {
-			pthread_mutex_unlock(&pfdset->fd_mutex);
+			rte_thread_mutex_unlock(&pfdset->fd_mutex);
 			return -2;
 		}
 	}

 	fdset_add_fd(pfdset, i, fd, rcb, wcb, dat);
-	pthread_mutex_unlock(&pfdset->fd_mutex);
+	rte_thread_mutex_unlock(&pfdset->fd_mutex);

 	return 0;
 }

@@ -159,7 +159,7 @@ fdset_del(struct fdset *pfdset, int fd)
 		return NULL;

 	do {
-		pthread_mutex_lock(&pfdset->fd_mutex);
+		rte_thread_mutex_lock(&pfdset->fd_mutex);

 		i = fdset_find_fd(pfdset, fd);
 		if (i != -1 && pfdset->fd[i].busy == 0) {
@@ -170,7 +170,7 @@ fdset_del(struct fdset *pfdset, int fd)
 			pfdset->fd[i].dat = NULL;
 			i = -1;
 		}
-		pthread_mutex_unlock(&pfdset->fd_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_mutex);
 	} while (i != -1);

 	return dat;
@@ -192,10 +192,10 @@ fdset_try_del(struct fdset *pfdset, int fd)
 	if (pfdset == NULL || fd == -1)
 		return -2;

-	pthread_mutex_lock(&pfdset->fd_mutex);
+	rte_thread_mutex_lock(&pfdset->fd_mutex);
 	i = fdset_find_fd(pfdset, fd);
 	if (i != -1 && pfdset->fd[i].busy) {
-		pthread_mutex_unlock(&pfdset->fd_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_mutex);
 		return -1;
 	}

@@ -205,7 +205,7 @@ fdset_try_del(struct fdset *pfdset, int fd)
 		pfdset->fd[i].dat = NULL;
 	}

-	pthread_mutex_unlock(&pfdset->fd_mutex);
+	rte_thread_mutex_unlock(&pfdset->fd_mutex);

 	return 0;
 }

@@ -244,19 +244,19 @@ fdset_event_dispatch(void *arg)
 		 * might have been updated. It is ok if there is unwanted call
 		 * for new listenfds.
 		 */
-		pthread_mutex_lock(&pfdset->fd_mutex);
+		rte_thread_mutex_lock(&pfdset->fd_mutex);
 		numfds = pfdset->num;
-		pthread_mutex_unlock(&pfdset->fd_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_mutex);

-		pthread_mutex_lock(&pfdset->fd_pooling_mutex);
+		rte_thread_mutex_lock(&pfdset->fd_pooling_mutex);
 		val = poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
-		pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_pooling_mutex);
 		if (val < 0)
 			continue;

 		need_shrink = 0;
 		for (i = 0; i < numfds; i++) {
-			pthread_mutex_lock(&pfdset->fd_mutex);
+			rte_thread_mutex_lock(&pfdset->fd_mutex);

 			pfdentry = &pfdset->fd[i];
 			fd = pfdentry->fd;
@@ -264,12 +264,12 @@ fdset_event_dispatch(void *arg)

 			if (fd < 0) {
 				need_shrink = 1;
-				pthread_mutex_unlock(&pfdset->fd_mutex);
+				rte_thread_mutex_unlock(&pfdset->fd_mutex);
 				continue;
 			}

 			if (!pfd->revents) {
-				pthread_mutex_unlock(&pfdset->fd_mutex);
+				rte_thread_mutex_unlock(&pfdset->fd_mutex);
 				continue;
 			}

@@ -280,7 +280,7 @@ fdset_event_dispatch(void *arg)
 			dat = pfdentry->dat;
 			pfdentry->busy = 1;

-			pthread_mutex_unlock(&pfdset->fd_mutex);
+			rte_thread_mutex_unlock(&pfdset->fd_mutex);

 			if (rcb && pfd->revents & (POLLIN | FDPOLLERR))
 				rcb(fd, dat, &remove1);
diff --git a/lib/vhost/fd_man.h b/lib/vhost/fd_man.h
index 3ab5cfdd60..ba58d849e8 100644
--- a/lib/vhost/fd_man.h
+++ b/lib/vhost/fd_man.h
@@ -5,7 +5,7 @@
 #ifndef _FD_MAN_H_
 #define _FD_MAN_H_
 #include
-#include <pthread.h>
+#include <rte_thread.h>
 #include

 #define MAX_FDS 1024
@@ -23,8 +23,8 @@ struct fdentry {
 struct fdset {
 	struct pollfd rwfds[MAX_FDS];
 	struct fdentry fd[MAX_FDS];
-	pthread_mutex_t fd_mutex;
-	pthread_mutex_t fd_pooling_mutex;
+	rte_thread_mutex_t fd_mutex;
+	rte_thread_mutex_t fd_pooling_mutex;
 	int num;	/* current fd number of this fdset */

 	union pipefds {
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index 0169d36481..faec3650fb 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -14,7 +14,7 @@
 #include
 #include
 #include
-#include <pthread.h>
+#include <rte_thread.h>

 #include

@@ -31,7 +31,7 @@ TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
  */
 struct vhost_user_socket {
 	struct vhost_user_connection_list conn_list;
-	pthread_mutex_t conn_mutex;
+	rte_thread_mutex_t conn_mutex;
 	char *path;
 	int socket_fd;
 	struct sockaddr_un un;
@@ -73,7 +73,7 @@ struct vhost_user {
 	struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
 	struct fdset fdset;
 	int vsocket_cnt;
-	pthread_mutex_t mutex;
+	rte_thread_mutex_t mutex;
 };

 #define MAX_VIRTIO_BACKLOG 128
@@ -86,12 +86,12 @@ static int vhost_user_start_client(struct vhost_user_socket *vsocket);
 static struct vhost_user vhost_user = {
 	.fdset = {
 		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
-		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
-		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
+		.fd_mutex = RTE_THREAD_MUTEX_INITIALIZER,
+		.fd_pooling_mutex = RTE_THREAD_MUTEX_INITIALIZER,
 		.num = 0
 	},
 	.vsocket_cnt = 0,
-	.mutex = PTHREAD_MUTEX_INITIALIZER,
+	.mutex = RTE_THREAD_MUTEX_INITIALIZER,
 };

 /*
@@ -269,9 +269,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 		goto err_cleanup;
 	}

-	pthread_mutex_lock(&vsocket->conn_mutex);
+	rte_thread_mutex_lock(&vsocket->conn_mutex);
 	TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
-	pthread_mutex_unlock(&vsocket->conn_mutex);
+	rte_thread_mutex_unlock(&vsocket->conn_mutex);

 	fdset_pipe_notify(&vhost_user.fdset);
 	return;
@@ -324,9 +324,9 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
 			vhost_user_start_client(vsocket);
 	}

-	pthread_mutex_lock(&vsocket->conn_mutex);
+	rte_thread_mutex_lock(&vsocket->conn_mutex);
 	TAILQ_REMOVE(&vsocket->conn_list, conn, next);
-	pthread_mutex_unlock(&vsocket->conn_mutex);
+	rte_thread_mutex_unlock(&vsocket->conn_mutex);

 	free(conn);
 }
@@ -418,11 +418,11 @@ struct vhost_user_reconnect {
 TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
 struct vhost_user_reconnect_list {
 	struct vhost_user_reconnect_tailq_list head;
-	pthread_mutex_t mutex;
+	rte_thread_mutex_t mutex;
 };

 static struct vhost_user_reconnect_list reconn_list;
-static pthread_t reconn_tid;
+static rte_thread_t reconn_tid;

 static int
 vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
@@ -454,7 +454,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
 	struct vhost_user_reconnect *reconn, *next;

 	while (1) {
-		pthread_mutex_lock(&reconn_list.mutex);
+		rte_thread_mutex_lock(&reconn_list.mutex);

 		/*
 		 * An equal implementation of TAILQ_FOREACH_SAFE,
@@ -485,7 +485,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
 			free(reconn);
 		}

-		pthread_mutex_unlock(&reconn_list.mutex);
+		rte_thread_mutex_unlock(&reconn_list.mutex);
 		sleep(1);
 	}
@@ -497,7 +497,7 @@ vhost_user_reconnect_init(void)
 {
 	int ret;

-	ret = pthread_mutex_init(&reconn_list.mutex, NULL);
+	ret = rte_thread_mutex_init(&reconn_list.mutex);
 	if (ret < 0) {
 		VHOST_LOG_CONFIG(ERR, "failed to initialize mutex");
 		return ret;
@@ -508,7 +508,7 @@ vhost_user_reconnect_init(void)
 			     vhost_user_client_reconnect, NULL);
 	if (ret != 0) {
 		VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread");
-		if (pthread_mutex_destroy(&reconn_list.mutex)) {
+		if (rte_thread_mutex_destroy(&reconn_list.mutex)) {
 			VHOST_LOG_CONFIG(ERR,
 				"failed to destroy reconnect mutex");
 		}
@@ -552,9 +552,9 @@ vhost_user_start_client(struct vhost_user_socket *vsocket)
 	reconn->un = vsocket->un;
 	reconn->fd = fd;
 	reconn->vsocket = vsocket;
-	pthread_mutex_lock(&reconn_list.mutex);
+	rte_thread_mutex_lock(&reconn_list.mutex);
 	TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
-	pthread_mutex_unlock(&reconn_list.mutex);
+	rte_thread_mutex_unlock(&reconn_list.mutex);

 	return 0;
 }
@@ -586,11 +586,11 @@ rte_vhost_driver_attach_vdpa_device(const char *path,
 	if (dev == NULL || path == NULL)
 		return -1;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->vdpa_dev = dev;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? 0 : -1;
 }
@@ -600,11 +600,11 @@ rte_vhost_driver_detach_vdpa_device(const char *path)
 {
 	struct vhost_user_socket *vsocket;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->vdpa_dev = NULL;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? 0 : -1;
 }
@@ -615,11 +615,11 @@ rte_vhost_driver_get_vdpa_device(const char *path)
 	struct vhost_user_socket *vsocket;
 	struct rte_vdpa_device *dev = NULL;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		dev = vsocket->vdpa_dev;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return dev;
 }
@@ -629,7 +629,7 @@ rte_vhost_driver_disable_features(const char *path, uint64_t features)
 {
 	struct vhost_user_socket *vsocket;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);

 	/* Note that use_builtin_virtio_net is not affected by this function
@@ -639,7 +639,7 @@ rte_vhost_driver_disable_features(const char *path, uint64_t features)

 	if (vsocket)
 		vsocket->features &= ~features;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? 0 : -1;
 }
@@ -649,7 +649,7 @@ rte_vhost_driver_enable_features(const char *path, uint64_t features)
 {
 	struct vhost_user_socket *vsocket;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket) {
 		if ((vsocket->supported_features & features) != features) {
@@ -657,12 +657,12 @@ rte_vhost_driver_enable_features(const char *path, uint64_t features)
 			 * trying to enable features the driver doesn't
 			 * support.
 			 */
-			pthread_mutex_unlock(&vhost_user.mutex);
+			rte_thread_mutex_unlock(&vhost_user.mutex);
 			return -1;
 		}
 		vsocket->features |= features;
 	}
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? 0 : -1;
 }
@@ -672,7 +672,7 @@ rte_vhost_driver_set_features(const char *path, uint64_t features)
 {
 	struct vhost_user_socket *vsocket;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket) {
 		vsocket->supported_features = features;
@@ -683,7 +683,7 @@ rte_vhost_driver_set_features(const char *path, uint64_t features)
 		 */
 		vsocket->use_builtin_virtio_net = false;
 	}
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? 0 : -1;
 }
@@ -696,7 +696,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	struct rte_vdpa_device *vdpa_dev;
 	int ret = 0;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
 		VHOST_LOG_CONFIG(ERR,
@@ -722,7 +722,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	*features = vsocket->features & vdpa_features;

 unlock_exit:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return ret;
 }
@@ -732,11 +732,11 @@ rte_vhost_driver_set_protocol_features(const char *path,
 {
 	struct vhost_user_socket *vsocket;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->protocol_features = protocol_features;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? 0 : -1;
 }
@@ -749,7 +749,7 @@ rte_vhost_driver_get_protocol_features(const char *path,
 	struct rte_vdpa_device *vdpa_dev;
 	int ret = 0;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
 		VHOST_LOG_CONFIG(ERR,
@@ -777,7 +777,7 @@ rte_vhost_driver_get_protocol_features(const char *path,
 			& vdpa_protocol_features;

 unlock_exit:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return ret;
 }
@@ -789,7 +789,7 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	struct rte_vdpa_device *vdpa_dev;
 	int ret = 0;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
 		VHOST_LOG_CONFIG(ERR,
@@ -815,7 +815,7 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	*queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num);

 unlock_exit:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return ret;
 }
@@ -847,7 +847,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 	if (!path)
 		return -1;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);

 	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
 		VHOST_LOG_CONFIG(ERR,
@@ -867,7 +867,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 		goto out;
 	}
 	TAILQ_INIT(&vsocket->conn_list);
-	ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
+	ret = rte_thread_mutex_init(&vsocket->conn_mutex);
 	if (ret) {
 		VHOST_LOG_CONFIG(ERR,
 			"error: failed to init connection mutex\n");
@@ -948,7 +948,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)

 	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
 		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
-		if (vsocket->reconnect && reconn_tid == 0) {
+		if (vsocket->reconnect && reconn_tid.opaque_id == 0) {
 			if (vhost_user_reconnect_init() != 0)
 				goto out_mutex;
 		}
@@ -962,18 +962,18 @@ rte_vhost_driver_register(const char *path, uint64_t flags)

 	vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;

-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return ret;

 out_mutex:
-	if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
+	if (rte_thread_mutex_destroy(&vsocket->conn_mutex)) {
 		VHOST_LOG_CONFIG(ERR,
 			"error: failed to destroy connection mutex\n");
 	}
 out_free:
 	vhost_user_socket_mem_free(vsocket);
 out:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return ret;
 }

@@ -984,7 +984,7 @@ vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
 	int found = false;
 	struct vhost_user_reconnect *reconn, *next;

-	pthread_mutex_lock(&reconn_list.mutex);
+	rte_thread_mutex_lock(&reconn_list.mutex);

 	for (reconn = TAILQ_FIRST(&reconn_list.head);
 	     reconn != NULL; reconn = next) {
@@ -998,7 +998,7 @@ vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
 			break;
 		}
 	}
-	pthread_mutex_unlock(&reconn_list.mutex);
+	rte_thread_mutex_unlock(&reconn_list.mutex);
 	return found;
 }
@@ -1016,13 +1016,13 @@ rte_vhost_driver_unregister(const char *path)
 		return -1;

 again:
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);

 	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
 		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];

 		if (!strcmp(vsocket->path, path)) {
-			pthread_mutex_lock(&vsocket->conn_mutex);
+			rte_thread_mutex_lock(&vsocket->conn_mutex);
 			for (conn = TAILQ_FIRST(&vsocket->conn_list);
 			     conn != NULL; conn = next) {
@@ -1036,9 +1036,10 @@ rte_vhost_driver_unregister(const char *path)
 				 */
 				if (fdset_try_del(&vhost_user.fdset,
 						conn->connfd) == -1) {
-					pthread_mutex_unlock(
+					rte_thread_mutex_unlock(
 						&vsocket->conn_mutex);
-					pthread_mutex_unlock(&vhost_user.mutex);
+					rte_thread_mutex_unlock(
+						&vhost_user.mutex);
 					goto again;
 				}
@@ -1050,7 +1051,7 @@ rte_vhost_driver_unregister(const char *path)
 				TAILQ_REMOVE(&vsocket->conn_list, conn, next);
 				free(conn);
 			}
-			pthread_mutex_unlock(&vsocket->conn_mutex);
+			rte_thread_mutex_unlock(&vsocket->conn_mutex);

 			if (vsocket->is_server) {
 				/*
@@ -1060,7 +1061,8 @@ rte_vhost_driver_unregister(const char *path)
 				 */
 				if (fdset_try_del(&vhost_user.fdset,
 						vsocket->socket_fd) == -1) {
-					pthread_mutex_unlock(&vhost_user.mutex);
+					rte_thread_mutex_unlock(
+						&vhost_user.mutex);
 					goto again;
 				}
@@ -1070,18 +1072,18 @@ rte_vhost_driver_unregister(const char *path)
 				vhost_user_remove_reconnect(vsocket);
 			}

-			pthread_mutex_destroy(&vsocket->conn_mutex);
+			rte_thread_mutex_destroy(&vsocket->conn_mutex);
 			vhost_user_socket_mem_free(vsocket);

 			count = --vhost_user.vsocket_cnt;
 			vhost_user.vsockets[i] = vhost_user.vsockets[count];
 			vhost_user.vsockets[count] = NULL;
-			pthread_mutex_unlock(&vhost_user.mutex);
+			rte_thread_mutex_unlock(&vhost_user.mutex);

 			return 0;
 		}
 	}
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return -1;
 }
@@ -1095,11 +1097,11 @@ rte_vhost_driver_callback_register(const char *path,
 {
 	struct vhost_user_socket *vsocket;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->notify_ops = ops;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? 0 : -1;
 }
@@ -1109,9 +1111,9 @@ vhost_driver_callback_get(const char *path)
 {
 	struct vhost_user_socket *vsocket;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	return vsocket ? vsocket->notify_ops : NULL;
 }
@@ -1120,16 +1122,16 @@ int
 rte_vhost_driver_start(const char *path)
 {
 	struct vhost_user_socket *vsocket;
-	static pthread_t fdset_tid;
+	static rte_thread_t fdset_tid;

-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);

 	if (!vsocket)
 		return -1;

-	if (fdset_tid == 0) {
+	if (fdset_tid.opaque_id == 0) {
 		/**
 		 * create a pipe which will be waited by poll and notified to
 		 * rebuild the wait list of poll.
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index a70fe01d8f..a9264d01fd 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -26,7 +26,7 @@
 #include "vhost_user.h"

 struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
-pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
+rte_thread_mutex_t vhost_dev_lock = RTE_THREAD_MUTEX_INITIALIZER;

 /* Called with iotlb_lock read-locked */
 uint64_t
@@ -656,7 +656,7 @@ vhost_new_device(void)
 	struct virtio_net *dev;
 	int i;

-	pthread_mutex_lock(&vhost_dev_lock);
+	rte_thread_mutex_lock(&vhost_dev_lock);
 	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
 		if (vhost_devices[i] == NULL)
 			break;
@@ -665,7 +665,7 @@ vhost_new_device(void)
 	if (i == MAX_VHOST_DEVICE) {
 		VHOST_LOG_CONFIG(ERR,
 			"Failed to find a free slot for new device.\n");
-		pthread_mutex_unlock(&vhost_dev_lock);
+		rte_thread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}

@@ -673,12 +673,12 @@ vhost_new_device(void)
 	if (dev == NULL) {
 		VHOST_LOG_CONFIG(ERR,
 			"Failed to allocate memory for new dev.\n");
-		pthread_mutex_unlock(&vhost_dev_lock);
+		rte_thread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}

 	vhost_devices[i] = dev;
-	pthread_mutex_unlock(&vhost_dev_lock);
+	rte_thread_mutex_unlock(&vhost_dev_lock);

 	dev->vid = i;
 	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
diff --git a/meson_options.txt b/meson_options.txt
index 56bdfd0f0a..46d156cf2f 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -42,5 +42,7 @@ option('enable_trace_fp', type: 'boolean', value: false,
        description: 'enable fast path trace points.')
 option('tests', type: 'boolean', value: true,
        description: 'build unit tests')
+option('use_external_thread_lib', type: 'boolean', value: false,
+       description: 'use an external thread library')
 option('use_hpet', type: 'boolean', value: false,
        description: 'use HPET timer in EAL')
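
For reference, the conversion applied above is mechanical: pthread_mutex_t and PTHREAD_MUTEX_INITIALIZER become the EAL wrappers, the pthread_mutex_*() calls are replaced one-for-one, and thread ids such as reconn_tid become rte_thread_t, tested through their opaque_id field. A minimal usage sketch of the resulting pattern follows; the names worker_lock and worker_register() are invented for illustration only, and the wrapper prototypes are assumed to be the ones introduced by the EAL patches earlier in this series:

#include <rte_thread.h>

/* Illustrative module-local state guarded by the EAL mutex wrapper. */
static rte_thread_mutex_t worker_lock = RTE_THREAD_MUTEX_INITIALIZER;
static int worker_count;

static int
worker_register(void)
{
	int id;

	rte_thread_mutex_lock(&worker_lock);	/* was pthread_mutex_lock() */
	id = worker_count++;
	rte_thread_mutex_unlock(&worker_lock);	/* was pthread_mutex_unlock() */

	return id;
}

Dynamically initialized locks follow the same pattern as vhost_user_reconnect_init() and rte_vhost_driver_register() above: rte_thread_mutex_init(&m) takes no attribute argument (unlike pthread_mutex_init()), and the lock is released with rte_thread_mutex_destroy(&m).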