[v8,10/10] Enable the new EAL thread API

Message ID 1622849908-5710-11-git-send-email-navasile@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Headers
Series eal: Add EAL API for threading |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation fail Compilation issues
ci/iol-intel-Functional success Functional Testing PASS
ci/intel-Testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-abi-testing warning Testing issues
ci/iol-testing fail Testing issues

Commit Message

Narcisa Ana Maria Vasile June 4, 2021, 11:38 p.m. UTC
From: Narcisa Vasile <navasile@microsoft.com>

Rename pthread_* occurrences with the new rte_thread_* API.
Enable the new API in the build system.
---
 app/test/process.h                            |   8 +-
 app/test/test_lcores.c                        |  16 +-
 app/test/test_link_bonding.c                  |  10 +-
 app/test/test_lpm_perf.c                      |  12 +-
 config/meson.build                            |   4 +
 drivers/bus/dpaa/base/qbman/bman_driver.c     |   5 +-
 drivers/bus/dpaa/base/qbman/dpaa_sys.c        |  14 +-
 drivers/bus/dpaa/base/qbman/process.c         |   6 +-
 drivers/bus/dpaa/dpaa_bus.c                   |  14 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |  19 +-
 drivers/compress/mlx5/mlx5_compress.c         |  10 +-
 drivers/event/dlb2/pf/base/dlb2_osdep.h       |   4 +-
 drivers/net/af_xdp/rte_eth_af_xdp.c           |  18 +-
 drivers/net/ark/ark_ethdev.c                  |   2 +-
 drivers/net/ark/ark_pktgen.c                  |   4 +-
 drivers/net/atlantic/atl_ethdev.c             |   4 +-
 drivers/net/atlantic/atl_types.h              |   5 +-
 .../net/atlantic/hw_atl/hw_atl_utils_fw2x.c   |  26 +--
 drivers/net/axgbe/axgbe_common.h              |   2 +-
 drivers/net/axgbe/axgbe_dev.c                 |   8 +-
 drivers/net/axgbe/axgbe_ethdev.c              |   8 +-
 drivers/net/axgbe/axgbe_ethdev.h              |   8 +-
 drivers/net/axgbe/axgbe_i2c.c                 |   4 +-
 drivers/net/axgbe/axgbe_mdio.c                |   8 +-
 drivers/net/axgbe/axgbe_phy_impl.c            |   6 +-
 drivers/net/bnxt/bnxt.h                       |  16 +-
 drivers/net/bnxt/bnxt_cpr.c                   |   4 +-
 drivers/net/bnxt/bnxt_ethdev.c                |  52 ++---
 drivers/net/bnxt/bnxt_irq.c                   |   8 +-
 drivers/net/bnxt/bnxt_reps.c                  |  10 +-
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c            |  34 ++--
 drivers/net/bnxt/tf_ulp/bnxt_ulp.h            |   4 +-
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c          |  24 +--
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h          |   2 +-
 drivers/net/ena/base/ena_plat_dpdk.h          |  12 +-
 drivers/net/enic/enic.h                       |   2 +-
 drivers/net/ice/ice_dcf_parent.c              |   6 +-
 drivers/net/ipn3ke/ipn3ke_representor.c       |   6 +-
 drivers/net/ixgbe/ixgbe_ethdev.c              |   2 +-
 drivers/net/ixgbe/ixgbe_ethdev.h              |   2 +-
 drivers/net/kni/rte_eth_kni.c                 |   8 +-
 drivers/net/mlx5/linux/mlx5_os.c              |   2 +-
 drivers/net/mlx5/mlx5.c                       |  20 +-
 drivers/net/mlx5/mlx5.h                       |   2 +-
 drivers/net/mlx5/mlx5_txpp.c                  |   8 +-
 drivers/net/mlx5/windows/mlx5_flow_os.c       |  10 +-
 drivers/net/mlx5/windows/mlx5_os.c            |   2 +-
 drivers/net/qede/base/bcm_osal.h              |   8 +-
 drivers/net/vhost/rte_eth_vhost.c             |  24 +--
 .../net/virtio/virtio_user/virtio_user_dev.c  |  30 +--
 .../net/virtio/virtio_user/virtio_user_dev.h  |   2 +-
 drivers/raw/ifpga/ifpga_rawdev.c              |   6 +-
 drivers/vdpa/ifc/ifcvf_vdpa.c                 |  46 ++---
 drivers/vdpa/mlx5/mlx5_vdpa.c                 |  24 +--
 drivers/vdpa/mlx5/mlx5_vdpa.h                 |   4 +-
 drivers/vdpa/mlx5/mlx5_vdpa_event.c           |  46 ++---
 examples/kni/main.c                           |   6 +-
 .../performance-thread/pthread_shim/main.c    |   2 +-
 examples/vhost/main.c                         |   2 +-
 examples/vhost_blk/vhost_blk.c                |  12 +-
 lib/eal/common/eal_common_options.c           |   6 +-
 lib/eal/common/eal_common_proc.c              |  48 ++---
 lib/eal/common/eal_common_thread.c            |  42 ++--
 lib/eal/common/eal_common_trace.c             |   2 +-
 lib/eal/common/eal_private.h                  |   2 +-
 lib/eal/common/eal_thread.h                   |   6 +
 lib/eal/common/malloc_mp.c                    |  32 +--
 lib/eal/common/meson.build                    |   1 +
 lib/eal/freebsd/eal.c                         |  42 +++-
 lib/eal/freebsd/eal_alarm.c                   |  12 +-
 lib/eal/freebsd/eal_interrupts.c              |   4 +-
 lib/eal/freebsd/eal_thread.c                  |  14 +-
 lib/eal/include/meson.build                   |   1 +
 lib/eal/include/rte_lcore.h                   |   8 +-
 lib/eal/include/rte_per_lcore.h               |   2 -
 lib/eal/linux/eal.c                           |  46 +++--
 lib/eal/linux/eal_alarm.c                     |  10 +-
 lib/eal/linux/eal_interrupts.c                |   4 +-
 lib/eal/linux/eal_thread.c                    |  18 +-
 lib/eal/linux/eal_timer.c                     |   2 +-
 lib/eal/unix/meson.build                      |   1 -
 lib/eal/unix/rte_thread.c                     |  92 ---------
 lib/eal/version.map                           |  22 ++
 lib/eal/windows/eal.c                         |  43 +++-
 lib/eal/windows/eal_interrupts.c              |  10 +-
 lib/eal/windows/eal_thread.c                  |  28 +--
 lib/eal/windows/eal_windows.h                 |  10 -
 lib/eal/windows/include/meson.build           |   1 +
 lib/eal/windows/include/pthread.h             | 192 ------------------
 lib/eal/windows/include/sched.h               |   2 +-
 lib/eal/windows/meson.build                   |   7 +-
 lib/ethdev/rte_ethdev.c                       |   4 +-
 lib/ethdev/rte_ethdev_core.h                  |   5 +-
 lib/ethdev/rte_flow.c                         |   4 +-
 lib/eventdev/rte_event_eth_rx_adapter.c       |   6 +-
 lib/vhost/fd_man.c                            |  40 ++--
 lib/vhost/fd_man.h                            |   6 +-
 lib/vhost/socket.c                            | 130 ++++++------
 lib/vhost/vhost.c                             |  10 +-
 meson_options.txt                             |   2 +
 100 files changed, 703 insertions(+), 887 deletions(-)
 delete mode 100644 lib/eal/unix/rte_thread.c
 delete mode 100644 lib/eal/windows/include/pthread.h
  

Patch

diff --git a/app/test/process.h b/app/test/process.h
index a09a088477..9e4be17bad 100644
--- a/app/test/process.h
+++ b/app/test/process.h
@@ -26,7 +26,7 @@ 
 
 #ifdef RTE_LIB_PDUMP
 #ifdef RTE_NET_RING
-#include <pthread.h>
+#include <rte_thread.h>
 extern void *send_pkts(void *empty);
 extern uint16_t flag_for_send_pkts;
 #endif
@@ -47,7 +47,7 @@  process_dup(const char *const argv[], int numargs, const char *env_value)
 	char path[32];
 #ifdef RTE_LIB_PDUMP
 #ifdef RTE_NET_RING
-	pthread_t thread;
+	rte_thread_t thread;
 	int rc;
 #endif
 #endif
@@ -128,7 +128,7 @@  process_dup(const char *const argv[], int numargs, const char *env_value)
 #ifdef RTE_LIB_PDUMP
 #ifdef RTE_NET_RING
 	if ((strcmp(env_value, "run_pdump_server_tests") == 0)) {
-		rc = pthread_create(&thread, NULL, &send_pkts, NULL);
+		rc = rte_thread_create(&thread, NULL, &send_pkts, NULL);
 		if (rc != 0) {
 			rte_panic("Cannot start send pkts thread: %s\n",
 				  strerror(rc));
@@ -143,7 +143,7 @@  process_dup(const char *const argv[], int numargs, const char *env_value)
 #ifdef RTE_NET_RING
 	if ((strcmp(env_value, "run_pdump_server_tests") == 0)) {
 		flag_for_send_pkts = 0;
-		pthread_join(thread, NULL);
+		rte_thread_join(thread, NULL);
 	}
 #endif
 #endif
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 19a7ab9fce..c507e423e0 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -14,7 +14,7 @@ 
 struct thread_context {
 	enum { INIT, ERROR, DONE } state;
 	bool lcore_id_any;
-	pthread_t id;
+	rte_thread_t id;
 	unsigned int *registered_count;
 };
 
@@ -77,7 +77,7 @@  test_non_eal_lcores(unsigned int eal_threads_count)
 		t->state = INIT;
 		t->registered_count = &registered_count;
 		t->lcore_id_any = false;
-		if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+		if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
 			break;
 		non_eal_threads_count++;
 	}
@@ -96,7 +96,7 @@  test_non_eal_lcores(unsigned int eal_threads_count)
 	t->state = INIT;
 	t->registered_count = &registered_count;
 	t->lcore_id_any = true;
-	if (pthread_create(&t->id, NULL, thread_loop, t) == 0) {
+	if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
 		non_eal_threads_count++;
 		printf("non-EAL threads count: %u\n", non_eal_threads_count);
 		while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -110,7 +110,7 @@  test_non_eal_lcores(unsigned int eal_threads_count)
 	ret = 0;
 	for (i = 0; i < non_eal_threads_count; i++) {
 		t = &thread_contexts[i];
-		pthread_join(t->id, NULL);
+		rte_thread_join(t->id, NULL);
 		if (t->state != DONE)
 			ret = -1;
 	}
@@ -262,7 +262,7 @@  test_non_eal_lcores_callback(unsigned int eal_threads_count)
 	t->state = INIT;
 	t->registered_count = &registered_count;
 	t->lcore_id_any = false;
-	if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+	if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
 		goto cleanup_threads;
 	non_eal_threads_count++;
 	while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -285,7 +285,7 @@  test_non_eal_lcores_callback(unsigned int eal_threads_count)
 	t->state = INIT;
 	t->registered_count = &registered_count;
 	t->lcore_id_any = true;
-	if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+	if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
 		goto cleanup_threads;
 	non_eal_threads_count++;
 	while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -309,7 +309,7 @@  test_non_eal_lcores_callback(unsigned int eal_threads_count)
 	ret = 0;
 	for (i = 0; i < non_eal_threads_count; i++) {
 		t = &thread_contexts[i];
-		pthread_join(t->id, NULL);
+		rte_thread_join(t->id, NULL);
 		if (t->state != DONE)
 			ret = -1;
 	}
@@ -330,7 +330,7 @@  test_non_eal_lcores_callback(unsigned int eal_threads_count)
 	__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
 	for (i = 0; i < non_eal_threads_count; i++) {
 		t = &thread_contexts[i];
-		pthread_join(t->id, NULL);
+		rte_thread_join(t->id, NULL);
 	}
 error:
 	if (handle[1] != NULL)
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 8a5c8310a8..3d9663f5ee 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -203,7 +203,7 @@  configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
 static int slaves_initialized;
 static int mac_slaves_initialized;
 
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t mutex = RTE_THREAD_MUTEX_INITIALIZER;
 static pthread_cond_t cvar = PTHREAD_COND_INITIALIZER;
 
 
@@ -1191,11 +1191,11 @@  test_bonding_lsc_event_callback(uint16_t port_id __rte_unused,
 		void *param __rte_unused,
 		void *ret_param __rte_unused)
 {
-	pthread_mutex_lock(&mutex);
+	rte_thread_mutex_lock(&mutex);
 	test_lsc_interrupt_count++;
 
 	pthread_cond_signal(&cvar);
-	pthread_mutex_unlock(&mutex);
+	rte_thread_mutex_unlock(&mutex);
 
 	return 0;
 }
@@ -1220,11 +1220,11 @@  lsc_timeout(int wait_us)
 		ts.tv_sec += 1;
 	}
 
-	pthread_mutex_lock(&mutex);
+	rte_thread_mutex_lock(&mutex);
 	if (test_lsc_interrupt_count < 1)
 		retval = pthread_cond_timedwait(&cvar, &mutex, &ts);
 
-	pthread_mutex_unlock(&mutex);
+	rte_thread_mutex_unlock(&mutex);
 
 	if (retval == 0 && test_lsc_interrupt_count < 1)
 		return -1;
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 2bed00d064..ce79d9c17a 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -25,7 +25,7 @@  static volatile uint32_t thr_id;
 static uint64_t gwrite_cycles;
 static uint32_t num_writers;
 /* LPM APIs are not thread safe, use mutex to provide thread safety */
-static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t lpm_mutex = RTE_THREAD_MUTEX_INITIALIZER;
 
 /* Report quiescent state interval every 1024 lookups. Larger critical
  * sections in reader will result in writer polling multiple times.
@@ -443,7 +443,7 @@  test_lpm_rcu_qsbr_writer(void *arg)
 		/* Add all the entries */
 		for (j = si; j < ei; j++) {
 			if (num_writers > 1)
-				pthread_mutex_lock(&lpm_mutex);
+				rte_thread_mutex_lock(&lpm_mutex);
 			if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
 					large_ldepth_route_table[j].depth,
 					next_hop_add) != 0) {
@@ -452,13 +452,13 @@  test_lpm_rcu_qsbr_writer(void *arg)
 				goto error;
 			}
 			if (num_writers > 1)
-				pthread_mutex_unlock(&lpm_mutex);
+				rte_thread_mutex_unlock(&lpm_mutex);
 		}
 
 		/* Delete all the entries */
 		for (j = si; j < ei; j++) {
 			if (num_writers > 1)
-				pthread_mutex_lock(&lpm_mutex);
+				rte_thread_mutex_lock(&lpm_mutex);
 			if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
 				large_ldepth_route_table[j].depth) != 0) {
 				printf("Failed to delete iteration %d, route# %d\n",
@@ -466,7 +466,7 @@  test_lpm_rcu_qsbr_writer(void *arg)
 				goto error;
 			}
 			if (num_writers > 1)
-				pthread_mutex_unlock(&lpm_mutex);
+				rte_thread_mutex_unlock(&lpm_mutex);
 		}
 	}
 
@@ -478,7 +478,7 @@  test_lpm_rcu_qsbr_writer(void *arg)
 
 error:
 	if (num_writers > 1)
-		pthread_mutex_unlock(&lpm_mutex);
+		rte_thread_mutex_unlock(&lpm_mutex);
 	return -1;
 }
 
diff --git a/config/meson.build b/config/meson.build
index 017bb2efbb..4070eb200d 100644
--- a/config/meson.build
+++ b/config/meson.build
@@ -262,6 +262,10 @@  else # for 32-bit we need smaller reserved memory areas
     dpdk_conf.set('RTE_MAX_MEM_MB', 2048)
 endif
 
+if is_windows
+	dpdk_conf.set('RTE_USE_WINDOWS_THREAD_TYPES', not get_option('use_external_thread_lib'))
+endif
+
 
 compile_time_cpuflags = []
 subdir(arch_subdir)
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index ee35e03da1..c66a0946ca 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -38,11 +38,10 @@  static int fsl_bman_portal_init(uint32_t idx, int is_shared)
 	struct dpaa_ioctl_irq_map irq_map;
 
 	/* Verify the thread's cpu-affinity */
-	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
-				     &cpuset);
+	ret = rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset);
 	if (ret) {
 		errno = ret;
-		err(0, "pthread_getaffinity_np()");
+		err(0, "rte_thread_get_affinity_by_id()");
 		return ret;
 	}
 	pcfg.cpu = -1;
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_sys.c b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
index 9d6bfd40a2..dc5f02bec1 100644
--- a/drivers/bus/dpaa/base/qbman/dpaa_sys.c
+++ b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
@@ -18,16 +18,16 @@  struct process_interrupt {
 };
 
 static COMPAT_LIST_HEAD(process_irq_list);
-static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t process_irq_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 static void process_interrupt_install(struct process_interrupt *irq)
 {
 	int ret;
 	/* Add the irq to the end of the list */
-	ret = pthread_mutex_lock(&process_irq_lock);
+	ret = rte_thread_mutex_lock(&process_irq_lock);
 	assert(!ret);
 	list_add_tail(&irq->node, &process_irq_list);
-	ret = pthread_mutex_unlock(&process_irq_lock);
+	ret = rte_thread_mutex_unlock(&process_irq_lock);
 	assert(!ret);
 }
 
@@ -35,10 +35,10 @@  static void process_interrupt_remove(struct process_interrupt *irq)
 {
 	int ret;
 
-	ret = pthread_mutex_lock(&process_irq_lock);
+	ret = rte_thread_mutex_lock(&process_irq_lock);
 	assert(!ret);
 	list_del(&irq->node);
-	ret = pthread_mutex_unlock(&process_irq_lock);
+	ret = rte_thread_mutex_unlock(&process_irq_lock);
 	assert(!ret);
 }
 
@@ -47,14 +47,14 @@  static struct process_interrupt *process_interrupt_find(int irq_num)
 	int ret;
 	struct process_interrupt *i = NULL;
 
-	ret = pthread_mutex_lock(&process_irq_lock);
+	ret = rte_thread_mutex_lock(&process_irq_lock);
 	assert(!ret);
 	list_for_each_entry(i, &process_irq_list, node) {
 		if (i->irq == irq_num)
 			goto done;
 	}
 done:
-	ret = pthread_mutex_unlock(&process_irq_lock);
+	ret = rte_thread_mutex_unlock(&process_irq_lock);
 	assert(!ret);
 	return i;
 }
diff --git a/drivers/bus/dpaa/base/qbman/process.c b/drivers/bus/dpaa/base/qbman/process.c
index 9bc92681cd..6d2d10cdf0 100644
--- a/drivers/bus/dpaa/base/qbman/process.c
+++ b/drivers/bus/dpaa/base/qbman/process.c
@@ -21,7 +21,7 @@ 
  * what the lock is for.
  */
 static int fd = -1;
-static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t fd_init_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 static int check_fd(void)
 {
@@ -29,12 +29,12 @@  static int check_fd(void)
 
 	if (fd >= 0)
 		return 0;
-	ret = pthread_mutex_lock(&fd_init_lock);
+	ret = rte_thread_mutex_lock(&fd_init_lock);
 	assert(!ret);
 	/* check again with the lock held */
 	if (fd < 0)
 		fd = open(PROCESS_PATH, O_RDWR);
-	ret = pthread_mutex_unlock(&fd_init_lock);
+	ret = rte_thread_mutex_unlock(&fd_init_lock);
 	assert(!ret);
 	return (fd >= 0) ? 0 : -ENODEV;
 }
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 2f48d4036b..bd83cab577 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -10,7 +10,7 @@ 
 #include <limits.h>
 #include <sched.h>
 #include <signal.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include <sys/types.h>
 #include <sys/syscall.h>
 #include <sys/eventfd.h>
@@ -48,7 +48,7 @@  static struct rte_dpaa_bus rte_dpaa_bus;
 struct netcfg_info *dpaa_netcfg;
 
 /* define a variable to hold the portal_key, once created.*/
-static pthread_key_t dpaa_portal_key;
+static rte_thread_key dpaa_portal_key;
 
 unsigned int dpaa_svr_family;
 
@@ -316,10 +316,10 @@  int rte_dpaa_portal_init(void *arg)
 	DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
 	DPAA_PER_LCORE_PORTAL->tid = syscall(SYS_gettid);
 
-	ret = pthread_setspecific(dpaa_portal_key,
+	ret = rte_thread_value_set(dpaa_portal_key,
 				  (void *)DPAA_PER_LCORE_PORTAL);
 	if (ret) {
-		DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
+		DPAA_BUS_LOG(ERR, "rte_thread_value_set failed on core %u"
 			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
 		dpaa_portal_finish(NULL);
 
@@ -377,7 +377,7 @@  dpaa_portal_finish(void *arg)
 	bman_thread_finish();
 	qman_thread_finish();
 
-	pthread_setspecific(dpaa_portal_key, NULL);
+	rte_thread_value_set(dpaa_portal_key, NULL);
 
 	rte_free(dpaa_io_portal);
 	dpaa_io_portal = NULL;
@@ -453,9 +453,9 @@  rte_dpaa_bus_scan(void)
 	/* create the key, supplying a function that'll be invoked
 	 * when a portal affined thread will be deleted.
 	 */
-	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
+	ret = rte_thread_key_create(&dpaa_portal_key, dpaa_portal_finish);
 	if (ret) {
-		DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
+		DPAA_BUS_LOG(DEBUG, "Unable to create thread key. (%d)", ret);
 		dpaa_clean_device_list();
 		return ret;
 	}
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index c6f8312a1d..dffbdff666 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -13,7 +13,7 @@ 
 #include <stdarg.h>
 #include <inttypes.h>
 #include <signal.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include <sys/types.h>
 #include <sys/queue.h>
 #include <sys/ioctl.h>
@@ -59,7 +59,7 @@  uint8_t dpaa2_dqrr_size;
 uint8_t dpaa2_eqcr_size;
 
 /* Variable to hold the portal_key, once created.*/
-static pthread_key_t dpaa2_portal_key;
+static rte_thread_key dpaa2_portal_key;
 
 /*Stashing Macros default for LS208x*/
 static int dpaa2_core_cluster_base = 0x04;
@@ -92,10 +92,9 @@  dpaa2_get_core_id(void)
 	rte_cpuset_t cpuset;
 	int i, ret, cpu_id = -1;
 
-	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
-		&cpuset);
+	ret = rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset);
 	if (ret) {
-		DPAA2_BUS_ERR("pthread_getaffinity_np() failed");
+		DPAA2_BUS_ERR("rte_thread_get_affinity_by_id() failed");
 		return ret;
 	}
 
@@ -296,9 +295,9 @@  static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
 		}
 	}
 
-	ret = pthread_setspecific(dpaa2_portal_key, (void *)dpio_dev);
+	ret = rte_thread_value_set(dpaa2_portal_key, (void *)dpio_dev);
 	if (ret) {
-		DPAA2_BUS_ERR("pthread_setspecific failed with ret: %d", ret);
+		DPAA2_BUS_ERR("rte_thread_value_set failed with ret: %d", ret);
 		dpaa2_put_qbman_swp(dpio_dev);
 		return NULL;
 	}
@@ -357,7 +356,7 @@  static void dpaa2_portal_finish(void *arg)
 	dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).dpio_dev);
 	dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev);
 
-	pthread_setspecific(dpaa2_portal_key, NULL);
+	rte_thread_value_set(dpaa2_portal_key, NULL);
 }
 
 static int
@@ -515,10 +514,10 @@  dpaa2_create_dpio_device(int vdev_fd,
 		/* create the key, supplying a function that'll be invoked
 		 * when a portal affined thread will be deleted.
 		 */
-		ret = pthread_key_create(&dpaa2_portal_key,
+		ret = rte_thread_key_create(&dpaa2_portal_key,
 					 dpaa2_portal_finish);
 		if (ret) {
-			DPAA2_BUS_DEBUG("Unable to create pthread key (%d)",
+			DPAA2_BUS_DEBUG("Unable to create thread key (%d)",
 					ret);
 			goto err;
 		}
diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c
index 80c564f10b..b1f14ea606 100644
--- a/drivers/compress/mlx5/mlx5_compress.c
+++ b/drivers/compress/mlx5/mlx5_compress.c
@@ -72,7 +72,7 @@  struct mlx5_compress_qp {
 
 TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
 				TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
-static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t priv_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 int mlx5_compress_logtype;
 
@@ -804,9 +804,9 @@  mlx5_compress_pci_probe(struct rte_pci_driver *pci_drv,
 	}
 	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
 	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
-	pthread_mutex_lock(&priv_list_lock);
+	rte_thread_mutex_lock(&priv_list_lock);
 	TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
-	pthread_mutex_unlock(&priv_list_lock);
+	rte_thread_mutex_unlock(&priv_list_lock);
 	return 0;
 }
 
@@ -826,13 +826,13 @@  mlx5_compress_pci_remove(struct rte_pci_device *pdev)
 {
 	struct mlx5_compress_priv *priv = NULL;
 
-	pthread_mutex_lock(&priv_list_lock);
+	rte_thread_mutex_lock(&priv_list_lock);
 	TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
 		if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) != 0)
 			break;
 	if (priv)
 		TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
-	pthread_mutex_unlock(&priv_list_lock);
+	rte_thread_mutex_unlock(&priv_list_lock);
 	if (priv) {
 		mlx5_mr_release_cache(&priv->mr_scache);
 		mlx5_compress_hw_global_release(priv);
diff --git a/drivers/event/dlb2/pf/base/dlb2_osdep.h b/drivers/event/dlb2/pf/base/dlb2_osdep.h
index cffe22f3c5..bdd174821c 100644
--- a/drivers/event/dlb2/pf/base/dlb2_osdep.h
+++ b/drivers/event/dlb2/pf/base/dlb2_osdep.h
@@ -8,7 +8,7 @@ 
 #include <string.h>
 #include <time.h>
 #include <unistd.h>
-#include <pthread.h>
+#include <rte_thread.h>
 
 #include <rte_string_fns.h>
 #include <rte_cycles.h>
@@ -194,7 +194,7 @@  static void *dlb2_complete_queue_map_unmap(void *__args)
 static inline void os_schedule_work(struct dlb2_hw *hw)
 {
 	struct dlb2_dev *dlb2_dev;
-	pthread_t complete_queue_map_unmap_thread;
+	rte_thread_t complete_queue_map_unmap_thread;
 	int ret;
 
 	dlb2_dev = container_of(hw, struct dlb2_dev, hw);
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index eb5660a3dc..41fe063639 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -178,7 +178,7 @@  TAILQ_HEAD(internal_list_head, internal_list);
 static struct internal_list_head internal_list =
 	TAILQ_HEAD_INITIALIZER(internal_list);
 
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 static inline int
@@ -676,7 +676,7 @@  find_internal_resource(struct pmd_internals *port_int)
 	if (port_int == NULL)
 		return NULL;
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 
 	TAILQ_FOREACH(list, &internal_list, next) {
 		struct pmd_internals *list_int =
@@ -687,7 +687,7 @@  find_internal_resource(struct pmd_internals *port_int)
 		}
 	}
 
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	if (!found)
 		return NULL;
@@ -725,7 +725,7 @@  get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
 	if (mb_pool == NULL)
 		return ret;
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 
 	TAILQ_FOREACH(list, &internal_list, next) {
 		internals = list->eth_dev->data->dev_private;
@@ -751,7 +751,7 @@  get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
 	}
 
 out:
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	return ret;
 }
@@ -780,9 +780,9 @@  eth_dev_configure(struct rte_eth_dev *dev)
 			return -1;
 
 		list->eth_dev = dev;
-		pthread_mutex_lock(&internal_list_lock);
+		rte_thread_mutex_lock(&internal_list_lock);
 		TAILQ_INSERT_TAIL(&internal_list, list, next);
-		pthread_mutex_unlock(&internal_list_lock);
+		rte_thread_mutex_unlock(&internal_list_lock);
 	}
 
 	return 0;
@@ -948,9 +948,9 @@  eth_dev_close(struct rte_eth_dev *dev)
 		/* Remove ethdev from list used to track and share UMEMs */
 		list = find_internal_resource(internals);
 		if (list) {
-			pthread_mutex_lock(&internal_list_lock);
+			rte_thread_mutex_lock(&internal_list_lock);
 			TAILQ_REMOVE(&internal_list, list, next);
-			pthread_mutex_unlock(&internal_list_lock);
+			rte_thread_mutex_unlock(&internal_list_lock);
 			rte_free(list);
 		}
 	}
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 377299b14c..481442b31e 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -563,7 +563,7 @@  eth_ark_dev_start(struct rte_eth_dev *dev)
 		ark_pktchkr_run(ark->pc);
 
 	if (ark->start_pg && (dev->data->port_id == 0)) {
-		pthread_t thread;
+		rte_thread_t thread;
 
 		/* Delay packet generatpr start allow the hardware to be ready
 		 * This is only used for sanity checking with internal generator
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index 515bfe461c..58ff65b5d5 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -3,7 +3,7 @@ 
  */
 
 #include <unistd.h>
-#include <pthread.h>
+#include <rte_thread.h>
 
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
@@ -475,7 +475,7 @@  ark_pktgen_delay_start(void *arg)
 	 * perform a blind sleep here to ensure that the external test
 	 * application has time to setup the test before we generate packets
 	 */
-	pthread_detach(pthread_self());
+	rte_thread_detach(rte_thread_self());
 	usleep(100000);
 	ark_pktgen_run(inst);
 	return NULL;
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 0ce35eb519..2b86f01a25 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -405,7 +405,7 @@  eth_atl_dev_init(struct rte_eth_dev *eth_dev)
 
 	hw->aq_nic_cfg = &adapter->hw_cfg;
 
-	pthread_mutex_init(&hw->mbox_mutex, NULL);
+	rte_thread_mutex_init(&hw->mbox_mutex);
 
 	/* disable interrupt */
 	atl_disable_intr(hw);
@@ -712,7 +712,7 @@  atl_dev_close(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     atl_dev_interrupt_handler, dev);
 
-	pthread_mutex_destroy(&hw->mbox_mutex);
+	rte_thread_mutex_destroy(&hw->mbox_mutex);
 
 	return ret;
 }
diff --git a/drivers/net/atlantic/atl_types.h b/drivers/net/atlantic/atl_types.h
index e813d9f326..93e41aa26a 100644
--- a/drivers/net/atlantic/atl_types.h
+++ b/drivers/net/atlantic/atl_types.h
@@ -10,7 +10,8 @@ 
 #include <string.h>
 #include <stdbool.h>
 #include <netinet/in.h>
-#include <pthread.h>
+#include <rte_compat.h>
+#include <rte_thread.h>
 
 #include <rte_common.h>
 
@@ -141,7 +142,7 @@  struct aq_hw_s {
 	u32 rpc_tid;
 	struct hw_aq_atl_utils_fw_rpc rpc;
 
-	pthread_mutex_t mbox_mutex;
+	rte_thread_mutex_t mbox_mutex;
 };
 
 struct aq_fw_ops {
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 3a7faf405c..15b80584f8 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -6,7 +6,7 @@ 
  */
 
 #include <rte_ether.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include "../atl_hw_regs.h"
 
 #include "../atl_types.h"
@@ -218,7 +218,7 @@  int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
 	u32 mac_addr[2] = { 0 };
 	u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
 
-	pthread_mutex_lock(&self->mbox_mutex);
+	rte_thread_mutex_lock(&self->mbox_mutex);
 
 	if (efuse_addr != 0) {
 		err = hw_atl_utils_fw_downld_dwords(self,
@@ -257,7 +257,7 @@  int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
 	}
 
 exit:
-	pthread_mutex_unlock(&self->mbox_mutex);
+	rte_thread_mutex_unlock(&self->mbox_mutex);
 
 	return err;
 }
@@ -269,7 +269,7 @@  static int aq_fw2x_update_stats(struct aq_hw_s *self)
 	u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
 
 
-	pthread_mutex_lock(&self->mbox_mutex);
+	rte_thread_mutex_lock(&self->mbox_mutex);
 
 	/* Toggle statistics bit for FW to update */
 	mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
@@ -286,7 +286,7 @@  static int aq_fw2x_update_stats(struct aq_hw_s *self)
 	err = hw_atl_utils_update_stats(self);
 
 exit:
-	pthread_mutex_unlock(&self->mbox_mutex);
+	rte_thread_mutex_unlock(&self->mbox_mutex);
 
 	return err;
 
@@ -299,7 +299,7 @@  static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp)
 	u32 temp_val = mpi_opts & BIT(CAPS_HI_TEMPERATURE);
 	u32 temp_res;
 
-	pthread_mutex_lock(&self->mbox_mutex);
+	rte_thread_mutex_lock(&self->mbox_mutex);
 
 	/* Toggle statistics bit for FW to 0x36C.18 (CAPS_HI_TEMPERATURE) */
 	mpi_opts = mpi_opts ^ BIT(CAPS_HI_TEMPERATURE);
@@ -317,7 +317,7 @@  static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp)
 				sizeof(temp_res) / sizeof(u32));
 
 
-	pthread_mutex_unlock(&self->mbox_mutex);
+	rte_thread_mutex_unlock(&self->mbox_mutex);
 
 	if (err)
 		return err;
@@ -536,7 +536,7 @@  static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
 	if ((self->caps_lo & BIT(CAPS_LO_SMBUS_READ)) == 0)
 		return -EOPNOTSUPP;
 
-	pthread_mutex_lock(&self->mbox_mutex);
+	rte_thread_mutex_lock(&self->mbox_mutex);
 
 	request.msg_id = 0;
 	request.device_id = dev_addr;
@@ -605,7 +605,7 @@  static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
 	}
 
 exit:
-	pthread_mutex_unlock(&self->mbox_mutex);
+	rte_thread_mutex_unlock(&self->mbox_mutex);
 
 	return err;
 }
@@ -626,7 +626,7 @@  static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
 	request.address = offset;
 	request.length = len;
 
-	pthread_mutex_lock(&self->mbox_mutex);
+	rte_thread_mutex_lock(&self->mbox_mutex);
 
 	/* Write SMBUS request to cfg memory */
 	err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
@@ -694,7 +694,7 @@  static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
 	}
 
 exit:
-	pthread_mutex_unlock(&self->mbox_mutex);
+	rte_thread_mutex_unlock(&self->mbox_mutex);
 
 	return err;
 }
@@ -712,7 +712,7 @@  static int aq_fw2x_send_macsec_request(struct aq_hw_s *self,
 	if ((self->caps_lo & BIT(CAPS_LO_MACSEC)) == 0)
 		return -EOPNOTSUPP;
 
-	pthread_mutex_lock(&self->mbox_mutex);
+	rte_thread_mutex_lock(&self->mbox_mutex);
 
 	/* Write macsec request to cfg memory */
 	err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
@@ -742,7 +742,7 @@  static int aq_fw2x_send_macsec_request(struct aq_hw_s *self,
 		RTE_ALIGN(sizeof(*response) / sizeof(u32), sizeof(u32)));
 
 exit:
-	pthread_mutex_unlock(&self->mbox_mutex);
+	rte_thread_mutex_unlock(&self->mbox_mutex);
 
 	return err;
 }
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index df0aa21a9b..a7b892b806 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -19,7 +19,7 @@ 
 #include <stdarg.h>
 #include <unistd.h>
 #include <inttypes.h>
-#include <pthread.h>
+#include <rte_thread.h>
 
 #include <rte_bitops.h>
 #include <rte_byteorder.h>
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 786288a7b0..197c722901 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -167,12 +167,12 @@  static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
 	index = mmd_address & ~pdata->xpcs_window_mask;
 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
-	pthread_mutex_lock(&pdata->xpcs_mutex);
+	rte_thread_mutex_lock(&pdata->xpcs_mutex);
 
 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
 	mmd_data = XPCS16_IOREAD(pdata, offset);
 
-	pthread_mutex_unlock(&pdata->xpcs_mutex);
+	rte_thread_mutex_unlock(&pdata->xpcs_mutex);
 
 	return mmd_data;
 }
@@ -201,12 +201,12 @@  static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
 	index = mmd_address & ~pdata->xpcs_window_mask;
 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
-	pthread_mutex_lock(&pdata->xpcs_mutex);
+	rte_thread_mutex_lock(&pdata->xpcs_mutex);
 
 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
 	XPCS16_IOWRITE(pdata, offset, mmd_data);
 
-	pthread_mutex_unlock(&pdata->xpcs_mutex);
+	rte_thread_mutex_unlock(&pdata->xpcs_mutex);
 }
 
 static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 9cb4818af1..caf5a4476f 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -2311,10 +2311,10 @@  eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
 
 	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
 	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
-	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
-	pthread_mutex_init(&pdata->i2c_mutex, NULL);
-	pthread_mutex_init(&pdata->an_mutex, NULL);
-	pthread_mutex_init(&pdata->phy_mutex, NULL);
+	rte_thread_mutex_init(&pdata->xpcs_mutex);
+	rte_thread_mutex_init(&pdata->i2c_mutex);
+	rte_thread_mutex_init(&pdata->an_mutex);
+	rte_thread_mutex_init(&pdata->phy_mutex);
 
 	ret = pdata->phy_if.phy_init(pdata);
 	if (ret) {
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index a6226729fe..cfd436fa63 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -602,10 +602,10 @@  struct axgbe_port {
 	int phy_link;
 	int phy_speed;
 
-	pthread_mutex_t xpcs_mutex;
-	pthread_mutex_t i2c_mutex;
-	pthread_mutex_t an_mutex;
-	pthread_mutex_t phy_mutex;
+	rte_thread_mutex_t xpcs_mutex;
+	rte_thread_mutex_t i2c_mutex;
+	rte_thread_mutex_t an_mutex;
+	rte_thread_mutex_t phy_mutex;
 
 	/* Flow control settings */
 	unsigned int pause_autoneg;
diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c
index ab3738a12e..c17f9a8b9e 100644
--- a/drivers/net/axgbe/axgbe_i2c.c
+++ b/drivers/net/axgbe/axgbe_i2c.c
@@ -229,7 +229,7 @@  static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
 	int ret;
 	uint64_t timeout;
 
-	pthread_mutex_lock(&pdata->i2c_mutex);
+	rte_thread_mutex_lock(&pdata->i2c_mutex);
 	ret = axgbe_i2c_disable(pdata);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
@@ -282,7 +282,7 @@  static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
 	}
 
 unlock:
-	pthread_mutex_unlock(&pdata->i2c_mutex);
+	rte_thread_mutex_unlock(&pdata->i2c_mutex);
 	return ret;
 }
 
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
index 4f98e695ae..5f3d9b360e 100644
--- a/drivers/net/axgbe/axgbe_mdio.c
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -686,9 +686,9 @@  static void axgbe_an73_isr(struct axgbe_port *pdata)
 	if (pdata->an_int) {
 		/* Clear the interrupt(s) that fired and process them */
 		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
-		pthread_mutex_lock(&pdata->an_mutex);
+		rte_thread_mutex_lock(&pdata->an_mutex);
 		axgbe_an73_state_machine(pdata);
-		pthread_mutex_unlock(&pdata->an_mutex);
+		rte_thread_mutex_unlock(&pdata->an_mutex);
 	} else {
 		/* Enable AN interrupts */
 		axgbe_an73_enable_interrupts(pdata);
@@ -977,7 +977,7 @@  static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
 {
 	int ret;
 
-	pthread_mutex_lock(&pdata->an_mutex);
+	rte_thread_mutex_lock(&pdata->an_mutex);
 
 	ret = __axgbe_phy_config_aneg(pdata);
 	if (ret)
@@ -985,7 +985,7 @@  static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
 	else
 		rte_bit_relaxed_clear32(AXGBE_LINK_ERR, &pdata->dev_state);
 
-	pthread_mutex_unlock(&pdata->an_mutex);
+	rte_thread_mutex_unlock(&pdata->an_mutex);
 
 	return ret;
 }
diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
index 02236ec192..1e1d6358d8 100644
--- a/drivers/net/axgbe/axgbe_phy_impl.c
+++ b/drivers/net/axgbe/axgbe_phy_impl.c
@@ -403,7 +403,7 @@  static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
 
 	phy_data->comm_owned = 0;
 
-	pthread_mutex_unlock(&pdata->phy_mutex);
+	rte_thread_mutex_unlock(&pdata->phy_mutex);
 }
 
 static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
@@ -416,7 +416,7 @@  static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
 	 * the driver needs to take the software mutex and then the hardware
 	 * mutexes before being able to use the busses.
 	 */
-	pthread_mutex_lock(&pdata->phy_mutex);
+	rte_thread_mutex_lock(&pdata->phy_mutex);
 
 	if (phy_data->comm_owned)
 		return 0;
@@ -447,7 +447,7 @@  static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
 		return 0;
 	}
 
-	pthread_mutex_unlock(&pdata->phy_mutex);
+	rte_thread_mutex_unlock(&pdata->phy_mutex);
 
 	PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
 
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index db67bff127..64678ee145 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -534,8 +534,8 @@  struct bnxt_mark_info {
 
 struct bnxt_rep_info {
 	struct rte_eth_dev	*vfr_eth_dev;
-	pthread_mutex_t		vfr_lock;
-	pthread_mutex_t		vfr_start_lock;
+	rte_thread_mutex_t		vfr_lock;
+	rte_thread_mutex_t		vfr_start_lock;
 	bool			conduit_valid;
 };
 
@@ -687,7 +687,7 @@  struct bnxt {
 #define BNXT_FW_CAP_ADV_FLOW_COUNTERS	BIT(6)
 #define BNXT_FW_CAP_LINK_ADMIN		BIT(7)
 
-	pthread_mutex_t         flow_lock;
+	rte_thread_mutex_t         flow_lock;
 
 	uint32_t		vnic_cap_flags;
 #define BNXT_VNIC_CAP_COS_CLASSIFY	BIT(0)
@@ -741,18 +741,18 @@  struct bnxt {
 	rte_iova_t			hwrm_short_cmd_req_dma_addr;
 	rte_spinlock_t			hwrm_lock;
 	/* synchronize between dev_configure_op and int handler */
-	pthread_mutex_t			def_cp_lock;
+	rte_thread_mutex_t			def_cp_lock;
 	/* synchronize between dev_start_op and async evt handler
 	 * Locking sequence in async evt handler will be
 	 * def_cp_lock
 	 * health_check_lock
 	 */
-	pthread_mutex_t			health_check_lock;
+	rte_thread_mutex_t			health_check_lock;
 	/* synchronize between dev_stop/dev_close_op and
 	 * error recovery thread triggered as part of
 	 * HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY
 	 */
-	pthread_mutex_t			err_recovery_lock;
+	rte_thread_mutex_t			err_recovery_lock;
 	uint16_t			max_req_len;
 	uint16_t			max_resp_len;
 	uint16_t                        hwrm_max_ext_req_len;
@@ -944,10 +944,10 @@  uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 extern const struct rte_flow_ops bnxt_flow_ops;
 
 #define bnxt_acquire_flow_lock(bp) \
-	pthread_mutex_lock(&(bp)->flow_lock)
+	rte_thread_mutex_lock(&(bp)->flow_lock)
 
 #define bnxt_release_flow_lock(bp) \
-	pthread_mutex_unlock(&(bp)->flow_lock)
+	rte_thread_mutex_unlock(&(bp)->flow_lock)
 
 #define BNXT_VALID_VNIC_OR_RET(bp, vnic_id) do { \
 	if ((vnic_id) >= (bp)->max_vnics) { \
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 2c7fd78c3d..a50511047c 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -135,7 +135,7 @@  void bnxt_handle_async_event(struct bnxt *bp,
 			return;
 		}
 
-		pthread_mutex_lock(&bp->err_recovery_lock);
+		rte_thread_mutex_lock(&bp->err_recovery_lock);
 		event_data = rte_le_to_cpu_32(async_cmp->event_data1);
 		/* timestamp_lo/hi values are in units of 100ms */
 		bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
@@ -157,7 +157,7 @@  void bnxt_handle_async_event(struct bnxt *bp,
 		}
 
 		bp->flags |= BNXT_FLAG_FW_RESET;
-		pthread_mutex_unlock(&bp->err_recovery_lock);
+		rte_thread_mutex_unlock(&bp->err_recovery_lock);
 		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
 				  (void *)bp);
 		break;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 3778e28cca..d00c8011ba 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1049,7 +1049,7 @@  static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 		 * are calculated correctly.
 		 */
 
-		pthread_mutex_lock(&bp->def_cp_lock);
+		rte_thread_mutex_lock(&bp->def_cp_lock);
 
 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
 			bnxt_disable_int(bp);
@@ -1059,20 +1059,20 @@  static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
 		if (rc) {
 			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
-			pthread_mutex_unlock(&bp->def_cp_lock);
+			rte_thread_mutex_unlock(&bp->def_cp_lock);
 			return -ENOSPC;
 		}
 
 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
 			rc = bnxt_alloc_async_cp_ring(bp);
 			if (rc) {
-				pthread_mutex_unlock(&bp->def_cp_lock);
+				rte_thread_mutex_unlock(&bp->def_cp_lock);
 				return rc;
 			}
 			bnxt_enable_int(bp);
 		}
 
-		pthread_mutex_unlock(&bp->def_cp_lock);
+		rte_thread_mutex_unlock(&bp->def_cp_lock);
 	}
 
 	/* Inherit new configurations */
@@ -1456,14 +1456,14 @@  static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = eth_dev->data->dev_private;
 
-	pthread_mutex_lock(&bp->err_recovery_lock);
+	rte_thread_mutex_lock(&bp->err_recovery_lock);
 	if (bp->flags & BNXT_FLAG_FW_RESET) {
 		PMD_DRV_LOG(ERR,
 			    "Adapter recovering from error..Please retry\n");
-		pthread_mutex_unlock(&bp->err_recovery_lock);
+		rte_thread_mutex_unlock(&bp->err_recovery_lock);
 		return -EAGAIN;
 	}
-	pthread_mutex_unlock(&bp->err_recovery_lock);
+	rte_thread_mutex_unlock(&bp->err_recovery_lock);
 
 	return bnxt_dev_stop(eth_dev);
 }
@@ -1545,13 +1545,13 @@  static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 static void
 bnxt_uninit_locks(struct bnxt *bp)
 {
-	pthread_mutex_destroy(&bp->flow_lock);
-	pthread_mutex_destroy(&bp->def_cp_lock);
-	pthread_mutex_destroy(&bp->health_check_lock);
-	pthread_mutex_destroy(&bp->err_recovery_lock);
+	rte_thread_mutex_destroy(&bp->flow_lock);
+	rte_thread_mutex_destroy(&bp->def_cp_lock);
+	rte_thread_mutex_destroy(&bp->health_check_lock);
+	rte_thread_mutex_destroy(&bp->err_recovery_lock);
 	if (bp->rep_info) {
-		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
-		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
+		rte_thread_mutex_destroy(&bp->rep_info->vfr_lock);
+		rte_thread_mutex_destroy(&bp->rep_info->vfr_start_lock);
 	}
 }
 
@@ -1583,14 +1583,14 @@  static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	pthread_mutex_lock(&bp->err_recovery_lock);
+	rte_thread_mutex_lock(&bp->err_recovery_lock);
 	if (bp->flags & BNXT_FLAG_FW_RESET) {
 		PMD_DRV_LOG(ERR,
 			    "Adapter recovering from error...Please retry\n");
-		pthread_mutex_unlock(&bp->err_recovery_lock);
+		rte_thread_mutex_unlock(&bp->err_recovery_lock);
 		return -EAGAIN;
 	}
-	pthread_mutex_unlock(&bp->err_recovery_lock);
+	rte_thread_mutex_unlock(&bp->err_recovery_lock);
 
 	/* cancel the recovery handler before remove dev */
 	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
@@ -4108,7 +4108,7 @@  static void bnxt_dev_recover(void *arg)
 		goto err_start;
 
 	PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
-	pthread_mutex_unlock(&bp->err_recovery_lock);
+	rte_thread_mutex_unlock(&bp->err_recovery_lock);
 
 	return;
 err_start:
@@ -4116,7 +4116,7 @@  static void bnxt_dev_recover(void *arg)
 err:
 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
 	bnxt_uninit_resources(bp, false);
-	pthread_mutex_unlock(&bp->err_recovery_lock);
+	rte_thread_mutex_unlock(&bp->err_recovery_lock);
 	PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
 }
 
@@ -4292,7 +4292,7 @@  void bnxt_schedule_fw_health_check(struct bnxt *bp)
 {
 	uint32_t polling_freq;
 
-	pthread_mutex_lock(&bp->health_check_lock);
+	rte_thread_mutex_lock(&bp->health_check_lock);
 
 	if (!bnxt_is_recovery_enabled(bp))
 		goto done;
@@ -4307,7 +4307,7 @@  void bnxt_schedule_fw_health_check(struct bnxt *bp)
 	bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
 
 done:
-	pthread_mutex_unlock(&bp->health_check_lock);
+	rte_thread_mutex_unlock(&bp->health_check_lock);
 }
 
 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
@@ -5093,25 +5093,25 @@  bnxt_init_locks(struct bnxt *bp)
 {
 	int err;
 
-	err = pthread_mutex_init(&bp->flow_lock, NULL);
+	err = rte_thread_mutex_init(&bp->flow_lock);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
 		return err;
 	}
 
-	err = pthread_mutex_init(&bp->def_cp_lock, NULL);
+	err = rte_thread_mutex_init(&bp->def_cp_lock);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
 		return err;
 	}
 
-	err = pthread_mutex_init(&bp->health_check_lock, NULL);
+	err = rte_thread_mutex_init(&bp->health_check_lock);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
 		return err;
 	}
 
-	err = pthread_mutex_init(&bp->err_recovery_lock, NULL);
+	err = rte_thread_mutex_init(&bp->err_recovery_lock);
 	if (err)
 		PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n");
 
@@ -5894,14 +5894,14 @@  static int bnxt_init_rep_info(struct bnxt *bp)
 	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
 		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
 
-	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
+	rc = rte_thread_mutex_init(&bp->rep_info->vfr_lock);
 	if (rc) {
 		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
 		bnxt_free_rep_info(bp);
 		return rc;
 	}
 
-	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
+	rc = rte_thread_mutex_init(&bp->rep_info->vfr_start_lock);
 	if (rc) {
 		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
 		bnxt_free_rep_info(bp);
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 8abbadb3d1..e1d980cf91 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -30,15 +30,15 @@  void bnxt_int_handler(void *param)
 		return;
 
 	raw_cons = cpr->cp_raw_cons;
-	pthread_mutex_lock(&bp->def_cp_lock);
+	rte_thread_mutex_lock(&bp->def_cp_lock);
 	while (1) {
 		if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) {
-			pthread_mutex_unlock(&bp->def_cp_lock);
+			rte_thread_mutex_unlock(&bp->def_cp_lock);
 			return;
 		}
 
 		if (is_bnxt_in_error(bp)) {
-			pthread_mutex_unlock(&bp->def_cp_lock);
+			rte_thread_mutex_unlock(&bp->def_cp_lock);
 			return;
 		}
 
@@ -58,7 +58,7 @@  void bnxt_int_handler(void *param)
 	else
 		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
 
-	pthread_mutex_unlock(&bp->def_cp_lock);
+	rte_thread_mutex_unlock(&bp->def_cp_lock);
 }
 
 int bnxt_free_int(struct bnxt *bp)
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index bdbad53b7d..49d3ec66bc 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -120,7 +120,7 @@  bnxt_rep_tx_burst(void *tx_queue,
 	qid = vfr_txq->txq->queue_id;
 	vf_rep_bp = vfr_txq->bp;
 	parent = vf_rep_bp->parent_dev->data->dev_private;
-	pthread_mutex_lock(&parent->rep_info->vfr_lock);
+	rte_thread_mutex_lock(&parent->rep_info->vfr_lock);
 	ptxq = parent->tx_queues[qid];
 
 	ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
@@ -132,7 +132,7 @@  bnxt_rep_tx_burst(void *tx_queue,
 
 	rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
 	ptxq->vfr_tx_cfa_action = 0;
-	pthread_mutex_unlock(&parent->rep_info->vfr_lock);
+	rte_thread_mutex_unlock(&parent->rep_info->vfr_lock);
 
 	return rc;
 }
@@ -407,15 +407,15 @@  int bnxt_rep_dev_start_op(struct rte_eth_dev *eth_dev)
 	rep_info = &parent_bp->rep_info[rep_bp->vf_id];
 
 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR start\n", eth_dev->data->port_id);
-	pthread_mutex_lock(&rep_info->vfr_start_lock);
+	rte_thread_mutex_lock(&rep_info->vfr_start_lock);
 	if (!rep_info->conduit_valid) {
 		rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
 		if (rc || !rep_info->conduit_valid) {
-			pthread_mutex_unlock(&rep_info->vfr_start_lock);
+			rte_thread_mutex_unlock(&rep_info->vfr_start_lock);
 			return rc;
 		}
 	}
-	pthread_mutex_unlock(&rep_info->vfr_start_lock);
+	rte_thread_mutex_unlock(&rep_info->vfr_start_lock);
 
 	rc = bnxt_vfr_alloc(eth_dev);
 	if (rc) {
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 5c805eef97..455d83bef8 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -28,7 +28,7 @@  STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
 			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
 
 /* Mutex to synchronize bnxt_ulp_session_list operations. */
-static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t bnxt_ulp_global_mutex = RTE_THREAD_MUTEX_INITIALIZER;
 
 /*
  * Allow the deletion of context only for the bnxt device that
@@ -640,7 +640,7 @@  ulp_ctx_detach(struct bnxt *bp)
 static void
 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
 {
-	pthread_mutex_lock(&session->bnxt_ulp_mutex);
+	rte_thread_mutex_lock(&session->bnxt_ulp_mutex);
 
 	if (!session->bnxt_ulp_init) {
 		session->bnxt_ulp_init = true;
@@ -649,7 +649,7 @@  ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
 		*init = true;
 	}
 
-	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
+	rte_thread_mutex_unlock(&session->bnxt_ulp_mutex);
 }
 
 /*
@@ -690,7 +690,7 @@  ulp_session_init(struct bnxt *bp,
 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
 	pci_addr = &pci_dev->addr;
 
-	pthread_mutex_lock(&bnxt_ulp_global_mutex);
+	rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
 
 	session = ulp_get_session(pci_addr);
 	if (!session) {
@@ -701,17 +701,17 @@  ulp_session_init(struct bnxt *bp,
 		if (!session) {
 			BNXT_TF_DBG(ERR,
 				    "Allocation failed for bnxt_ulp_session\n");
-			pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+			rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
 			return NULL;
 
 		} else {
 			/* Add it to the queue */
 			session->pci_info.domain = pci_addr->domain;
 			session->pci_info.bus = pci_addr->bus;
-			rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
+			rc = rte_thread_mutex_init(&session->bnxt_ulp_mutex);
 			if (rc) {
 				BNXT_TF_DBG(ERR, "mutex create failed\n");
-				pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+				rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
 				return NULL;
 			}
 			STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
@@ -719,7 +719,7 @@  ulp_session_init(struct bnxt *bp,
 		}
 	}
 	ulp_context_initialized(session, init);
-	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+	rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
 	return session;
 }
 
@@ -734,12 +734,12 @@  ulp_session_deinit(struct bnxt_ulp_session_state *session)
 		return;
 
 	if (!session->cfg_data) {
-		pthread_mutex_lock(&bnxt_ulp_global_mutex);
+		rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
 		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
 			      bnxt_ulp_session_state, next);
-		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
+		rte_thread_mutex_destroy(&session->bnxt_ulp_mutex);
 		rte_free(session);
-		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+		rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
 	}
 }
 
@@ -892,7 +892,7 @@  bnxt_ulp_deinit(struct bnxt *bp,
 					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
 
 	/* free the flow db lock */
-	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
+	rte_thread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
 
 	/* Delete the ulp context and tf session and free the ulp context */
 	ulp_ctx_deinit(bp, session);
@@ -917,7 +917,7 @@  bnxt_ulp_init(struct bnxt *bp,
 		goto jump_to_error;
 	}
 
-	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
+	rc = rte_thread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock);
 	if (rc) {
 		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
 		goto jump_to_error;
@@ -1117,9 +1117,9 @@  bnxt_ulp_port_deinit(struct bnxt *bp)
 	/* Get the session details  */
 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
 	pci_addr = &pci_dev->addr;
-	pthread_mutex_lock(&bnxt_ulp_global_mutex);
+	rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
 	session = ulp_get_session(pci_addr);
-	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+	rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
 
 	/* session not found then just exit */
 	if (!session) {
@@ -1451,7 +1451,7 @@  bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
 	if (!ulp_ctx || !ulp_ctx->cfg_data)
 		return -1;
 
-	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
+	if (rte_thread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
 		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
 		return -1;
 	}
@@ -1465,5 +1465,5 @@  bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
 	if (!ulp_ctx || !ulp_ctx->cfg_data)
 		return;
 
-	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
+	rte_thread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
 }
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index 330965061a..88d64d703f 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -49,7 +49,7 @@  struct bnxt_ulp_data {
 	uint32_t			dev_id; /* Hardware device id */
 	uint32_t			ref_cnt;
 	struct bnxt_ulp_flow_db		*flow_db;
-	pthread_mutex_t			flow_db_lock;
+	rte_thread_mutex_t			flow_db_lock;
 	void				*mapper_data;
 	struct bnxt_ulp_port_db		*port_db;
 	struct bnxt_ulp_fc_info		*fc_info;
@@ -75,7 +75,7 @@  struct bnxt_ulp_pci_info {
 struct bnxt_ulp_session_state {
 	STAILQ_ENTRY(bnxt_ulp_session_state)	next;
 	bool					bnxt_ulp_init;
-	pthread_mutex_t				bnxt_ulp_mutex;
+	rte_thread_mutex_t				bnxt_ulp_mutex;
 	struct bnxt_ulp_pci_info		pci_info;
 	struct bnxt_ulp_data			*cfg_data;
 	struct tf				*g_tfp;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index 3eddbd6831..26c3e7551c 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -84,7 +84,7 @@  ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
 	if (!ulp_fc_info)
 		goto error;
 
-	rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
+	rc = rte_thread_mutex_init(&ulp_fc_info->fc_lock);
 	if (rc) {
 		PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
 		goto error;
@@ -141,7 +141,7 @@  ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
 
 	ulp_fc_mgr_thread_cancel(ctxt);
 
-	pthread_mutex_destroy(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_destroy(&ulp_fc_info->fc_lock);
 
 	for (i = 0; i < TF_DIR_MAX; i++)
 		rte_free(ulp_fc_info->sw_acc_tbl[i]);
@@ -383,7 +383,7 @@  ulp_fc_mgr_alarm_cb(void *arg)
 		goto out;
 
 	if (!ulp_fc_info->num_entries) {
-		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+		rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
 		ulp_fc_mgr_thread_cancel(ctxt);
 		return;
 	}
@@ -414,7 +414,7 @@  ulp_fc_mgr_alarm_cb(void *arg)
 		}
 	}
 
-	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
 
 	/*
 	 * If cmd fails once, no need of
@@ -503,12 +503,12 @@  int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
 	if (!ulp_fc_info)
 		return -EIO;
 
-	pthread_mutex_lock(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
 	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
 	ulp_fc_info->num_entries++;
-	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
 
 	return 0;
 }
@@ -535,14 +535,14 @@  int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
 	if (!ulp_fc_info)
 		return -EIO;
 
-	pthread_mutex_lock(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
 	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
 	ulp_fc_info->num_entries--;
-	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
 
 	return 0;
 }
@@ -607,7 +607,7 @@  int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
 	hw_cntr_id = params.resource_hndl;
 	if (params.resource_sub_type ==
 			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) {
-		pthread_mutex_lock(&ulp_fc_info->fc_lock);
+		rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
 		sw_cntr_idx = hw_cntr_id -
 			ulp_fc_info->shadow_hw_tbl[dir].start_idx;
 		sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
@@ -621,7 +621,7 @@  int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
 			sw_acc_tbl_entry->pkt_count = 0;
 			sw_acc_tbl_entry->byte_count = 0;
 		}
-		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+		rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
 	} else if (params.resource_sub_type ==
 			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT_ACC) {
 		/* Get stats from the parent child table */
@@ -663,7 +663,7 @@  int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
 	if (!ulp_fc_info)
 		return -EIO;
 
-	pthread_mutex_lock(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
 	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
 	if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
 		ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
@@ -672,7 +672,7 @@  int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
 			    hw_cntr_id, fid);
 		rc = -ENOENT;
 	}
-	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+	rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
 
 	return rc;
 }
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h
index 04cb86bea2..cb6bbcad6c 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h
@@ -47,7 +47,7 @@  struct bnxt_ulp_fc_info {
 	struct hw_fc_mem_info	shadow_hw_tbl[TF_DIR_MAX];
 	uint32_t		flags;
 	uint32_t		num_entries;
-	pthread_mutex_t		fc_lock;
+	rte_thread_mutex_t		fc_lock;
 };
 
 int32_t
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index f66df95591..923cef1b12 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -148,14 +148,14 @@  extern int ena_logtype_com;
 
 typedef struct {
 	pthread_cond_t cond;
-	pthread_mutex_t mutex;
+	rte_thread_mutex_t mutex;
 	uint8_t flag;
 } ena_wait_event_t;
 
 #define ENA_WAIT_EVENT_INIT(waitevent)					       \
 	do {								       \
 		ena_wait_event_t *_we = &(waitevent);			       \
-		pthread_mutex_init(&_we->mutex, NULL);			       \
+		rte_thread_mutex_init(&_we->mutex);				       \
 		pthread_cond_init(&_we->cond, NULL);			       \
 		_we->flag = 0;						       \
 	} while (0)
@@ -172,7 +172,7 @@  typedef struct {
 		wait.tv_sec = now.tv_sec + _tmo / 1000000UL;		       \
 		timeout_us = _tmo % 1000000UL;				       \
 		wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL;	       \
-		pthread_mutex_lock(&_we->mutex);			       \
+		rte_thread_mutex_lock(&_we->mutex);			       \
 		while (ret == 0 && !_we->flag) {			       \
 			ret = pthread_cond_timedwait(&_we->cond,	       \
 				&_we->mutex, &wait);			       \
@@ -185,15 +185,15 @@  typedef struct {
 			ena_trc_err(NULL,				       \
 				"Timeout waiting for " #waitevent "\n");       \
 		_we->flag = 0;						       \
-		pthread_mutex_unlock(&_we->mutex);			       \
+		rte_thread_mutex_unlock(&_we->mutex);			       \
 	} while (0)
 #define ENA_WAIT_EVENT_SIGNAL(waitevent)				       \
 	do {								       \
 		ena_wait_event_t *_we = &(waitevent);			       \
-		pthread_mutex_lock(&_we->mutex);			       \
+		rte_thread_mutex_lock(&_we->mutex);			       \
 		_we->flag = 1;						       \
 		pthread_cond_signal(&_we->cond);			       \
-		pthread_mutex_unlock(&_we->mutex);			       \
+		rte_thread_mutex_unlock(&_we->mutex);			       \
 	} while (0)
 /* pthread condition doesn't need to be rearmed after usage */
 #define ENA_WAIT_EVENT_CLEAR(...)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 47bfdac2cf..db07c589b9 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -107,7 +107,7 @@  struct enic {
 	int iommu_groupid;
 	int eventfd;
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
-	pthread_t err_intr_thread;
+	rte_thread_t err_intr_thread;
 	int promisc;
 	int allmulti;
 	uint8_t ig_vlan_strip_en;
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 1d7aa8bc87..99eb940b87 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -3,7 +3,7 @@ 
  */
 #include <sys/types.h>
 #include <sys/stat.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include <unistd.h>
 
 #include <rte_spinlock.h>
@@ -121,7 +121,7 @@  ice_dcf_vsi_update_service_handler(void *param)
 	struct ice_dcf_hw *hw = reset_param->dcf_hw;
 	struct ice_dcf_adapter *adapter;
 
-	pthread_detach(pthread_self());
+	rte_thread_detach(rte_thread_self());
 
 	rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
 
@@ -156,7 +156,7 @@  start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
 #define THREAD_NAME_LEN	16
 	struct ice_dcf_reset_event_param *param;
 	char name[THREAD_NAME_LEN];
-	pthread_t thread;
+	rte_thread_t thread;
 	int ret;
 
 	param = malloc(sizeof(*param));
diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c
index 589d9fa587..97e36a2e0e 100644
--- a/drivers/net/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/ipn3ke/ipn3ke_representor.c
@@ -27,7 +27,7 @@ 
 #include "ipn3ke_ethdev.h"
 
 static int ipn3ke_rpst_scan_num;
-static pthread_t ipn3ke_rpst_scan_thread;
+static rte_thread_t ipn3ke_rpst_scan_thread;
 
 /** Double linked list of representor port. */
 TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst);
@@ -2614,11 +2614,11 @@  ipn3ke_rpst_scan_check(void)
 			return -1;
 		}
 	} else if (ipn3ke_rpst_scan_num == 0) {
-		ret = pthread_cancel(ipn3ke_rpst_scan_thread);
+		ret = rte_thread_cancel(ipn3ke_rpst_scan_thread);
 		if (ret)
 			IPN3KE_AFU_PMD_ERR("Can't cancel the thread");
 
-		ret = pthread_join(ipn3ke_rpst_scan_thread, NULL);
+		ret = rte_thread_join(ipn3ke_rpst_scan_thread, NULL);
 		if (ret)
 			IPN3KE_AFU_PMD_ERR("Can't join the thread");
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b5371568b5..33c82474ad 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -4156,7 +4156,7 @@  ixgbe_dev_setup_link_thread_handler(void *param)
 	u32 speed;
 	bool autoneg = false;
 
-	pthread_detach(pthread_self());
+	rte_thread_detach(rte_thread_self());
 	speed = hw->phy.autoneg_advertised;
 	if (!speed)
 		ixgbe_get_link_capabilities(hw, &speed, &autoneg);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a0ce18ca24..272fdcfb8d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -516,7 +516,7 @@  struct ixgbe_adapter {
 	uint8_t pflink_fullchk;
 	uint8_t mac_ctrl_frame_fwd;
 	rte_atomic32_t link_thread_running;
-	pthread_t link_thread_tid;
+	rte_thread_t link_thread_tid;
 };
 
 struct ixgbe_vf_representor {
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index 871d11c413..5026305362 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -3,7 +3,7 @@ 
  */
 
 #include <fcntl.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include <unistd.h>
 
 #include <rte_string_fns.h>
@@ -50,7 +50,7 @@  struct pmd_internals {
 	uint16_t port_id;
 	int is_kni_started;
 
-	pthread_t thread;
+	rte_thread_t thread;
 	int stop_thread;
 	int no_request_thread;
 
@@ -186,11 +186,11 @@  eth_kni_dev_stop(struct rte_eth_dev *dev)
 	if (internals->no_request_thread == 0 && internals->stop_thread == 0) {
 		internals->stop_thread = 1;
 
-		ret = pthread_cancel(internals->thread);
+		ret = rte_thread_cancel(internals->thread);
 		if (ret)
 			PMD_LOG(ERR, "Can't cancel the thread");
 
-		ret = pthread_join(internals->thread, NULL);
+		ret = rte_thread_join(internals->thread, NULL);
 		if (ret)
 			PMD_LOG(ERR, "Can't join the thread");
 	}
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 534a56a555..15080bbc1c 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -2557,7 +2557,7 @@  mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
 	int err = 0;
 
 	sh->numa_node = spawn->pci_dev->device.numa_node;
-	pthread_mutex_init(&sh->txpp.mutex, NULL);
+	rte_thread_mutex_init(&sh->txpp.mutex);
 	/*
 	 * Configure environment variable "MLX5_BF_SHUT_UP"
 	 * before the device creation. The rdma_core library
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cf1815cb74..5858659183 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -183,7 +183,7 @@  int mlx5_logtype;
 
 static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 						LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_dev_ctx_list_mutex;
+static rte_thread_mutex_t mlx5_dev_ctx_list_mutex;
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	[MLX5_IPOOL_DECAP_ENCAP] = {
@@ -1088,7 +1088,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	MLX5_ASSERT(spawn);
 	/* Secondary process should not create the shared context. */
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+	rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 	/* Search for IB context by device name. */
 	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
 		if (!strcmp(sh->ibdev_name,
@@ -1215,11 +1215,11 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
 	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
 exit:
-	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	return sh;
 error:
-	pthread_mutex_destroy(&sh->txpp.mutex);
-	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	rte_thread_mutex_destroy(&sh->txpp.mutex);
+	rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	MLX5_ASSERT(sh);
 	if (sh->cnt_id_tbl)
 		mlx5_l3t_destroy(sh->cnt_id_tbl);
@@ -1251,7 +1251,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 void
 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 {
-	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+	rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	/* Check the object presence in the list. */
 	struct mlx5_dev_ctx_shared *lctx;
@@ -1282,7 +1282,7 @@  mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	/* Release flow workspaces objects on the last device. */
 	if (LIST_EMPTY(&mlx5_dev_ctx_list))
 		mlx5_flow_os_release_workspace();
-	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	/*
 	 *  Ensure there is no async event handler installed.
 	 *  Only primary process handles async device events.
@@ -1315,11 +1315,11 @@  mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
 	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
-	pthread_mutex_destroy(&sh->txpp.mutex);
+	rte_thread_mutex_destroy(&sh->txpp.mutex);
 	mlx5_free(sh);
 	return;
 exit:
-	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 }
 
 /**
@@ -2450,7 +2450,7 @@  RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE)
  */
 RTE_INIT(rte_mlx5_pmd_init)
 {
-	pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
+	rte_thread_mutex_init(&mlx5_dev_ctx_list_mutex);
 	mlx5_common_init();
 	/* Build the static tables for Verbs conversion. */
 	mlx5_set_ptype_table();
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 32b2817bf2..74df9f4355 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -949,7 +949,7 @@  struct mlx5_txpp_ts {
 
 /* Tx packet pacing structure. */
 struct mlx5_dev_txpp {
-	pthread_mutex_t mutex; /* Pacing create/destroy mutex. */
+	rte_thread_mutex_t mutex; /* Pacing create/destroy mutex. */
 	uint32_t refcnt; /* Pacing reference counter. */
 	uint32_t freq; /* Timestamp frequency, Hz. */
 	uint32_t tick; /* Completion tick duration in nanoseconds. */
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index d90399afb5..e308c5e599 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -905,7 +905,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 		if (ret < 0)
 			return 0;
 	}
-	ret = pthread_mutex_lock(&sh->txpp.mutex);
+	ret = rte_thread_mutex_lock(&sh->txpp.mutex);
 	MLX5_ASSERT(!ret);
 	RTE_SET_USED(ret);
 	if (sh->txpp.refcnt) {
@@ -921,7 +921,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 			rte_errno = -err;
 		}
 	}
-	ret = pthread_mutex_unlock(&sh->txpp.mutex);
+	ret = rte_thread_mutex_unlock(&sh->txpp.mutex);
 	MLX5_ASSERT(!ret);
 	RTE_SET_USED(ret);
 	return err;
@@ -948,7 +948,7 @@  mlx5_txpp_stop(struct rte_eth_dev *dev)
 		return;
 	}
 	priv->txpp_en = 0;
-	ret = pthread_mutex_lock(&sh->txpp.mutex);
+	ret = rte_thread_mutex_lock(&sh->txpp.mutex);
 	MLX5_ASSERT(!ret);
 	RTE_SET_USED(ret);
 	MLX5_ASSERT(sh->txpp.refcnt);
@@ -956,7 +956,7 @@  mlx5_txpp_stop(struct rte_eth_dev *dev)
 		return;
 	/* No references any more, do actual destroy. */
 	mlx5_txpp_destroy(sh);
-	ret = pthread_mutex_unlock(&sh->txpp.mutex);
+	ret = rte_thread_mutex_unlock(&sh->txpp.mutex);
 	MLX5_ASSERT(!ret);
 	RTE_SET_USED(ret);
 }
diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.c b/drivers/net/mlx5/windows/mlx5_flow_os.c
index c4d5790726..229bad3ea7 100644
--- a/drivers/net/mlx5/windows/mlx5_flow_os.c
+++ b/drivers/net/mlx5/windows/mlx5_flow_os.c
@@ -253,7 +253,7 @@  struct mlx5_workspace_thread {
 static struct mlx5_workspace_thread *curr;
 static struct mlx5_workspace_thread *first;
 rte_thread_key ws_tls_index;
-static pthread_mutex_t lock_thread_list;
+static rte_thread_mutex_t lock_thread_list;
 
 static bool
 mlx5_is_thread_alive(HANDLE thread_handle)
@@ -330,7 +330,7 @@  mlx5_flow_os_release_workspace(void)
 		free(first);
 	}
 	rte_thread_key_delete(ws_tls_index);
-	pthread_mutex_destroy(&lock_thread_list);
+	rte_thread_mutex_destroy(&lock_thread_list);
 }
 
 static int
@@ -352,7 +352,7 @@  mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
 	}
 	temp->mlx5_ws = data;
 	temp->thread_handle = curr_thread;
-	pthread_mutex_lock(&lock_thread_list);
+	rte_thread_mutex_lock(&lock_thread_list);
 	mlx5_clear_thread_list();
 	if (!first) {
 		first = temp;
@@ -361,7 +361,7 @@  mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
 		curr->next = temp;
 		curr = curr->next;
 	}
-	pthread_mutex_unlock(&lock_thread_list);
+	rte_thread_mutex_unlock(&lock_thread_list);
 	return 0;
 }
 
@@ -374,7 +374,7 @@  mlx5_flow_os_init_workspace_once(void)
 		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
 		return err;
 	}
-	pthread_mutex_init(&lock_thread_list, NULL);
+	rte_thread_mutex_init(&lock_thread_list);
 	return 0;
 }
 
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 3fe3f55f49..e37e16d70f 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -228,7 +228,7 @@  mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
 	int err = 0;
 	struct mlx5_context *mlx5_ctx;
 
-	pthread_mutex_init(&sh->txpp.mutex, NULL);
+	rte_thread_mutex_init(&sh->txpp.mutex);
 	/* Set numa node from pci probe */
 	sh->numa_node = spawn->pci_dev->device.numa_node;
 
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index c5b5399282..f0cb71c93d 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -153,10 +153,10 @@  void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
 
 /* Mutexes */
 
-typedef pthread_mutex_t osal_mutex_t;
-#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
-#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
-#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+typedef rte_thread_mutex_t osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) rte_thread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) rte_thread_mutex_init(lock)
+#define OSAL_MUTEX_ACQUIRE(lock) rte_thread_mutex_lock(lock)
 #define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
 #define OSAL_MUTEX_DEALLOC(lock) nothing
 
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a202931e9a..cbab53fb51 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -3,7 +3,7 @@ 
  * Copyright(c) 2016-2018 Intel Corporation
  */
 #include <unistd.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include <stdbool.h>
 #include <sys/epoll.h>
 
@@ -121,7 +121,7 @@  TAILQ_HEAD(internal_list_head, internal_list);
 static struct internal_list_head internal_list =
 	TAILQ_HEAD_INITIALIZER(internal_list);
 
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 static struct rte_eth_link pmd_link = {
 		.link_speed = 10000,
@@ -507,7 +507,7 @@  find_internal_resource(char *ifname)
 	if (ifname == NULL)
 		return NULL;
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 
 	TAILQ_FOREACH(list, &internal_list, next) {
 		internal = list->eth_dev->data->dev_private;
@@ -517,7 +517,7 @@  find_internal_resource(char *ifname)
 		}
 	}
 
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	if (!found)
 		return NULL;
@@ -1001,9 +1001,9 @@  vhost_driver_setup(struct rte_eth_dev *eth_dev)
 		goto free_list;
 
 	list->eth_dev = eth_dev;
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 	TAILQ_INSERT_TAIL(&internal_list, list, next);
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	rte_spinlock_init(&vring_state->lock);
 	vring_states[eth_dev->data->port_id] = vring_state;
@@ -1035,9 +1035,9 @@  vhost_driver_setup(struct rte_eth_dev *eth_dev)
 	rte_vhost_driver_unregister(internal->iface_name);
 list_remove:
 	vring_states[eth_dev->data->port_id] = NULL;
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 	TAILQ_REMOVE(&internal_list, list, next);
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 	rte_free(vring_state);
 free_list:
 	rte_free(list);
@@ -1093,7 +1093,7 @@  rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
 	if (!rte_eth_dev_is_valid_port(port_id))
 		return -1;
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 
 	TAILQ_FOREACH(list, &internal_list, next) {
 		eth_dev = list->eth_dev;
@@ -1106,7 +1106,7 @@  rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
 		}
 	}
 
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	return vid;
 }
@@ -1184,9 +1184,9 @@  eth_dev_close(struct rte_eth_dev *dev)
 	list = find_internal_resource(internal->iface_name);
 	if (list) {
 		rte_vhost_driver_unregister(internal->iface_name);
-		pthread_mutex_lock(&internal_list_lock);
+		rte_thread_mutex_lock(&internal_list_lock);
 		TAILQ_REMOVE(&internal_list, list, next);
-		pthread_mutex_unlock(&internal_list_lock);
+		rte_thread_mutex_unlock(&internal_list_lock);
 		rte_free(list);
 	}
 
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 364f43e21c..9231da676b 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -143,7 +143,7 @@  virtio_user_dev_set_features(struct virtio_user_dev *dev)
 	uint64_t features;
 	int ret = -1;
 
-	pthread_mutex_lock(&dev->mutex);
+	rte_thread_mutex_lock(&dev->mutex);
 
 	/* Step 0: tell vhost to create queues */
 	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
@@ -161,7 +161,7 @@  virtio_user_dev_set_features(struct virtio_user_dev *dev)
 		goto error;
 	PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
 error:
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 
 	return ret;
 }
@@ -185,7 +185,7 @@  virtio_user_start_device(struct virtio_user_dev *dev)
 	 * memory subsystem in the future.
 	 */
 	rte_mcfg_mem_read_lock();
-	pthread_mutex_lock(&dev->mutex);
+	rte_thread_mutex_lock(&dev->mutex);
 
 	/* Step 2: share memory regions */
 	ret = dev->ops->set_memory_table(dev);
@@ -206,12 +206,12 @@  virtio_user_start_device(struct virtio_user_dev *dev)
 
 	dev->started = true;
 
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 	rte_mcfg_mem_read_unlock();
 
 	return 0;
 error:
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 	rte_mcfg_mem_read_unlock();
 
 	PMD_INIT_LOG(ERR, "(%s) Failed to start device\n", dev->path);
@@ -226,7 +226,7 @@  int virtio_user_stop_device(struct virtio_user_dev *dev)
 	uint32_t i;
 	int ret;
 
-	pthread_mutex_lock(&dev->mutex);
+	rte_thread_mutex_lock(&dev->mutex);
 	if (!dev->started)
 		goto out;
 
@@ -249,11 +249,11 @@  int virtio_user_stop_device(struct virtio_user_dev *dev)
 	dev->started = false;
 
 out:
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 
 	return 0;
 err:
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 
 	PMD_INIT_LOG(ERR, "(%s) Failed to stop device\n", dev->path);
 
@@ -380,7 +380,7 @@  virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
 	if (msl->external)
 		return;
 
-	pthread_mutex_lock(&dev->mutex);
+	rte_thread_mutex_lock(&dev->mutex);
 
 	if (dev->started == false)
 		goto exit;
@@ -405,7 +405,7 @@  virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
 	}
 
 exit:
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 
 	if (ret < 0)
 		PMD_DRV_LOG(ERR, "(%s) Failed to update memory table\n", dev->path);
@@ -491,7 +491,7 @@  virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 	uint64_t backend_features;
 	int i;
 
-	pthread_mutex_init(&dev->mutex, NULL);
+	rte_thread_mutex_init(&dev->mutex);
 	strlcpy(dev->path, path, PATH_MAX);
 
 	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
@@ -796,13 +796,13 @@  virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
 {
 	int ret;
 
-	pthread_mutex_lock(&dev->mutex);
+	rte_thread_mutex_lock(&dev->mutex);
 	dev->status = status;
 	ret = dev->ops->set_status(dev, status);
 	if (ret && ret != -ENOTSUP)
 		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status\n", dev->path);
 
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 	return ret;
 }
 
@@ -812,7 +812,7 @@  virtio_user_dev_update_status(struct virtio_user_dev *dev)
 	int ret;
 	uint8_t status;
 
-	pthread_mutex_lock(&dev->mutex);
+	rte_thread_mutex_lock(&dev->mutex);
 
 	ret = dev->ops->get_status(dev, &status);
 	if (!ret) {
@@ -837,7 +837,7 @@  virtio_user_dev_update_status(struct virtio_user_dev *dev)
 		PMD_INIT_LOG(ERR, "(%s) Failed to get backend status\n", dev->path);
 	}
 
-	pthread_mutex_unlock(&dev->mutex);
+	rte_thread_mutex_unlock(&dev->mutex);
 	return ret;
 }
 
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 58ad5198b6..8c2df84d74 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -55,7 +55,7 @@  struct virtio_user_dev {
 	bool		qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS];
 
 	struct virtio_user_backend_ops *ops;
-	pthread_mutex_t	mutex;
+	rte_thread_mutex_t	mutex;
 	bool		started;
 
 	void *backend_data;
diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c
index 76e6a8530b..5afedf4d05 100644
--- a/drivers/raw/ifpga/ifpga_rawdev.c
+++ b/drivers/raw/ifpga/ifpga_rawdev.c
@@ -69,7 +69,7 @@  static const struct rte_pci_id pci_ifpga_map[] = {
 static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
 
 static int ifpga_monitor_start;
-static pthread_t ifpga_monitor_start_thread;
+static rte_thread_t ifpga_monitor_start_thread;
 
 #define IFPGA_MAX_IRQ 12
 /* 0 for FME interrupt, others are reserved for AFU irq */
@@ -545,11 +545,11 @@  ifpga_monitor_stop_func(void)
 	int ret;
 
 	if (ifpga_monitor_start == 1) {
-		ret = pthread_cancel(ifpga_monitor_start_thread);
+		ret = rte_thread_cancel(ifpga_monitor_start_thread);
 		if (ret)
 			IFPGA_RAWDEV_PMD_ERR("Can't cancel the thread");
 
-		ret = pthread_join(ifpga_monitor_start_thread, NULL);
+		ret = rte_thread_join(ifpga_monitor_start_thread, NULL);
 		if (ret)
 			IFPGA_RAWDEV_PMD_ERR("Can't join the thread");
 
diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
index 1dc813d0a3..7a90237ba2 100644
--- a/drivers/vdpa/ifc/ifcvf_vdpa.c
+++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
@@ -3,7 +3,7 @@ 
  */
 
 #include <unistd.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include <fcntl.h>
 #include <string.h>
 #include <sys/ioctl.h>
@@ -52,7 +52,7 @@  struct ifcvf_internal {
 	int vfio_container_fd;
 	int vfio_group_fd;
 	int vfio_dev_fd;
-	pthread_t tid;	/* thread for notify relay */
+	rte_thread_t tid;	/* thread for notify relay */
 	int epfd;
 	int vid;
 	struct rte_vdpa_device *vdev;
@@ -79,7 +79,7 @@  TAILQ_HEAD(internal_list_head, internal_list);
 static struct internal_list_head internal_list =
 	TAILQ_HEAD_INITIALIZER(internal_list);
 
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
 
@@ -89,7 +89,7 @@  find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
 	int found = 0;
 	struct internal_list *list;
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 
 	TAILQ_FOREACH(list, &internal_list, next) {
 		if (vdev == list->internal->vdev) {
@@ -98,7 +98,7 @@  find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
 		}
 	}
 
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	if (!found)
 		return NULL;
@@ -112,7 +112,7 @@  find_internal_resource_by_dev(struct rte_pci_device *pdev)
 	int found = 0;
 	struct internal_list *list;
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 
 	TAILQ_FOREACH(list, &internal_list, next) {
 		if (!rte_pci_addr_cmp(&pdev->addr,
@@ -122,7 +122,7 @@  find_internal_resource_by_dev(struct rte_pci_device *pdev)
 		}
 	}
 
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	if (!found)
 		return NULL;
@@ -503,7 +503,7 @@  setup_notify_relay(struct ifcvf_internal *internal)
 	ret = rte_ctrl_thread_create(&internal->tid, name, NULL, notify_relay,
 				     (void *)internal);
 	if (ret != 0) {
-		DRV_LOG(ERR, "failed to create notify relay pthread.");
+		DRV_LOG(ERR, "failed to create notify relay thread.");
 		return -1;
 	}
 
@@ -513,13 +513,11 @@  setup_notify_relay(struct ifcvf_internal *internal)
 static int
 unset_notify_relay(struct ifcvf_internal *internal)
 {
-	void *status;
-
-	if (internal->tid) {
-		pthread_cancel(internal->tid);
-		pthread_join(internal->tid, &status);
+	if (internal->tid.opaque_id) {
+		rte_thread_cancel(internal->tid);
+		rte_thread_join(internal->tid, NULL);
 	}
-	internal->tid = 0;
+	internal->tid.opaque_id = 0;
 
 	if (internal->epfd >= 0)
 		close(internal->epfd);
@@ -809,7 +807,7 @@  setup_vring_relay(struct ifcvf_internal *internal)
 	ret = rte_ctrl_thread_create(&internal->tid, name, NULL, vring_relay,
 				     (void *)internal);
 	if (ret != 0) {
-		DRV_LOG(ERR, "failed to create ring relay pthread.");
+		DRV_LOG(ERR, "failed to create ring relay thread.");
 		return -1;
 	}
 
@@ -819,13 +817,11 @@  setup_vring_relay(struct ifcvf_internal *internal)
 static int
 unset_vring_relay(struct ifcvf_internal *internal)
 {
-	void *status;
-
-	if (internal->tid) {
-		pthread_cancel(internal->tid);
-		pthread_join(internal->tid, &status);
+	if (internal->tid.opaque_id) {
+		rte_thread_cancel(internal->tid);
+		rte_thread_join(internal->tid, NULL);
 	}
-	internal->tid = 0;
+	internal->tid.opaque_id = 0;
 
 	if (internal->epfd >= 0)
 		close(internal->epfd);
@@ -1253,9 +1249,9 @@  ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		goto error;
 	}
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 	TAILQ_INSERT_TAIL(&internal_list, list, next);
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	rte_atomic32_set(&internal->started, 1);
 	update_datapath(internal);
@@ -1293,9 +1289,9 @@  ifcvf_pci_remove(struct rte_pci_device *pci_dev)
 	rte_vfio_container_destroy(internal->vfio_container_fd);
 	rte_vdpa_unregister_device(internal->vdev);
 
-	pthread_mutex_lock(&internal_list_lock);
+	rte_thread_mutex_lock(&internal_list_lock);
 	TAILQ_REMOVE(&internal_list, list, next);
-	pthread_mutex_unlock(&internal_list_lock);
+	rte_thread_mutex_unlock(&internal_list_lock);
 
 	rte_free(list);
 	rte_free(internal);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index e5e03e6582..e7f4acb48d 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -48,7 +48,7 @@ 
 
 TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
 					      TAILQ_HEAD_INITIALIZER(priv_list);
-static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t priv_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 static struct mlx5_vdpa_priv *
 mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
@@ -56,14 +56,14 @@  mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
 	struct mlx5_vdpa_priv *priv;
 	int found = 0;
 
-	pthread_mutex_lock(&priv_list_lock);
+	rte_thread_mutex_lock(&priv_list_lock);
 	TAILQ_FOREACH(priv, &priv_list, next) {
 		if (vdev == priv->vdev) {
 			found = 1;
 			break;
 		}
 	}
-	pthread_mutex_unlock(&priv_list_lock);
+	rte_thread_mutex_unlock(&priv_list_lock);
 	if (!found) {
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		rte_errno = EINVAL;
@@ -143,9 +143,9 @@  mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
-	pthread_mutex_lock(&priv->vq_config_lock);
+	rte_thread_mutex_lock(&priv->vq_config_lock);
 	ret = mlx5_vdpa_virtq_enable(priv, vring, state);
-	pthread_mutex_unlock(&priv->vq_config_lock);
+	rte_thread_mutex_unlock(&priv->vq_config_lock);
 	return ret;
 }
 
@@ -296,7 +296,7 @@  mlx5_vdpa_dev_close(int vid)
 	priv->configured = 0;
 	priv->vid = 0;
 	/* The mutex may stay locked after event thread cancel - initiate it. */
-	pthread_mutex_init(&priv->vq_config_lock, NULL);
+	rte_thread_mutex_init(&priv->vq_config_lock);
 	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
 	return ret;
 }
@@ -736,10 +736,10 @@  mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	}
 	mlx5_vdpa_config_get(pci_dev->device.devargs, priv);
 	SLIST_INIT(&priv->mr_list);
-	pthread_mutex_init(&priv->vq_config_lock, NULL);
-	pthread_mutex_lock(&priv_list_lock);
+	rte_thread_mutex_init(&priv->vq_config_lock);
+	rte_thread_mutex_lock(&priv_list_lock);
 	TAILQ_INSERT_TAIL(&priv_list, priv, next);
-	pthread_mutex_unlock(&priv_list_lock);
+	rte_thread_mutex_unlock(&priv_list_lock);
 	return 0;
 
 error:
@@ -770,7 +770,7 @@  mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
 	struct mlx5_vdpa_priv *priv = NULL;
 	int found = 0;
 
-	pthread_mutex_lock(&priv_list_lock);
+	rte_thread_mutex_lock(&priv_list_lock);
 	TAILQ_FOREACH(priv, &priv_list, next) {
 		if (!rte_pci_addr_cmp(&priv->pci_dev->addr, &pci_dev->addr)) {
 			found = 1;
@@ -779,7 +779,7 @@  mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
 	}
 	if (found)
 		TAILQ_REMOVE(&priv_list, priv, next);
-	pthread_mutex_unlock(&priv_list_lock);
+	rte_thread_mutex_unlock(&priv_list_lock);
 	if (found) {
 		if (priv->configured)
 			mlx5_vdpa_dev_close(priv->vid);
@@ -790,7 +790,7 @@  mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
 		if (priv->vdev)
 			rte_vdpa_unregister_device(priv->vdev);
 		mlx5_glue->close_device(priv->ctx);
-		pthread_mutex_destroy(&priv->vq_config_lock);
+		rte_thread_mutex_destroy(&priv->vq_config_lock);
 		rte_free(priv);
 	}
 	return 0;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 722c72b65e..cf7df473f8 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -119,9 +119,9 @@  enum {
 struct mlx5_vdpa_priv {
 	TAILQ_ENTRY(mlx5_vdpa_priv) next;
 	uint8_t configured;
-	pthread_mutex_t vq_config_lock;
+	rte_thread_mutex_t vq_config_lock;
 	uint64_t no_traffic_counter;
-	pthread_t timer_tid;
+	rte_thread_t timer_tid;
 	int event_mode;
 	int event_core; /* Event thread cpu affinity core. */
 	uint32_t event_us;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index 88f6a4256d..b3639659f6 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -285,7 +285,7 @@  mlx5_vdpa_event_handle(void *arg)
 	case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
 		priv->timer_delay_us = priv->event_us;
 		while (1) {
-			pthread_mutex_lock(&priv->vq_config_lock);
+			rte_thread_mutex_lock(&priv->vq_config_lock);
 			max = mlx5_vdpa_queues_complete(priv);
 			if (max == 0 && priv->no_traffic_counter++ >=
 			    priv->no_traffic_max) {
@@ -293,10 +293,10 @@  mlx5_vdpa_event_handle(void *arg)
 					priv->vdev->device->name);
 				mlx5_vdpa_arm_all_cqs(priv);
 				do {
-					pthread_mutex_unlock
+					rte_thread_mutex_unlock
 							(&priv->vq_config_lock);
 					cq = mlx5_vdpa_event_wait(priv);
-					pthread_mutex_lock
+					rte_thread_mutex_lock
 							(&priv->vq_config_lock);
 					if (cq == NULL ||
 					       mlx5_vdpa_queue_complete(cq) > 0)
@@ -307,7 +307,7 @@  mlx5_vdpa_event_handle(void *arg)
 			} else if (max != 0) {
 				priv->no_traffic_counter = 0;
 			}
-			pthread_mutex_unlock(&priv->vq_config_lock);
+			rte_thread_mutex_unlock(&priv->vq_config_lock);
 			mlx5_vdpa_timer_sleep(priv, max);
 		}
 		return NULL;
@@ -315,10 +315,10 @@  mlx5_vdpa_event_handle(void *arg)
 		do {
 			cq = mlx5_vdpa_event_wait(priv);
 			if (cq != NULL) {
-				pthread_mutex_lock(&priv->vq_config_lock);
+				rte_thread_mutex_lock(&priv->vq_config_lock);
 				if (mlx5_vdpa_queue_complete(cq) > 0)
 					mlx5_vdpa_cq_arm(priv, cq);
-				pthread_mutex_unlock(&priv->vq_config_lock);
+				rte_thread_mutex_unlock(&priv->vq_config_lock);
 			}
 		} while (1);
 		return NULL;
@@ -340,7 +340,7 @@  mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
 	struct mlx5_vdpa_virtq *virtq;
 	uint64_t sec;
 
-	pthread_mutex_lock(&priv->vq_config_lock);
+	rte_thread_mutex_lock(&priv->vq_config_lock);
 	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
 					 sizeof(out.buf)) >=
 				       (ssize_t)sizeof(out.event_resp.cookie)) {
@@ -386,7 +386,7 @@  mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
 			virtq->err_time[i - 1] = virtq->err_time[i];
 		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
 	}
-	pthread_mutex_unlock(&priv->vq_config_lock);
+	rte_thread_mutex_unlock(&priv->vq_config_lock);
 #endif
 }
 
@@ -473,27 +473,25 @@  mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
 {
 	int ret;
 	rte_cpuset_t cpuset;
-	pthread_attr_t attr;
+	rte_thread_attr_t attr;
 	char name[16];
-	const struct sched_param sp = {
-		.sched_priority = sched_get_priority_max(SCHED_RR),
-	};
 
 	if (!priv->eventc)
 		/* All virtqs are in poll mode. */
 		return 0;
-	pthread_attr_init(&attr);
-	ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
+
+	ret = rte_thread_attr_init(&attr);
 	if (ret) {
-		DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
+		DRV_LOG(ERR, "Failed to initialize thread attributes");
 		return -1;
 	}
-	ret = pthread_attr_setschedparam(&attr, &sp);
+	ret = rte_thread_attr_set_priority(&attr,
+			RTE_THREAD_PRIORITY_REALTIME_CRITICAL);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to set thread priority.");
 		return -1;
 	}
-	ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
+	ret = rte_thread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
 			     (void *)priv);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to create timer thread.");
@@ -504,13 +502,13 @@  mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
 		CPU_SET(priv->event_core, &cpuset);
 	else
 		cpuset = rte_lcore_cpuset(rte_get_main_lcore());
-	ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
+	ret = rte_thread_set_affinity_by_id(priv->timer_tid, &cpuset);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to set thread affinity.");
 		return -1;
 	}
 	snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
-	ret = rte_thread_setname(priv->timer_tid, name);
+	ret = pthread_setname_np(priv->timer_tid.opaque_id, name);
 	if (ret)
 		DRV_LOG(DEBUG, "Cannot set timer thread name.");
 	return 0;
@@ -519,13 +517,11 @@  mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
 void
 mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
 {
-	void *status;
-
-	if (priv->timer_tid) {
-		pthread_cancel(priv->timer_tid);
-		pthread_join(priv->timer_tid, &status);
+	if (priv->timer_tid.opaque_id) {
+		rte_thread_cancel(priv->timer_tid);
+		rte_thread_join(priv->timer_tid, NULL);
 	}
-	priv->timer_tid = 0;
+	priv->timer_tid.opaque_id = 0;
 }
 
 void
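The event-thread setup and teardown converted above cover the full thread lifecycle; a condensed sketch of that shape, assuming the rte_thread API from this series (the worker routine and the start/stop helpers are illustrative only):

#include <rte_thread.h>
#include <rte_lcore.h>

static rte_thread_t worker_tid;

static void *
worker_main(void *arg)
{
	(void)arg;
	/* ... event loop ... */
	return NULL;
}

static int
worker_start(void)
{
	rte_thread_attr_t attr;
	rte_cpuset_t cpuset = rte_lcore_cpuset(rte_get_main_lcore());

	if (rte_thread_attr_init(&attr) != 0)
		return -1;
	if (rte_thread_attr_set_priority(&attr, RTE_THREAD_PRIORITY_NORMAL) != 0)
		return -1;
	if (rte_thread_create(&worker_tid, &attr, worker_main, NULL) != 0)
		return -1;
	/* Pin the new thread by id, as in the mlx5_vdpa conversion above. */
	if (rte_thread_set_affinity_by_id(worker_tid, &cpuset) != 0)
		return -1;
	return 0;
}

static void
worker_stop(void)
{
	if (worker_tid.opaque_id) {
		rte_thread_cancel(worker_tid);
		rte_thread_join(worker_tid, NULL);
	}
	worker_tid.opaque_id = 0;
}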
diff --git a/examples/kni/main.c b/examples/kni/main.c
index beabb3c848..e4741f85ba 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -1042,8 +1042,8 @@  main(int argc, char** argv)
 	int ret;
 	uint16_t nb_sys_ports, port;
 	unsigned i;
-	void *retval;
-	pthread_t kni_link_tid;
+	int retval;
+	rte_thread_t kni_link_tid;
 	int pid;
 
 	/* Associate signal_hanlder function with USR signals */
@@ -1126,7 +1126,7 @@  main(int argc, char** argv)
 			return -1;
 	}
 	monitor_links = 0;
-	pthread_join(kni_link_tid, &retval);
+	rte_thread_join(kni_link_tid, &retval);
 
 	/* Release resources */
 	RTE_ETH_FOREACH_DEV(port) {
diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c
index 257de50692..bef7b59842 100644
--- a/examples/performance-thread/pthread_shim/main.c
+++ b/examples/performance-thread/pthread_shim/main.c
@@ -164,7 +164,7 @@  static void *initial_lthread(void *args __rte_unused)
 			rte_exit(EXIT_FAILURE, "Cannot create helloworld thread\n");
 
 		snprintf(name, sizeof(name), "helloworld-%u", (uint32_t)i);
-		rte_thread_setname(tid[i], name);
+		pthread_setname_np(tid[i], name);
 	}
 
 	/* wait for 1s to allow threads
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..4d02502602 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1634,7 +1634,7 @@  main(int argc, char *argv[])
 	unsigned nb_ports, valid_num_ports;
 	int ret, i;
 	uint16_t portid;
-	static pthread_t tid;
+	static rte_thread_t tid;
 	uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
 
 	signal(SIGINT, sigint_handler);
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index fe2b4e4803..5b31d58ac2 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -5,7 +5,7 @@ 
 #ifndef _GNU_SOURCE
 #define _GNU_SOURCE
 #endif
-#include <pthread.h>
+#include <rte_thread.h>
 #include <sched.h>
 
 #include <stdint.h>
@@ -533,7 +533,7 @@  ctrlr_worker(void *arg)
 {
 	struct vhost_blk_ctrlr *ctrlr = (struct vhost_blk_ctrlr *)arg;
 	cpu_set_t cpuset;
-	pthread_t thread;
+	rte_thread_t thread;
 	int i;
 
 	fprintf(stdout, "Ctrlr Worker Thread start\n");
@@ -545,10 +545,10 @@  ctrlr_worker(void *arg)
 		exit(0);
 	}
 
-	thread = pthread_self();
+	thread = rte_thread_self();
 	CPU_ZERO(&cpuset);
 	CPU_SET(0, &cpuset);
-	pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
+	rte_thread_set_affinity_by_id(thread, &cpuset);
 
 	for (i = 0; i < NUM_OF_BLK_QUEUES; i++)
 		submit_inflight_vq(&ctrlr->queues[i]);
@@ -604,7 +604,7 @@  new_device(int vid)
 	struct vhost_blk_queue *vq;
 	char path[PATH_MAX];
 	uint64_t features, protocol_features;
-	pthread_t tid;
+	rte_thread_t tid;
 	int i, ret;
 	bool packed_ring, inflight_shmfd;
 
@@ -693,7 +693,7 @@  new_device(int vid)
 
 	/* device has been started */
 	ctrlr->started = 1;
-	pthread_detach(tid);
+	rte_thread_detach(tid);
 	return 0;
 }
 
diff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c
index 9d29696b84..1f6aba498c 100644
--- a/lib/eal/common/eal_common_options.c
+++ b/lib/eal/common/eal_common_options.c
@@ -1872,8 +1872,7 @@  eal_auto_detect_cores(struct rte_config *cfg)
 	unsigned int removed = 0;
 	rte_cpuset_t affinity_set;
 
-	if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
-				&affinity_set))
+	if (rte_thread_get_affinity_by_id(rte_thread_self(), &affinity_set))
 		CPU_ZERO(&affinity_set);
 
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1901,8 +1900,7 @@  compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
 	}
 	RTE_CPU_NOT(cpuset, cpuset);
 
-	if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
-				&default_set))
+	if (rte_thread_get_affinity_by_id(rte_thread_self(), &default_set))
 		CPU_ZERO(&default_set);
 
 	RTE_CPU_AND(cpuset, cpuset, &default_set);
diff --git a/lib/eal/common/eal_common_proc.c b/lib/eal/common/eal_common_proc.c
index dc4a2efa82..e8a93ee829 100644
--- a/lib/eal/common/eal_common_proc.c
+++ b/lib/eal/common/eal_common_proc.c
@@ -37,7 +37,7 @@ 
 static int mp_fd = -1;
 static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
 static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
-static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t mp_mutex_action = RTE_THREAD_MUTEX_INITIALIZER;
 static char peer_name[PATH_MAX];
 
 struct action_entry {
@@ -96,10 +96,10 @@  TAILQ_HEAD(pending_request_list, pending_request);
 
 static struct {
 	struct pending_request_list requests;
-	pthread_mutex_t lock;
+	rte_thread_mutex_t lock;
 } pending_requests = {
 	.requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
-	.lock = PTHREAD_MUTEX_INITIALIZER,
+	.lock = RTE_THREAD_MUTEX_INITIALIZER,
 	/**< used in async requests only */
 };
 
@@ -222,15 +222,15 @@  rte_mp_action_register(const char *name, rte_mp_t action)
 	strlcpy(entry->action_name, name, sizeof(entry->action_name));
 	entry->action = action;
 
-	pthread_mutex_lock(&mp_mutex_action);
+	rte_thread_mutex_lock(&mp_mutex_action);
 	if (find_action_entry_by_name(name) != NULL) {
-		pthread_mutex_unlock(&mp_mutex_action);
+		rte_thread_mutex_unlock(&mp_mutex_action);
 		rte_errno = EEXIST;
 		free(entry);
 		return -1;
 	}
 	TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
-	pthread_mutex_unlock(&mp_mutex_action);
+	rte_thread_mutex_unlock(&mp_mutex_action);
 	return 0;
 }
 
@@ -249,14 +249,14 @@  rte_mp_action_unregister(const char *name)
 		return;
 	}
 
-	pthread_mutex_lock(&mp_mutex_action);
+	rte_thread_mutex_lock(&mp_mutex_action);
 	entry = find_action_entry_by_name(name);
 	if (entry == NULL) {
-		pthread_mutex_unlock(&mp_mutex_action);
+		rte_thread_mutex_unlock(&mp_mutex_action);
 		return;
 	}
 	TAILQ_REMOVE(&action_entry_list, entry, next);
-	pthread_mutex_unlock(&mp_mutex_action);
+	rte_thread_mutex_unlock(&mp_mutex_action);
 	free(entry);
 }
 
@@ -328,7 +328,7 @@  process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
 	if (m->type == MP_REP || m->type == MP_IGN) {
 		struct pending_request *req = NULL;
 
-		pthread_mutex_lock(&pending_requests.lock);
+		rte_thread_mutex_lock(&pending_requests.lock);
 		pending_req = find_pending_request(s->sun_path, msg->name);
 		if (pending_req) {
 			memcpy(pending_req->reply, msg, sizeof(*msg));
@@ -343,18 +343,18 @@  process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
 						pending_req);
 		} else
 			RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
-		pthread_mutex_unlock(&pending_requests.lock);
+		rte_thread_mutex_unlock(&pending_requests.lock);
 
 		if (req != NULL)
 			trigger_async_action(req);
 		return;
 	}
 
-	pthread_mutex_lock(&mp_mutex_action);
+	rte_thread_mutex_lock(&mp_mutex_action);
 	entry = find_action_entry_by_name(msg->name);
 	if (entry != NULL)
 		action = entry->action;
-	pthread_mutex_unlock(&mp_mutex_action);
+	rte_thread_mutex_unlock(&mp_mutex_action);
 
 	if (!action) {
 		if (m->type == MP_REQ && !internal_conf->init_complete) {
@@ -524,9 +524,9 @@  async_reply_handle(void *arg)
 {
 	struct pending_request *req;
 
-	pthread_mutex_lock(&pending_requests.lock);
+	rte_thread_mutex_lock(&pending_requests.lock);
 	req = async_reply_handle_thread_unsafe(arg);
-	pthread_mutex_unlock(&pending_requests.lock);
+	rte_thread_mutex_unlock(&pending_requests.lock);
 
 	if (req != NULL)
 		trigger_async_action(req);
@@ -584,7 +584,7 @@  rte_mp_channel_init(void)
 {
 	char path[PATH_MAX];
 	int dir_fd;
-	pthread_t mp_handle_tid;
+	rte_thread_t mp_handle_tid;
 	const struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
@@ -998,9 +998,9 @@  rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
 
 	/* for secondary process, send request to the primary process only */
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		pthread_mutex_lock(&pending_requests.lock);
+		rte_thread_mutex_lock(&pending_requests.lock);
 		ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
-		pthread_mutex_unlock(&pending_requests.lock);
+		rte_thread_mutex_unlock(&pending_requests.lock);
 		goto end;
 	}
 
@@ -1021,7 +1021,7 @@  rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
 		goto close_end;
 	}
 
-	pthread_mutex_lock(&pending_requests.lock);
+	rte_thread_mutex_lock(&pending_requests.lock);
 	while ((ent = readdir(mp_dir))) {
 		char path[PATH_MAX];
 
@@ -1040,7 +1040,7 @@  rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
 	ret = 0;
 
 unlock_end:
-	pthread_mutex_unlock(&pending_requests.lock);
+	rte_thread_mutex_unlock(&pending_requests.lock);
 	/* unlock the directory */
 	flock(dir_fd, LOCK_UN);
 
@@ -1118,7 +1118,7 @@  rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
 	 * of requests to the queue at once, and some of the replies may arrive
 	 * before we add all of the requests to the queue.
 	 */
-	pthread_mutex_lock(&pending_requests.lock);
+	rte_thread_mutex_lock(&pending_requests.lock);
 
 	/* we have to ensure that callback gets triggered even if we don't send
 	 * anything, therefore earlier we have allocated a dummy request. fill
@@ -1141,7 +1141,7 @@  rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
 			dummy_used = true;
 		}
 
-		pthread_mutex_unlock(&pending_requests.lock);
+		rte_thread_mutex_unlock(&pending_requests.lock);
 
 		/* if we couldn't send anything, clean up */
 		if (ret != 0)
@@ -1185,7 +1185,7 @@  rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
 	}
 
 	/* finally, unlock the queue */
-	pthread_mutex_unlock(&pending_requests.lock);
+	rte_thread_mutex_unlock(&pending_requests.lock);
 
 	/* unlock the directory */
 	flock(dir_fd, LOCK_UN);
@@ -1201,7 +1201,7 @@  rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
 closedir_fail:
 	closedir(mp_dir);
 unlock_fail:
-	pthread_mutex_unlock(&pending_requests.lock);
+	rte_thread_mutex_unlock(&pending_requests.lock);
 fail:
 	free(dummy);
 	free(param);
diff --git a/lib/eal/common/eal_common_thread.c b/lib/eal/common/eal_common_thread.c
index 1a52f42a2b..d46ab8cf77 100644
--- a/lib/eal/common/eal_common_thread.c
+++ b/lib/eal/common/eal_common_thread.c
@@ -6,7 +6,6 @@ 
 #include <stdlib.h>
 #include <stdint.h>
 #include <unistd.h>
-#include <pthread.h>
 #include <signal.h>
 #include <sched.h>
 #include <assert.h>
@@ -86,9 +85,8 @@  thread_update_affinity(rte_cpuset_t *cpusetp)
 int
 rte_thread_set_affinity(rte_cpuset_t *cpusetp)
 {
-	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
-			cpusetp) != 0) {
-		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+	if (rte_thread_set_affinity_by_id(rte_thread_self(), cpusetp) != 0) {
+		RTE_LOG(ERR, EAL, "rte_thread_set_affinity failed\n");
 		return -1;
 	}
 
@@ -169,14 +167,14 @@  __rte_thread_uninit(void)
 struct rte_thread_ctrl_params {
 	void *(*start_routine)(void *);
 	void *arg;
-	pthread_barrier_t configured;
+	rte_thread_barrier_t configured;
 	unsigned int refcnt;
 };
 
 static void ctrl_params_free(struct rte_thread_ctrl_params *params)
 {
 	if (__atomic_sub_fetch(&params->refcnt, 1, __ATOMIC_ACQ_REL) == 0) {
-		(void)pthread_barrier_destroy(&params->configured);
+		(void)rte_thread_barrier_destroy(&params->configured);
 		free(params);
 	}
 }
@@ -192,7 +190,7 @@  static void *ctrl_thread_init(void *arg)
 
 	__rte_thread_init(rte_lcore_id(), cpuset);
 
-	pthread_barrier_wait(&params->configured);
+	rte_thread_barrier_wait(&params->configured);
 	start_routine = params->start_routine;
 	ctrl_params_free(params);
 
@@ -203,8 +201,8 @@  static void *ctrl_thread_init(void *arg)
 }
 
 int
-rte_ctrl_thread_create(pthread_t *thread, const char *name,
-		const pthread_attr_t *attr,
+rte_ctrl_thread_create(rte_thread_t *thread, const char *name,
+		const rte_thread_attr_t *attr,
 		void *(*start_routine)(void *), void *arg)
 {
 	struct internal_config *internal_conf =
@@ -221,11 +219,11 @@  rte_ctrl_thread_create(pthread_t *thread, const char *name,
 	params->arg = arg;
 	params->refcnt = 2;
 
-	ret = pthread_barrier_init(&params->configured, NULL, 2);
+	ret = rte_thread_barrier_init(&params->configured, 2);
 	if (ret != 0)
 		goto fail_no_barrier;
 
-	ret = pthread_create(thread, attr, ctrl_thread_init, (void *)params);
+	ret = rte_thread_create(thread, attr, ctrl_thread_init, (void *)params);
 	if (ret != 0)
 		goto fail_with_barrier;
 
@@ -236,22 +234,22 @@  rte_ctrl_thread_create(pthread_t *thread, const char *name,
 				"Cannot set name for ctrl thread\n");
 	}
 
-	ret = pthread_setaffinity_np(*thread, sizeof(*cpuset), cpuset);
+	ret = rte_thread_set_affinity_by_id(*thread, cpuset);
 	if (ret != 0)
 		params->start_routine = NULL;
 
-	pthread_barrier_wait(&params->configured);
+	rte_thread_barrier_wait(&params->configured);
 	ctrl_params_free(params);
 
 	if (ret != 0)
 		/* start_routine has been set to NULL above; */
 		/* ctrl thread will exit immediately */
-		pthread_join(*thread, NULL);
+		rte_thread_join(*thread, NULL);
 
 	return -ret;
 
 fail_with_barrier:
-	(void)pthread_barrier_destroy(&params->configured);
+	(void)rte_thread_barrier_destroy(&params->configured);
 
 fail_no_barrier:
 	free(params);
@@ -276,8 +274,7 @@  rte_thread_register(void)
 		rte_errno = EINVAL;
 		return -1;
 	}
-	if (pthread_getaffinity_np(pthread_self(), sizeof(cpuset),
-			&cpuset) != 0)
+	if (rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset) != 0)
 		CPU_ZERO(&cpuset);
 	lcore_id = eal_lcore_non_eal_allocate();
 	if (lcore_id >= RTE_MAX_LCORE)
@@ -304,3 +301,14 @@  rte_thread_unregister(void)
 		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
 			lcore_id);
 }
+
+void rte_thread_priority_init(void)
+{
+	struct internal_config *internal_conf =
+		eal_get_internal_configuration();
+
+	/* If the user doesn't specify the priority through the command
+	 * line arguments, the default 'normal' value will be used.
+	 */
+	internal_conf->thread_priority = RTE_THREAD_PRIORITY_NORMAL;
+}
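Callers of rte_ctrl_thread_create() keep the same call shape after this change; only the handle and attribute types differ. A minimal usage sketch under that assumption (the relay routine and helper are illustrative only):

#include <rte_lcore.h>
#include <rte_thread.h>

static void *
relay_main(void *arg)
{
	(void)arg;
	/* ... control-thread service loop ... */
	return NULL;
}

static int
relay_start(rte_thread_t *tid, void *ctx)
{
	/* A NULL attribute pointer keeps the defaults, as in the
	 * ifcvf relay setup converted above. */
	return rte_ctrl_thread_create(tid, "example-relay", NULL,
				      relay_main, ctx);
}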
diff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c
index 24e27387b1..6df4a01277 100644
--- a/lib/eal/common/eal_common_trace.c
+++ b/lib/eal/common/eal_common_trace.c
@@ -359,7 +359,7 @@  __rte_trace_mem_per_thread_alloc(void)
 	/* Store the thread name */
 	char *name = header->stream_header.thread_name;
 	memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
-	rte_thread_getname(pthread_self(), name,
+	rte_thread_getname(rte_thread_self(), name,
 		__RTE_TRACE_EMIT_STRING_LEN_MAX);
 
 	trace->lcore_meta[count].mem = header;
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 64cf4e81c8..4b95001d7d 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -19,7 +19,7 @@ 
  * Structure storing internal configuration (per-lcore)
  */
 struct lcore_config {
-	pthread_t thread_id;       /**< pthread identifier */
+	rte_thread_t thread_id;       /**< thread identifier */
 	int pipe_main2worker[2];   /**< communication pipe with main */
 	int pipe_worker2main[2];   /**< communication pipe with main */
 
diff --git a/lib/eal/common/eal_thread.h b/lib/eal/common/eal_thread.h
index 4a49117be8..7b3b884463 100644
--- a/lib/eal/common/eal_thread.h
+++ b/lib/eal/common/eal_thread.h
@@ -58,4 +58,10 @@  eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size);
 int
 eal_thread_dump_current_affinity(char *str, unsigned int size);
 
+/**
+ * Set the initial thread priority in the internal configuration
+ * to the default value of RTE_THREAD_PRIORITY_NORMAL.
+ */
+void rte_thread_priority_init(void);
+
 #endif /* EAL_THREAD_H */
diff --git a/lib/eal/common/malloc_mp.c b/lib/eal/common/malloc_mp.c
index 2e597a17a2..2620b00e62 100644
--- a/lib/eal/common/malloc_mp.c
+++ b/lib/eal/common/malloc_mp.c
@@ -75,10 +75,10 @@  struct mp_request {
 TAILQ_HEAD(mp_request_list, mp_request);
 static struct {
 	struct mp_request_list list;
-	pthread_mutex_t lock;
+	rte_thread_mutex_t lock;
 } mp_request_list = {
 	.list = TAILQ_HEAD_INITIALIZER(mp_request_list.list),
-	.lock = PTHREAD_MUTEX_INITIALIZER
+	.lock = RTE_THREAD_MUTEX_INITIALIZER
 };
 
 /**
@@ -301,7 +301,7 @@  handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
 	int ret;
 
 	/* lock access to request */
-	pthread_mutex_lock(&mp_request_list.lock);
+	rte_thread_mutex_lock(&mp_request_list.lock);
 
 	/* make sure it's not a dupe */
 	entry = find_request_by_id(m->id);
@@ -387,10 +387,10 @@  handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
 
 		TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);
 	}
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	return 0;
 fail:
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	free(entry);
 	return -1;
 }
@@ -409,7 +409,7 @@  handle_sync_response(const struct rte_mp_msg *request,
 	int i;
 
 	/* lock the request */
-	pthread_mutex_lock(&mp_request_list.lock);
+	rte_thread_mutex_lock(&mp_request_list.lock);
 
 	entry = find_request_by_id(mpreq->id);
 	if (entry == NULL) {
@@ -539,10 +539,10 @@  handle_sync_response(const struct rte_mp_msg *request,
 		goto fail;
 	}
 
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	return 0;
 fail:
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	return -1;
 }
 
@@ -557,7 +557,7 @@  handle_rollback_response(const struct rte_mp_msg *request,
 	struct mp_request *entry;
 
 	/* lock the request */
-	pthread_mutex_lock(&mp_request_list.lock);
+	rte_thread_mutex_lock(&mp_request_list.lock);
 
 	memset(&msg, 0, sizeof(msg));
 
@@ -588,10 +588,10 @@  handle_rollback_response(const struct rte_mp_msg *request,
 	free(entry->alloc_state.ms);
 	free(entry);
 
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	return 0;
 fail:
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	return -1;
 }
 
@@ -603,7 +603,7 @@  handle_response(const struct rte_mp_msg *msg, const void *peer  __rte_unused)
 			(const struct malloc_mp_req *)msg->param;
 	struct mp_request *entry;
 
-	pthread_mutex_lock(&mp_request_list.lock);
+	rte_thread_mutex_lock(&mp_request_list.lock);
 
 	entry = find_request_by_id(m->id);
 	if (entry != NULL) {
@@ -616,7 +616,7 @@  handle_response(const struct rte_mp_msg *msg, const void *peer  __rte_unused)
 		pthread_cond_signal(&entry->cond);
 	}
 
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 
 	return 0;
 }
@@ -706,7 +706,7 @@  request_to_primary(struct malloc_mp_req *user_req)
 	memset(&msg, 0, sizeof(msg));
 	memset(&ts, 0, sizeof(ts));
 
-	pthread_mutex_lock(&mp_request_list.lock);
+	rte_thread_mutex_lock(&mp_request_list.lock);
 
 	entry = malloc(sizeof(*entry));
 	if (entry == NULL) {
@@ -767,10 +767,10 @@  request_to_primary(struct malloc_mp_req *user_req)
 	TAILQ_REMOVE(&mp_request_list.list, entry, next);
 	free(entry);
 
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	return ret;
 fail:
-	pthread_mutex_unlock(&mp_request_list.lock);
+	rte_thread_mutex_unlock(&mp_request_list.lock);
 	free(entry);
 	return -1;
 }
diff --git a/lib/eal/common/meson.build b/lib/eal/common/meson.build
index edfca77779..eda250247b 100644
--- a/lib/eal/common/meson.build
+++ b/lib/eal/common/meson.build
@@ -80,6 +80,7 @@  sources += files(
         'rte_random.c',
         'rte_reciprocal.c',
         'rte_service.c',
+        'rte_thread.c',
         'rte_version.c',
 )
 
diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c
index f4d1676754..ec7fd57e7d 100644
--- a/lib/eal/freebsd/eal.c
+++ b/lib/eal/freebsd/eal.c
@@ -667,7 +667,7 @@  int
 rte_eal_init(int argc, char **argv)
 {
 	int i, fctret, ret;
-	pthread_t thread_id;
+	rte_thread_t thread_id;
 	static uint32_t run_once;
 	uint32_t has_run = 0;
 	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
@@ -676,6 +676,8 @@  rte_eal_init(int argc, char **argv)
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
+	rte_thread_priority_init();
+
 	/* checks if the machine is adequate */
 	if (!rte_cpu_is_supported()) {
 		rte_eal_init_alert("unsupported cpu type.");
@@ -690,7 +692,7 @@  rte_eal_init(int argc, char **argv)
 		return -1;
 	}
 
-	thread_id = pthread_self();
+	thread_id = rte_thread_self();
 
 	eal_reset_internal_config(internal_conf);
 
@@ -854,7 +856,14 @@  rte_eal_init(int argc, char **argv)
 
 	eal_check_mem_on_local_socket();
 
-	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
+	ret = rte_thread_set_priority(rte_thread_self(),
+				      internal_conf->thread_priority);
+	if (ret != 0) {
+		rte_eal_init_alert("Cannot set thread priority");
+		rte_errno = ret;
+		return -1;
+	}
+	if (rte_thread_set_affinity_by_id(rte_thread_self(),
 			&lcore_config[config->main_lcore].cpuset) != 0) {
 		rte_eal_init_alert("Cannot set affinity");
 		rte_errno = EINVAL;
@@ -865,10 +874,25 @@  rte_eal_init(int argc, char **argv)
 
 	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
 
-	RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
-		config->main_lcore, thread_id, cpuset,
+	RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (cpuset=[%s%s])\n",
+		config->main_lcore, cpuset,
 		ret == 0 ? "" : "...");
 
+	rte_thread_attr_t thread_attr;
+	ret = rte_thread_attr_init(&thread_attr);
+	if (ret != 0) {
+		rte_eal_init_alert("Cannot initialize thread attributes");
+		rte_errno = ret;
+		return -1;
+	}
+	ret = rte_thread_attr_set_priority(&thread_attr,
+					   internal_conf->thread_priority);
+	if (ret != 0) {
+		rte_eal_init_alert("Cannot set thread priority attribute");
+		rte_errno = ret;
+		return -1;
+	}
+
 	RTE_LCORE_FOREACH_WORKER(i) {
 
 		/*
@@ -882,8 +906,10 @@  rte_eal_init(int argc, char **argv)
 
 		lcore_config[i].state = WAIT;
 
+		rte_thread_attr_set_affinity(&thread_attr,
+					     &lcore_config[i].cpuset);
 		/* create a thread for each lcore */
-		ret = pthread_create(&lcore_config[i].thread_id, NULL,
+		ret = rte_thread_create(&lcore_config[i].thread_id, &thread_attr,
 				     eal_thread_loop, NULL);
 		if (ret != 0)
 			rte_panic("Cannot create thread\n");
@@ -893,10 +919,6 @@  rte_eal_init(int argc, char **argv)
 				"lcore-worker-%d", i);
 		rte_thread_setname(lcore_config[i].thread_id, thread_name);
 
-		ret = pthread_setaffinity_np(lcore_config[i].thread_id,
-			sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
-		if (ret != 0)
-			rte_panic("Cannot set affinity\n");
 	}
 
 	/*
diff --git a/lib/eal/freebsd/eal_alarm.c b/lib/eal/freebsd/eal_alarm.c
index c38b2e04f8..e5d7b130b1 100644
--- a/lib/eal/freebsd/eal_alarm.c
+++ b/lib/eal/freebsd/eal_alarm.c
@@ -37,7 +37,7 @@  struct alarm_entry {
 	rte_eal_alarm_callback cb_fn;
 	void *cb_arg;
 	volatile uint8_t executing;
-	volatile pthread_t executing_id;
+	volatile rte_thread_t executing_id;
 };
 
 static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER();
@@ -156,7 +156,7 @@  eal_alarm_callback(void *arg __rte_unused)
 
 	while (ap != NULL && timespec_cmp(&now, &ap->time) >= 0) {
 		ap->executing = 1;
-		ap->executing_id = pthread_self();
+		ap->executing_id = rte_thread_self();
 		rte_spinlock_unlock(&alarm_list_lk);
 
 		ap->cb_fn(ap->cb_arg);
@@ -263,8 +263,8 @@  rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
 				 * finish. Otherwise we are trying to cancel
 				 * ourselves - mark it by EINPROGRESS.
 				 */
-				if (pthread_equal(ap->executing_id,
-						pthread_self()) == 0)
+				if (rte_thread_equal(ap->executing_id,
+						rte_thread_self()) == 0)
 					executing++;
 				else
 					err = EINPROGRESS;
@@ -285,8 +285,8 @@  rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
 					free(ap);
 					count++;
 					ap = ap_prev;
-				} else if (pthread_equal(ap->executing_id,
-							 pthread_self()) == 0) {
+				} else if (rte_thread_equal(ap->executing_id,
+						rte_thread_self()) == 0) {
 					executing++;
 				} else {
 					err = EINPROGRESS;
diff --git a/lib/eal/freebsd/eal_interrupts.c b/lib/eal/freebsd/eal_interrupts.c
index 86810845fe..c0bf6c882a 100644
--- a/lib/eal/freebsd/eal_interrupts.c
+++ b/lib/eal/freebsd/eal_interrupts.c
@@ -52,7 +52,7 @@  static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
 static struct rte_intr_source_list intr_sources;
 
 /* interrupt handling thread */
-static pthread_t intr_thread;
+static rte_thread_t intr_thread;
 
 static volatile int kq = -1;
 
@@ -737,5 +737,5 @@  rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
 
 int rte_thread_is_intr(void)
 {
-	return pthread_equal(intr_thread, pthread_self());
+	return rte_thread_equal(intr_thread, rte_thread_self());
 }
diff --git a/lib/eal/freebsd/eal_thread.c b/lib/eal/freebsd/eal_thread.c
index 1dce9b04f2..cb8563cb7a 100644
--- a/lib/eal/freebsd/eal_thread.c
+++ b/lib/eal/freebsd/eal_thread.c
@@ -73,15 +73,15 @@  eal_thread_loop(__rte_unused void *arg)
 	char c;
 	int n, ret;
 	unsigned lcore_id;
-	pthread_t thread_id;
+	rte_thread_t thread_id;
 	int m2w, w2m;
 	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
 
-	thread_id = pthread_self();
+	thread_id = rte_thread_self();
 
 	/* retrieve our lcore_id from the configuration structure */
 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
-		if (thread_id == lcore_config[lcore_id].thread_id)
+		if (rte_thread_equal(thread_id, lcore_config[lcore_id].thread_id))
 			break;
 	}
 	if (lcore_id == RTE_MAX_LCORE)
@@ -93,8 +93,8 @@  eal_thread_loop(__rte_unused void *arg)
 	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
 
 	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
-	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
-		lcore_id, thread_id, cpuset, ret == 0 ? "" : "...");
+	RTE_LOG(DEBUG, EAL, "lcore %u is ready (cpuset=[%s%s])\n",
+		lcore_id, cpuset, ret == 0 ? "" : "...");
 
 	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);
 
@@ -143,14 +143,14 @@  int rte_sys_gettid(void)
 	return (int)lwpid;
 }
 
-int rte_thread_setname(pthread_t id, const char *name)
+int rte_thread_setname(rte_thread_t id, const char *name)
 {
 	/* this BSD function returns no error */
 	pthread_set_name_np(id, name);
 	return 0;
 }
 
-int rte_thread_getname(pthread_t id, char *name, size_t len)
+int rte_thread_getname(rte_thread_t id, char *name, size_t len)
 {
 	RTE_SET_USED(id);
 	RTE_SET_USED(name);
diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build
index 88a9eba12f..3b872f228f 100644
--- a/lib/eal/include/meson.build
+++ b/lib/eal/include/meson.build
@@ -40,6 +40,7 @@  headers += files(
         'rte_string_fns.h',
         'rte_tailq.h',
         'rte_thread.h',
+        'rte_thread_types.h',
         'rte_time.h',
         'rte_trace.h',
         'rte_trace_point.h',
diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h
index 1550b75da0..d5e004105d 100644
--- a/lib/eal/include/rte_lcore.h
+++ b/lib/eal/include/rte_lcore.h
@@ -374,7 +374,7 @@  rte_lcore_dump(FILE *f);
  * @return
  *   On success, return 0; otherwise return a negative value.
  */
-int rte_thread_setname(pthread_t id, const char *name);
+int rte_thread_setname(rte_thread_t id, const char *name);
 
 /**
  * Get thread name.
@@ -391,7 +391,7 @@  int rte_thread_setname(pthread_t id, const char *name);
  *   On success, return 0; otherwise return a negative value.
  */
 __rte_experimental
-int rte_thread_getname(pthread_t id, char *name, size_t len);
+int rte_thread_getname(rte_thread_t id, char *name, size_t len);
 
 /**
  * Register current non-EAL thread as a lcore.
@@ -440,8 +440,8 @@  rte_thread_unregister(void);
  *   corresponding to the error number.
  */
 int
-rte_ctrl_thread_create(pthread_t *thread, const char *name,
-		const pthread_attr_t *attr,
+rte_ctrl_thread_create(rte_thread_t *thread, const char *name,
+		const rte_thread_attr_t *attr,
 		void *(*start_routine)(void *), void *arg);
 
 #ifdef __cplusplus
diff --git a/lib/eal/include/rte_per_lcore.h b/lib/eal/include/rte_per_lcore.h
index eaedf0cb37..025d97f962 100644
--- a/lib/eal/include/rte_per_lcore.h
+++ b/lib/eal/include/rte_per_lcore.h
@@ -22,8 +22,6 @@ 
 extern "C" {
 #endif
 
-#include <pthread.h>
-
 /**
  * Macro to define a per lcore variable "var" of type "type", don't
  * use keywords like "static" or "volatile" in type, just prefix the
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index ba19fc6347..70357e099a 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -963,7 +963,6 @@  int
 rte_eal_init(int argc, char **argv)
 {
 	int i, fctret, ret;
-	pthread_t thread_id;
 	static uint32_t run_once;
 	uint32_t has_run = 0;
 	const char *p;
@@ -975,6 +974,8 @@  rte_eal_init(int argc, char **argv)
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
+	rte_thread_priority_init();
+
 	/* checks if the machine is adequate */
 	if (!rte_cpu_is_supported()) {
 		rte_eal_init_alert("unsupported cpu type.");
@@ -991,7 +992,6 @@  rte_eal_init(int argc, char **argv)
 
 	p = strrchr(argv[0], '/');
 	strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
-	thread_id = pthread_self();
 
 	eal_reset_internal_config(internal_conf);
 
@@ -1219,7 +1219,15 @@  rte_eal_init(int argc, char **argv)
 
 	eal_check_mem_on_local_socket();
 
-	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
+	ret = rte_thread_set_priority(rte_thread_self(),
+				      internal_conf->thread_priority);
+	if (ret != 0) {
+		rte_eal_init_alert("Cannot set thread priority");
+		rte_errno = ret;
+		return -1;
+	}
+
+	if (rte_thread_set_affinity_by_id(rte_thread_self(),
 			&lcore_config[config->main_lcore].cpuset) != 0) {
 		rte_eal_init_alert("Cannot set affinity");
 		rte_errno = EINVAL;
@@ -1229,10 +1237,27 @@  rte_eal_init(int argc, char **argv)
 		&lcore_config[config->main_lcore].cpuset);
 
 	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
-	RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
-		config->main_lcore, (uintptr_t)thread_id, cpuset,
+	RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (cpuset=[%s%s])\n",
+		config->main_lcore, cpuset,
 		ret == 0 ? "" : "...");
 
+	rte_thread_attr_t thread_attr;
+	ret = rte_thread_attr_init(&thread_attr);
+	if (ret != 0) {
+		RTE_LOG(DEBUG, EAL, "Cannot initialize thread attributes,"
+			"ret = %d\n", ret);
+		rte_errno = ret;
+		return -1;
+	}
+	ret = rte_thread_attr_set_priority(&thread_attr,
+					   internal_conf->thread_priority);
+	if (ret != 0) {
+		RTE_LOG(DEBUG, EAL, "Cannot set thread priority attribute,"
+			"ret = %d\n", ret);
+		rte_errno = ret;
+		return -1;
+	}
+
 	RTE_LCORE_FOREACH_WORKER(i) {
 
 		/*
@@ -1246,9 +1271,11 @@  rte_eal_init(int argc, char **argv)
 
 		lcore_config[i].state = WAIT;
 
+		rte_thread_attr_set_affinity(&thread_attr,
+					     &lcore_config[i].cpuset);
 		/* create a thread for each lcore */
-		ret = pthread_create(&lcore_config[i].thread_id, NULL,
-				     eal_thread_loop, NULL);
+		ret = rte_thread_create(&lcore_config[i].thread_id,
+					&thread_attr, eal_thread_loop, NULL);
 		if (ret != 0)
 			rte_panic("Cannot create thread\n");
 
@@ -1260,11 +1287,6 @@  rte_eal_init(int argc, char **argv)
 		if (ret != 0)
 			RTE_LOG(DEBUG, EAL,
 				"Cannot set name for lcore thread\n");
-
-		ret = pthread_setaffinity_np(lcore_config[i].thread_id,
-			sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
-		if (ret != 0)
-			rte_panic("Cannot set affinity\n");
 	}
 
 	/*
diff --git a/lib/eal/linux/eal_alarm.c b/lib/eal/linux/eal_alarm.c
index 3252c6fa59..fef22a347c 100644
--- a/lib/eal/linux/eal_alarm.c
+++ b/lib/eal/linux/eal_alarm.c
@@ -48,7 +48,7 @@  struct alarm_entry {
 	rte_eal_alarm_callback cb_fn;
 	void *cb_arg;
 	volatile uint8_t executing;
-	volatile pthread_t executing_id;
+	volatile rte_thread_t executing_id;
 };
 
 static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER();
@@ -86,7 +86,7 @@  eal_alarm_callback(void *arg __rte_unused)
 			(ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec &&
 						(ap->time.tv_usec * NS_PER_US) <= now.tv_nsec))) {
 		ap->executing = 1;
-		ap->executing_id = pthread_self();
+		ap->executing_id = rte_thread_self();
 		rte_spinlock_unlock(&alarm_list_lk);
 
 		ap->cb_fn(ap->cb_arg);
@@ -207,7 +207,8 @@  rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
 				/* If calling from other context, mark that alarm is executing
 				 * so loop can spin till it finish. Otherwise we are trying to
 				 * cancel our self - mark it by EINPROGRESS */
-				if (pthread_equal(ap->executing_id, pthread_self()) == 0)
+				if (rte_thread_equal(ap->executing_id,
+						rte_thread_self()) == 0)
 					executing++;
 				else
 					err = EINPROGRESS;
@@ -228,7 +229,8 @@  rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
 					free(ap);
 					count++;
 					ap = ap_prev;
-				} else if (pthread_equal(ap->executing_id, pthread_self()) == 0)
+				} else if (rte_thread_equal(ap->executing_id,
+						rte_thread_self()) == 0)
 					executing++;
 				else
 					err = EINPROGRESS;
diff --git a/lib/eal/linux/eal_interrupts.c b/lib/eal/linux/eal_interrupts.c
index 22b3b7bcd9..e106fa186c 100644
--- a/lib/eal/linux/eal_interrupts.c
+++ b/lib/eal/linux/eal_interrupts.c
@@ -97,7 +97,7 @@  static union intr_pipefds intr_pipe;
 static struct rte_intr_source_list intr_sources;
 
 /* interrupt handling thread */
-static pthread_t intr_thread;
+static rte_thread_t intr_thread;
 
 /* VFIO interrupts */
 #ifdef VFIO_PRESENT
@@ -1570,5 +1570,5 @@  rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
 
 int rte_thread_is_intr(void)
 {
-	return pthread_equal(intr_thread, pthread_self());
+	return rte_thread_equal(intr_thread, rte_thread_self());
 }
diff --git a/lib/eal/linux/eal_thread.c b/lib/eal/linux/eal_thread.c
index 83c2034b93..fe6b02788f 100644
--- a/lib/eal/linux/eal_thread.c
+++ b/lib/eal/linux/eal_thread.c
@@ -73,15 +73,15 @@  eal_thread_loop(__rte_unused void *arg)
 	char c;
 	int n, ret;
 	unsigned lcore_id;
-	pthread_t thread_id;
+	rte_thread_t thread_id;
 	int m2w, w2m;
 	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
 
-	thread_id = pthread_self();
+	thread_id = rte_thread_self();
 
 	/* retrieve our lcore_id from the configuration structure */
 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
-		if (thread_id == lcore_config[lcore_id].thread_id)
+		if (rte_thread_equal(thread_id, lcore_config[lcore_id].thread_id))
 			break;
 	}
 	if (lcore_id == RTE_MAX_LCORE)
@@ -93,8 +93,8 @@  eal_thread_loop(__rte_unused void *arg)
 	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
 
 	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
-	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
-		lcore_id, (uintptr_t)thread_id, cpuset, ret == 0 ? "" : "...");
+	RTE_LOG(DEBUG, EAL, "lcore %u is ready (cpuset=[%s%s])\n",
+		lcore_id, cpuset, ret == 0 ? "" : "...");
 
 	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);
 
@@ -148,7 +148,7 @@  int rte_sys_gettid(void)
 	return (int)syscall(SYS_gettid);
 }
 
-int rte_thread_setname(pthread_t id, const char *name)
+int rte_thread_setname(rte_thread_t id, const char *name)
 {
 	int ret = ENOSYS;
 #if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
@@ -156,7 +156,7 @@  int rte_thread_setname(pthread_t id, const char *name)
 	char truncated[16];
 
 	strlcpy(truncated, name, sizeof(truncated));
-	ret = pthread_setname_np(id, truncated);
+	ret = pthread_setname_np(id.opaque_id, truncated);
 #endif
 #endif
 	RTE_SET_USED(id);
@@ -164,12 +164,12 @@  int rte_thread_setname(pthread_t id, const char *name)
 	return -ret;
 }
 
-int rte_thread_getname(pthread_t id, char *name, size_t len)
+int rte_thread_getname(rte_thread_t id, char *name, size_t len)
 {
 	int ret = ENOSYS;
 #if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
 #if __GLIBC_PREREQ(2, 12)
-	ret = pthread_getname_np(id, name, len);
+	ret = pthread_getname_np(id.opaque_id, name, len);
 #endif
 #endif
 	RTE_SET_USED(id);
diff --git a/lib/eal/linux/eal_timer.c b/lib/eal/linux/eal_timer.c
index 7cf15cabac..b4099a0aa2 100644
--- a/lib/eal/linux/eal_timer.c
+++ b/lib/eal/linux/eal_timer.c
@@ -80,7 +80,7 @@  static uint64_t eal_hpet_resolution_hz = 0;
 /* Incremented 4 times during one 32bits hpet full count */
 static uint32_t eal_hpet_msb;
 
-static pthread_t msb_inc_thread_id;
+static rte_thread_t msb_inc_thread_id;
 
 /*
  * This function runs on a specific thread to update a global variable
diff --git a/lib/eal/unix/meson.build b/lib/eal/unix/meson.build
index dc711b4240..f2b8063760 100644
--- a/lib/eal/unix/meson.build
+++ b/lib/eal/unix/meson.build
@@ -5,5 +5,4 @@  sources += files(
         'eal_file.c',
         'eal_unix_memory.c',
         'eal_unix_timer.c',
-        'rte_thread.c',
 )
diff --git a/lib/eal/unix/rte_thread.c b/lib/eal/unix/rte_thread.c
deleted file mode 100644
index c72d619ec1..0000000000
--- a/lib/eal/unix/rte_thread.c
+++ /dev/null
@@ -1,92 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 Mellanox Technologies, Ltd
- */
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_errno.h>
-#include <rte_log.h>
-#include <rte_thread.h>
-
-struct eal_tls_key {
-	pthread_key_t thread_index;
-};
-
-int
-rte_thread_key_create(rte_thread_key *key, void (*destructor)(void *))
-{
-	int err;
-
-	*key = malloc(sizeof(**key));
-	if ((*key) == NULL) {
-		RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n");
-		rte_errno = ENOMEM;
-		return -1;
-	}
-	err = pthread_key_create(&((*key)->thread_index), destructor);
-	if (err) {
-		RTE_LOG(DEBUG, EAL, "pthread_key_create failed: %s\n",
-			 strerror(err));
-		free(*key);
-		rte_errno = ENOEXEC;
-		return -1;
-	}
-	return 0;
-}
-
-int
-rte_thread_key_delete(rte_thread_key key)
-{
-	int err;
-
-	if (!key) {
-		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
-		rte_errno = EINVAL;
-		return -1;
-	}
-	err = pthread_key_delete(key->thread_index);
-	if (err) {
-		RTE_LOG(DEBUG, EAL, "pthread_key_delete failed: %s\n",
-			 strerror(err));
-		free(key);
-		rte_errno = ENOEXEC;
-		return -1;
-	}
-	free(key);
-	return 0;
-}
-
-int
-rte_thread_value_set(rte_thread_key key, const void *value)
-{
-	int err;
-
-	if (!key) {
-		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
-		rte_errno = EINVAL;
-		return -1;
-	}
-	err = pthread_setspecific(key->thread_index, value);
-	if (err) {
-		RTE_LOG(DEBUG, EAL, "pthread_setspecific failed: %s\n",
-			strerror(err));
-		rte_errno = ENOEXEC;
-		return -1;
-	}
-	return 0;
-}
-
-void *
-rte_thread_value_get(rte_thread_key key)
-{
-	if (!key) {
-		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
-		rte_errno = EINVAL;
-		return NULL;
-	}
-	return pthread_getspecific(key->thread_index);
-}
diff --git a/lib/eal/version.map b/lib/eal/version.map
index fe5c3dac98..3126c77e4d 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -423,6 +423,28 @@  EXPERIMENTAL {
 	rte_version_release; # WINDOWS_NO_EXPORT
 	rte_version_suffix; # WINDOWS_NO_EXPORT
 	rte_version_year; # WINDOWS_NO_EXPORT
+
+	rte_thread_attr_get_affinity;
+	rte_thread_attr_init;
+	rte_thread_attr_set_affinity;
+	rte_thread_attr_set_priority;
+	rte_thread_barrier_destroy;
+	rte_thread_barrier_init;
+	rte_thread_barrier_wait;
+	rte_thread_cancel; # WINDOWS_NO_EXPORT
+	rte_thread_create;
+	rte_thread_detach;
+	rte_thread_equal;
+	rte_thread_get_affinity_by_id;
+	rte_thread_join;
+	rte_thread_mutex_destroy;
+	rte_thread_mutex_init;
+	rte_thread_mutex_lock;
+	rte_thread_mutex_unlock;
+	rte_thread_self;
+	rte_thread_set_affinity_by_id;
+	rte_thread_set_priority;
+
 };
 
 INTERNAL {
diff --git a/lib/eal/windows/eal.c b/lib/eal/windows/eal.c
index 28c787c0b0..3e0834a24a 100644
--- a/lib/eal/windows/eal.c
+++ b/lib/eal/windows/eal.c
@@ -274,6 +274,8 @@  rte_eal_init(int argc, char **argv)
 		eal_get_internal_configuration();
 	int ret;
 
+	rte_thread_priority_init();
+
 	eal_log_init(NULL, 0);
 
 	eal_log_level_parse(argc, argv);
@@ -375,6 +377,36 @@  rte_eal_init(int argc, char **argv)
 		return -1;
 	}
 
+	if (rte_thread_set_affinity_by_id(rte_thread_self(),
+			&lcore_config[config->main_lcore].cpuset) != 0) {
+		rte_eal_init_alert("Cannot set affinity");
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	ret = rte_thread_set_priority(rte_thread_self(),
+				      internal_conf->thread_priority);
+	if (ret != 0) {
+		rte_eal_init_alert("Cannot set thread priority");
+		rte_errno = ret;
+		return -1;
+	}
+
+	rte_thread_attr_t thread_attr;
+	ret = rte_thread_attr_init(&thread_attr);
+	if (ret != 0) {
+		rte_eal_init_alert("Cannot initialize thread attributes");
+		rte_errno = ret;
+		return -1;
+	}
+	ret = rte_thread_attr_set_priority(&thread_attr,
+					   internal_conf->thread_priority);
+	if (ret != 0) {
+		rte_eal_init_alert("Cannot set thread priority attribute");
+		rte_errno = ret;
+		return -1;
+	}
+
 	RTE_LCORE_FOREACH_WORKER(i) {
 
 		/*
@@ -390,8 +422,17 @@  rte_eal_init(int argc, char **argv)
 
 		lcore_config[i].state = WAIT;
 
+		ret = rte_thread_attr_set_affinity(&thread_attr, &lcore_config[i].cpuset);
+		if (ret != 0) {
+			rte_eal_init_alert("Cannot set thread affinity attribute");
+			rte_errno = ret;
+			return -1;
+		}
+
 		/* create a thread for each lcore */
-		if (eal_thread_create(&lcore_config[i].thread_id) != 0)
+		ret = rte_thread_create(&lcore_config[i].thread_id,
+					&thread_attr, eal_thread_loop, NULL);
+		if (ret != 0)
 			rte_panic("Cannot create thread\n");
 	}
 
diff --git a/lib/eal/windows/eal_interrupts.c b/lib/eal/windows/eal_interrupts.c
index 1d4cf794df..a529ef95e3 100644
--- a/lib/eal/windows/eal_interrupts.c
+++ b/lib/eal/windows/eal_interrupts.c
@@ -2,12 +2,14 @@ 
  * Copyright 2020 Mellanox Technologies, Ltd
  */
 
+#include <inttypes.h>
+
 #include <rte_interrupts.h>
 
 #include "eal_private.h"
 #include "eal_windows.h"
 
-static pthread_t intr_thread;
+static rte_thread_t intr_thread;
 
 static HANDLE intr_iocp;
 
@@ -76,7 +78,7 @@  rte_eal_intr_init(void)
 int
 rte_thread_is_intr(void)
 {
-	return pthread_equal(intr_thread, pthread_self());
+	return rte_thread_equal(intr_thread, rte_thread_self());
 }
 
 int
@@ -92,9 +94,9 @@  eal_intr_thread_schedule(void (*func)(void *arg), void *arg)
 {
 	HANDLE handle;
 
-	handle = OpenThread(THREAD_ALL_ACCESS, FALSE, intr_thread);
+	handle = OpenThread(THREAD_ALL_ACCESS, FALSE, intr_thread.opaque_id);
 	if (handle == NULL) {
-		RTE_LOG_WIN32_ERR("OpenThread(%llu)", intr_thread);
+		RTE_LOG_WIN32_ERR("OpenThread (%" PRIuPTR ")", intr_thread.opaque_id);
 		return -ENOENT;
 	}
 
diff --git a/lib/eal/windows/eal_thread.c b/lib/eal/windows/eal_thread.c
index 9c3f6d69fd..0c6bca24cf 100644
--- a/lib/eal/windows/eal_thread.c
+++ b/lib/eal/windows/eal_thread.c
@@ -60,15 +60,15 @@  eal_thread_loop(void *arg __rte_unused)
 	char c;
 	int n, ret;
 	unsigned int lcore_id;
-	pthread_t thread_id;
+	rte_thread_t thread_id;
 	int m2w, w2m;
 	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
 
-	thread_id = pthread_self();
+	thread_id = rte_thread_self();
 
 	/* retrieve our lcore_id from the configuration structure */
 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
-		if (thread_id == lcore_config[lcore_id].thread_id)
+		if (rte_thread_equal(thread_id, lcore_config[lcore_id].thread_id))
 			break;
 	}
 	if (lcore_id == RTE_MAX_LCORE)
@@ -80,7 +80,7 @@  eal_thread_loop(void *arg __rte_unused)
 	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
 
 	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s])\n",
-		lcore_id, (uintptr_t)thread_id, cpuset);
+		lcore_id, thread_id.opaque_id, cpuset);
 
 	/* read on our pipe to get commands */
 	while (1) {
@@ -122,24 +122,6 @@  eal_thread_loop(void *arg __rte_unused)
 	}
 }
 
-/* function to create threads */
-int
-eal_thread_create(pthread_t *thread)
-{
-	HANDLE th;
-
-	th = CreateThread(NULL, 0,
-		(LPTHREAD_START_ROUTINE)(ULONG_PTR)eal_thread_loop,
-						NULL, 0, (LPDWORD)thread);
-	if (!th)
-		return -1;
-
-	SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);
-	SetThreadPriority(th, THREAD_PRIORITY_NORMAL);
-
-	return 0;
-}
-
 /* get current thread ID */
 int
 rte_sys_gettid(void)
@@ -148,7 +130,7 @@  rte_sys_gettid(void)
 }
 
 int
-rte_thread_setname(__rte_unused pthread_t id, __rte_unused const char *name)
+rte_thread_setname(__rte_unused rte_thread_t id, __rte_unused const char *name)
 {
 	/* TODO */
 	/* This is a stub, not the expected result */
diff --git a/lib/eal/windows/eal_windows.h b/lib/eal/windows/eal_windows.h
index dc5dc8240a..4b92b198c7 100644
--- a/lib/eal/windows/eal_windows.h
+++ b/lib/eal/windows/eal_windows.h
@@ -35,16 +35,6 @@ 
  */
 int eal_create_cpu_map(void);
 
-/**
- * Create a thread.
- *
- * @param thread
- *   The location to store the thread id if successful.
- * @return
- *   0 for success, -1 if the thread is not created.
- */
-int eal_thread_create(pthread_t *thread);
-
 /**
  * Get system NUMA node number for a socket ID.
  *
diff --git a/lib/eal/windows/include/meson.build b/lib/eal/windows/include/meson.build
index b3534b025f..7d9b3393e4 100644
--- a/lib/eal/windows/include/meson.build
+++ b/lib/eal/windows/include/meson.build
@@ -7,4 +7,5 @@  headers += files(
         'rte_os.h',
         'rte_virt2phys.h',
         'rte_windows.h',
+        'rte_windows_thread_types.h',
 )
diff --git a/lib/eal/windows/include/pthread.h b/lib/eal/windows/include/pthread.h
deleted file mode 100644
index 27fd2cca52..0000000000
--- a/lib/eal/windows/include/pthread.h
+++ /dev/null
@@ -1,192 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#ifndef _PTHREAD_H_
-#define _PTHREAD_H_
-
-#include <stdint.h>
-#include <sched.h>
-
-/**
- * This file is required to support the common code in eal_common_proc.c,
- * eal_common_thread.c and common\include\rte_per_lcore.h as Microsoft libc
- * does not contain pthread.h. This may be removed in future releases.
- */
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include <rte_windows.h>
-
-#define PTHREAD_BARRIER_SERIAL_THREAD TRUE
-
-/* defining pthread_t type on Windows since there is no in Microsoft libc*/
-typedef uintptr_t pthread_t;
-
-/* defining pthread_attr_t type on Windows since there is no in Microsoft libc*/
-typedef void *pthread_attr_t;
-
-typedef void *pthread_mutexattr_t;
-
-typedef CRITICAL_SECTION pthread_mutex_t;
-
-typedef SYNCHRONIZATION_BARRIER pthread_barrier_t;
-
-#define pthread_barrier_init(barrier, attr, count) \
-	!InitializeSynchronizationBarrier(barrier, count, -1)
-#define pthread_barrier_wait(barrier) EnterSynchronizationBarrier(barrier, \
-	SYNCHRONIZATION_BARRIER_FLAGS_BLOCK_ONLY)
-#define pthread_barrier_destroy(barrier) \
-	!DeleteSynchronizationBarrier(barrier)
-#define pthread_cancel(thread) !TerminateThread((HANDLE) thread, 0)
-
-/* pthread function overrides */
-#define pthread_self() \
-	((pthread_t)GetCurrentThreadId())
-
-
-static inline int
-pthread_equal(pthread_t t1, pthread_t t2)
-{
-	return t1 == t2;
-}
-
-static inline int
-pthread_setaffinity_np(pthread_t threadid, size_t cpuset_size,
-			rte_cpuset_t *cpuset)
-{
-	DWORD_PTR ret = 0;
-	HANDLE thread_handle;
-
-	if (cpuset == NULL || cpuset_size == 0)
-		return -1;
-
-	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, threadid);
-	if (thread_handle == NULL) {
-		RTE_LOG_WIN32_ERR("OpenThread()");
-		return -1;
-	}
-
-	ret = SetThreadAffinityMask(thread_handle, *cpuset->_bits);
-	if (ret == 0) {
-		RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
-		goto close_handle;
-	}
-
-close_handle:
-	if (CloseHandle(thread_handle) == 0) {
-		RTE_LOG_WIN32_ERR("CloseHandle()");
-		return -1;
-	}
-	return (ret == 0) ? -1 : 0;
-}
-
-static inline int
-pthread_getaffinity_np(pthread_t threadid, size_t cpuset_size,
-			rte_cpuset_t *cpuset)
-{
-	/* Workaround for the lack of a GetThreadAffinityMask()
-	 *API in Windows
-	 */
-	DWORD_PTR prev_affinity_mask;
-	HANDLE thread_handle;
-	DWORD_PTR ret = 0;
-
-	if (cpuset == NULL || cpuset_size == 0)
-		return -1;
-
-	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, threadid);
-	if (thread_handle == NULL) {
-		RTE_LOG_WIN32_ERR("OpenThread()");
-		return -1;
-	}
-
-	/* obtain previous mask by setting dummy mask */
-	prev_affinity_mask = SetThreadAffinityMask(thread_handle, 0x1);
-	if (prev_affinity_mask == 0) {
-		RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
-		goto close_handle;
-	}
-
-	/* set it back! */
-	ret = SetThreadAffinityMask(thread_handle, prev_affinity_mask);
-	if (ret == 0) {
-		RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
-		goto close_handle;
-	}
-
-	memset(cpuset, 0, cpuset_size);
-	*cpuset->_bits = prev_affinity_mask;
-
-close_handle:
-	if (CloseHandle(thread_handle) == 0) {
-		RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
-		return -1;
-	}
-	return (ret == 0) ? -1 : 0;
-}
-
-static inline int
-pthread_create(void *threadid, const void *threadattr, void *threadfunc,
-		void *args)
-{
-	RTE_SET_USED(threadattr);
-	HANDLE hThread;
-	hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)threadfunc,
-		args, 0, (LPDWORD)threadid);
-	if (hThread) {
-		SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);
-		SetThreadPriority(hThread, THREAD_PRIORITY_NORMAL);
-	}
-	return ((hThread != NULL) ? 0 : E_FAIL);
-}
-
-static inline int
-pthread_detach(__rte_unused pthread_t thread)
-{
-	return 0;
-}
-
-static inline int
-pthread_join(__rte_unused pthread_t thread,
-	__rte_unused void **value_ptr)
-{
-	return 0;
-}
-
-static inline int
-pthread_mutex_init(pthread_mutex_t *mutex,
-		   __rte_unused pthread_mutexattr_t *attr)
-{
-	InitializeCriticalSection(mutex);
-	return 0;
-}
-
-static inline int
-pthread_mutex_lock(pthread_mutex_t *mutex)
-{
-	EnterCriticalSection(mutex);
-	return 0;
-}
-
-static inline int
-pthread_mutex_unlock(pthread_mutex_t *mutex)
-{
-	LeaveCriticalSection(mutex);
-	return 0;
-}
-
-static inline int
-pthread_mutex_destroy(pthread_mutex_t *mutex)
-{
-	DeleteCriticalSection(mutex);
-	return 0;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _PTHREAD_H_ */
diff --git a/lib/eal/windows/include/sched.h b/lib/eal/windows/include/sched.h
index ff572b5dcb..8f0b3cb71e 100644
--- a/lib/eal/windows/include/sched.h
+++ b/lib/eal/windows/include/sched.h
@@ -44,7 +44,7 @@  typedef struct _rte_cpuset_s {
 	(1LL << _WHICH_BIT(b))) != 0LL)
 
 static inline int
-count_cpu(rte_cpuset_t *s)
+count_cpu(const rte_cpuset_t *s)
 {
 	unsigned int _i;
 	int count = 0;
diff --git a/lib/eal/windows/meson.build b/lib/eal/windows/meson.build
index ff9cbec417..4b7db4754b 100644
--- a/lib/eal/windows/meson.build
+++ b/lib/eal/windows/meson.build
@@ -19,7 +19,12 @@  sources += files(
         'eal_timer.c',
         'fnmatch.c',
         'getopt.c',
-        'rte_thread.c',
 )
 
+if get_option('use_external_thread_lib')
+    sources += 'eal/common/rte_thread.c'
+else
+    sources += 'eal/windows/rte_thread.c'
+endif
+
 dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index c607eabb5b..bc0a4c973a 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -526,7 +526,7 @@  rte_eth_dev_allocate(const char *name)
 	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
 	eth_dev->data->port_id = port_id;
 	eth_dev->data->mtu = RTE_ETHER_MTU;
-	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
+	rte_thread_mutex_init(&eth_dev->data->flow_ops_mutex);
 
 unlock:
 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
@@ -600,7 +600,7 @@  rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
 		rte_free(eth_dev->data->mac_addrs);
 		rte_free(eth_dev->data->hash_mac_addrs);
 		rte_free(eth_dev->data->dev_private);
-		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
+		rte_thread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
 		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
 	}
 
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index 4679d948fa..ad1053b561 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -5,7 +5,8 @@ 
 #ifndef _RTE_ETHDEV_CORE_H_
 #define _RTE_ETHDEV_CORE_H_
 
-#include <pthread.h>
+#include <sys/types.h>
+#include <rte_thread.h>
 
 /**
  * @file
@@ -182,7 +183,7 @@  struct rte_eth_dev_data {
 			 *   Valid if RTE_ETH_DEV_REPRESENTOR in dev_flags.
 			 */
 
-	pthread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */
+	rte_thread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */
 	uint64_t reserved_64s[4]; /**< Reserved for future fields */
 	void *reserved_ptrs[4];   /**< Reserved for future fields */
 } __rte_cache_aligned;
diff --git a/lib/ethdev/rte_flow.c b/lib/ethdev/rte_flow.c
index 8cb7a069c8..4f112d1af7 100644
--- a/lib/ethdev/rte_flow.c
+++ b/lib/ethdev/rte_flow.c
@@ -226,14 +226,14 @@  static inline void
 fts_enter(struct rte_eth_dev *dev)
 {
 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
-		pthread_mutex_lock(&dev->data->flow_ops_mutex);
+		rte_thread_mutex_lock(&dev->data->flow_ops_mutex);
 }
 
 static inline void
 fts_exit(struct rte_eth_dev *dev)
 {
 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
-		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
+		rte_thread_mutex_unlock(&dev->data->flow_ops_mutex);
 }
 
 static int
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 13dfb28401..be39cf9089 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -147,7 +147,7 @@  struct rte_event_eth_rx_adapter {
 	/* Count of interrupt vectors in use */
 	uint32_t num_intr_vec;
 	/* Thread blocked on Rx interrupts */
-	pthread_t rx_intr_thread;
+	rte_thread_t rx_intr_thread;
 	/* Configuration callback for rte_service configuration */
 	rte_event_eth_rx_adapter_conf_cb conf_cb;
 	/* Configuration callback argument */
@@ -1449,12 +1449,12 @@  rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 {
 	int err;
 
-	err = pthread_cancel(rx_adapter->rx_intr_thread);
+	err = rte_thread_cancel(rx_adapter->rx_intr_thread);
 	if (err)
 		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
 				err);
 
-	err = pthread_join(rx_adapter->rx_intr_thread, NULL);
+	err = rte_thread_join(rx_adapter->rx_intr_thread, NULL);
 	if (err)
 		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
 
diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c
index 55d4856f9e..b97774ccd4 100644
--- a/lib/vhost/fd_man.c
+++ b/lib/vhost/fd_man.c
@@ -61,9 +61,9 @@  fdset_shrink_nolock(struct fdset *pfdset)
 static void
 fdset_shrink(struct fdset *pfdset)
 {
-	pthread_mutex_lock(&pfdset->fd_mutex);
+	rte_thread_mutex_lock(&pfdset->fd_mutex);
 	fdset_shrink_nolock(pfdset);
-	pthread_mutex_unlock(&pfdset->fd_mutex);
+	rte_thread_mutex_unlock(&pfdset->fd_mutex);
 }
 
 /**
@@ -126,21 +126,21 @@  fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat)
 	if (pfdset == NULL || fd == -1)
 		return -1;
 
-	pthread_mutex_lock(&pfdset->fd_mutex);
+	rte_thread_mutex_lock(&pfdset->fd_mutex);
 	i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
 	if (i == -1) {
-		pthread_mutex_lock(&pfdset->fd_pooling_mutex);
+		rte_thread_mutex_lock(&pfdset->fd_pooling_mutex);
 		fdset_shrink_nolock(pfdset);
-		pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_pooling_mutex);
 		i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
 		if (i == -1) {
-			pthread_mutex_unlock(&pfdset->fd_mutex);
+			rte_thread_mutex_unlock(&pfdset->fd_mutex);
 			return -2;
 		}
 	}
 
 	fdset_add_fd(pfdset, i, fd, rcb, wcb, dat);
-	pthread_mutex_unlock(&pfdset->fd_mutex);
+	rte_thread_mutex_unlock(&pfdset->fd_mutex);
 
 	return 0;
 }
@@ -159,7 +159,7 @@  fdset_del(struct fdset *pfdset, int fd)
 		return NULL;
 
 	do {
-		pthread_mutex_lock(&pfdset->fd_mutex);
+		rte_thread_mutex_lock(&pfdset->fd_mutex);
 
 		i = fdset_find_fd(pfdset, fd);
 		if (i != -1 && pfdset->fd[i].busy == 0) {
@@ -170,7 +170,7 @@  fdset_del(struct fdset *pfdset, int fd)
 			pfdset->fd[i].dat = NULL;
 			i = -1;
 		}
-		pthread_mutex_unlock(&pfdset->fd_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_mutex);
 	} while (i != -1);
 
 	return dat;
@@ -192,10 +192,10 @@  fdset_try_del(struct fdset *pfdset, int fd)
 	if (pfdset == NULL || fd == -1)
 		return -2;
 
-	pthread_mutex_lock(&pfdset->fd_mutex);
+	rte_thread_mutex_lock(&pfdset->fd_mutex);
 	i = fdset_find_fd(pfdset, fd);
 	if (i != -1 && pfdset->fd[i].busy) {
-		pthread_mutex_unlock(&pfdset->fd_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_mutex);
 		return -1;
 	}
 
@@ -205,7 +205,7 @@  fdset_try_del(struct fdset *pfdset, int fd)
 		pfdset->fd[i].dat = NULL;
 	}
 
-	pthread_mutex_unlock(&pfdset->fd_mutex);
+	rte_thread_mutex_unlock(&pfdset->fd_mutex);
 	return 0;
 }
 
@@ -244,19 +244,19 @@  fdset_event_dispatch(void *arg)
 		 * might have been updated. It is ok if there is unwanted call
 		 * for new listenfds.
 		 */
-		pthread_mutex_lock(&pfdset->fd_mutex);
+		rte_thread_mutex_lock(&pfdset->fd_mutex);
 		numfds = pfdset->num;
-		pthread_mutex_unlock(&pfdset->fd_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_mutex);
 
-		pthread_mutex_lock(&pfdset->fd_pooling_mutex);
+		rte_thread_mutex_lock(&pfdset->fd_pooling_mutex);
 		val = poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
-		pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
+		rte_thread_mutex_unlock(&pfdset->fd_pooling_mutex);
 		if (val < 0)
 			continue;
 
 		need_shrink = 0;
 		for (i = 0; i < numfds; i++) {
-			pthread_mutex_lock(&pfdset->fd_mutex);
+			rte_thread_mutex_lock(&pfdset->fd_mutex);
 
 			pfdentry = &pfdset->fd[i];
 			fd = pfdentry->fd;
@@ -264,12 +264,12 @@  fdset_event_dispatch(void *arg)
 
 			if (fd < 0) {
 				need_shrink = 1;
-				pthread_mutex_unlock(&pfdset->fd_mutex);
+				rte_thread_mutex_unlock(&pfdset->fd_mutex);
 				continue;
 			}
 
 			if (!pfd->revents) {
-				pthread_mutex_unlock(&pfdset->fd_mutex);
+				rte_thread_mutex_unlock(&pfdset->fd_mutex);
 				continue;
 			}
 
@@ -280,7 +280,7 @@  fdset_event_dispatch(void *arg)
 			dat = pfdentry->dat;
 			pfdentry->busy = 1;
 
-			pthread_mutex_unlock(&pfdset->fd_mutex);
+			rte_thread_mutex_unlock(&pfdset->fd_mutex);
 
 			if (rcb && pfd->revents & (POLLIN | FDPOLLERR))
 				rcb(fd, dat, &remove1);
diff --git a/lib/vhost/fd_man.h b/lib/vhost/fd_man.h
index 3ab5cfdd60..ba58d849e8 100644
--- a/lib/vhost/fd_man.h
+++ b/lib/vhost/fd_man.h
@@ -5,7 +5,7 @@ 
 #ifndef _FD_MAN_H_
 #define _FD_MAN_H_
 #include <stdint.h>
-#include <pthread.h>
+#include <rte_thread.h>
 #include <poll.h>
 
 #define MAX_FDS 1024
@@ -23,8 +23,8 @@  struct fdentry {
 struct fdset {
 	struct pollfd rwfds[MAX_FDS];
 	struct fdentry fd[MAX_FDS];
-	pthread_mutex_t fd_mutex;
-	pthread_mutex_t fd_pooling_mutex;
+	rte_thread_mutex_t fd_mutex;
+	rte_thread_mutex_t fd_pooling_mutex;
 	int num;	/* current fd number of this fdset */
 
 	union pipefds {
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index 5d0d728d52..36a430ae25 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -14,7 +14,7 @@ 
 #include <sys/queue.h>
 #include <errno.h>
 #include <fcntl.h>
-#include <pthread.h>
+#include <rte_thread.h>
 
 #include <rte_log.h>
 
@@ -31,7 +31,7 @@  TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
  */
 struct vhost_user_socket {
 	struct vhost_user_connection_list conn_list;
-	pthread_mutex_t conn_mutex;
+	rte_thread_mutex_t conn_mutex;
 	char *path;
 	int socket_fd;
 	struct sockaddr_un un;
@@ -74,7 +74,7 @@  struct vhost_user {
 	struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
 	struct fdset fdset;
 	int vsocket_cnt;
-	pthread_mutex_t mutex;
+	rte_thread_mutex_t mutex;
 };
 
 #define MAX_VIRTIO_BACKLOG 128
@@ -87,12 +87,12 @@  static int vhost_user_start_client(struct vhost_user_socket *vsocket);
 static struct vhost_user vhost_user = {
 	.fdset = {
 		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
-		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
-		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
+		.fd_mutex = RTE_THREAD_MUTEX_INITIALIZER,
+		.fd_pooling_mutex = RTE_THREAD_MUTEX_INITIALIZER,
 		.num = 0
 	},
 	.vsocket_cnt = 0,
-	.mutex = PTHREAD_MUTEX_INITIALIZER,
+	.mutex = RTE_THREAD_MUTEX_INITIALIZER,
 };
 
 /*
@@ -271,9 +271,9 @@  vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 		goto err_cleanup;
 	}
 
-	pthread_mutex_lock(&vsocket->conn_mutex);
+	rte_thread_mutex_lock(&vsocket->conn_mutex);
 	TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
-	pthread_mutex_unlock(&vsocket->conn_mutex);
+	rte_thread_mutex_unlock(&vsocket->conn_mutex);
 
 	fdset_pipe_notify(&vhost_user.fdset);
 	return;
@@ -326,9 +326,9 @@  vhost_user_read_cb(int connfd, void *dat, int *remove)
 			vhost_user_start_client(vsocket);
 		}
 
-		pthread_mutex_lock(&vsocket->conn_mutex);
+		rte_thread_mutex_lock(&vsocket->conn_mutex);
 		TAILQ_REMOVE(&vsocket->conn_list, conn, next);
-		pthread_mutex_unlock(&vsocket->conn_mutex);
+		rte_thread_mutex_unlock(&vsocket->conn_mutex);
 
 		free(conn);
 	}
@@ -420,11 +420,11 @@  struct vhost_user_reconnect {
 TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
 struct vhost_user_reconnect_list {
 	struct vhost_user_reconnect_tailq_list head;
-	pthread_mutex_t mutex;
+	rte_thread_mutex_t mutex;
 };
 
 static struct vhost_user_reconnect_list reconn_list;
-static pthread_t reconn_tid;
+static rte_thread_t reconn_tid;
 
 static int
 vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
@@ -456,7 +456,7 @@  vhost_user_client_reconnect(void *arg __rte_unused)
 	struct vhost_user_reconnect *reconn, *next;
 
 	while (1) {
-		pthread_mutex_lock(&reconn_list.mutex);
+		rte_thread_mutex_lock(&reconn_list.mutex);
 
 		/*
 		 * An equal implementation of TAILQ_FOREACH_SAFE,
@@ -487,7 +487,7 @@  vhost_user_client_reconnect(void *arg __rte_unused)
 			free(reconn);
 		}
 
-		pthread_mutex_unlock(&reconn_list.mutex);
+		rte_thread_mutex_unlock(&reconn_list.mutex);
 		sleep(1);
 	}
 
@@ -499,7 +499,7 @@  vhost_user_reconnect_init(void)
 {
 	int ret;
 
-	ret = pthread_mutex_init(&reconn_list.mutex, NULL);
+	ret = rte_thread_mutex_init(&reconn_list.mutex);
 	if (ret < 0) {
 		VHOST_LOG_CONFIG(ERR, "failed to initialize mutex");
 		return ret;
@@ -510,7 +510,7 @@  vhost_user_reconnect_init(void)
 			     vhost_user_client_reconnect, NULL);
 	if (ret != 0) {
 		VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread");
-		if (pthread_mutex_destroy(&reconn_list.mutex)) {
+		if (rte_thread_mutex_destroy(&reconn_list.mutex)) {
 			VHOST_LOG_CONFIG(ERR,
 				"failed to destroy reconnect mutex");
 		}
@@ -554,9 +554,9 @@  vhost_user_start_client(struct vhost_user_socket *vsocket)
 	reconn->un = vsocket->un;
 	reconn->fd = fd;
 	reconn->vsocket = vsocket;
-	pthread_mutex_lock(&reconn_list.mutex);
+	rte_thread_mutex_lock(&reconn_list.mutex);
 	TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
-	pthread_mutex_unlock(&reconn_list.mutex);
+	rte_thread_mutex_unlock(&reconn_list.mutex);
 
 	return 0;
 }
@@ -588,11 +588,11 @@  rte_vhost_driver_attach_vdpa_device(const char *path,
 	if (dev == NULL || path == NULL)
 		return -1;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->vdpa_dev = dev;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
@@ -602,11 +602,11 @@  rte_vhost_driver_detach_vdpa_device(const char *path)
 {
 	struct vhost_user_socket *vsocket;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->vdpa_dev = NULL;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
@@ -617,11 +617,11 @@  rte_vhost_driver_get_vdpa_device(const char *path)
 	struct vhost_user_socket *vsocket;
 	struct rte_vdpa_device *dev = NULL;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		dev = vsocket->vdpa_dev;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return dev;
 }
@@ -631,7 +631,7 @@  rte_vhost_driver_disable_features(const char *path, uint64_t features)
 {
 	struct vhost_user_socket *vsocket;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 
 	/* Note that use_builtin_virtio_net is not affected by this function
@@ -641,7 +641,7 @@  rte_vhost_driver_disable_features(const char *path, uint64_t features)
 
 	if (vsocket)
 		vsocket->features &= ~features;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
@@ -651,7 +651,7 @@  rte_vhost_driver_enable_features(const char *path, uint64_t features)
 {
 	struct vhost_user_socket *vsocket;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket) {
 		if ((vsocket->supported_features & features) != features) {
@@ -659,12 +659,12 @@  rte_vhost_driver_enable_features(const char *path, uint64_t features)
 			 * trying to enable features the driver doesn't
 			 * support.
 			 */
-			pthread_mutex_unlock(&vhost_user.mutex);
+			rte_thread_mutex_unlock(&vhost_user.mutex);
 			return -1;
 		}
 		vsocket->features |= features;
 	}
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
@@ -674,7 +674,7 @@  rte_vhost_driver_set_features(const char *path, uint64_t features)
 {
 	struct vhost_user_socket *vsocket;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket) {
 		vsocket->supported_features = features;
@@ -685,7 +685,7 @@  rte_vhost_driver_set_features(const char *path, uint64_t features)
 		 */
 		vsocket->use_builtin_virtio_net = false;
 	}
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
@@ -698,7 +698,7 @@  rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	struct rte_vdpa_device *vdpa_dev;
 	int ret = 0;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
 		VHOST_LOG_CONFIG(ERR,
@@ -724,7 +724,7 @@  rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	*features = vsocket->features & vdpa_features;
 
 unlock_exit:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 	return ret;
 }
 
@@ -734,11 +734,11 @@  rte_vhost_driver_set_protocol_features(const char *path,
 {
 	struct vhost_user_socket *vsocket;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->protocol_features = protocol_features;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 	return vsocket ? 0 : -1;
 }
 
@@ -751,7 +751,7 @@  rte_vhost_driver_get_protocol_features(const char *path,
 	struct rte_vdpa_device *vdpa_dev;
 	int ret = 0;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
 		VHOST_LOG_CONFIG(ERR,
@@ -779,7 +779,7 @@  rte_vhost_driver_get_protocol_features(const char *path,
 		& vdpa_protocol_features;
 
 unlock_exit:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 	return ret;
 }
 
@@ -791,7 +791,7 @@  rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	struct rte_vdpa_device *vdpa_dev;
 	int ret = 0;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
 		VHOST_LOG_CONFIG(ERR,
@@ -817,7 +817,7 @@  rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	*queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num);
 
 unlock_exit:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 	return ret;
 }
 
@@ -849,7 +849,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 	if (!path)
 		return -1;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 
 	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
 		VHOST_LOG_CONFIG(ERR,
@@ -869,7 +869,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 		goto out;
 	}
 	TAILQ_INIT(&vsocket->conn_list);
-	ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
+	ret = rte_thread_mutex_init(&vsocket->conn_mutex);
 	if (ret) {
 		VHOST_LOG_CONFIG(ERR,
 			"error: failed to init connection mutex\n");
@@ -951,7 +951,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 
 	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
 		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
-		if (vsocket->reconnect && reconn_tid == 0) {
+		if (vsocket->reconnect && reconn_tid.opaque_id == 0) {
 			if (vhost_user_reconnect_init() != 0)
 				goto out_mutex;
 		}
@@ -965,18 +965,18 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 
 	vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
 
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 	return ret;
 
 out_mutex:
-	if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
+	if (rte_thread_mutex_destroy(&vsocket->conn_mutex)) {
 		VHOST_LOG_CONFIG(ERR,
 			"error: failed to destroy connection mutex\n");
 	}
 out_free:
 	vhost_user_socket_mem_free(vsocket);
 out:
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return ret;
 }
@@ -987,7 +987,7 @@  vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
 	int found = false;
 	struct vhost_user_reconnect *reconn, *next;
 
-	pthread_mutex_lock(&reconn_list.mutex);
+	rte_thread_mutex_lock(&reconn_list.mutex);
 
 	for (reconn = TAILQ_FIRST(&reconn_list.head);
 	     reconn != NULL; reconn = next) {
@@ -1001,7 +1001,7 @@  vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
 			break;
 		}
 	}
-	pthread_mutex_unlock(&reconn_list.mutex);
+	rte_thread_mutex_unlock(&reconn_list.mutex);
 	return found;
 }
 
@@ -1019,13 +1019,13 @@  rte_vhost_driver_unregister(const char *path)
 		return -1;
 
 again:
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 
 	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
 		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
 
 		if (!strcmp(vsocket->path, path)) {
-			pthread_mutex_lock(&vsocket->conn_mutex);
+			rte_thread_mutex_lock(&vsocket->conn_mutex);
 			for (conn = TAILQ_FIRST(&vsocket->conn_list);
 			     conn != NULL;
 			     conn = next) {
@@ -1039,9 +1039,10 @@  rte_vhost_driver_unregister(const char *path)
 				 */
 				if (fdset_try_del(&vhost_user.fdset,
 						  conn->connfd) == -1) {
-					pthread_mutex_unlock(
+					rte_thread_mutex_unlock(
 							&vsocket->conn_mutex);
-					pthread_mutex_unlock(&vhost_user.mutex);
+					rte_thread_mutex_unlock(
+							&vhost_user.mutex);
 					goto again;
 				}
 
@@ -1053,7 +1054,7 @@  rte_vhost_driver_unregister(const char *path)
 				TAILQ_REMOVE(&vsocket->conn_list, conn, next);
 				free(conn);
 			}
-			pthread_mutex_unlock(&vsocket->conn_mutex);
+			rte_thread_mutex_unlock(&vsocket->conn_mutex);
 
 			if (vsocket->is_server) {
 				/*
@@ -1063,7 +1064,8 @@  rte_vhost_driver_unregister(const char *path)
 				 */
 				if (fdset_try_del(&vhost_user.fdset,
 						vsocket->socket_fd) == -1) {
-					pthread_mutex_unlock(&vhost_user.mutex);
+					rte_thread_mutex_unlock(
+							&vhost_user.mutex);
 					goto again;
 				}
 
@@ -1073,18 +1075,18 @@  rte_vhost_driver_unregister(const char *path)
 				vhost_user_remove_reconnect(vsocket);
 			}
 
-			pthread_mutex_destroy(&vsocket->conn_mutex);
+			rte_thread_mutex_destroy(&vsocket->conn_mutex);
 			vhost_user_socket_mem_free(vsocket);
 
 			count = --vhost_user.vsocket_cnt;
 			vhost_user.vsockets[i] = vhost_user.vsockets[count];
 			vhost_user.vsockets[count] = NULL;
-			pthread_mutex_unlock(&vhost_user.mutex);
+			rte_thread_mutex_unlock(&vhost_user.mutex);
 
 			return 0;
 		}
 	}
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return -1;
 }
@@ -1098,11 +1100,11 @@  rte_vhost_driver_callback_register(const char *path,
 {
 	struct vhost_user_socket *vsocket;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
 		vsocket->notify_ops = ops;
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
@@ -1112,9 +1114,9 @@  vhost_driver_callback_get(const char *path)
 {
 	struct vhost_user_socket *vsocket;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? vsocket->notify_ops : NULL;
 }
@@ -1123,16 +1125,16 @@  int
 rte_vhost_driver_start(const char *path)
 {
 	struct vhost_user_socket *vsocket;
-	static pthread_t fdset_tid;
+	static rte_thread_t fdset_tid;
 
-	pthread_mutex_lock(&vhost_user.mutex);
+	rte_thread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
-	pthread_mutex_unlock(&vhost_user.mutex);
+	rte_thread_mutex_unlock(&vhost_user.mutex);
 
 	if (!vsocket)
 		return -1;
 
-	if (fdset_tid == 0) {
+	if (fdset_tid.opaque_id == 0) {
 		/**
 		 * create a pipe which will be waited by poll and notified to
 		 * rebuild the wait list of poll.
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index c96f6335c8..ee7470bf8b 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -26,7 +26,7 @@ 
 #include "vhost_user.h"
 
 struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
-pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
+rte_thread_mutex_t vhost_dev_lock = RTE_THREAD_MUTEX_INITIALIZER;
 
 /* Called with iotlb_lock read-locked */
 uint64_t
@@ -657,7 +657,7 @@  vhost_new_device(void)
 	struct virtio_net *dev;
 	int i;
 
-	pthread_mutex_lock(&vhost_dev_lock);
+	rte_thread_mutex_lock(&vhost_dev_lock);
 	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
 		if (vhost_devices[i] == NULL)
 			break;
@@ -666,7 +666,7 @@  vhost_new_device(void)
 	if (i == MAX_VHOST_DEVICE) {
 		VHOST_LOG_CONFIG(ERR,
 			"Failed to find a free slot for new device.\n");
-		pthread_mutex_unlock(&vhost_dev_lock);
+		rte_thread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
 
@@ -674,12 +674,12 @@  vhost_new_device(void)
 	if (dev == NULL) {
 		VHOST_LOG_CONFIG(ERR,
 			"Failed to allocate memory for new dev.\n");
-		pthread_mutex_unlock(&vhost_dev_lock);
+		rte_thread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
 
 	vhost_devices[i] = dev;
-	pthread_mutex_unlock(&vhost_dev_lock);
+	rte_thread_mutex_unlock(&vhost_dev_lock);
 
 	dev->vid = i;
 	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
diff --git a/meson_options.txt b/meson_options.txt
index 56bdfd0f0a..46d156cf2f 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -42,5 +42,7 @@  option('enable_trace_fp', type: 'boolean', value: false, description:
        'enable fast path trace points.')
 option('tests', type: 'boolean', value: true, description:
        'build unit tests')
+option('use_external_thread_lib', type: 'boolean', value: false, description:
+       'use an external thread library')
 option('use_hpet', type: 'boolean', value: false, description:
        'use HPET timer in EAL')
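
For reference, below is a minimal sketch of the call sequence the tree is moved to by this patch, mirroring what rte_eal_init() and the vhost/ethdev code do after the rename. It is illustrative only and assumes the signatures introduced earlier in this series (a pthread-style void *(*)(void *) start routine, an rte_thread_attr_t carrying priority and affinity, RTE_THREAD_MUTEX_INITIALIZER for static mutexes). The worker/counter names and the CPU 0 pinning are made up for the example, and CPU_ZERO/CPU_SET are assumed to be usable on rte_cpuset_t via the EAL OS headers.

#include <stdio.h>

#include <rte_os.h>      /* rte_cpuset_t and the CPU_* macros (assumed) */
#include <rte_thread.h>  /* rte_thread_* API added by this series */

/* Shared state protected by the new mutex type. */
static rte_thread_mutex_t lock = RTE_THREAD_MUTEX_INITIALIZER;
static int counter;

/* Start routine, same shape as eal_thread_loop() in this revision. */
static void *
worker(void *arg)
{
	(void)arg;
	rte_thread_mutex_lock(&lock);
	counter++;
	rte_thread_mutex_unlock(&lock);
	return NULL;
}

int
main(void)
{
	rte_thread_t tid;
	rte_thread_attr_t attr;
	rte_cpuset_t cpuset;

	if (rte_thread_attr_init(&attr) != 0)
		return 1;
	/* Priority goes into the attribute, as rte_eal_init() now does. */
	if (rte_thread_attr_set_priority(&attr, RTE_THREAD_PRIORITY_NORMAL) != 0)
		return 1;
	/* Affinity also travels in the attribute (hypothetical: pin to CPU 0). */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	if (rte_thread_attr_set_affinity(&attr, &cpuset) != 0)
		return 1;

	if (rte_thread_create(&tid, &attr, worker, NULL) != 0)
		return 1;
	rte_thread_join(tid, NULL);

	printf("counter = %d\n", counter);
	return 0;
}

Because the attribute already carries the cpuset, the separate per-thread pthread_setaffinity_np() call that used to follow pthread_create() is no longer needed, which is why those hunks are removed from lib/eal/*/eal.c above.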