@@ -26,7 +26,7 @@
#ifdef RTE_LIB_PDUMP
#ifdef RTE_NET_RING
-#include <pthread.h>
+#include <rte_thread.h>
extern void *send_pkts(void *empty);
extern uint16_t flag_for_send_pkts;
#endif
@@ -47,7 +47,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
char path[32];
#ifdef RTE_LIB_PDUMP
#ifdef RTE_NET_RING
- pthread_t thread;
+ rte_thread_t thread;
int rc;
#endif
#endif
@@ -134,7 +134,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
#ifdef RTE_LIB_PDUMP
#ifdef RTE_NET_RING
if ((strcmp(env_value, "run_pdump_server_tests") == 0)) {
- rc = pthread_create(&thread, NULL, &send_pkts, NULL);
+ rc = rte_thread_create(&thread, NULL, &send_pkts, NULL);
if (rc != 0) {
rte_panic("Cannot start send pkts thread: %s\n",
strerror(rc));
@@ -149,7 +149,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
#ifdef RTE_NET_RING
if ((strcmp(env_value, "run_pdump_server_tests") == 0)) {
flag_for_send_pkts = 0;
- pthread_join(thread, NULL);
+ rte_thread_join(thread, NULL);
}
#endif
#endif
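
For reference, the create/join mapping used throughout these test hunks, as a minimal sketch assuming the rte_thread API introduced by this series (rte_thread_create() keeps the pthread-style signature with an attribute pointer, and rte_thread_join() takes the id by value):

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_thread.h>

static void *worker(void *arg)
{
	RTE_SET_USED(arg);
	return NULL;
}

static void spawn_and_wait(void)
{
	rte_thread_t id;

	/* NULL attributes, then the start routine and its argument */
	if (rte_thread_create(&id, NULL, worker, NULL) != 0)
		rte_panic("cannot create worker thread\n");
	rte_thread_join(id, NULL);
}
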
@@ -2,7 +2,7 @@
* Copyright (c) 2020 Red Hat, Inc.
*/
-#include <pthread.h>
+#include <rte_thread.h>
#include <string.h>
#include <rte_common.h>
@@ -14,7 +14,7 @@
struct thread_context {
enum { INIT, ERROR, DONE } state;
bool lcore_id_any;
- pthread_t id;
+ rte_thread_t id;
unsigned int *registered_count;
};
@@ -77,7 +77,7 @@ test_non_eal_lcores(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = false;
- if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
break;
non_eal_threads_count++;
}
@@ -96,7 +96,7 @@ test_non_eal_lcores(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = true;
- if (pthread_create(&t->id, NULL, thread_loop, t) == 0) {
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -110,7 +110,7 @@ test_non_eal_lcores(unsigned int eal_threads_count)
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
- pthread_join(t->id, NULL);
+ rte_thread_join(t->id, NULL);
if (t->state != DONE)
ret = -1;
}
@@ -262,7 +262,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = false;
- if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -285,7 +285,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = true;
- if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -309,7 +309,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
- pthread_join(t->id, NULL);
+ rte_thread_join(t->id, NULL);
if (t->state != DONE)
ret = -1;
}
@@ -330,7 +330,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
- pthread_join(t->id, NULL);
+ rte_thread_join(t->id, NULL);
}
error:
if (handle[1] != NULL)
@@ -3,6 +3,7 @@
*/
#include "unistd.h"
+#include <pthread.h>
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
@@ -203,7 +204,7 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
static int slaves_initialized;
static int mac_slaves_initialized;
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(mutex)
static pthread_cond_t cvar = PTHREAD_COND_INITIALIZER;
@@ -1191,11 +1192,11 @@ test_bonding_lsc_event_callback(uint16_t port_id __rte_unused,
void *param __rte_unused,
void *ret_param __rte_unused)
{
- pthread_mutex_lock(&mutex);
+ rte_thread_mutex_lock(&mutex);
test_lsc_interrupt_count++;
pthread_cond_signal(&cvar);
- pthread_mutex_unlock(&mutex);
+ rte_thread_mutex_unlock(&mutex);
return 0;
}
@@ -1220,11 +1221,12 @@ lsc_timeout(int wait_us)
ts.tv_sec += 1;
}
- pthread_mutex_lock(&mutex);
+ rte_thread_mutex_lock(&mutex);
if (test_lsc_interrupt_count < 1)
- retval = pthread_cond_timedwait(&cvar, &mutex, &ts);
+ retval = pthread_cond_timedwait(&cvar,
+ (pthread_mutex_t *)mutex.mutex_id, &ts);
- pthread_mutex_unlock(&mutex);
+ rte_thread_mutex_unlock(&mutex);
if (retval == 0 && test_lsc_interrupt_count < 1)
return -1;
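
The bonding test keeps its pthread condition variable, so pthread_cond_timedwait() has to reach the pthread_mutex_t hidden behind the wrapper. A minimal sketch of that pattern, assuming the series' rte_thread_mutex stores the underlying lock behind a void *mutex_id member and that RTE_STATIC_MUTEX() declares a ready-to-use static mutex:

#include <pthread.h>
#include <rte_thread.h>

RTE_STATIC_MUTEX(guard)
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static int wait_ready(const struct timespec *deadline)
{
	int rc = 0;

	rte_thread_mutex_lock(&guard);
	while (rc == 0 && !ready)
		rc = pthread_cond_timedwait(&cond,
				(pthread_mutex_t *)guard.mutex_id, deadline);
	rte_thread_mutex_unlock(&guard);
	return ready ? 0 : rc;
}
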
@@ -25,7 +25,7 @@ static volatile uint32_t thr_id;
static uint64_t gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use mutex to provide thread safety */
-static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(lpm_mutex)
/* Report quiescent state interval every 1024 lookups. Larger critical
* sections in reader will result in writer polling multiple times.
@@ -443,7 +443,7 @@ test_lpm_rcu_qsbr_writer(void *arg)
/* Add all the entries */
for (j = si; j < ei; j++) {
if (num_writers > 1)
- pthread_mutex_lock(&lpm_mutex);
+ rte_thread_mutex_lock(&lpm_mutex);
if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
large_ldepth_route_table[j].depth,
next_hop_add) != 0) {
@@ -452,13 +452,13 @@ test_lpm_rcu_qsbr_writer(void *arg)
goto error;
}
if (num_writers > 1)
- pthread_mutex_unlock(&lpm_mutex);
+ rte_thread_mutex_unlock(&lpm_mutex);
}
/* Delete all the entries */
for (j = si; j < ei; j++) {
if (num_writers > 1)
- pthread_mutex_lock(&lpm_mutex);
+ rte_thread_mutex_lock(&lpm_mutex);
if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
large_ldepth_route_table[j].depth) != 0) {
printf("Failed to delete iteration %d, route# %d\n",
@@ -466,7 +466,7 @@ test_lpm_rcu_qsbr_writer(void *arg)
goto error;
}
if (num_writers > 1)
- pthread_mutex_unlock(&lpm_mutex);
+ rte_thread_mutex_unlock(&lpm_mutex);
}
}
@@ -478,7 +478,7 @@ test_lpm_rcu_qsbr_writer(void *arg)
error:
if (num_writers > 1)
- pthread_mutex_unlock(&lpm_mutex);
+ rte_thread_mutex_unlock(&lpm_mutex);
return -1;
}
@@ -38,11 +38,10 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
struct dpaa_ioctl_irq_map irq_map;
/* Verify the thread's cpu-affinity */
- ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
- &cpuset);
+	ret = rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset);
if (ret) {
errno = ret;
- err(0, "pthread_getaffinity_np()");
+ err(0, "rte_thread_set_affinity_by_id()");
return ret;
}
pcfg.cpu = -1;
@@ -18,16 +18,16 @@ struct process_interrupt {
};
static COMPAT_LIST_HEAD(process_irq_list);
-static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(process_irq_lock)
static void process_interrupt_install(struct process_interrupt *irq)
{
int ret;
/* Add the irq to the end of the list */
- ret = pthread_mutex_lock(&process_irq_lock);
+ ret = rte_thread_mutex_lock(&process_irq_lock);
assert(!ret);
list_add_tail(&irq->node, &process_irq_list);
- ret = pthread_mutex_unlock(&process_irq_lock);
+ ret = rte_thread_mutex_unlock(&process_irq_lock);
assert(!ret);
}
@@ -35,10 +35,10 @@ static void process_interrupt_remove(struct process_interrupt *irq)
{
int ret;
- ret = pthread_mutex_lock(&process_irq_lock);
+ ret = rte_thread_mutex_lock(&process_irq_lock);
assert(!ret);
list_del(&irq->node);
- ret = pthread_mutex_unlock(&process_irq_lock);
+ ret = rte_thread_mutex_unlock(&process_irq_lock);
assert(!ret);
}
@@ -47,14 +47,14 @@ static struct process_interrupt *process_interrupt_find(int irq_num)
int ret;
struct process_interrupt *i = NULL;
- ret = pthread_mutex_lock(&process_irq_lock);
+ ret = rte_thread_mutex_lock(&process_irq_lock);
assert(!ret);
list_for_each_entry(i, &process_irq_list, node) {
if (i->irq == irq_num)
goto done;
}
done:
- ret = pthread_mutex_unlock(&process_irq_lock);
+ ret = rte_thread_mutex_unlock(&process_irq_lock);
assert(!ret);
return i;
}
@@ -21,7 +21,7 @@
* what the lock is for.
*/
static int fd = -1;
-static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(fd_init_lock)
static int check_fd(void)
{
@@ -29,12 +29,12 @@ static int check_fd(void)
if (fd >= 0)
return 0;
- ret = pthread_mutex_lock(&fd_init_lock);
+ ret = rte_thread_mutex_lock(&fd_init_lock);
assert(!ret);
/* check again with the lock held */
if (fd < 0)
fd = open(PROCESS_PATH, O_RDWR);
- ret = pthread_mutex_unlock(&fd_init_lock);
+ ret = rte_thread_mutex_unlock(&fd_init_lock);
assert(!ret);
return (fd >= 0) ? 0 : -ENODEV;
}
@@ -10,7 +10,7 @@
#include <limits.h>
#include <sched.h>
#include <signal.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <sys/types.h>
#include <sys/eventfd.h>
@@ -47,7 +47,7 @@ static struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;
/* define a variable to hold the portal_key, once created.*/
-static pthread_key_t dpaa_portal_key;
+static rte_thread_key dpaa_portal_key;
unsigned int dpaa_svr_family;
@@ -315,10 +315,10 @@ int rte_dpaa_portal_init(void *arg)
DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
DPAA_PER_LCORE_PORTAL->tid = rte_gettid();
- ret = pthread_setspecific(dpaa_portal_key,
+ ret = rte_thread_value_set(dpaa_portal_key,
(void *)DPAA_PER_LCORE_PORTAL);
if (ret) {
- DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
+ DPAA_BUS_LOG(ERR, "rte_thread_value_set failed on core %u"
" (lcore=%u) with ret: %d", cpu, lcore, ret);
dpaa_portal_finish(NULL);
@@ -376,7 +376,7 @@ dpaa_portal_finish(void *arg)
bman_thread_finish();
qman_thread_finish();
- pthread_setspecific(dpaa_portal_key, NULL);
+ rte_thread_value_set(dpaa_portal_key, NULL);
rte_free(dpaa_io_portal);
dpaa_io_portal = NULL;
@@ -452,9 +452,9 @@ rte_dpaa_bus_scan(void)
/* create the key, supplying a function that'll be invoked
* when a portal affined thread will be deleted.
*/
- ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
+ ret = rte_thread_key_create(&dpaa_portal_key, dpaa_portal_finish);
if (ret) {
- DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
+ DPAA_BUS_LOG(DEBUG, "Unable to create thread key. (%d)", ret);
dpaa_clean_device_list();
return ret;
}
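
The portal code above is the thread-local-storage half of the conversion. A short sketch of the same pattern with the rte_thread TLS API (key creation with a destructor that runs when a portal-affine thread exits, plus per-thread set/get); the portal_* names are hypothetical:

#include <rte_thread.h>

static rte_thread_key portal_key;	/* one key per bus */

static void portal_destroy(void *portal)
{
	/* runs at thread exit for any thread with a bound portal */
	(void)portal;
}

static int portal_tls_init(void)
{
	return rte_thread_key_create(&portal_key, portal_destroy);
}

static int portal_bind(void *portal)
{
	return rte_thread_value_set(portal_key, portal);
}

static void *portal_lookup(void)
{
	return rte_thread_value_get(portal_key);
}
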
@@ -13,7 +13,7 @@
#include <stdarg.h>
#include <inttypes.h>
#include <signal.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
@@ -59,7 +59,7 @@ uint8_t dpaa2_dqrr_size;
uint8_t dpaa2_eqcr_size;
/* Variable to hold the portal_key, once created.*/
-static pthread_key_t dpaa2_portal_key;
+static rte_thread_key dpaa2_portal_key;
/*Stashing Macros default for LS208x*/
static int dpaa2_core_cluster_base = 0x04;
@@ -92,10 +92,9 @@ dpaa2_get_core_id(void)
rte_cpuset_t cpuset;
int i, ret, cpu_id = -1;
- ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
- &cpuset);
+ ret = rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset);
if (ret) {
- DPAA2_BUS_ERR("pthread_getaffinity_np() failed");
+ DPAA2_BUS_ERR("rte_thread_get_affinity_by_id() failed");
return ret;
}
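
A sketch of the affinity query as converted above, assuming the by-id accessors from this series: rte_thread_get_affinity_by_id() fills an rte_cpuset_t for the given thread and drops the explicit size argument that pthread_getaffinity_np() requires. current_cpu() is a hypothetical helper:

#include <rte_lcore.h>
#include <rte_thread.h>

static int current_cpu(void)
{
	rte_cpuset_t set;
	unsigned int i;

	if (rte_thread_get_affinity_by_id(rte_thread_self(), &set) != 0)
		return -1;
	for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, &set))
			return (int)i;	/* first CPU in the mask */
	return -1;
}
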
@@ -295,9 +294,9 @@ static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
}
}
- ret = pthread_setspecific(dpaa2_portal_key, (void *)dpio_dev);
+ ret = rte_thread_value_set(dpaa2_portal_key, (void *)dpio_dev);
if (ret) {
- DPAA2_BUS_ERR("pthread_setspecific failed with ret: %d", ret);
+ DPAA2_BUS_ERR("rte_thread_value_set failed with ret: %d", ret);
dpaa2_put_qbman_swp(dpio_dev);
return NULL;
}
@@ -356,7 +355,7 @@ static void dpaa2_portal_finish(void *arg)
dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).dpio_dev);
dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev);
- pthread_setspecific(dpaa2_portal_key, NULL);
+ rte_thread_value_set(dpaa2_portal_key, NULL);
}
static int
@@ -514,10 +513,10 @@ dpaa2_create_dpio_device(int vdev_fd,
/* create the key, supplying a function that'll be invoked
* when a portal affined thread will be deleted.
*/
- ret = pthread_key_create(&dpaa2_portal_key,
+ ret = rte_thread_key_create(&dpaa2_portal_key,
dpaa2_portal_finish);
if (ret) {
- DPAA2_BUS_DEBUG("Unable to create pthread key (%d)",
+ DPAA2_BUS_DEBUG("Unable to create thread key (%d)",
ret);
goto err;
}
@@ -16,7 +16,7 @@
#include <stdio.h>
#include <errno.h>
#include <string.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <linux/types.h>
#include <stdbool.h>
#include <ctype.h>
@@ -8,6 +8,7 @@
#include <stdio.h>
#include <sys/types.h>
+#include <rte_windows.h>
#include <rte_errno.h>
#include "mlx5_autoconf.h"
@@ -70,7 +70,7 @@ struct mlx5_compress_qp {
TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
-static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(priv_list_lock)
int mlx5_compress_logtype;
@@ -771,13 +771,13 @@ mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
switch (event_type) {
case RTE_MEM_EVENT_FREE:
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
/* Iterate all the existing mlx5 devices. */
TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
mlx5_free_mr_by_addr(&priv->mr_scache,
priv->ctx->device->name,
addr, len);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
break;
case RTE_MEM_EVENT_ALLOC:
default:
@@ -860,9 +860,9 @@ mlx5_compress_dev_probe(struct rte_device *dev)
rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
mlx5_compress_mr_mem_event_cb,
NULL);
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
return 0;
}
@@ -871,13 +871,13 @@ mlx5_compress_dev_remove(struct rte_device *dev)
{
struct mlx5_compress_priv *priv = NULL;
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
if (priv->cdev->device == dev)
break;
if (priv)
TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
if (priv) {
if (TAILQ_EMPTY(&mlx5_compress_priv_list))
rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
@@ -5,7 +5,7 @@
#include <assert.h>
#include <errno.h>
#include <nmmintrin.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
@@ -8,7 +8,7 @@
#include <string.h>
#include <time.h>
#include <unistd.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
@@ -194,14 +194,13 @@ static void *dlb2_complete_queue_map_unmap(void *__args)
static inline void os_schedule_work(struct dlb2_hw *hw)
{
struct dlb2_dev *dlb2_dev;
- pthread_t complete_queue_map_unmap_thread;
+ rte_thread_t complete_queue_map_unmap_thread;
int ret;
dlb2_dev = container_of(hw, struct dlb2_dev, hw);
- ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
+ ret = rte_thread_ctrl_thread_create(&complete_queue_map_unmap_thread,
"dlb_queue_unmap_waiter",
- NULL,
dlb2_complete_queue_map_unmap,
dlb2_dev);
if (ret)
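
Relative to rte_ctrl_thread_create(), the rte_thread_ctrl_thread_create() variant used above drops the attribute argument. A minimal sketch of a caller (waiter and start_waiter are hypothetical names):

#include <rte_thread.h>

static void *waiter(void *arg)
{
	(void)arg;
	return NULL;
}

static int start_waiter(void *ctx)
{
	rte_thread_t tid;

	/* id, name, start routine, argument; no attribute parameter */
	return rte_thread_ctrl_thread_create(&tid, "ctrl-waiter",
					     waiter, ctx);
}
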
@@ -11,7 +11,7 @@
#include <limits.h>
#include <sched.h>
#include <signal.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <sys/types.h>
#include <sys/syscall.h>
@@ -179,7 +179,7 @@ TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
TAILQ_HEAD_INITIALIZER(internal_list);
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(internal_list_lock)
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
@@ -677,7 +677,7 @@ find_internal_resource(struct pmd_internals *port_int)
if (port_int == NULL)
return NULL;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
struct pmd_internals *list_int =
@@ -688,7 +688,7 @@ find_internal_resource(struct pmd_internals *port_int)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -726,7 +726,7 @@ get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
if (mb_pool == NULL)
return ret;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
internals = list->eth_dev->data->dev_private;
@@ -752,7 +752,7 @@ get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
}
out:
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
return ret;
}
@@ -781,9 +781,9 @@ eth_dev_configure(struct rte_eth_dev *dev)
return -1;
list->eth_dev = dev;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_INSERT_TAIL(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
}
return 0;
@@ -981,9 +981,9 @@ eth_dev_close(struct rte_eth_dev *dev)
/* Remove ethdev from list used to track and share UMEMs */
list = find_internal_resource(internals);
if (list) {
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(list);
}
}
@@ -563,12 +563,12 @@ eth_ark_dev_start(struct rte_eth_dev *dev)
ark_pktchkr_run(ark->pc);
if (ark->start_pg && (dev->data->port_id == 0)) {
- pthread_t thread;
+ rte_thread_t thread;
/* Delay packet generator start to allow the hardware to be ready
* This is only used for sanity checking with internal generator
*/
- if (rte_ctrl_thread_create(&thread, "ark-delay-pg", NULL,
+ if (rte_thread_ctrl_thread_create(&thread, "ark-delay-pg",
ark_pktgen_delay_start, ark->pg)) {
ARK_PMD_LOG(ERR, "Could not create pktgen "
"starter thread\n");
@@ -3,7 +3,7 @@
*/
#include <unistd.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
@@ -475,7 +475,7 @@ ark_pktgen_delay_start(void *arg)
* perform a blind sleep here to ensure that the external test
* application has time to setup the test before we generate packets
*/
- pthread_detach(pthread_self());
+ rte_thread_detach(rte_thread_self());
usleep(100000);
ark_pktgen_run(inst);
return NULL;
@@ -405,7 +405,7 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
hw->aq_nic_cfg = &adapter->hw_cfg;
- pthread_mutex_init(&hw->mbox_mutex, NULL);
+ rte_thread_mutex_init(&hw->mbox_mutex);
/* disable interrupt */
atl_disable_intr(hw);
@@ -712,7 +712,7 @@ atl_dev_close(struct rte_eth_dev *dev)
rte_intr_callback_unregister(intr_handle,
atl_dev_interrupt_handler, dev);
- pthread_mutex_destroy(&hw->mbox_mutex);
+ rte_thread_mutex_destroy(&hw->mbox_mutex);
return ret;
}
@@ -10,7 +10,7 @@
#include <string.h>
#include <stdbool.h>
#include <netinet/in.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <rte_common.h>
@@ -141,7 +141,7 @@ struct aq_hw_s {
u32 rpc_tid;
struct hw_aq_atl_utils_fw_rpc rpc;
- pthread_mutex_t mbox_mutex;
+ rte_thread_mutex mbox_mutex;
};
struct aq_fw_ops {
@@ -6,7 +6,7 @@
*/
#include <rte_ether.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include "../atl_hw_regs.h"
#include "../atl_types.h"
@@ -218,7 +218,7 @@ int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
u32 mac_addr[2] = { 0 };
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -257,7 +257,7 @@ int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
}
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -269,7 +269,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Toggle statistics bit for FW to update */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
@@ -286,7 +286,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
err = hw_atl_utils_update_stats(self);
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
@@ -299,7 +299,7 @@ static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp)
u32 temp_val = mpi_opts & BIT(CAPS_HI_TEMPERATURE);
u32 temp_res;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Toggle statistics bit for FW to 0x36C.18 (CAPS_HI_TEMPERATURE) */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_TEMPERATURE);
@@ -317,7 +317,7 @@ static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp)
sizeof(temp_res) / sizeof(u32));
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
if (err)
return err;
@@ -536,7 +536,7 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
if ((self->caps_lo & BIT(CAPS_LO_SMBUS_READ)) == 0)
return -EOPNOTSUPP;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
request.msg_id = 0;
request.device_id = dev_addr;
@@ -605,7 +605,7 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
}
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -626,7 +626,7 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
request.address = offset;
request.length = len;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Write SMBUS request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
@@ -694,7 +694,7 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
}
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -712,7 +712,7 @@ static int aq_fw2x_send_macsec_request(struct aq_hw_s *self,
if ((self->caps_lo & BIT(CAPS_LO_MACSEC)) == 0)
return -EOPNOTSUPP;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Write macsec request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
@@ -742,7 +742,7 @@ static int aq_fw2x_send_macsec_request(struct aq_hw_s *self,
RTE_ALIGN(sizeof(*response) / sizeof(u32), sizeof(u32)));
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -19,7 +19,7 @@
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <rte_bitops.h>
#include <rte_byteorder.h>
@@ -167,12 +167,12 @@ static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
index = mmd_address & ~pdata->xpcs_window_mask;
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
- pthread_mutex_lock(&pdata->xpcs_mutex);
+ rte_thread_mutex_lock(&pdata->xpcs_mutex);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
mmd_data = XPCS16_IOREAD(pdata, offset);
- pthread_mutex_unlock(&pdata->xpcs_mutex);
+ rte_thread_mutex_unlock(&pdata->xpcs_mutex);
return mmd_data;
}
@@ -201,12 +201,12 @@ static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
index = mmd_address & ~pdata->xpcs_window_mask;
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
- pthread_mutex_lock(&pdata->xpcs_mutex);
+ rte_thread_mutex_lock(&pdata->xpcs_mutex);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
XPCS16_IOWRITE(pdata, offset, mmd_data);
- pthread_mutex_unlock(&pdata->xpcs_mutex);
+ rte_thread_mutex_unlock(&pdata->xpcs_mutex);
}
static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
@@ -2311,10 +2311,10 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
- pthread_mutex_init(&pdata->xpcs_mutex, NULL);
- pthread_mutex_init(&pdata->i2c_mutex, NULL);
- pthread_mutex_init(&pdata->an_mutex, NULL);
- pthread_mutex_init(&pdata->phy_mutex, NULL);
+ rte_thread_mutex_init(&pdata->xpcs_mutex);
+ rte_thread_mutex_init(&pdata->i2c_mutex);
+ rte_thread_mutex_init(&pdata->an_mutex);
+ rte_thread_mutex_init(&pdata->phy_mutex);
ret = pdata->phy_if.phy_init(pdata);
if (ret) {
@@ -602,10 +602,10 @@ struct axgbe_port {
int phy_link;
int phy_speed;
- pthread_mutex_t xpcs_mutex;
- pthread_mutex_t i2c_mutex;
- pthread_mutex_t an_mutex;
- pthread_mutex_t phy_mutex;
+ rte_thread_mutex xpcs_mutex;
+ rte_thread_mutex i2c_mutex;
+ rte_thread_mutex an_mutex;
+ rte_thread_mutex phy_mutex;
/* Flow control settings */
unsigned int pause_autoneg;
@@ -229,7 +229,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
int ret;
uint64_t timeout;
- pthread_mutex_lock(&pdata->i2c_mutex);
+ rte_thread_mutex_lock(&pdata->i2c_mutex);
ret = axgbe_i2c_disable(pdata);
if (ret) {
PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
@@ -282,7 +282,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
}
unlock:
- pthread_mutex_unlock(&pdata->i2c_mutex);
+ rte_thread_mutex_unlock(&pdata->i2c_mutex);
return ret;
}
@@ -686,9 +686,9 @@ static void axgbe_an73_isr(struct axgbe_port *pdata)
if (pdata->an_int) {
/* Clear the interrupt(s) that fired and process them */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
- pthread_mutex_lock(&pdata->an_mutex);
+ rte_thread_mutex_lock(&pdata->an_mutex);
axgbe_an73_state_machine(pdata);
- pthread_mutex_unlock(&pdata->an_mutex);
+ rte_thread_mutex_unlock(&pdata->an_mutex);
} else {
/* Enable AN interrupts */
axgbe_an73_enable_interrupts(pdata);
@@ -977,7 +977,7 @@ static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
{
int ret;
- pthread_mutex_lock(&pdata->an_mutex);
+ rte_thread_mutex_lock(&pdata->an_mutex);
ret = __axgbe_phy_config_aneg(pdata);
if (ret)
@@ -985,7 +985,7 @@ static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
else
rte_bit_relaxed_clear32(AXGBE_LINK_ERR, &pdata->dev_state);
- pthread_mutex_unlock(&pdata->an_mutex);
+ rte_thread_mutex_unlock(&pdata->an_mutex);
return ret;
}
@@ -403,7 +403,7 @@ static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
phy_data->comm_owned = 0;
- pthread_mutex_unlock(&pdata->phy_mutex);
+ rte_thread_mutex_unlock(&pdata->phy_mutex);
}
static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
@@ -416,7 +416,7 @@ static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
* the driver needs to take the software mutex and then the hardware
* mutexes before being able to use the busses.
*/
- pthread_mutex_lock(&pdata->phy_mutex);
+ rte_thread_mutex_lock(&pdata->phy_mutex);
if (phy_data->comm_owned)
return 0;
@@ -447,7 +447,7 @@ static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
return 0;
}
- pthread_mutex_unlock(&pdata->phy_mutex);
+ rte_thread_mutex_unlock(&pdata->phy_mutex);
PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
@@ -540,8 +540,8 @@ struct bnxt_mark_info {
struct bnxt_rep_info {
struct rte_eth_dev *vfr_eth_dev;
- pthread_mutex_t vfr_lock;
- pthread_mutex_t vfr_start_lock;
+ rte_thread_mutex vfr_lock;
+ rte_thread_mutex vfr_start_lock;
bool conduit_valid;
};
@@ -739,7 +739,7 @@ struct bnxt {
#define BNXT_FW_CAP_TRUFLOW_EN BIT(8)
#define BNXT_TRUFLOW_EN(bp) ((bp)->fw_cap & BNXT_FW_CAP_TRUFLOW_EN)
- pthread_mutex_t flow_lock;
+ rte_thread_mutex flow_lock;
uint32_t vnic_cap_flags;
#define BNXT_VNIC_CAP_COS_CLASSIFY BIT(0)
@@ -793,18 +793,18 @@ struct bnxt {
rte_iova_t hwrm_short_cmd_req_dma_addr;
rte_spinlock_t hwrm_lock;
/* synchronize between dev_configure_op and int handler */
- pthread_mutex_t def_cp_lock;
+ rte_thread_mutex def_cp_lock;
/* synchronize between dev_start_op and async evt handler
* Locking sequence in async evt handler will be
* def_cp_lock
* health_check_lock
*/
- pthread_mutex_t health_check_lock;
+ rte_thread_mutex health_check_lock;
/* synchronize between dev_stop/dev_close_op and
* error recovery thread triggered as part of
* HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY
*/
- pthread_mutex_t err_recovery_lock;
+ rte_thread_mutex err_recovery_lock;
uint16_t max_req_len;
uint16_t max_resp_len;
uint16_t hwrm_max_ext_req_len;
@@ -1014,10 +1014,10 @@ uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
extern const struct rte_flow_ops bnxt_flow_ops;
#define bnxt_acquire_flow_lock(bp) \
- pthread_mutex_lock(&(bp)->flow_lock)
+ rte_thread_mutex_lock(&(bp)->flow_lock)
#define bnxt_release_flow_lock(bp) \
- pthread_mutex_unlock(&(bp)->flow_lock)
+ rte_thread_mutex_unlock(&(bp)->flow_lock)
#define BNXT_VALID_VNIC_OR_RET(bp, vnic_id) do { \
if ((vnic_id) >= (bp)->max_vnics) { \
@@ -156,7 +156,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
return;
}
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
event_data = data1;
/* timestamp_lo/hi values are in units of 100ms */
bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
@@ -178,7 +178,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
}
bp->flags |= BNXT_FLAG_FW_RESET;
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
(void *)bp);
break;
@@ -1087,7 +1087,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
* are calculated correctly.
*/
- pthread_mutex_lock(&bp->def_cp_lock);
+ rte_thread_mutex_lock(&bp->def_cp_lock);
if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
bnxt_disable_int(bp);
@@ -1097,20 +1097,20 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return -ENOSPC;
}
if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
rc = bnxt_alloc_async_cp_ring(bp);
if (rc) {
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return rc;
}
bnxt_enable_int(bp);
}
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
}
/* Inherit new configurations */
@@ -1532,14 +1532,14 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
if (bp->flags & BNXT_FLAG_FW_RESET) {
PMD_DRV_LOG(ERR,
"Adapter recovering from error..Please retry\n");
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return -EAGAIN;
}
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return bnxt_dev_stop(eth_dev);
}
@@ -1625,13 +1625,13 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
static void
bnxt_uninit_locks(struct bnxt *bp)
{
- pthread_mutex_destroy(&bp->flow_lock);
- pthread_mutex_destroy(&bp->def_cp_lock);
- pthread_mutex_destroy(&bp->health_check_lock);
- pthread_mutex_destroy(&bp->err_recovery_lock);
+ rte_thread_mutex_destroy(&bp->flow_lock);
+ rte_thread_mutex_destroy(&bp->def_cp_lock);
+ rte_thread_mutex_destroy(&bp->health_check_lock);
+ rte_thread_mutex_destroy(&bp->err_recovery_lock);
if (bp->rep_info) {
- pthread_mutex_destroy(&bp->rep_info->vfr_lock);
- pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
+ rte_thread_mutex_destroy(&bp->rep_info->vfr_lock);
+ rte_thread_mutex_destroy(&bp->rep_info->vfr_start_lock);
}
}
@@ -1663,14 +1663,14 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
if (bp->flags & BNXT_FLAG_FW_RESET) {
PMD_DRV_LOG(ERR,
"Adapter recovering from error...Please retry\n");
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return -EAGAIN;
}
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
/* cancel the recovery handler before remove dev */
rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
@@ -4312,7 +4312,7 @@ static void bnxt_dev_recover(void *arg)
struct bnxt *bp = arg;
int rc = 0;
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
if (!bp->fw_reset_min_msecs) {
rc = bnxt_check_fw_reset_done(bp);
@@ -4347,7 +4347,7 @@ static void bnxt_dev_recover(void *arg)
goto err_start;
PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return;
err_start:
@@ -4359,7 +4359,7 @@ static void bnxt_dev_recover(void *arg)
rte_eth_dev_callback_process(bp->eth_dev,
RTE_ETH_EVENT_INTR_RMV,
NULL);
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
}
@@ -4535,7 +4535,7 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp)
{
uint32_t polling_freq;
- pthread_mutex_lock(&bp->health_check_lock);
+ rte_thread_mutex_lock(&bp->health_check_lock);
if (!bnxt_is_recovery_enabled(bp))
goto done;
@@ -4550,7 +4550,7 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp)
bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
done:
- pthread_mutex_unlock(&bp->health_check_lock);
+ rte_thread_mutex_unlock(&bp->health_check_lock);
}
static void bnxt_cancel_fw_health_check(struct bnxt *bp)
@@ -5395,25 +5395,25 @@ bnxt_init_locks(struct bnxt *bp)
{
int err;
- err = pthread_mutex_init(&bp->flow_lock, NULL);
+ err = rte_thread_mutex_init(&bp->flow_lock);
if (err) {
PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
return err;
}
- err = pthread_mutex_init(&bp->def_cp_lock, NULL);
+ err = rte_thread_mutex_init(&bp->def_cp_lock);
if (err) {
PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
return err;
}
- err = pthread_mutex_init(&bp->health_check_lock, NULL);
+ err = rte_thread_mutex_init(&bp->health_check_lock);
if (err) {
PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
return err;
}
- err = pthread_mutex_init(&bp->err_recovery_lock, NULL);
+ err = rte_thread_mutex_init(&bp->err_recovery_lock);
if (err)
PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n");
@@ -6237,14 +6237,14 @@ static int bnxt_init_rep_info(struct bnxt *bp)
for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
- rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
+ rc = rte_thread_mutex_init(&bp->rep_info->vfr_lock);
if (rc) {
PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
bnxt_free_rep_info(bp);
return rc;
}
- rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
+ rc = rte_thread_mutex_init(&bp->rep_info->vfr_start_lock);
if (rc) {
PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
bnxt_free_rep_info(bp);
@@ -33,15 +33,15 @@ void bnxt_int_handler(void *param)
return;
raw_cons = cpr->cp_raw_cons;
- pthread_mutex_lock(&bp->def_cp_lock);
+ rte_thread_mutex_lock(&bp->def_cp_lock);
while (1) {
if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) {
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return;
}
if (is_bnxt_in_error(bp)) {
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return;
}
@@ -62,7 +62,7 @@ void bnxt_int_handler(void *param)
else
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
}
int bnxt_free_int(struct bnxt *bp)
@@ -120,7 +120,7 @@ bnxt_rep_tx_burst(void *tx_queue,
qid = vfr_txq->txq->queue_id;
vf_rep_bp = vfr_txq->bp;
parent = vf_rep_bp->parent_dev->data->dev_private;
- pthread_mutex_lock(&parent->rep_info->vfr_lock);
+ rte_thread_mutex_lock(&parent->rep_info->vfr_lock);
ptxq = parent->tx_queues[qid];
ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
@@ -132,7 +132,7 @@ bnxt_rep_tx_burst(void *tx_queue,
rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
ptxq->vfr_tx_cfa_action = 0;
- pthread_mutex_unlock(&parent->rep_info->vfr_lock);
+ rte_thread_mutex_unlock(&parent->rep_info->vfr_lock);
return rc;
}
@@ -407,15 +407,15 @@ int bnxt_rep_dev_start_op(struct rte_eth_dev *eth_dev)
rep_info = &parent_bp->rep_info[rep_bp->vf_id];
BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR start\n", eth_dev->data->port_id);
- pthread_mutex_lock(&rep_info->vfr_start_lock);
+ rte_thread_mutex_lock(&rep_info->vfr_start_lock);
if (!rep_info->conduit_valid) {
rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
if (rc || !rep_info->conduit_valid) {
- pthread_mutex_unlock(&rep_info->vfr_start_lock);
+ rte_thread_mutex_unlock(&rep_info->vfr_start_lock);
return rc;
}
}
- pthread_mutex_unlock(&rep_info->vfr_start_lock);
+ rte_thread_mutex_unlock(&rep_info->vfr_start_lock);
rc = bnxt_vfr_alloc(eth_dev);
if (rc) {
@@ -32,7 +32,7 @@ STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
/* Mutex to synchronize bnxt_ulp_session_list operations. */
-static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(bnxt_ulp_global_mutex)
/* Spin lock to protect context global list */
rte_spinlock_t bnxt_ulp_ctxt_lock;
@@ -975,7 +975,7 @@ ulp_ctx_detach(struct bnxt *bp)
static void
ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
{
- pthread_mutex_lock(&session->bnxt_ulp_mutex);
+ rte_thread_mutex_lock(&session->bnxt_ulp_mutex);
if (!session->bnxt_ulp_init) {
session->bnxt_ulp_init = true;
@@ -984,7 +984,7 @@ ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
*init = true;
}
- pthread_mutex_unlock(&session->bnxt_ulp_mutex);
+ rte_thread_mutex_unlock(&session->bnxt_ulp_mutex);
}
/*
@@ -1025,7 +1025,7 @@ ulp_session_init(struct bnxt *bp,
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
- pthread_mutex_lock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
session = ulp_get_session(pci_addr);
if (!session) {
@@ -1036,17 +1036,17 @@ ulp_session_init(struct bnxt *bp,
if (!session) {
BNXT_TF_DBG(ERR,
"Allocation failed for bnxt_ulp_session\n");
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
return NULL;
} else {
/* Add it to the queue */
session->pci_info.domain = pci_addr->domain;
session->pci_info.bus = pci_addr->bus;
- rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
+ rc = rte_thread_mutex_init(&session->bnxt_ulp_mutex);
if (rc) {
BNXT_TF_DBG(ERR, "mutex create failed\n");
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
return NULL;
}
STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
@@ -1054,7 +1054,7 @@ ulp_session_init(struct bnxt *bp,
}
}
ulp_context_initialized(session, init);
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
return session;
}
@@ -1069,12 +1069,12 @@ ulp_session_deinit(struct bnxt_ulp_session_state *session)
return;
if (!session->cfg_data) {
- pthread_mutex_lock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
STAILQ_REMOVE(&bnxt_ulp_session_list, session,
bnxt_ulp_session_state, next);
- pthread_mutex_destroy(&session->bnxt_ulp_mutex);
+ rte_thread_mutex_destroy(&session->bnxt_ulp_mutex);
rte_free(session);
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
}
}
@@ -1235,7 +1235,7 @@ bnxt_ulp_deinit(struct bnxt *bp,
BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
/* free the flow db lock */
- pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
+ rte_thread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
if (ha_enabled)
ulp_ha_mgr_deinit(bp->ulp_ctx);
@@ -1263,7 +1263,7 @@ bnxt_ulp_init(struct bnxt *bp,
goto jump_to_error;
}
- rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
+ rc = rte_thread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
goto jump_to_error;
@@ -1529,9 +1529,9 @@ bnxt_ulp_port_deinit(struct bnxt *bp)
/* Get the session details */
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
- pthread_mutex_lock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
session = ulp_get_session(pci_addr);
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
/* session not found then just exit */
if (!session) {
@@ -1938,7 +1938,7 @@ bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
if (!ulp_ctx || !ulp_ctx->cfg_data)
return -1;
- if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
+ if (rte_thread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
return -1;
}
@@ -1952,7 +1952,7 @@ bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
if (!ulp_ctx || !ulp_ctx->cfg_data)
return;
- pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
+ rte_thread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
}
/* Function to set the ha info into the context */
@@ -64,7 +64,7 @@ struct bnxt_ulp_data {
uint32_t dev_id; /* Hardware device id */
uint32_t ref_cnt;
struct bnxt_ulp_flow_db *flow_db;
- pthread_mutex_t flow_db_lock;
+ rte_thread_mutex flow_db_lock;
void *mapper_data;
struct bnxt_ulp_port_db *port_db;
struct bnxt_ulp_fc_info *fc_info;
@@ -95,7 +95,7 @@ struct bnxt_ulp_pci_info {
struct bnxt_ulp_session_state {
STAILQ_ENTRY(bnxt_ulp_session_state) next;
bool bnxt_ulp_init;
- pthread_mutex_t bnxt_ulp_mutex;
+ rte_thread_mutex bnxt_ulp_mutex;
struct bnxt_ulp_pci_info pci_info;
struct bnxt_ulp_data *cfg_data;
struct tf *g_tfp;
@@ -3,6 +3,7 @@
* All rights reserved.
*/
+#include <pthread.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
@@ -84,7 +85,7 @@ ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
if (!ulp_fc_info)
goto error;
- rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
+ rc = rte_thread_mutex_init(&ulp_fc_info->fc_lock);
if (rc) {
PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
goto error;
@@ -149,7 +150,7 @@ ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
ulp_fc_mgr_thread_cancel(ctxt);
- pthread_mutex_destroy(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_destroy(&ulp_fc_info->fc_lock);
if (ulp_fc_info->num_counters) {
for (i = 0; i < TF_DIR_MAX; i++)
@@ -485,11 +486,12 @@ ulp_fc_mgr_alarm_cb(void *arg __rte_unused)
* Take the fc_lock to ensure no flow is destroyed
* during the bulk get
*/
- if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
+	if (pthread_mutex_trylock((pthread_mutex_t *)
+			ulp_fc_info->fc_lock.mutex_id))
goto out;
if (!ulp_fc_info->num_entries) {
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
ulp_fc_mgr_thread_cancel(ctxt);
bnxt_ulp_cntxt_entry_release();
return;
@@ -521,7 +523,7 @@ ulp_fc_mgr_alarm_cb(void *arg __rte_unused)
}
}
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
/*
* If cmd fails once, no need of
@@ -617,12 +619,12 @@ int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
if (!ulp_fc_info->num_counters)
return 0;
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
ulp_fc_info->num_entries++;
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
return 0;
}
@@ -652,14 +654,14 @@ int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
if (!ulp_fc_info->num_counters)
return 0;
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
ulp_fc_info->num_entries--;
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
return 0;
}
@@ -730,7 +732,7 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
/* TODO:
* Think about optimizing with try_lock later
*/
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id -
ulp_fc_info->shadow_hw_tbl[dir].start_idx;
sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
@@ -744,7 +746,7 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
sw_acc_tbl_entry->pkt_count = 0;
sw_acc_tbl_entry->byte_count = 0;
}
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
} else if (params.resource_sub_type ==
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
/* Get stats from the parent child table */
@@ -786,7 +788,7 @@ int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
if (!ulp_fc_info)
return -EIO;
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
@@ -795,7 +797,7 @@ int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
hw_cntr_id, fid);
rc = -ENOENT;
}
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
return rc;
}
@@ -45,10 +45,10 @@ struct hw_fc_mem_info {
struct bnxt_ulp_fc_info {
struct sw_acc_counter *sw_acc_tbl[TF_DIR_MAX];
struct hw_fc_mem_info shadow_hw_tbl[TF_DIR_MAX];
- uint32_t flags;
- uint32_t num_entries;
- pthread_mutex_t fc_lock;
- uint32_t num_counters;
+ uint32_t flags;
+ uint32_t num_entries;
+ rte_thread_mutex fc_lock;
+ uint32_t num_counters;
};
int32_t
@@ -328,7 +328,7 @@ ulp_ha_mgr_init(struct bnxt_ulp_context *ulp_ctx)
/* Add the HA info tbl to the ulp context. */
bnxt_ulp_cntxt_ptr2_ha_info_set(ulp_ctx, ha_info);
- rc = pthread_mutex_init(&ha_info->ha_lock, NULL);
+ rc = rte_thread_mutex_init(&ha_info->ha_lock);
if (rc) {
PMD_DRV_LOG(ERR, "Failed to initialize ha mutex\n");
goto cleanup;
@@ -359,7 +359,7 @@ ulp_ha_mgr_deinit(struct bnxt_ulp_context *ulp_ctx)
return;
}
- pthread_mutex_destroy(&ha_info->ha_lock);
+ rte_thread_mutex_destroy(&ha_info->ha_lock);
rte_free(ha_info);
bnxt_ulp_cntxt_ptr2_ha_info_set(ulp_ctx, NULL);
@@ -31,7 +31,7 @@ struct bnxt_ulp_ha_mgr_info {
enum ulp_ha_mgr_app_type app_type;
enum ulp_ha_mgr_region region;
uint32_t flags;
- pthread_mutex_t ha_lock;
+ rte_thread_mutex ha_lock;
};
bool
@@ -11,7 +11,7 @@
#include <limits.h>
#include <sched.h>
#include <signal.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <sys/types.h>
#include <sys/syscall.h>
@@ -11,7 +11,7 @@
#include <stdio.h>
#include <limits.h>
#include <sched.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <rte_byteorder.h>
#include <rte_common.h>
@@ -9,6 +9,7 @@
#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>
+#include <rte_thread.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
@@ -141,14 +142,14 @@ extern int ena_logtype_com;
typedef struct {
pthread_cond_t cond;
- pthread_mutex_t mutex;
+ rte_thread_mutex mutex;
uint8_t flag;
} ena_wait_event_t;
#define ENA_WAIT_EVENT_INIT(waitevent) \
do { \
ena_wait_event_t *_we = &(waitevent); \
- pthread_mutex_init(&_we->mutex, NULL); \
+ rte_thread_mutex_init(&_we->mutex); \
pthread_cond_init(&_we->cond, NULL); \
_we->flag = 0; \
} while (0)
@@ -165,10 +166,10 @@ typedef struct {
wait.tv_sec = now.tv_sec + _tmo / 1000000UL; \
timeout_us = _tmo % 1000000UL; \
wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \
- pthread_mutex_lock(&_we->mutex); \
+ rte_thread_mutex_lock(&_we->mutex); \
while (ret == 0 && !_we->flag) { \
ret = pthread_cond_timedwait(&_we->cond, \
- &_we->mutex, &wait); \
+ _we->mutex.mutex_id, &wait); \
} \
/* Asserts only if not working on ena_wait_event_t */ \
if (unlikely(ret != 0 && ret != ETIMEDOUT)) \
@@ -178,15 +179,15 @@ typedef struct {
ena_trc_err(NULL, \
"Timeout waiting for " #waitevent "\n"); \
_we->flag = 0; \
- pthread_mutex_unlock(&_we->mutex); \
+ rte_thread_mutex_unlock(&_we->mutex); \
} while (0)
#define ENA_WAIT_EVENT_SIGNAL(waitevent) \
do { \
ena_wait_event_t *_we = &(waitevent); \
- pthread_mutex_lock(&_we->mutex); \
+ rte_thread_mutex_lock(&_we->mutex); \
_we->flag = 1; \
pthread_cond_signal(&_we->cond); \
- pthread_mutex_unlock(&_we->mutex); \
+ rte_thread_mutex_unlock(&_we->mutex); \
} while (0)
/* pthread condition doesn't need to be rearmed after usage */
#define ENA_WAIT_EVENT_CLEAR(...)
@@ -107,7 +107,7 @@ struct enic {
int iommu_groupid;
int eventfd;
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
- pthread_t err_intr_thread;
+ rte_thread_t err_intr_thread;
int promisc;
int allmulti;
uint8_t ig_vlan_strip_en;
@@ -3,7 +3,7 @@
*/
#include <sys/types.h>
#include <sys/stat.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <unistd.h>
#include <rte_spinlock.h>
@@ -121,7 +121,7 @@ ice_dcf_vsi_update_service_handler(void *param)
struct ice_dcf_hw *hw = reset_param->dcf_hw;
struct ice_dcf_adapter *adapter;
- pthread_detach(pthread_self());
+ rte_thread_detach(rte_thread_self());
rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
@@ -159,7 +159,7 @@ start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
#define THREAD_NAME_LEN 16
struct ice_dcf_reset_event_param *param;
char name[THREAD_NAME_LEN];
- pthread_t thread;
+ rte_thread_t thread;
int ret;
param = malloc(sizeof(*param));
@@ -173,7 +173,7 @@ start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
param->vf_id = vf_id;
snprintf(name, sizeof(name), "ice-reset-%u", vf_id);
- ret = rte_ctrl_thread_create(&thread, name, NULL,
+ ret = rte_thread_ctrl_thread_create(&thread, name,
ice_dcf_vsi_update_service_handler, param);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
@@ -4156,7 +4156,7 @@ ixgbe_dev_setup_link_thread_handler(void *param)
u32 speed;
bool autoneg = false;
- pthread_detach(pthread_self());
+ rte_thread_detach(rte_thread_self());
speed = hw->phy.autoneg_advertised;
if (!speed)
ixgbe_get_link_capabilities(hw, &speed, &autoneg);
@@ -4264,9 +4264,9 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
* when there is no link thread running.
*/
intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
- if (rte_ctrl_thread_create(&ad->link_thread_tid,
+ if (rte_thread_ctrl_thread_create
+ (&ad->link_thread_tid,
"ixgbe-link-handler",
- NULL,
ixgbe_dev_setup_link_thread_handler,
dev) < 0) {
PMD_DRV_LOG(ERR,
@@ -516,7 +516,7 @@ struct ixgbe_adapter {
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
rte_atomic32_t link_thread_running;
- pthread_t link_thread_tid;
+ rte_thread_t link_thread_tid;
};
struct ixgbe_vf_representor {
@@ -2872,7 +2872,7 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
int dbmap_env;
int err = 0;
- pthread_mutex_init(&sh->txpp.mutex, NULL);
+ rte_thread_mutex_init(&sh->txpp.mutex);
/*
* Configure environment variable "MLX5_BF_SHUT_UP"
* before the device creation. The rdma_core library
@@ -186,7 +186,7 @@ int mlx5_logtype;
static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_dev_ctx_list_mutex;
+static rte_thread_mutex mlx5_dev_ctx_list_mutex;
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
[MLX5_IPOOL_DECAP_ENCAP] = {
@@ -1117,7 +1117,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex);
/* Search for IB context by device name. */
LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
if (!strcmp(sh->ibdev_name,
@@ -1246,11 +1246,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
rte_spinlock_init(&sh->geneve_tlv_opt_sl);
exit:
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
- pthread_mutex_destroy(&sh->txpp.mutex);
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_destroy(&sh->txpp.mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
if (sh->cnt_id_tbl)
mlx5_l3t_destroy(sh->cnt_id_tbl);
@@ -1282,7 +1282,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
- pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
struct mlx5_dev_ctx_shared *lctx;
@@ -1313,7 +1313,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
/* Release flow workspaces objects on the last device. */
if (LIST_EMPTY(&mlx5_dev_ctx_list))
mlx5_flow_os_release_workspace();
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
/*
* Ensure there is no async event handler installed.
* Only primary process handles async device events.
@@ -1346,11 +1346,11 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
if (sh->ctx)
claim_zero(mlx5_glue->close_device(sh->ctx));
MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
- pthread_mutex_destroy(&sh->txpp.mutex);
+ rte_thread_mutex_destroy(&sh->txpp.mutex);
mlx5_free(sh);
return;
exit:
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}
/**
@@ -2502,7 +2502,7 @@ RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE)
*/
RTE_INIT(rte_mlx5_pmd_init)
{
- pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
+ rte_thread_mutex_init(&mlx5_dev_ctx_list_mutex);
mlx5_common_init();
/* Build the static tables for Verbs conversion. */
mlx5_set_ptype_table();
@@ -1010,7 +1010,7 @@ struct mlx5_txpp_ts {
/* Tx packet pacing structure. */
struct mlx5_dev_txpp {
- pthread_mutex_t mutex; /* Pacing create/destroy mutex. */
+ rte_thread_mutex mutex; /* Pacing create/destroy mutex. */
uint32_t refcnt; /* Pacing reference counter. */
uint32_t freq; /* Timestamp frequency, Hz. */
uint32_t tick; /* Completion tick duration in nanoseconds. */
@@ -914,7 +914,7 @@ mlx5_txpp_start(struct rte_eth_dev *dev)
if (ret < 0)
return 0;
}
- ret = pthread_mutex_lock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_lock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
if (sh->txpp.refcnt) {
@@ -930,7 +930,7 @@ mlx5_txpp_start(struct rte_eth_dev *dev)
rte_errno = -err;
}
}
- ret = pthread_mutex_unlock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_unlock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
return err;
@@ -957,7 +957,7 @@ mlx5_txpp_stop(struct rte_eth_dev *dev)
return;
}
priv->txpp_en = 0;
- ret = pthread_mutex_lock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_lock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
MLX5_ASSERT(sh->txpp.refcnt);
@@ -965,7 +965,7 @@ mlx5_txpp_stop(struct rte_eth_dev *dev)
return;
/* No references any more, do actual destroy. */
mlx5_txpp_destroy(sh);
- ret = pthread_mutex_unlock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_unlock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
}
@@ -253,7 +253,7 @@ struct mlx5_workspace_thread {
static struct mlx5_workspace_thread *curr;
static struct mlx5_workspace_thread *first;
rte_thread_key ws_tls_index;
-static pthread_mutex_t lock_thread_list;
+RTE_STATIC_MUTEX(lock_thread_list)
static bool
mlx5_is_thread_alive(HANDLE thread_handle)
@@ -330,7 +330,7 @@ mlx5_flow_os_release_workspace(void)
free(first);
}
rte_thread_key_delete(ws_tls_index);
- pthread_mutex_destroy(&lock_thread_list);
+ rte_thread_mutex_destroy(&lock_thread_list);
}
static int
@@ -352,7 +352,7 @@ mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
}
temp->mlx5_ws = data;
temp->thread_handle = curr_thread;
- pthread_mutex_lock(&lock_thread_list);
+ rte_thread_mutex_lock(&lock_thread_list);
mlx5_clear_thread_list();
if (!first) {
first = temp;
@@ -361,7 +361,7 @@ mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
curr->next = temp;
curr = curr->next;
}
- pthread_mutex_unlock(&lock_thread_list);
+ rte_thread_mutex_unlock(&lock_thread_list);
return 0;
}
@@ -374,7 +374,7 @@ mlx5_flow_os_init_workspace_once(void)
DRV_LOG(ERR, "Can't create flow workspace data thread key.");
return err;
}
- pthread_mutex_init(&lock_thread_list, NULL);
+ rte_thread_mutex_init(&lock_thread_list);
return 0;
}
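Statically initialized locks follow a different pattern: there is no direct replacement for PTHREAD_MUTEX_INITIALIZER, so the series' RTE_STATIC_MUTEX() macro defines and initializes the lock in one statement (used without a trailing semicolon throughout this patch). A minimal sketch, assuming that macro:

#include <rte_thread.h>

/* was: static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER; */
RTE_STATIC_MUTEX(counter_lock)

static unsigned long counter;	/* hypothetical shared state */

static void
counter_bump(void)
{
	rte_thread_mutex_lock(&counter_lock);
	counter++;
	rte_thread_mutex_unlock(&counter_lock);
}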
@@ -264,7 +264,7 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
int err = 0;
struct mlx5_context *mlx5_ctx;
- pthread_mutex_init(&sh->txpp.mutex, NULL);
+ rte_thread_mutex_init(&sh->txpp.mutex);
/* Set numa node from pci probe */
sh->numa_node = spawn->pci_dev->device.numa_node;
@@ -153,10 +153,10 @@ void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
/* Mutexes */
-typedef pthread_mutex_t osal_mutex_t;
-#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
-#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
-#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+typedef rte_thread_mutex osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) rte_thread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) rte_thread_mutex_init(lock)
+#define OSAL_MUTEX_ACQUIRE(lock) rte_thread_mutex_lock(lock)
#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
#define OSAL_MUTEX_DEALLOC(lock) nothing
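Since the qede base driver only touches its locks through the OSAL_MUTEX_* macros, retargeting the typedef and the three macros converts every call site at once. A sketch of how base code consumes them (hypothetical function; the expansions shown are the new ones from above):

static osal_mutex_t stats_lock;	/* now an rte_thread_mutex underneath */

static void
stats_update(void)
{
	/* OSAL_MUTEX_INIT(&stats_lock) is assumed to have run at probe. */
	OSAL_MUTEX_ACQUIRE(&stats_lock);	/* rte_thread_mutex_lock() */
	/* ... update shared counters ... */
	OSAL_MUTEX_RELEASE(&stats_lock);	/* rte_thread_mutex_unlock() */
}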
@@ -3,7 +3,7 @@
* Copyright(c) 2016-2018 Intel Corporation
*/
#include <unistd.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <stdbool.h>
#include <sys/epoll.h>
@@ -121,7 +121,7 @@ TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
TAILQ_HEAD_INITIALIZER(internal_list);
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(internal_list_lock)
static struct rte_eth_link pmd_link = {
.link_speed = 10000,
@@ -507,7 +507,7 @@ find_internal_resource(char *ifname)
if (ifname == NULL)
return NULL;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
internal = list->eth_dev->data->dev_private;
@@ -517,7 +517,7 @@ find_internal_resource(char *ifname)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -1001,9 +1001,9 @@ vhost_driver_setup(struct rte_eth_dev *eth_dev)
goto free_list;
list->eth_dev = eth_dev;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_INSERT_TAIL(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
@@ -1035,9 +1035,9 @@ vhost_driver_setup(struct rte_eth_dev *eth_dev)
rte_vhost_driver_unregister(internal->iface_name);
list_remove:
vring_states[eth_dev->data->port_id] = NULL;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(vring_state);
free_list:
rte_free(list);
@@ -1093,7 +1093,7 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
if (!rte_eth_dev_is_valid_port(port_id))
return -1;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
eth_dev = list->eth_dev;
@@ -1106,7 +1106,7 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
return vid;
}
@@ -1184,9 +1184,9 @@ eth_dev_close(struct rte_eth_dev *dev)
list = find_internal_resource(internal->iface_name);
if (list) {
rte_vhost_driver_unregister(internal->iface_name);
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(list);
}
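The vhost PMD keeps its critical sections unchanged; only the primitives around the internal_list walks are swapped. A condensed, self-contained sketch of that lookup shape, with hypothetical element names:

#include <stddef.h>
#include <sys/queue.h>
#include <rte_thread.h>

struct node {			/* hypothetical list element */
	TAILQ_ENTRY(node) next;
	int key;
};
TAILQ_HEAD(node_head, node);
static struct node_head node_list = TAILQ_HEAD_INITIALIZER(node_list);
RTE_STATIC_MUTEX(node_list_lock)

static struct node *
node_find(int key)
{
	struct node *n, *found = NULL;

	rte_thread_mutex_lock(&node_list_lock);
	TAILQ_FOREACH(n, &node_list, next) {
		if (n->key == key) {
			found = n;
			break;
		}
	}
	rte_thread_mutex_unlock(&node_list_lock);
	return found;
}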
@@ -143,7 +143,7 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev)
uint64_t features;
int ret = -1;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
/* Step 0: tell vhost to create queues */
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
@@ -161,7 +161,7 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev)
goto error;
PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return ret;
}
@@ -185,7 +185,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
* memory subsystem in the future.
*/
rte_mcfg_mem_read_lock();
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
/* Step 2: share memory regions */
ret = dev->ops->set_memory_table(dev);
@@ -206,12 +206,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
dev->started = true;
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
rte_mcfg_mem_read_unlock();
return 0;
error:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
rte_mcfg_mem_read_unlock();
PMD_INIT_LOG(ERR, "(%s) Failed to start device\n", dev->path);
@@ -226,7 +226,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uint32_t i;
int ret;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
if (!dev->started)
goto out;
@@ -249,11 +249,11 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
dev->started = false;
out:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return 0;
err:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
PMD_INIT_LOG(ERR, "(%s) Failed to stop device\n", dev->path);
@@ -443,7 +443,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
if (msl->external)
return;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
if (dev->started == false)
goto exit;
@@ -468,7 +468,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
}
exit:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
if (ret < 0)
PMD_DRV_LOG(ERR, "(%s) Failed to update memory table\n", dev->path);
@@ -554,7 +554,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
uint64_t backend_features;
int i;
- pthread_mutex_init(&dev->mutex, NULL);
+ rte_thread_mutex_init(&dev->mutex);
strlcpy(dev->path, path, PATH_MAX);
for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
@@ -862,13 +862,13 @@ virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
int ret;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
dev->status = status;
ret = dev->ops->set_status(dev, status);
if (ret && ret != -ENOTSUP)
PMD_INIT_LOG(ERR, "(%s) Failed to set backend status\n", dev->path);
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return ret;
}
@@ -878,7 +878,7 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev)
int ret;
uint8_t status;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
ret = dev->ops->get_status(dev, &status);
if (!ret) {
@@ -903,7 +903,7 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev)
PMD_INIT_LOG(ERR, "(%s) Failed to get backend status\n", dev->path);
}
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return ret;
}
@@ -55,7 +55,7 @@ struct virtio_user_dev {
bool qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS];
struct virtio_user_backend_ops *ops;
- pthread_mutex_t mutex;
+ rte_thread_mutex mutex;
bool started;
void *backend_data;
@@ -23,6 +23,7 @@
#include <rte_log.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>
+#include <rte_thread.h>
#include "base/ifcvf.h"
@@ -52,7 +53,7 @@ struct ifcvf_internal {
int vfio_container_fd;
int vfio_group_fd;
int vfio_dev_fd;
- pthread_t tid; /* thread for notify relay */
+ rte_thread_t tid; /* thread for notify relay */
int epfd;
int vid;
struct rte_vdpa_device *vdev;
@@ -79,7 +80,7 @@ TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
TAILQ_HEAD_INITIALIZER(internal_list);
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(internal_list_lock)
static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
@@ -89,7 +90,7 @@ find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
int found = 0;
struct internal_list *list;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
if (vdev == list->internal->vdev) {
@@ -98,7 +99,7 @@ find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -112,7 +113,7 @@ find_internal_resource_by_dev(struct rte_pci_device *pdev)
int found = 0;
struct internal_list *list;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
if (!rte_pci_addr_cmp(&pdev->addr,
@@ -122,7 +123,7 @@ find_internal_resource_by_dev(struct rte_pci_device *pdev)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -500,10 +501,10 @@ setup_notify_relay(struct ifcvf_internal *internal)
int ret;
snprintf(name, sizeof(name), "ifc-notify-%d", internal->vid);
- ret = rte_ctrl_thread_create(&internal->tid, name, NULL, notify_relay,
+ ret = rte_thread_ctrl_thread_create(&internal->tid, name, notify_relay,
(void *)internal);
if (ret != 0) {
- DRV_LOG(ERR, "failed to create notify relay pthread.");
+ DRV_LOG(ERR, "failed to create notify relay thread.");
return -1;
}
@@ -513,13 +514,11 @@ setup_notify_relay(struct ifcvf_internal *internal)
static int
unset_notify_relay(struct ifcvf_internal *internal)
{
- void *status;
-
- if (internal->tid) {
- pthread_cancel(internal->tid);
- pthread_join(internal->tid, &status);
+ if (internal->tid.opaque_id) {
+ pthread_cancel(internal->tid.opaque_id);
+ rte_thread_join(internal->tid, NULL);
}
- internal->tid = 0;
+ internal->tid.opaque_id = 0;
if (internal->epfd >= 0)
close(internal->epfd);
@@ -806,10 +805,10 @@ setup_vring_relay(struct ifcvf_internal *internal)
int ret;
snprintf(name, sizeof(name), "ifc-vring-%d", internal->vid);
- ret = rte_ctrl_thread_create(&internal->tid, name, NULL, vring_relay,
+ ret = rte_thread_ctrl_thread_create(&internal->tid, name, vring_relay,
(void *)internal);
if (ret != 0) {
- DRV_LOG(ERR, "failed to create ring relay pthread.");
+ DRV_LOG(ERR, "failed to create ring relay thread.");
return -1;
}
@@ -819,13 +818,11 @@ setup_vring_relay(struct ifcvf_internal *internal)
static int
unset_vring_relay(struct ifcvf_internal *internal)
{
- void *status;
-
- if (internal->tid) {
- pthread_cancel(internal->tid);
- pthread_join(internal->tid, &status);
+ if (internal->tid.opaque_id) {
+ pthread_cancel(internal->tid.opaque_id);
+ rte_thread_join(internal->tid, NULL);
}
- internal->tid = 0;
+ internal->tid.opaque_id = 0;
if (internal->epfd >= 0)
close(internal->epfd);
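rte_thread_t is a struct wrapping an opaque_id, so the old `if (tid)` test and `tid = 0` reset become field accesses. The series has no rte_thread_cancel() at this point, so the raw identifier still goes to pthread_cancel() while the join uses the new API. The teardown shape, condensed (assuming this mixed usage stays POSIX-only):

#include <pthread.h>
#include <rte_thread.h>

static void
relay_stop(rte_thread_t *tid)
{
	if (tid->opaque_id) {
		/* No rte_thread_cancel() yet; fall back to pthread. */
		pthread_cancel(tid->opaque_id);
		rte_thread_join(*tid, NULL);
	}
	tid->opaque_id = 0;
}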
@@ -1253,9 +1250,9 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
goto error;
}
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_INSERT_TAIL(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_atomic32_set(&internal->started, 1);
update_datapath(internal);
@@ -1293,9 +1290,9 @@ ifcvf_pci_remove(struct rte_pci_device *pci_dev)
rte_vfio_container_destroy(internal->vfio_container_fd);
rte_vdpa_unregister_device(internal->vdev);
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(list);
rte_free(internal);
@@ -48,7 +48,7 @@
TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
TAILQ_HEAD_INITIALIZER(priv_list);
-static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
+RTE_STATIC_MUTEX(priv_list_lock)
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
@@ -56,14 +56,14 @@ mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
struct mlx5_vdpa_priv *priv;
int found = 0;
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_FOREACH(priv, &priv_list, next) {
if (vdev == priv->vdev) {
found = 1;
break;
}
}
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
if (!found) {
DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
rte_errno = EINVAL;
@@ -143,9 +143,9 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
DRV_LOG(ERR, "Too big vring id: %d.", vring);
return -E2BIG;
}
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
ret = mlx5_vdpa_virtq_enable(priv, vring, state);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
return ret;
}
@@ -296,7 +296,7 @@ mlx5_vdpa_dev_close(int vid)
priv->configured = 0;
priv->vid = 0;
/* The mutex may stay locked after event thread cancel - re-initialize it. */
- pthread_mutex_init(&priv->vq_config_lock, NULL);
+ rte_thread_mutex_init(&priv->vq_config_lock);
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
}
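Cancelling the event thread can leave vq_config_lock held; the workaround carried over from the pthread version is to re-initialize the mutex, which leaves it unlocked again. A sketch of that recovery step, with a hypothetical lock:

#include <rte_thread.h>

static rte_thread_mutex vq_lock;	/* hypothetical per-device lock */

static void
dev_close_recover(void)
{
	/*
	 * The event thread may have been cancelled while holding the
	 * lock; re-initializing restores an unlocked, usable mutex.
	 */
	rte_thread_mutex_init(&vq_lock);
}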
@@ -706,10 +706,10 @@ mlx5_vdpa_dev_probe(struct rte_device *dev)
}
mlx5_vdpa_config_get(dev->devargs, priv);
SLIST_INIT(&priv->mr_list);
- pthread_mutex_init(&priv->vq_config_lock, NULL);
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_init(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
return 0;
error:
@@ -729,7 +729,7 @@ mlx5_vdpa_dev_remove(struct rte_device *dev)
struct mlx5_vdpa_priv *priv = NULL;
int found = 0;
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_FOREACH(priv, &priv_list, next) {
if (priv->vdev->device == dev) {
found = 1;
@@ -738,7 +738,7 @@ mlx5_vdpa_dev_remove(struct rte_device *dev)
}
if (found)
TAILQ_REMOVE(&priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
if (found) {
if (priv->configured)
mlx5_vdpa_dev_close(priv->vid);
@@ -749,7 +749,7 @@ mlx5_vdpa_dev_remove(struct rte_device *dev)
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
mlx5_glue->close_device(priv->ctx);
- pthread_mutex_destroy(&priv->vq_config_lock);
+ rte_thread_mutex_destroy(&priv->vq_config_lock);
rte_free(priv);
}
return 0;
@@ -119,9 +119,9 @@ enum {
struct mlx5_vdpa_priv {
TAILQ_ENTRY(mlx5_vdpa_priv) next;
uint8_t configured;
- pthread_mutex_t vq_config_lock;
+ rte_thread_mutex vq_config_lock;
uint64_t no_traffic_counter;
- pthread_t timer_tid;
+ rte_thread_t timer_tid;
int event_mode;
int event_core; /* Event thread cpu affinity core. */
uint32_t event_us;
@@ -285,7 +285,7 @@ mlx5_vdpa_event_handle(void *arg)
case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
priv->timer_delay_us = priv->event_us;
while (1) {
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
max = mlx5_vdpa_queues_complete(priv);
if (max == 0 && priv->no_traffic_counter++ >=
priv->no_traffic_max) {
@@ -293,10 +293,10 @@ mlx5_vdpa_event_handle(void *arg)
priv->vdev->device->name);
mlx5_vdpa_arm_all_cqs(priv);
do {
- pthread_mutex_unlock
+ rte_thread_mutex_unlock
(&priv->vq_config_lock);
cq = mlx5_vdpa_event_wait(priv);
- pthread_mutex_lock
+ rte_thread_mutex_lock
(&priv->vq_config_lock);
if (cq == NULL ||
mlx5_vdpa_queue_complete(cq) > 0)
@@ -307,7 +307,7 @@ mlx5_vdpa_event_handle(void *arg)
} else if (max != 0) {
priv->no_traffic_counter = 0;
}
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
mlx5_vdpa_timer_sleep(priv, max);
}
return NULL;
@@ -315,10 +315,10 @@ mlx5_vdpa_event_handle(void *arg)
do {
cq = mlx5_vdpa_event_wait(priv);
if (cq != NULL) {
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
if (mlx5_vdpa_queue_complete(cq) > 0)
mlx5_vdpa_cq_arm(priv, cq);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
}
} while (1);
return NULL;
@@ -340,7 +340,7 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
struct mlx5_vdpa_virtq *virtq;
uint64_t sec;
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
sizeof(out.buf)) >=
(ssize_t)sizeof(out.event_resp.cookie)) {
@@ -386,7 +386,7 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
virtq->err_time[i - 1] = virtq->err_time[i];
virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
}
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
#endif
}
@@ -473,28 +473,23 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
int ret;
rte_cpuset_t cpuset;
- pthread_attr_t attr;
+ rte_thread_attr_t attr;
char name[16];
- const struct sched_param sp = {
- .sched_priority = sched_get_priority_max(SCHED_RR),
- };
if (!priv->eventc)
/* All virtqs are in poll mode. */
return 0;
- pthread_attr_init(&attr);
- ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
- if (ret) {
- DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
- return -1;
- }
- ret = pthread_attr_setschedparam(&attr, &sp);
+
+ rte_thread_attr_init(&attr);
+ ret = rte_thread_attr_set_priority(&attr,
+ RTE_THREAD_PRIORITY_REALTIME_CRITICAL);
if (ret) {
DRV_LOG(ERR, "Failed to set thread priority.");
return -1;
}
- ret = pthread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
- (void *)priv);
+
+ ret = rte_thread_create(&priv->timer_tid, &attr, mlx5_vdpa_event_handle,
+ (void *)priv);
if (ret) {
DRV_LOG(ERR, "Failed to create timer thread.");
return -1;
@@ -504,13 +499,13 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
CPU_SET(priv->event_core, &cpuset);
else
cpuset = rte_lcore_cpuset(rte_get_main_lcore());
- ret = pthread_setaffinity_np(priv->timer_tid, sizeof(cpuset), &cpuset);
+ ret = rte_thread_set_affinity_by_id(priv->timer_tid, &cpuset);
if (ret) {
DRV_LOG(ERR, "Failed to set thread affinity.");
return -1;
}
snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
- ret = rte_thread_setname(priv->timer_tid, name);
+ ret = rte_thread_name_set(priv->timer_tid, name);
if (ret)
DRV_LOG(DEBUG, "Cannot set timer thread name.");
return 0;
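The scheduling setup shrinks noticeably: SCHED_RR plus a sched_param collapses into one rte_thread_attr_set_priority() call, while affinity and naming move to rte_thread_set_affinity_by_id() and rte_thread_name_set(). A condensed sketch of the full sequence, assuming those calls as used above and a hypothetical worker:

#include <stdio.h>
#include <rte_lcore.h>
#include <rte_thread.h>

static void *
worker(void *arg)
{
	(void)arg;
	for (;;)	/* hypothetical event loop */
		;
	return NULL;
}

static int
start_rt_thread(rte_thread_t *tid, int core)
{
	rte_thread_attr_t attr;
	rte_cpuset_t cpuset;
	char name[16];

	rte_thread_attr_init(&attr);
	if (rte_thread_attr_set_priority(&attr,
			RTE_THREAD_PRIORITY_REALTIME_CRITICAL) != 0)
		return -1;
	if (rte_thread_create(tid, &attr, worker, NULL) != 0)
		return -1;
	CPU_ZERO(&cpuset);
	CPU_SET(core, &cpuset);
	if (rte_thread_set_affinity_by_id(*tid, &cpuset) != 0)
		return -1;
	snprintf(name, sizeof(name), "rt-worker-%d", core);
	/* Name is advisory; ignore failure, as the driver does above. */
	(void)rte_thread_name_set(*tid, name);
	return 0;
}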
@@ -519,13 +514,11 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
- void *status;
-
- if (priv->timer_tid) {
- pthread_cancel(priv->timer_tid);
- pthread_join(priv->timer_tid, &status);
+ if (priv->timer_tid.opaque_id) {
+ pthread_cancel(priv->timer_tid.opaque_id);
+ rte_thread_join(priv->timer_tid, NULL);
}
- priv->timer_tid = 0;
+ priv->timer_tid.opaque_id = 0;
}
void
@@ -16,6 +16,7 @@
#include <linux/if.h>
#include <linux/if_tun.h>
#include <fcntl.h>
+#include <pthread.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <signal.h>
@@ -6,6 +6,7 @@
#define _PTHREAD_SHIM_H_
#include <rte_lcore.h>
+#include <pthread.h>
/*
* This pthread shim is an example that demonstrates how legacy code