@@ -47,7 +47,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
char path[32];
#ifdef RTE_LIB_PDUMP
#ifdef RTE_NET_RING
- pthread_t thread;
+ rte_thread_t thread;
#endif
#endif
@@ -127,7 +127,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
#ifdef RTE_LIB_PDUMP
#ifdef RTE_NET_RING
if ((strcmp(env_value, "run_pdump_server_tests") == 0))
- pthread_create(&thread, NULL, &send_pkts, NULL);
+ rte_thread_create(&thread, NULL, &send_pkts, NULL);
#endif
#endif
@@ -137,7 +137,7 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
#ifdef RTE_NET_RING
if ((strcmp(env_value, "run_pdump_server_tests") == 0)) {
flag_for_send_pkts = 0;
- pthread_join(thread, NULL);
+ rte_thread_join(thread, NULL);
}
#endif
#endif
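
Taken together, the three hunks above swap the full pthread lifecycle for its rte_thread counterpart without touching the surrounding logic. A minimal sketch of the resulting pattern, assuming the signatures this series introduces (the attribute argument and the void *(*)(void *) start routine mirror pthread_create):

	#include <rte_thread.h>

	static void *worker(void *arg)
	{
		/* e.g. send_pkts(): loop until told to stop */
		return arg;
	}

	static int run_worker(void)
	{
		rte_thread_t t;

		if (rte_thread_create(&t, NULL, worker, NULL) != 0)
			return -1;
		/* ... clear the stop flag the worker polls ... */
		return rte_thread_join(t, NULL);
	}
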
@@ -14,7 +14,7 @@
struct thread_context {
enum { INIT, ERROR, DONE } state;
bool lcore_id_any;
- pthread_t id;
+ rte_thread_t id;
unsigned int *registered_count;
};
@@ -77,7 +77,7 @@ test_non_eal_lcores(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = false;
- if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
break;
non_eal_threads_count++;
}
@@ -96,7 +96,7 @@ test_non_eal_lcores(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = true;
- if (pthread_create(&t->id, NULL, thread_loop, t) == 0) {
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -110,7 +110,7 @@ test_non_eal_lcores(unsigned int eal_threads_count)
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
- pthread_join(t->id, NULL);
+ rte_thread_join(t->id, NULL);
if (t->state != DONE)
ret = -1;
}
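
The loop above reaps the workers and checks that each reached DONE; the handshake it closes is the acquire/release pairing on registered_count. A condensed sketch of the worker side, assuming a thread_loop that registers itself through the existing rte_thread_register() EAL call these tests exercise:

	static void *thread_loop(void *arg)
	{
		struct thread_context *t = arg;

		if (rte_thread_register() != 0)
			t->state = ERROR;
		/* pairs with the main thread's __ATOMIC_ACQUIRE loads */
		__atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
		/* ... run, unregister, then mark DONE on success ... */
		if (t->state != ERROR)
			t->state = DONE;
		return NULL;
	}
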
@@ -262,7 +262,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = false;
- if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -285,7 +285,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
t->state = INIT;
t->registered_count = &registered_count;
t->lcore_id_any = true;
- if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+ if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
@@ -309,7 +309,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
- pthread_join(t->id, NULL);
+ rte_thread_join(t->id, NULL);
if (t->state != DONE)
ret = -1;
}
@@ -330,7 +330,7 @@ test_non_eal_lcores_callback(unsigned int eal_threads_count)
__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
- pthread_join(t->id, NULL);
+ rte_thread_join(t->id, NULL);
}
error:
if (handle[1] != NULL)
@@ -203,7 +203,7 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
static int slaves_initialized;
static int mac_slaves_initialized;
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t mutex = RTE_THREAD_MUTEX_INITIALIZER;
static pthread_cond_t cvar = PTHREAD_COND_INITIALIZER;
@@ -1191,11 +1191,11 @@ test_bonding_lsc_event_callback(uint16_t port_id __rte_unused,
void *param __rte_unused,
void *ret_param __rte_unused)
{
- pthread_mutex_lock(&mutex);
+ rte_thread_mutex_lock(&mutex);
test_lsc_interrupt_count++;
pthread_cond_signal(&cvar);
- pthread_mutex_unlock(&mutex);
+ rte_thread_mutex_unlock(&mutex);
return 0;
}
@@ -1220,11 +1220,11 @@ lsc_timeout(int wait_us)
ts.tv_sec += 1;
}
- pthread_mutex_lock(&mutex);
+ rte_thread_mutex_lock(&mutex);
if (test_lsc_interrupt_count < 1)
retval = pthread_cond_timedwait(&cvar, &mutex, &ts);
- pthread_mutex_unlock(&mutex);
+ rte_thread_mutex_unlock(&mutex);
if (retval == 0 && test_lsc_interrupt_count < 1)
return -1;
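
Note that cvar stays a plain pthread_cond_t and pthread_cond_timedwait() still receives &mutex, which can only compile if rte_thread_mutex_t remains layout-compatible with pthread_mutex_t on POSIX (e.g. a direct typedef, which the RTE_THREAD_MUTEX_INITIALIZER static initializer also suggests). Under that assumption, the signal/wait pairing used by the two hunks reduces to:

	static rte_thread_mutex_t mutex = RTE_THREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cvar = PTHREAD_COND_INITIALIZER;
	static int event_count;

	static void on_lsc_event(void)	/* callback side */
	{
		rte_thread_mutex_lock(&mutex);
		event_count++;
		pthread_cond_signal(&cvar);
		rte_thread_mutex_unlock(&mutex);
	}

	static int wait_event(const struct timespec *deadline)	/* waiter side */
	{
		int rc = 0;

		rte_thread_mutex_lock(&mutex);
		while (event_count < 1 && rc == 0)
			rc = pthread_cond_timedwait(&cvar, &mutex, deadline);
		rte_thread_mutex_unlock(&mutex);
		return event_count >= 1 ? 0 : -1;
	}
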
@@ -25,7 +25,7 @@ static volatile uint32_t thr_id;
static uint64_t gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use mutex to provide thread safety */
-static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t lpm_mutex = RTE_THREAD_MUTEX_INITIALIZER;
/* Report quiescent state interval every 1024 lookups. Larger critical
* sections in reader will result in writer polling multiple times.
@@ -443,7 +443,7 @@ test_lpm_rcu_qsbr_writer(void *arg)
/* Add all the entries */
for (j = si; j < ei; j++) {
if (num_writers > 1)
- pthread_mutex_lock(&lpm_mutex);
+ rte_thread_mutex_lock(&lpm_mutex);
if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
large_ldepth_route_table[j].depth,
next_hop_add) != 0) {
@@ -452,13 +452,13 @@ test_lpm_rcu_qsbr_writer(void *arg)
goto error;
}
if (num_writers > 1)
- pthread_mutex_unlock(&lpm_mutex);
+ rte_thread_mutex_unlock(&lpm_mutex);
}
/* Delete all the entries */
for (j = si; j < ei; j++) {
if (num_writers > 1)
- pthread_mutex_lock(&lpm_mutex);
+ rte_thread_mutex_lock(&lpm_mutex);
if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
large_ldepth_route_table[j].depth) != 0) {
printf("Failed to delete iteration %d, route# %d\n",
@@ -466,7 +466,7 @@ test_lpm_rcu_qsbr_writer(void *arg)
goto error;
}
if (num_writers > 1)
- pthread_mutex_unlock(&lpm_mutex);
+ rte_thread_mutex_unlock(&lpm_mutex);
}
}
@@ -478,7 +478,7 @@ test_lpm_rcu_qsbr_writer(void *arg)
error:
if (num_writers > 1)
- pthread_mutex_unlock(&lpm_mutex);
+ rte_thread_mutex_unlock(&lpm_mutex);
return -1;
}
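
The writer takes lpm_mutex only when more than one writer runs, since the LPM APIs are not thread safe (per the comment above); a single writer pays no locking cost. The add path trimmed to its skeleton, where lpm, num_writers and lpm_mutex are the file-scope objects above:

	static int
	add_route_locked(uint32_t ip, uint8_t depth, uint32_t next_hop)
	{
		int rc;

		if (num_writers > 1)
			rte_thread_mutex_lock(&lpm_mutex);
		rc = rte_lpm_add(lpm, ip, depth, next_hop);
		if (num_writers > 1)
			rte_thread_mutex_unlock(&lpm_mutex);
		return rc;
	}
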
@@ -255,6 +255,10 @@ else # for 32-bit we need smaller reserved memory areas
dpdk_conf.set('RTE_MAX_MEM_MB', 2048)
endif
+if is_windows
+ dpdk_conf.set('RTE_USE_WINDOWS_THREAD_TYPES', not get_option('use_external_thread_lib'))
+endif
+
compile_time_cpuflags = []
subdir(arch_subdir)
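
How RTE_USE_WINDOWS_THREAD_TYPES is consumed is not shown in this excerpt; presumably it selects between native Windows synchronization types and an external pthread-style library. A speculative illustration of the kind of selection it would guard (the type mappings below are assumptions, not the series' actual definitions):

	/* hypothetical excerpt of a thread-types header */
	#ifdef RTE_USE_WINDOWS_THREAD_TYPES
	#include <windows.h>
	typedef CRITICAL_SECTION rte_thread_mutex_t;	/* assumed mapping */
	#else
	#include <pthread.h>
	typedef pthread_mutex_t rte_thread_mutex_t;	/* assumed mapping */
	#define RTE_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
	#endif
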
@@ -37,10 +37,10 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
struct dpaa_ioctl_irq_map irq_map;
/* Verify the thread's cpu-affinity */
- ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
- &cpuset);
+ ret = rte_thread_get_affinity_by_id(rte_thread_self(),
+ sizeof(cpu_set_t), &cpuset);
if (ret) {
- error(0, ret, "pthread_getaffinity_np()");
+ error(0, ret, "rte_thread_get_affinity_by_id()");
return ret;
}
pcfg.cpu = -1;
@@ -18,16 +18,16 @@ struct process_interrupt {
};
static COMPAT_LIST_HEAD(process_irq_list);
-static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t process_irq_lock = RTE_THREAD_MUTEX_INITIALIZER;
static void process_interrupt_install(struct process_interrupt *irq)
{
int ret;
/* Add the irq to the end of the list */
- ret = pthread_mutex_lock(&process_irq_lock);
+ ret = rte_thread_mutex_lock(&process_irq_lock);
assert(!ret);
list_add_tail(&irq->node, &process_irq_list);
- ret = pthread_mutex_unlock(&process_irq_lock);
+ ret = rte_thread_mutex_unlock(&process_irq_lock);
assert(!ret);
}
@@ -35,10 +35,10 @@ static void process_interrupt_remove(struct process_interrupt *irq)
{
int ret;
- ret = pthread_mutex_lock(&process_irq_lock);
+ ret = rte_thread_mutex_lock(&process_irq_lock);
assert(!ret);
list_del(&irq->node);
- ret = pthread_mutex_unlock(&process_irq_lock);
+ ret = rte_thread_mutex_unlock(&process_irq_lock);
assert(!ret);
}
@@ -47,14 +47,14 @@ static struct process_interrupt *process_interrupt_find(int irq_num)
int ret;
struct process_interrupt *i = NULL;
- ret = pthread_mutex_lock(&process_irq_lock);
+ ret = rte_thread_mutex_lock(&process_irq_lock);
assert(!ret);
list_for_each_entry(i, &process_irq_list, node) {
if (i->irq == irq_num)
goto done;
}
done:
- ret = pthread_mutex_unlock(&process_irq_lock);
+ ret = rte_thread_mutex_unlock(&process_irq_lock);
assert(!ret);
return i;
}
@@ -21,7 +21,7 @@
* what the lock is for.
*/
static int fd = -1;
-static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t fd_init_lock = RTE_THREAD_MUTEX_INITIALIZER;
static int check_fd(void)
{
@@ -29,12 +29,12 @@ static int check_fd(void)
if (fd >= 0)
return 0;
- ret = pthread_mutex_lock(&fd_init_lock);
+ ret = rte_thread_mutex_lock(&fd_init_lock);
assert(!ret);
/* check again with the lock held */
if (fd < 0)
fd = open(PROCESS_PATH, O_RDWR);
- ret = pthread_mutex_unlock(&fd_init_lock);
+ ret = rte_thread_mutex_unlock(&fd_init_lock);
assert(!ret);
return (fd >= 0) ? 0 : -ENODEV;
}
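
check_fd() is a double-checked initialization: an unlocked fast-path read of fd, then a re-check under fd_init_lock before the one-time open(). The same shape as a reusable skeleton, where create_resource() is a hypothetical initializer:

	static int resource = -1;
	static rte_thread_mutex_t init_lock = RTE_THREAD_MUTEX_INITIALIZER;

	static int get_resource(void)
	{
		if (resource >= 0)		/* unlocked fast path */
			return resource;
		rte_thread_mutex_lock(&init_lock);
		if (resource < 0)		/* re-check with the lock held */
			resource = create_resource();
		rte_thread_mutex_unlock(&init_lock);
		return resource;
	}
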
@@ -92,10 +92,10 @@ dpaa2_get_core_id(void)
rte_cpuset_t cpuset;
int i, ret, cpu_id = -1;
- ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
- &cpuset);
+ ret = rte_thread_get_affinity_by_id(rte_thread_self(),
+ sizeof(cpu_set_t), &cpuset);
if (ret) {
- DPAA2_BUS_ERR("pthread_getaffinity_np() failed");
+ DPAA2_BUS_ERR("rte_thread_get_affinity_by_id() failed");
return ret;
}
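
Both bus drivers use the same recipe: ask the new API for the calling thread's affinity, then scan the set for the CPU the thread is pinned to. A sketch, assuming rte_thread_get_affinity_by_id() fills a cpu_set_t exactly as pthread_getaffinity_np() did:

	static int current_core_id(void)
	{
		rte_cpuset_t cpuset;
		int i;

		if (rte_thread_get_affinity_by_id(rte_thread_self(),
				sizeof(cpu_set_t), &cpuset) != 0)
			return -1;
		for (i = 0; i < CPU_SETSIZE; i++)
			if (CPU_ISSET(i, &cpuset))
				return i;	/* first CPU in the mask */
		return -1;
	}
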
@@ -72,7 +72,7 @@ struct mlx5_compress_qp {
TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
-static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t priv_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
int mlx5_compress_logtype;
@@ -830,9 +830,9 @@ mlx5_compress_pci_probe(struct rte_pci_driver *pci_drv,
}
priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
return 0;
}
@@ -852,13 +852,13 @@ mlx5_compress_pci_remove(struct rte_pci_device *pdev)
{
struct mlx5_compress_priv *priv = NULL;
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) != 0)
break;
if (priv)
TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
if (priv) {
mlx5_mr_release_cache(&priv->mr_scache);
mlx5_compress_hw_global_release(priv);
@@ -208,7 +208,7 @@ static void *dlb_complete_queue_map_unmap(void *__args)
static inline void os_schedule_work(struct dlb_hw *hw)
{
struct dlb_dev *dlb_dev;
- pthread_t complete_queue_map_unmap_thread;
+ rte_thread_t complete_queue_map_unmap_thread;
int ret;
dlb_dev = container_of(hw, struct dlb_dev, hw);
@@ -192,7 +192,7 @@ static void *dlb2_complete_queue_map_unmap(void *__args)
static inline void os_schedule_work(struct dlb2_hw *hw)
{
struct dlb2_dev *dlb2_dev;
- pthread_t complete_queue_map_unmap_thread;
+ rte_thread_t complete_queue_map_unmap_thread;
int ret;
dlb2_dev = container_of(hw, struct dlb2_dev, hw);
@@ -167,7 +167,7 @@ TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
TAILQ_HEAD_INITIALIZER(internal_list);
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
@@ -613,7 +613,7 @@ find_internal_resource(struct pmd_internals *port_int)
if (port_int == NULL)
return NULL;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
struct pmd_internals *list_int =
@@ -624,7 +624,7 @@ find_internal_resource(struct pmd_internals *port_int)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -662,7 +662,7 @@ get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
if (mb_pool == NULL)
return ret;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
internals = list->eth_dev->data->dev_private;
@@ -688,7 +688,7 @@ get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
}
out:
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
return ret;
}
@@ -717,9 +717,9 @@ eth_dev_configure(struct rte_eth_dev *dev)
return -1;
list->eth_dev = dev;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_INSERT_TAIL(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
}
return 0;
@@ -883,9 +883,9 @@ eth_dev_close(struct rte_eth_dev *dev)
/* Remove ethdev from list used to track and share UMEMs */
list = find_internal_resource(internals);
if (list) {
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(list);
}
}
@@ -618,12 +618,12 @@ eth_ark_dev_start(struct rte_eth_dev *dev)
ark_pktchkr_run(ark->pc);
if (ark->start_pg && (dev->data->port_id == 0)) {
- pthread_t thread;
+ rte_thread_t thread;
/* Delay packet generator start to allow the hardware to be ready
* This is only used for sanity checking with internal generator
*/
- if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
+ if (rte_thread_create(&thread, NULL, delay_pg_start, ark)) {
ARK_PMD_LOG(ERR, "Could not create pktgen "
"starter thread\n");
return -1;
@@ -405,7 +405,7 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
hw->aq_nic_cfg = &adapter->hw_cfg;
- pthread_mutex_init(&hw->mbox_mutex, NULL);
+ rte_thread_mutex_init(&hw->mbox_mutex);
/* disable interrupt */
atl_disable_intr(hw);
@@ -712,7 +712,7 @@ atl_dev_close(struct rte_eth_dev *dev)
rte_intr_callback_unregister(intr_handle,
atl_dev_interrupt_handler, dev);
- pthread_mutex_destroy(&hw->mbox_mutex);
+ rte_thread_mutex_destroy(&hw->mbox_mutex);
return ret;
}
@@ -10,7 +10,8 @@
#include <string.h>
#include <stdbool.h>
#include <netinet/in.h>
-#include <pthread.h>
+#include <rte_compat.h>
+#include <rte_thread.h>
#include <rte_common.h>
@@ -141,7 +142,7 @@ struct aq_hw_s {
u32 rpc_tid;
struct hw_aq_atl_utils_fw_rpc rpc;
- pthread_mutex_t mbox_mutex;
+ rte_thread_mutex_t mbox_mutex;
};
struct aq_fw_ops {
@@ -218,7 +218,7 @@ int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
u32 mac_addr[2] = { 0 };
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -257,7 +257,7 @@ int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
}
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -269,7 +269,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Toggle statistics bit for FW to update */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
@@ -286,7 +286,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
err = hw_atl_utils_update_stats(self);
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
@@ -299,7 +299,7 @@ static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp)
u32 temp_val = mpi_opts & BIT(CAPS_HI_TEMPERATURE);
u32 temp_res;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Toggle statistics bit for FW to 0x36C.18 (CAPS_HI_TEMPERATURE) */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_TEMPERATURE);
@@ -317,7 +317,7 @@ static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp)
sizeof(temp_res) / sizeof(u32));
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
if (err)
return err;
@@ -536,7 +536,7 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
if ((self->caps_lo & BIT(CAPS_LO_SMBUS_READ)) == 0)
return -EOPNOTSUPP;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
request.msg_id = 0;
request.device_id = dev_addr;
@@ -605,7 +605,7 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
}
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -626,7 +626,7 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
request.address = offset;
request.length = len;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Write SMBUS request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
@@ -694,7 +694,7 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
}
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -712,7 +712,7 @@ static int aq_fw2x_send_macsec_request(struct aq_hw_s *self,
if ((self->caps_lo & BIT(CAPS_LO_MACSEC)) == 0)
return -EOPNOTSUPP;
- pthread_mutex_lock(&self->mbox_mutex);
+ rte_thread_mutex_lock(&self->mbox_mutex);
/* Write macsec request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
@@ -742,7 +742,7 @@ static int aq_fw2x_send_macsec_request(struct aq_hw_s *self,
RTE_ALIGN(sizeof(*response) / sizeof(u32), sizeof(u32)));
exit:
- pthread_mutex_unlock(&self->mbox_mutex);
+ rte_thread_mutex_unlock(&self->mbox_mutex);
return err;
}
@@ -167,12 +167,12 @@ static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
index = mmd_address & ~pdata->xpcs_window_mask;
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
- pthread_mutex_lock(&pdata->xpcs_mutex);
+ rte_thread_mutex_lock(&pdata->xpcs_mutex);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
mmd_data = XPCS16_IOREAD(pdata, offset);
- pthread_mutex_unlock(&pdata->xpcs_mutex);
+ rte_thread_mutex_unlock(&pdata->xpcs_mutex);
return mmd_data;
}
@@ -201,12 +201,12 @@ static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
index = mmd_address & ~pdata->xpcs_window_mask;
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
- pthread_mutex_lock(&pdata->xpcs_mutex);
+ rte_thread_mutex_lock(&pdata->xpcs_mutex);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
XPCS16_IOWRITE(pdata, offset, mmd_data);
- pthread_mutex_unlock(&pdata->xpcs_mutex);
+ rte_thread_mutex_unlock(&pdata->xpcs_mutex);
}
static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
@@ -2311,10 +2311,10 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
- pthread_mutex_init(&pdata->xpcs_mutex, NULL);
- pthread_mutex_init(&pdata->i2c_mutex, NULL);
- pthread_mutex_init(&pdata->an_mutex, NULL);
- pthread_mutex_init(&pdata->phy_mutex, NULL);
+ rte_thread_mutex_init(&pdata->xpcs_mutex);
+ rte_thread_mutex_init(&pdata->i2c_mutex);
+ rte_thread_mutex_init(&pdata->an_mutex);
+ rte_thread_mutex_init(&pdata->phy_mutex);
ret = pdata->phy_if.phy_init(pdata);
if (ret) {
@@ -602,10 +602,10 @@ struct axgbe_port {
int phy_link;
int phy_speed;
- pthread_mutex_t xpcs_mutex;
- pthread_mutex_t i2c_mutex;
- pthread_mutex_t an_mutex;
- pthread_mutex_t phy_mutex;
+ rte_thread_mutex_t xpcs_mutex;
+ rte_thread_mutex_t i2c_mutex;
+ rte_thread_mutex_t an_mutex;
+ rte_thread_mutex_t phy_mutex;
/* Flow control settings */
unsigned int pause_autoneg;
@@ -229,7 +229,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
int ret;
uint64_t timeout;
- pthread_mutex_lock(&pdata->i2c_mutex);
+ rte_thread_mutex_lock(&pdata->i2c_mutex);
ret = axgbe_i2c_disable(pdata);
if (ret) {
PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
@@ -282,7 +282,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
}
unlock:
- pthread_mutex_unlock(&pdata->i2c_mutex);
+ rte_thread_mutex_unlock(&pdata->i2c_mutex);
return ret;
}
@@ -686,9 +686,9 @@ static void axgbe_an73_isr(struct axgbe_port *pdata)
if (pdata->an_int) {
/* Clear the interrupt(s) that fired and process them */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
- pthread_mutex_lock(&pdata->an_mutex);
+ rte_thread_mutex_lock(&pdata->an_mutex);
axgbe_an73_state_machine(pdata);
- pthread_mutex_unlock(&pdata->an_mutex);
+ rte_thread_mutex_unlock(&pdata->an_mutex);
} else {
/* Enable AN interrupts */
axgbe_an73_enable_interrupts(pdata);
@@ -977,7 +977,7 @@ static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
{
int ret;
- pthread_mutex_lock(&pdata->an_mutex);
+ rte_thread_mutex_lock(&pdata->an_mutex);
ret = __axgbe_phy_config_aneg(pdata);
if (ret)
@@ -985,7 +985,7 @@ static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
else
rte_bit_relaxed_clear32(AXGBE_LINK_ERR, &pdata->dev_state);
- pthread_mutex_unlock(&pdata->an_mutex);
+ rte_thread_mutex_unlock(&pdata->an_mutex);
return ret;
}
@@ -403,7 +403,7 @@ static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
phy_data->comm_owned = 0;
- pthread_mutex_unlock(&pdata->phy_mutex);
+ rte_thread_mutex_unlock(&pdata->phy_mutex);
}
static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
@@ -416,7 +416,7 @@ static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
* the driver needs to take the software mutex and then the hardware
* mutexes before being able to use the busses.
*/
- pthread_mutex_lock(&pdata->phy_mutex);
+ rte_thread_mutex_lock(&pdata->phy_mutex);
if (phy_data->comm_owned)
return 0;
@@ -447,7 +447,7 @@ static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
return 0;
}
- pthread_mutex_unlock(&pdata->phy_mutex);
+ rte_thread_mutex_unlock(&pdata->phy_mutex);
PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
@@ -523,8 +523,8 @@ struct bnxt_mark_info {
struct bnxt_rep_info {
struct rte_eth_dev *vfr_eth_dev;
- pthread_mutex_t vfr_lock;
- pthread_mutex_t vfr_start_lock;
+ rte_thread_mutex_t vfr_lock;
+ rte_thread_mutex_t vfr_start_lock;
bool conduit_valid;
};
@@ -677,7 +677,7 @@ struct bnxt {
#define BNXT_FW_CAP_ADV_FLOW_COUNTERS BIT(6)
#define BNXT_FW_CAP_LINK_ADMIN BIT(7)
- pthread_mutex_t flow_lock;
+ rte_thread_mutex_t flow_lock;
uint32_t vnic_cap_flags;
#define BNXT_VNIC_CAP_COS_CLASSIFY BIT(0)
@@ -731,18 +731,18 @@ struct bnxt {
rte_iova_t hwrm_short_cmd_req_dma_addr;
rte_spinlock_t hwrm_lock;
/* synchronize between dev_configure_op and int handler */
- pthread_mutex_t def_cp_lock;
+ rte_thread_mutex_t def_cp_lock;
/* synchronize between dev_start_op and async evt handler
* Locking sequence in async evt handler will be
* def_cp_lock
* health_check_lock
*/
- pthread_mutex_t health_check_lock;
+ rte_thread_mutex_t health_check_lock;
/* synchronize between dev_stop/dev_close_op and
* error recovery thread triggered as part of
* HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY
*/
- pthread_mutex_t err_recovery_lock;
+ rte_thread_mutex_t err_recovery_lock;
uint16_t max_req_len;
uint16_t max_resp_len;
uint16_t hwrm_max_ext_req_len;
@@ -928,10 +928,10 @@ uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
extern const struct rte_flow_ops bnxt_flow_ops;
#define bnxt_acquire_flow_lock(bp) \
- pthread_mutex_lock(&(bp)->flow_lock)
+ rte_thread_mutex_lock(&(bp)->flow_lock)
#define bnxt_release_flow_lock(bp) \
- pthread_mutex_unlock(&(bp)->flow_lock)
+ rte_thread_mutex_unlock(&(bp)->flow_lock)
#define BNXT_VALID_VNIC_OR_RET(bp, vnic_id) do { \
if ((vnic_id) >= (bp)->max_vnics) { \
@@ -133,7 +133,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
return;
}
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
event_data = rte_le_to_cpu_32(async_cmp->event_data1);
/* timestamp_lo/hi values are in units of 100ms */
bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
@@ -153,7 +153,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
}
bp->flags |= BNXT_FLAG_FW_RESET;
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
(void *)bp);
break;
@@ -1043,7 +1043,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
* are calculated correctly.
*/
- pthread_mutex_lock(&bp->def_cp_lock);
+ rte_thread_mutex_lock(&bp->def_cp_lock);
if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
bnxt_disable_int(bp);
@@ -1053,20 +1053,20 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return -ENOSPC;
}
if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
rc = bnxt_alloc_async_cp_ring(bp);
if (rc) {
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return rc;
}
bnxt_enable_int(bp);
}
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
}
/* Inherit new configurations */
@@ -1373,14 +1373,14 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
if (bp->flags & BNXT_FLAG_FW_RESET) {
PMD_DRV_LOG(ERR,
"Adapter recovering from error..Please retry\n");
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return -EAGAIN;
}
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return bnxt_dev_stop(eth_dev);
}
@@ -1459,13 +1459,13 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
static void
bnxt_uninit_locks(struct bnxt *bp)
{
- pthread_mutex_destroy(&bp->flow_lock);
- pthread_mutex_destroy(&bp->def_cp_lock);
- pthread_mutex_destroy(&bp->health_check_lock);
- pthread_mutex_destroy(&bp->err_recovery_lock);
+ rte_thread_mutex_destroy(&bp->flow_lock);
+ rte_thread_mutex_destroy(&bp->def_cp_lock);
+ rte_thread_mutex_destroy(&bp->health_check_lock);
+ rte_thread_mutex_destroy(&bp->err_recovery_lock);
if (bp->rep_info) {
- pthread_mutex_destroy(&bp->rep_info->vfr_lock);
- pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
+ rte_thread_mutex_destroy(&bp->rep_info->vfr_lock);
+ rte_thread_mutex_destroy(&bp->rep_info->vfr_start_lock);
}
}
@@ -1498,14 +1498,14 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
if (bp->flags & BNXT_FLAG_FW_RESET) {
PMD_DRV_LOG(ERR,
"Adapter recovering from error...Please retry\n");
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return -EAGAIN;
}
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
/* cancel the recovery handler before remove dev */
rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
@@ -3839,7 +3839,7 @@ static void bnxt_dev_recover(void *arg)
int timeout = bp->fw_reset_max_msecs;
int rc = 0;
- pthread_mutex_lock(&bp->err_recovery_lock);
+ rte_thread_mutex_lock(&bp->err_recovery_lock);
/* Clear Error flag so that device re-init should happen */
bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
@@ -3876,7 +3876,7 @@ static void bnxt_dev_recover(void *arg)
goto err_start;
PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
return;
err_start:
@@ -3884,7 +3884,7 @@ static void bnxt_dev_recover(void *arg)
err:
bp->flags |= BNXT_FLAG_FATAL_ERROR;
bnxt_uninit_resources(bp, false);
- pthread_mutex_unlock(&bp->err_recovery_lock);
+ rte_thread_mutex_unlock(&bp->err_recovery_lock);
PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
}
@@ -4035,7 +4035,7 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp)
{
uint32_t polling_freq;
- pthread_mutex_lock(&bp->health_check_lock);
+ rte_thread_mutex_lock(&bp->health_check_lock);
if (!bnxt_is_recovery_enabled(bp))
goto done;
@@ -4050,7 +4050,7 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp)
bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
done:
- pthread_mutex_unlock(&bp->health_check_lock);
+ rte_thread_mutex_unlock(&bp->health_check_lock);
}
static void bnxt_cancel_fw_health_check(struct bnxt *bp)
@@ -4832,25 +4832,25 @@ bnxt_init_locks(struct bnxt *bp)
{
int err;
- err = pthread_mutex_init(&bp->flow_lock, NULL);
+ err = rte_thread_mutex_init(&bp->flow_lock);
if (err) {
PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
return err;
}
- err = pthread_mutex_init(&bp->def_cp_lock, NULL);
+ err = rte_thread_mutex_init(&bp->def_cp_lock);
if (err) {
PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
return err;
}
- err = pthread_mutex_init(&bp->health_check_lock, NULL);
+ err = rte_thread_mutex_init(&bp->health_check_lock);
if (err) {
PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
return err;
}
- err = pthread_mutex_init(&bp->err_recovery_lock, NULL);
+ err = rte_thread_mutex_init(&bp->err_recovery_lock);
if (err)
PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n");
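
rte_thread_mutex_init() drops pthread's attribute argument but keeps the error return, so each call in bnxt_init_locks() is still checked individually. The repetition could be folded into a small helper; a sketch (helper name hypothetical):

	static int
	bnxt_init_one_lock(rte_thread_mutex_t *lock, const char *name)
	{
		int err = rte_thread_mutex_init(lock);

		if (err)
			PMD_DRV_LOG(ERR, "Unable to initialize %s\n", name);
		return err;
	}

	/* usage: if (bnxt_init_one_lock(&bp->flow_lock, "flow_lock")) ... */
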
@@ -5617,14 +5617,14 @@ static int bnxt_init_rep_info(struct bnxt *bp)
for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
- rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
+ rc = rte_thread_mutex_init(&bp->rep_info->vfr_lock);
if (rc) {
PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
bnxt_free_rep_info(bp);
return rc;
}
- rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
+ rc = rte_thread_mutex_init(&bp->rep_info->vfr_start_lock);
if (rc) {
PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
bnxt_free_rep_info(bp);
@@ -30,15 +30,15 @@ void bnxt_int_handler(void *param)
return;
raw_cons = cpr->cp_raw_cons;
- pthread_mutex_lock(&bp->def_cp_lock);
+ rte_thread_mutex_lock(&bp->def_cp_lock);
while (1) {
if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) {
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return;
}
if (is_bnxt_in_error(bp)) {
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
return;
}
@@ -58,7 +58,7 @@ void bnxt_int_handler(void *param)
else
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
- pthread_mutex_unlock(&bp->def_cp_lock);
+ rte_thread_mutex_unlock(&bp->def_cp_lock);
}
int bnxt_free_int(struct bnxt *bp)
@@ -120,7 +120,7 @@ bnxt_rep_tx_burst(void *tx_queue,
qid = vfr_txq->txq->queue_id;
vf_rep_bp = vfr_txq->bp;
parent = vf_rep_bp->parent_dev->data->dev_private;
- pthread_mutex_lock(&parent->rep_info->vfr_lock);
+ rte_thread_mutex_lock(&parent->rep_info->vfr_lock);
ptxq = parent->tx_queues[qid];
ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action;
@@ -132,7 +132,7 @@ bnxt_rep_tx_burst(void *tx_queue,
rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
ptxq->vfr_tx_cfa_action = 0;
- pthread_mutex_unlock(&parent->rep_info->vfr_lock);
+ rte_thread_mutex_unlock(&parent->rep_info->vfr_lock);
return rc;
}
@@ -407,15 +407,15 @@ int bnxt_rep_dev_start_op(struct rte_eth_dev *eth_dev)
rep_info = &parent_bp->rep_info[rep_bp->vf_id];
BNXT_TF_DBG(DEBUG, "BNXT Port:%d VFR start\n", eth_dev->data->port_id);
- pthread_mutex_lock(&rep_info->vfr_start_lock);
+ rte_thread_mutex_lock(&rep_info->vfr_start_lock);
if (!rep_info->conduit_valid) {
rc = bnxt_get_dflt_vnic_svif(parent_bp, rep_bp);
if (rc || !rep_info->conduit_valid) {
- pthread_mutex_unlock(&rep_info->vfr_start_lock);
+ rte_thread_mutex_unlock(&rep_info->vfr_start_lock);
return rc;
}
}
- pthread_mutex_unlock(&rep_info->vfr_start_lock);
+ rte_thread_mutex_unlock(&rep_info->vfr_start_lock);
rc = bnxt_vfr_alloc(eth_dev);
if (rc) {
@@ -28,7 +28,7 @@ STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
/* Mutex to synchronize bnxt_ulp_session_list operations. */
-static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t bnxt_ulp_global_mutex = RTE_THREAD_MUTEX_INITIALIZER;
/*
* Allow the deletion of context only for the bnxt device that
@@ -640,7 +640,7 @@ ulp_ctx_detach(struct bnxt *bp)
static void
ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
{
- pthread_mutex_lock(&session->bnxt_ulp_mutex);
+ rte_thread_mutex_lock(&session->bnxt_ulp_mutex);
if (!session->bnxt_ulp_init) {
session->bnxt_ulp_init = true;
@@ -649,7 +649,7 @@ ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
*init = true;
}
- pthread_mutex_unlock(&session->bnxt_ulp_mutex);
+ rte_thread_mutex_unlock(&session->bnxt_ulp_mutex);
}
/*
@@ -690,7 +690,7 @@ ulp_session_init(struct bnxt *bp,
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
- pthread_mutex_lock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
session = ulp_get_session(pci_addr);
if (!session) {
@@ -701,17 +701,17 @@ ulp_session_init(struct bnxt *bp,
if (!session) {
BNXT_TF_DBG(ERR,
"Allocation failed for bnxt_ulp_session\n");
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
return NULL;
} else {
/* Add it to the queue */
session->pci_info.domain = pci_addr->domain;
session->pci_info.bus = pci_addr->bus;
- rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
+ rc = rte_thread_mutex_init(&session->bnxt_ulp_mutex);
if (rc) {
BNXT_TF_DBG(ERR, "mutex create failed\n");
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
return NULL;
}
STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
@@ -719,7 +719,7 @@ ulp_session_init(struct bnxt *bp,
}
}
ulp_context_initialized(session, init);
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
return session;
}
@@ -734,12 +734,12 @@ ulp_session_deinit(struct bnxt_ulp_session_state *session)
return;
if (!session->cfg_data) {
- pthread_mutex_lock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
STAILQ_REMOVE(&bnxt_ulp_session_list, session,
bnxt_ulp_session_state, next);
- pthread_mutex_destroy(&session->bnxt_ulp_mutex);
+ rte_thread_mutex_destroy(&session->bnxt_ulp_mutex);
rte_free(session);
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
}
}
@@ -892,7 +892,7 @@ bnxt_ulp_deinit(struct bnxt *bp,
BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
/* free the flow db lock */
- pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
+ rte_thread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
/* Delete the ulp context and tf session and free the ulp context */
ulp_ctx_deinit(bp, session);
@@ -917,7 +917,7 @@ bnxt_ulp_init(struct bnxt *bp,
goto jump_to_error;
}
- rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
+ rc = rte_thread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
goto jump_to_error;
@@ -1117,9 +1117,9 @@ bnxt_ulp_port_deinit(struct bnxt *bp)
/* Get the session details */
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
- pthread_mutex_lock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_lock(&bnxt_ulp_global_mutex);
session = ulp_get_session(pci_addr);
- pthread_mutex_unlock(&bnxt_ulp_global_mutex);
+ rte_thread_mutex_unlock(&bnxt_ulp_global_mutex);
/* session not found then just exit */
if (!session) {
@@ -1451,7 +1451,7 @@ bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
if (!ulp_ctx || !ulp_ctx->cfg_data)
return -1;
- if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
+ if (rte_thread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
return -1;
}
@@ -1465,5 +1465,5 @@ bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
if (!ulp_ctx || !ulp_ctx->cfg_data)
return;
- pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
+ rte_thread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
}
@@ -49,7 +49,7 @@ struct bnxt_ulp_data {
uint32_t dev_id; /* Hardware device id */
uint32_t ref_cnt;
struct bnxt_ulp_flow_db *flow_db;
- pthread_mutex_t flow_db_lock;
+ rte_thread_mutex_t flow_db_lock;
void *mapper_data;
struct bnxt_ulp_port_db *port_db;
struct bnxt_ulp_fc_info *fc_info;
@@ -75,7 +75,7 @@ struct bnxt_ulp_pci_info {
struct bnxt_ulp_session_state {
STAILQ_ENTRY(bnxt_ulp_session_state) next;
bool bnxt_ulp_init;
- pthread_mutex_t bnxt_ulp_mutex;
+ rte_thread_mutex_t bnxt_ulp_mutex;
struct bnxt_ulp_pci_info pci_info;
struct bnxt_ulp_data *cfg_data;
struct tf *g_tfp;
@@ -84,7 +84,7 @@ ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
if (!ulp_fc_info)
goto error;
- rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
+ rc = rte_thread_mutex_init(&ulp_fc_info->fc_lock);
if (rc) {
PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
goto error;
@@ -141,7 +141,7 @@ ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
ulp_fc_mgr_thread_cancel(ctxt);
- pthread_mutex_destroy(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_destroy(&ulp_fc_info->fc_lock);
for (i = 0; i < TF_DIR_MAX; i++)
rte_free(ulp_fc_info->sw_acc_tbl[i]);
@@ -383,7 +383,7 @@ ulp_fc_mgr_alarm_cb(void *arg)
goto out;
if (!ulp_fc_info->num_entries) {
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
ulp_fc_mgr_thread_cancel(ctxt);
return;
}
@@ -414,7 +414,7 @@ ulp_fc_mgr_alarm_cb(void *arg)
}
}
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
/*
* If cmd fails once, no need of
@@ -503,12 +503,12 @@ int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
if (!ulp_fc_info)
return -EIO;
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
ulp_fc_info->num_entries++;
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
return 0;
}
@@ -535,14 +535,14 @@ int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
if (!ulp_fc_info)
return -EIO;
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
ulp_fc_info->num_entries--;
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
return 0;
}
@@ -607,7 +607,7 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
hw_cntr_id = params.resource_hndl;
if (params.resource_sub_type ==
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) {
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id -
ulp_fc_info->shadow_hw_tbl[dir].start_idx;
sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
@@ -621,7 +621,7 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
sw_acc_tbl_entry->pkt_count = 0;
sw_acc_tbl_entry->byte_count = 0;
}
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
} else if (params.resource_sub_type ==
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT_ACC) {
/* Get stats from the parent child table */
@@ -663,7 +663,7 @@ int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
if (!ulp_fc_info)
return -EIO;
- pthread_mutex_lock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_lock(&ulp_fc_info->fc_lock);
sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
@@ -672,7 +672,7 @@ int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
hw_cntr_id, fid);
rc = -ENOENT;
}
- pthread_mutex_unlock(&ulp_fc_info->fc_lock);
+ rte_thread_mutex_unlock(&ulp_fc_info->fc_lock);
return rc;
}
@@ -47,7 +47,7 @@ struct bnxt_ulp_fc_info {
struct hw_fc_mem_info shadow_hw_tbl[TF_DIR_MAX];
uint32_t flags;
uint32_t num_entries;
- pthread_mutex_t fc_lock;
+ rte_thread_mutex_t fc_lock;
};
int32_t
@@ -129,14 +129,14 @@ do { \
#define q_waitqueue_t \
struct { \
pthread_cond_t cond; \
- pthread_mutex_t mutex; \
+ rte_thread_mutex_t mutex; \
}
#define ena_wait_queue_t q_waitqueue_t
#define ENA_WAIT_EVENT_INIT(waitqueue) \
do { \
- pthread_mutex_init(&(waitqueue).mutex, NULL); \
+ rte_thread_mutex_init(&(waitqueue).mutex); \
pthread_cond_init(&(waitqueue).cond, NULL); \
} while (0)
@@ -149,10 +149,10 @@ do { \
wait.tv_sec = now.tv_sec + timeout / 1000000UL; \
timeout_us = timeout % 1000000UL; \
wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \
- pthread_mutex_lock(&waitevent.mutex); \
+ rte_thread_mutex_lock(&waitevent.mutex); \
pthread_cond_timedwait(&waitevent.cond, \
&waitevent.mutex, &wait); \
- pthread_mutex_unlock(&waitevent.mutex); \
+ rte_thread_mutex_unlock(&waitevent.mutex); \
} while (0)
#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond)
/* pthread condition doesn't need to be rearmed after usage */
@@ -107,7 +107,7 @@ struct enic {
int iommu_groupid;
int eventfd;
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
- pthread_t err_intr_thread;
+ rte_thread_t err_intr_thread;
int promisc;
int allmulti;
uint8_t ig_vlan_strip_en;
@@ -433,7 +433,7 @@ struct hns3_hw {
struct hns3_cmq cmq;
struct hns3_mbx_resp_status mbx_resp; /* mailbox response */
struct hns3_mbx_arq_ring arq; /* mailbox async rx queue */
- pthread_t irq_thread_id;
+ rte_thread_t irq_thread_id;
struct hns3_mac mac;
unsigned int secondary_cnt; /* Number of secondary processes init'd. */
struct hns3_tqp_stats tqp_stats;
@@ -1107,7 +1107,7 @@ hns3vf_interrupt_handler(void *param)
uint32_t clearval;
if (hw->irq_thread_id == 0)
- hw->irq_thread_id = pthread_self();
+ hw->irq_thread_id = rte_thread_self();
/* Disable interrupt */
hns3vf_disable_irq0(hw);
@@ -101,7 +101,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
* Sending mbox in the interrupt thread cannot wait for the
* response, so polling the mbox response on the irq thread.
*/
- if (pthread_equal(hw->irq_thread_id, pthread_self())) {
+ if (rte_thread_equal(hw->irq_thread_id, rte_thread_self())) {
in_irq = true;
hns3_poll_all_sync_msg();
} else {
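
hns3 captures the interrupt thread's identity once, then later asks whether the caller is that thread; the pair of hunks maps pthread_self()/pthread_equal() onto their rte_thread twins. Condensed (the == 0 guard relies on rte_thread_t being integer-comparable, as the hunk above already assumes):

	static rte_thread_t irq_thread_id;

	static void irq_handler(void *param __rte_unused)
	{
		if (irq_thread_id == 0)		/* first run on the irq thread */
			irq_thread_id = rte_thread_self();
		/* ... */
	}

	static bool in_irq_thread(void)
	{
		/* nonzero when the ids match, as with pthread_equal() */
		return rte_thread_equal(irq_thread_id, rte_thread_self()) != 0;
	}
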
@@ -152,7 +152,7 @@ static void
start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
{
struct ice_dcf_reset_event_param *param;
- pthread_t thread;
+ rte_thread_t thread;
int ret;
param = malloc(sizeof(*param));
@@ -165,7 +165,7 @@ start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
param->vfr = vfr;
param->vf_id = vf_id;
- ret = pthread_create(&thread, NULL,
+ ret = rte_thread_create(&thread, NULL,
ice_dcf_vsi_update_service_handler, param);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
@@ -27,7 +27,7 @@
#include "ipn3ke_ethdev.h"
static int ipn3ke_rpst_scan_num;
-static pthread_t ipn3ke_rpst_scan_thread;
+static rte_thread_t ipn3ke_rpst_scan_thread;
/** Double linked list of representor port. */
TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst);
@@ -2614,11 +2614,11 @@ ipn3ke_rpst_scan_check(void)
return -1;
}
} else if (ipn3ke_rpst_scan_num == 0) {
- ret = pthread_cancel(ipn3ke_rpst_scan_thread);
+ ret = rte_thread_cancel(ipn3ke_rpst_scan_thread);
if (ret)
IPN3KE_AFU_PMD_ERR("Can't cancel the thread");
- ret = pthread_join(ipn3ke_rpst_scan_thread, NULL);
+ ret = rte_thread_join(ipn3ke_rpst_scan_thread, NULL);
if (ret)
IPN3KE_AFU_PMD_ERR("Can't join the thread");
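
Stopping the scan thread keeps the usual cancel-then-join teardown, now through the rte_thread wrappers; the identical two-step recurs in the KNI and ifpga hunks below. In skeleton form:

	static void stop_scan_thread(rte_thread_t t)
	{
		if (rte_thread_cancel(t))
			IPN3KE_AFU_PMD_ERR("Can't cancel the thread");
		if (rte_thread_join(t, NULL))	/* reap even after cancel */
			IPN3KE_AFU_PMD_ERR("Can't join the thread");
	}
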
@@ -4141,7 +4141,7 @@ ixgbe_dev_setup_link_thread_handler(void *param)
u32 speed;
bool autoneg = false;
- pthread_detach(pthread_self());
+ pthread_detach(rte_thread_self());
speed = hw->phy.autoneg_advertised;
if (!speed)
ixgbe_get_link_capabilities(hw, &speed, &autoneg);
@@ -516,7 +516,7 @@ struct ixgbe_adapter {
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
rte_atomic32_t link_thread_running;
- pthread_t link_thread_tid;
+ rte_thread_t link_thread_tid;
};
struct ixgbe_vf_representor {
@@ -50,7 +50,7 @@ struct pmd_internals {
uint16_t port_id;
int is_kni_started;
- pthread_t thread;
+ rte_thread_t thread;
int stop_thread;
int no_request_thread;
@@ -186,11 +186,11 @@ eth_kni_dev_stop(struct rte_eth_dev *dev)
if (internals->no_request_thread == 0 && internals->stop_thread == 0) {
internals->stop_thread = 1;
- ret = pthread_cancel(internals->thread);
+ ret = rte_thread_cancel(internals->thread);
if (ret)
PMD_LOG(ERR, "Can't cancel the thread");
- ret = pthread_join(internals->thread, NULL);
+ ret = rte_thread_join(internals->thread, NULL);
if (ret)
PMD_LOG(ERR, "Can't join the thread");
}
@@ -2335,7 +2335,7 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
int err = 0;
sh->numa_node = spawn->pci_dev->device.numa_node;
- pthread_mutex_init(&sh->txpp.mutex, NULL);
+ rte_thread_mutex_init(&sh->txpp.mutex);
/*
* Configure environment variable "MLX5_BF_SHUT_UP"
* before the device creation. The rdma_core library
@@ -181,7 +181,7 @@ int mlx5_logtype;
static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_dev_ctx_list_mutex;
+static rte_thread_mutex_t mlx5_dev_ctx_list_mutex;
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
[MLX5_IPOOL_DECAP_ENCAP] = {
@@ -884,7 +884,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex);
/* Search for IB context by device name. */
LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
if (!strcmp(sh->ibdev_name,
@@ -1010,11 +1010,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
rte_spinlock_init(&sh->geneve_tlv_opt_sl);
exit:
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
- pthread_mutex_destroy(&sh->txpp.mutex);
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_destroy(&sh->txpp.mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
if (sh->cnt_id_tbl)
mlx5_l3t_destroy(sh->cnt_id_tbl);
@@ -1046,7 +1046,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
- pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
struct mlx5_dev_ctx_shared *lctx;
@@ -1077,7 +1077,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
/* Release flow workspaces objects on the last device. */
if (LIST_EMPTY(&mlx5_dev_ctx_list))
mlx5_flow_os_release_workspace();
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
/*
* Ensure there is no async event handler installed.
* Only primary process handles async device events.
@@ -1108,11 +1108,11 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
if (sh->ctx)
claim_zero(mlx5_glue->close_device(sh->ctx));
MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
- pthread_mutex_destroy(&sh->txpp.mutex);
+ rte_thread_mutex_destroy(&sh->txpp.mutex);
mlx5_free(sh);
return;
exit:
- pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+ rte_thread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}
/**
@@ -2234,7 +2234,7 @@ RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)
*/
RTE_INIT(rte_mlx5_pmd_init)
{
- pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
+ rte_thread_mutex_init(&mlx5_dev_ctx_list_mutex);
mlx5_common_init();
/* Build the static tables for Verbs conversion. */
mlx5_set_ptype_table();
@@ -632,7 +632,7 @@ struct mlx5_txpp_ts {
/* Tx packet pacing structure. */
struct mlx5_dev_txpp {
- pthread_mutex_t mutex; /* Pacing create/destroy mutex. */
+ rte_thread_mutex_t mutex; /* Pacing create/destroy mutex. */
uint32_t refcnt; /* Pacing reference counter. */
uint32_t freq; /* Timestamp frequency, Hz. */
uint32_t tick; /* Completion tick duration in nanoseconds. */
@@ -902,7 +902,7 @@ mlx5_txpp_start(struct rte_eth_dev *dev)
if (ret < 0)
return 0;
}
- ret = pthread_mutex_lock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_lock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
if (sh->txpp.refcnt) {
@@ -918,7 +918,7 @@ mlx5_txpp_start(struct rte_eth_dev *dev)
rte_errno = -err;
}
}
- ret = pthread_mutex_unlock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_unlock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
return err;
@@ -945,7 +945,7 @@ mlx5_txpp_stop(struct rte_eth_dev *dev)
return;
}
priv->txpp_en = 0;
- ret = pthread_mutex_lock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_lock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
MLX5_ASSERT(sh->txpp.refcnt);
@@ -953,7 +953,7 @@ mlx5_txpp_stop(struct rte_eth_dev *dev)
return;
/* No references any more, do actual destroy. */
mlx5_txpp_destroy(sh);
- ret = pthread_mutex_unlock(&sh->txpp.mutex);
+ ret = rte_thread_mutex_unlock(&sh->txpp.mutex);
MLX5_ASSERT(!ret);
RTE_SET_USED(ret);
}
@@ -253,7 +253,7 @@ struct mlx5_workspace_thread {
static struct mlx5_workspace_thread *curr;
static struct mlx5_workspace_thread *first;
rte_tls_key ws_tls_index;
-static pthread_mutex_t lock_thread_list;
+static rte_thread_mutex_t lock_thread_list;
static bool
mlx5_is_thread_alive(HANDLE thread_handle)
@@ -330,7 +330,7 @@ mlx5_flow_os_release_workspace(void)
free(first);
}
rte_thread_tls_key_delete(ws_tls_index);
- pthread_mutex_destroy(&lock_thread_list);
+ rte_thread_mutex_destroy(&lock_thread_list);
}
static int
@@ -352,7 +352,7 @@ mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
}
temp->mlx5_ws = data;
temp->thread_handle = curr_thread;
- pthread_mutex_lock(&lock_thread_list);
+ rte_thread_mutex_lock(&lock_thread_list);
mlx5_clear_thread_list();
if (!first) {
first = temp;
@@ -361,7 +361,7 @@ mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
curr->next = temp;
curr = curr->next;
}
- pthread_mutex_unlock(&lock_thread_list);
+ rte_thread_mutex_unlock(&lock_thread_list);
return 0;
}
@@ -374,7 +374,7 @@ mlx5_flow_os_init_workspace_once(void)
DRV_LOG(ERR, "Can't create flow workspace data thread key.");
return err;
}
- pthread_mutex_init(&lock_thread_list, NULL);
+ rte_thread_mutex_init(&lock_thread_list);
return 0;
}
@@ -226,7 +226,7 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
int err = 0;
struct mlx5_context *mlx5_ctx;
- pthread_mutex_init(&sh->txpp.mutex, NULL);
+ rte_thread_mutex_init(&sh->txpp.mutex);
/* Set numa node from pci probe */
sh->numa_node = spawn->pci_dev->device.numa_node;
@@ -153,10 +153,10 @@ void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
/* Mutexes */
-typedef pthread_mutex_t osal_mutex_t;
-#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
-#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
-#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+typedef rte_thread_mutex_t osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) rte_thread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) rte_thread_mutex_init(lock)
+#define OSAL_MUTEX_ACQUIRE(lock) rte_thread_mutex_lock(lock)
#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
#define OSAL_MUTEX_DEALLOC(lock) nothing
@@ -121,7 +121,7 @@ TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
TAILQ_HEAD_INITIALIZER(internal_list);
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
static struct rte_eth_link pmd_link = {
.link_speed = 10000,
@@ -507,7 +507,7 @@ find_internal_resource(char *ifname)
if (ifname == NULL)
return NULL;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
internal = list->eth_dev->data->dev_private;
@@ -517,7 +517,7 @@ find_internal_resource(char *ifname)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -1001,9 +1001,9 @@ vhost_driver_setup(struct rte_eth_dev *eth_dev)
goto free_list;
list->eth_dev = eth_dev;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_INSERT_TAIL(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
@@ -1035,9 +1035,9 @@ vhost_driver_setup(struct rte_eth_dev *eth_dev)
rte_vhost_driver_unregister(internal->iface_name);
list_remove:
vring_states[eth_dev->data->port_id] = NULL;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(vring_state);
free_list:
rte_free(list);
@@ -1093,7 +1093,7 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
if (!rte_eth_dev_is_valid_port(port_id))
return -1;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
eth_dev = list->eth_dev;
@@ -1106,7 +1106,7 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
return vid;
}
@@ -1184,9 +1184,9 @@ eth_dev_close(struct rte_eth_dev *dev)
list = find_internal_resource(internal->iface_name);
if (list) {
rte_vhost_driver_unregister(internal->iface_name);
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(list);
}
@@ -142,7 +142,7 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev)
uint64_t features;
int ret = -1;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
/* Step 0: tell vhost to create queues */
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
@@ -160,7 +160,7 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev)
goto error;
PMD_DRV_LOG(INFO, "(%s) set features: 0x%" PRIx64, dev->path, features);
error:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return ret;
}
@@ -184,7 +184,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
* memory subsystem in the future.
*/
rte_mcfg_mem_read_lock();
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
/* Step 2: share memory regions */
ret = dev->ops->set_memory_table(dev);
@@ -205,12 +205,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
dev->started = true;
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
rte_mcfg_mem_read_unlock();
return 0;
error:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
rte_mcfg_mem_read_unlock();
PMD_INIT_LOG(ERR, "(%s) Failed to start device\n", dev->path);
@@ -225,7 +225,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uint32_t i;
int ret;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
if (!dev->started)
goto out;
@@ -248,11 +248,11 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
dev->started = false;
out:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return 0;
err:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
PMD_INIT_LOG(ERR, "(%s) Failed to stop device\n", dev->path);
@@ -379,7 +379,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
if (msl->external)
return;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
if (dev->started == false)
goto exit;
@@ -404,7 +404,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
}
exit:
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
if (ret < 0)
PMD_DRV_LOG(ERR, "(%s) Failed to update memory table\n", dev->path);
@@ -490,7 +490,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
uint64_t backend_features;
int i;
- pthread_mutex_init(&dev->mutex, NULL);
+ rte_thread_mutex_init(&dev->mutex);
strlcpy(dev->path, path, PATH_MAX);
for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
@@ -795,13 +795,13 @@ virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
int ret;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
dev->status = status;
ret = dev->ops->set_status(dev, status);
if (ret && ret != -ENOTSUP)
PMD_INIT_LOG(ERR, "(%s) Failed to set backend status\n", dev->path);
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return ret;
}
@@ -811,7 +811,7 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev)
int ret;
uint8_t status;
- pthread_mutex_lock(&dev->mutex);
+ rte_thread_mutex_lock(&dev->mutex);
ret = dev->ops->get_status(dev, &status);
if (!ret) {
@@ -836,7 +836,7 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev)
PMD_INIT_LOG(ERR, "(%s) Failed to get backend status\n", dev->path);
}
- pthread_mutex_unlock(&dev->mutex);
+ rte_thread_mutex_unlock(&dev->mutex);
return ret;
}
@@ -56,7 +56,7 @@ struct virtio_user_dev {
bool qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS];
struct virtio_user_backend_ops *ops;
- pthread_mutex_t mutex;
+ rte_thread_mutex_t mutex;
bool started;
void *backend_data;
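
Dynamically initialized mutexes change shape slightly: rte_thread_mutex_init() in this series takes no attribute argument, so the trailing NULL disappears. A sketch (illustrative only; the struct is a hypothetical stand-in):

#include <stdbool.h>
#include <rte_thread.h>

struct dev {
        rte_thread_mutex_t mutex;
        bool started;
};

static void
dev_init(struct dev *d)
{
        /* No pthread_mutexattr_t equivalent; default attributes only. */
        rte_thread_mutex_init(&d->mutex);
}

static void
dev_start(struct dev *d)
{
        rte_thread_mutex_lock(&d->mutex);
        d->started = true;
        rte_thread_mutex_unlock(&d->mutex);
}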
@@ -69,7 +69,7 @@ static const struct rte_pci_id pci_ifpga_map[] = {
static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
static int ifpga_monitor_start;
-static pthread_t ifpga_monitor_start_thread;
+static rte_thread_t ifpga_monitor_start_thread;
#define IFPGA_MAX_IRQ 12
/* 0 for FME interrupt, others are reserved for AFU irq */
@@ -526,7 +526,7 @@ ifpga_monitor_start_func(void)
int ret;
if (ifpga_monitor_start == 0) {
- ret = pthread_create(&ifpga_monitor_start_thread,
+ ret = rte_thread_create(&ifpga_monitor_start_thread,
NULL,
ifpga_rawdev_gsd_handle, NULL);
if (ret) {
@@ -545,11 +545,11 @@ ifpga_monitor_stop_func(void)
int ret;
if (ifpga_monitor_start == 1) {
- ret = pthread_cancel(ifpga_monitor_start_thread);
+ ret = rte_thread_cancel(ifpga_monitor_start_thread);
if (ret)
IFPGA_RAWDEV_PMD_ERR("Can't cancel the thread");
- ret = pthread_join(ifpga_monitor_start_thread, NULL);
+ ret = rte_thread_join(ifpga_monitor_start_thread, NULL);
if (ret)
IFPGA_RAWDEV_PMD_ERR("Can't join the thread");
@@ -53,7 +53,7 @@ struct ifcvf_internal {
int vfio_container_fd;
int vfio_group_fd;
int vfio_dev_fd;
- pthread_t tid; /* thread for notify relay */
+ rte_thread_t tid; /* thread for notify relay */
int epfd;
int vid;
struct rte_vdpa_device *vdev;
@@ -80,7 +80,7 @@ TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
TAILQ_HEAD_INITIALIZER(internal_list);
-static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t internal_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
@@ -90,7 +90,7 @@ find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
int found = 0;
struct internal_list *list;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
if (vdev == list->internal->vdev) {
@@ -99,7 +99,7 @@ find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -113,7 +113,7 @@ find_internal_resource_by_dev(struct rte_pci_device *pdev)
int found = 0;
struct internal_list *list;
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_FOREACH(list, &internal_list, next) {
if (!rte_pci_addr_cmp(&pdev->addr,
@@ -123,7 +123,7 @@ find_internal_resource_by_dev(struct rte_pci_device *pdev)
}
}
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
if (!found)
return NULL;
@@ -499,7 +499,7 @@ setup_notify_relay(struct ifcvf_internal *internal)
{
int ret;
- ret = pthread_create(&internal->tid, NULL, notify_relay,
+ ret = rte_thread_create(&internal->tid, NULL, notify_relay,
(void *)internal);
if (ret) {
DRV_LOG(ERR, "failed to create notify relay pthread.");
@@ -511,11 +511,9 @@ setup_notify_relay(struct ifcvf_internal *internal)
static int
unset_notify_relay(struct ifcvf_internal *internal)
{
- void *status;
-
if (internal->tid) {
- pthread_cancel(internal->tid);
- pthread_join(internal->tid, &status);
+ rte_thread_cancel(internal->tid);
+ rte_thread_join(internal->tid, NULL);
}
internal->tid = 0;
@@ -802,7 +800,7 @@ setup_vring_relay(struct ifcvf_internal *internal)
{
int ret;
- ret = pthread_create(&internal->tid, NULL, vring_relay,
+ ret = rte_thread_create(&internal->tid, NULL, vring_relay,
(void *)internal);
if (ret) {
DRV_LOG(ERR, "failed to create ring relay pthread.");
@@ -814,11 +812,9 @@ setup_vring_relay(struct ifcvf_internal *internal)
static int
unset_vring_relay(struct ifcvf_internal *internal)
{
- void *status;
-
if (internal->tid) {
- pthread_cancel(internal->tid);
- pthread_join(internal->tid, &status);
+ rte_thread_cancel(internal->tid);
+ rte_thread_join(internal->tid, NULL);
}
internal->tid = 0;
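
The relay teardown above also shows why the old "void *status" locals go away: rte_thread_join() in this series reports the exit value through an int pointer, and passing NULL simply discards it. Sketch of the cancel-and-join pattern (illustrative only):

#include <rte_thread.h>

static void
stop_relay(rte_thread_t tid)
{
        if (tid) {
                /* Ask the relay thread to terminate, then reap it. */
                rte_thread_cancel(tid);
                rte_thread_join(tid, NULL);
        }
}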
@@ -1248,9 +1244,9 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
goto error;
}
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_INSERT_TAIL(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_atomic32_set(&internal->started, 1);
update_datapath(internal);
@@ -1288,9 +1284,9 @@ ifcvf_pci_remove(struct rte_pci_device *pci_dev)
rte_vfio_container_destroy(internal->vfio_container_fd);
rte_vdpa_unregister_device(internal->vdev);
- pthread_mutex_lock(&internal_list_lock);
+ rte_thread_mutex_lock(&internal_list_lock);
TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
+ rte_thread_mutex_unlock(&internal_list_lock);
rte_free(list);
rte_free(internal);
@@ -48,7 +48,7 @@
TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
TAILQ_HEAD_INITIALIZER(priv_list);
-static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t priv_list_lock = RTE_THREAD_MUTEX_INITIALIZER;
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
@@ -56,14 +56,14 @@ mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
struct mlx5_vdpa_priv *priv;
int found = 0;
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_FOREACH(priv, &priv_list, next) {
if (vdev == priv->vdev) {
found = 1;
break;
}
}
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
if (!found) {
DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
rte_errno = EINVAL;
@@ -143,9 +143,9 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
DRV_LOG(ERR, "Too big vring id: %d.", vring);
return -E2BIG;
}
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
ret = mlx5_vdpa_virtq_enable(priv, vring, state);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
return ret;
}
@@ -296,7 +296,7 @@ mlx5_vdpa_dev_close(int vid)
priv->configured = 0;
priv->vid = 0;
/* The mutex may stay locked after event thread cancel - reinitialize it. */
- pthread_mutex_init(&priv->vq_config_lock, NULL);
+ rte_thread_mutex_init(&priv->vq_config_lock);
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
}
@@ -763,10 +763,10 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
mlx5_vdpa_config_get(pci_dev->device.devargs, priv);
SLIST_INIT(&priv->mr_list);
- pthread_mutex_init(&priv->vq_config_lock, NULL);
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_init(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
return 0;
error:
@@ -797,7 +797,7 @@ mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
struct mlx5_vdpa_priv *priv = NULL;
int found = 0;
- pthread_mutex_lock(&priv_list_lock);
+ rte_thread_mutex_lock(&priv_list_lock);
TAILQ_FOREACH(priv, &priv_list, next) {
if (!rte_pci_addr_cmp(&priv->pci_dev->addr, &pci_dev->addr)) {
found = 1;
@@ -806,7 +806,7 @@ mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
}
if (found)
TAILQ_REMOVE(&priv_list, priv, next);
- pthread_mutex_unlock(&priv_list_lock);
+ rte_thread_mutex_unlock(&priv_list_lock);
if (found) {
if (priv->configured)
mlx5_vdpa_dev_close(priv->vid);
@@ -815,7 +815,7 @@ mlx5_vdpa_pci_remove(struct rte_pci_device *pci_dev)
priv->var = NULL;
}
mlx5_glue->close_device(priv->ctx);
- pthread_mutex_destroy(&priv->vq_config_lock);
+ rte_thread_mutex_destroy(&priv->vq_config_lock);
rte_free(priv);
}
return 0;
@@ -119,10 +119,10 @@ enum {
struct mlx5_vdpa_priv {
TAILQ_ENTRY(mlx5_vdpa_priv) next;
uint8_t configured;
- pthread_mutex_t vq_config_lock;
+ rte_thread_mutex_t vq_config_lock;
uint64_t last_traffic_tic;
- pthread_t timer_tid;
- pthread_mutex_t timer_lock;
+ rte_thread_t timer_tid;
+ rte_thread_mutex_t timer_lock;
pthread_cond_t timer_cond;
volatile uint8_t timer_on;
int event_mode;
@@ -246,17 +246,17 @@ mlx5_vdpa_poll_handle(void *arg)
uint32_t max;
uint64_t current_tic;
- pthread_mutex_lock(&priv->timer_lock);
+ rte_thread_mutex_lock(&priv->timer_lock);
while (!priv->timer_on)
pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
- pthread_mutex_unlock(&priv->timer_lock);
+ rte_thread_mutex_unlock(&priv->timer_lock);
priv->timer_delay_us = priv->event_mode ==
MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
priv->event_us;
while (1) {
max = 0;
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
for (i = 0; i < priv->nr_virtqs; i++) {
cq = &priv->virtqs[i].eqp.cq;
if (cq->cq_obj.cq && !cq->armed) {
@@ -280,13 +280,13 @@ mlx5_vdpa_poll_handle(void *arg)
DRV_LOG(DEBUG, "Device %s traffic was stopped.",
priv->vdev->device->name);
mlx5_vdpa_arm_all_cqs(priv);
- pthread_mutex_unlock(&priv->vq_config_lock);
- pthread_mutex_lock(&priv->timer_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->timer_lock);
priv->timer_on = 0;
while (!priv->timer_on)
pthread_cond_wait(&priv->timer_cond,
&priv->timer_lock);
- pthread_mutex_unlock(&priv->timer_lock);
+ rte_thread_mutex_unlock(&priv->timer_lock);
priv->timer_delay_us = priv->event_mode ==
MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
@@ -296,7 +296,7 @@ mlx5_vdpa_poll_handle(void *arg)
} else {
priv->last_traffic_tic = current_tic;
}
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
mlx5_vdpa_timer_sleep(priv, max);
}
return NULL;
@@ -312,7 +312,7 @@ mlx5_vdpa_interrupt_handler(void *cb_arg)
uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
} out;
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
sizeof(out.buf)) >=
(ssize_t)sizeof(out.event_resp.cookie)) {
@@ -331,7 +331,7 @@ mlx5_vdpa_interrupt_handler(void *cb_arg)
eventfd_write(cq->callfd, (eventfd_t)1);
if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
mlx5_vdpa_cq_arm(priv, cq);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
return;
}
/* Don't arm again - timer will take control. */
@@ -346,13 +346,13 @@ mlx5_vdpa_interrupt_handler(void *cb_arg)
/* Traffic detected: make sure timer is on. */
priv->last_traffic_tic = rte_rdtsc();
- pthread_mutex_lock(&priv->timer_lock);
+ rte_thread_mutex_lock(&priv->timer_lock);
if (!priv->timer_on) {
priv->timer_on = 1;
pthread_cond_signal(&priv->timer_cond);
}
- pthread_mutex_unlock(&priv->timer_lock);
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->timer_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
}
static void
@@ -368,7 +368,7 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
struct mlx5_vdpa_virtq *virtq;
uint64_t sec;
- pthread_mutex_lock(&priv->vq_config_lock);
+ rte_thread_mutex_lock(&priv->vq_config_lock);
while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
sizeof(out.buf)) >=
(ssize_t)sizeof(out.event_resp.cookie)) {
@@ -414,7 +414,7 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
virtq->err_time[i - 1] = virtq->err_time[i];
virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
}
- pthread_mutex_unlock(&priv->vq_config_lock);
+ rte_thread_mutex_unlock(&priv->vq_config_lock);
#endif
}
@@ -501,42 +501,34 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
int ret;
rte_cpuset_t cpuset;
- pthread_attr_t attr;
+ rte_thread_attr_t attr;
char name[16];
- const struct sched_param sp = {
- .sched_priority = sched_get_priority_max(SCHED_RR),
- };
if (!priv->eventc)
/* All virtqs are in poll mode. */
return 0;
if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
- pthread_mutex_init(&priv->timer_lock, NULL);
+ rte_thread_mutex_init(&priv->timer_lock);
pthread_cond_init(&priv->timer_cond, NULL);
priv->timer_on = 0;
- pthread_attr_init(&attr);
+ rte_thread_attr_init(&attr);
CPU_ZERO(&cpuset);
if (priv->event_core != -1)
CPU_SET(priv->event_core, &cpuset);
else
cpuset = rte_lcore_cpuset(rte_get_main_lcore());
- ret = pthread_attr_setaffinity_np(&attr, sizeof(cpuset),
- &cpuset);
+ ret = rte_thread_attr_set_affinity(&attr, &cpuset);
if (ret) {
DRV_LOG(ERR, "Failed to set thread affinity.");
return -1;
}
- ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
- if (ret) {
- DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
- return -1;
- }
- ret = pthread_attr_setschedparam(&attr, &sp);
+ ret = rte_thread_attr_set_priority(&attr,
+ RTE_THREAD_PRIORITY_REALTIME_CRITICAL);
if (ret) {
DRV_LOG(ERR, "Failed to set thread priority.");
return -1;
}
- ret = pthread_create(&priv->timer_tid, &attr,
+ ret = rte_thread_create(&priv->timer_tid, &attr,
mlx5_vdpa_poll_handle, (void *)priv);
if (ret) {
DRV_LOG(ERR, "Failed to create timer thread.");
@@ -568,7 +560,6 @@ mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
int retries = MLX5_VDPA_INTR_RETRIES;
int ret = -EAGAIN;
- void *status;
if (priv->intr_handle.fd) {
while (retries-- && ret == -EAGAIN) {
@@ -585,8 +576,8 @@ mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
}
if (priv->timer_tid) {
- pthread_cancel(priv->timer_tid);
- pthread_join(priv->timer_tid, &status);
+ rte_thread_cancel(priv->timer_tid);
+ rte_thread_join(priv->timer_tid, NULL);
}
priv->timer_tid = 0;
}
@@ -1042,8 +1042,8 @@ main(int argc, char** argv)
int ret;
uint16_t nb_sys_ports, port;
unsigned i;
- void *retval;
- pthread_t kni_link_tid;
+ int retval;
+ rte_thread_t kni_link_tid;
int pid;
/* Associate signal_handler function with USR signals */
@@ -1126,7 +1126,7 @@ main(int argc, char** argv)
return -1;
}
monitor_links = 0;
- pthread_join(kni_link_tid, &retval);
+ rte_thread_join(kni_link_tid, &retval);
/* Release resources */
RTE_ETH_FOREACH_DEV(port) {
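
Joining with a status value follows from the same signature change: the kni example's "void *retval" becomes an int. Sketch (illustrative only):

#include <rte_thread.h>

static int
wait_for_worker(rte_thread_t tid)
{
        int retval = 0;

        /* Collect the worker's int exit value instead of a void pointer. */
        rte_thread_join(tid, &retval);
        return retval;
}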
@@ -1613,7 +1613,7 @@ main(int argc, char *argv[])
unsigned nb_ports, valid_num_ports;
int ret, i;
uint16_t portid;
- static pthread_t tid;
+ static rte_thread_t tid;
uint64_t flags = 0;
signal(SIGINT, sigint_handler);
@@ -533,7 +533,7 @@ ctrlr_worker(void *arg)
{
struct vhost_blk_ctrlr *ctrlr = (struct vhost_blk_ctrlr *)arg;
cpu_set_t cpuset;
- pthread_t thread;
+ rte_thread_t thread;
int i;
fprintf(stdout, "Ctrlr Worker Thread start\n");
@@ -545,10 +545,10 @@ ctrlr_worker(void *arg)
exit(0);
}
- thread = pthread_self();
+ thread = rte_thread_self();
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
- pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
+ rte_thread_set_affinity_by_id(thread, sizeof(cpu_set_t), &cpuset);
for (i = 0; i < NUM_OF_BLK_QUEUES; i++)
submit_inflight_vq(&ctrlr->queues[i]);
@@ -604,7 +604,7 @@ new_device(int vid)
struct vhost_blk_queue *vq;
char path[PATH_MAX];
uint64_t features;
- pthread_t tid;
+ rte_thread_t tid;
int i, ret;
bool packed_ring;
@@ -672,7 +672,7 @@ new_device(int vid)
/* start polling vring */
worker_thread_status = WORKER_STATE_START;
fprintf(stdout, "New Device %s, Device ID %d\n", path, vid);
- if (pthread_create(&tid, NULL, &ctrlr_worker, ctrlr) < 0) {
+ if (rte_thread_create(&tid, NULL, &ctrlr_worker, ctrlr) < 0) {
fprintf(stderr, "Worker Thread Started Failed\n");
return -1;
}
@@ -1843,8 +1843,8 @@ eal_auto_detect_cores(struct rte_config *cfg)
unsigned int removed = 0;
rte_cpuset_t affinity_set;
- if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
- &affinity_set))
+ if (rte_thread_get_affinity_by_id(rte_thread_self(),
+ sizeof(rte_cpuset_t), &affinity_set))
CPU_ZERO(&affinity_set);
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1872,8 +1872,8 @@ compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
}
RTE_CPU_NOT(cpuset, cpuset);
- if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
- &default_set))
+ if (rte_thread_get_affinity_by_id(rte_thread_self(),
+ sizeof(rte_cpuset_t), &default_set))
CPU_ZERO(&default_set);
RTE_CPU_AND(cpuset, cpuset, &default_set);
@@ -37,7 +37,7 @@
static int mp_fd = -1;
static char mp_filter[PATH_MAX]; /* Filter for secondary process sockets */
static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
-static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
+static rte_thread_mutex_t mp_mutex_action = RTE_THREAD_MUTEX_INITIALIZER;
static char peer_name[PATH_MAX];
struct action_entry {
@@ -96,10 +96,10 @@ TAILQ_HEAD(pending_request_list, pending_request);
static struct {
struct pending_request_list requests;
- pthread_mutex_t lock;
+ rte_thread_mutex_t lock;
} pending_requests = {
.requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
- .lock = PTHREAD_MUTEX_INITIALIZER,
+ .lock = RTE_THREAD_MUTEX_INITIALIZER,
/**< used in async requests only */
};
@@ -222,15 +222,15 @@ rte_mp_action_register(const char *name, rte_mp_t action)
strlcpy(entry->action_name, name, sizeof(entry->action_name));
entry->action = action;
- pthread_mutex_lock(&mp_mutex_action);
+ rte_thread_mutex_lock(&mp_mutex_action);
if (find_action_entry_by_name(name) != NULL) {
- pthread_mutex_unlock(&mp_mutex_action);
+ rte_thread_mutex_unlock(&mp_mutex_action);
rte_errno = EEXIST;
free(entry);
return -1;
}
TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
- pthread_mutex_unlock(&mp_mutex_action);
+ rte_thread_mutex_unlock(&mp_mutex_action);
return 0;
}
@@ -249,14 +249,14 @@ rte_mp_action_unregister(const char *name)
return;
}
- pthread_mutex_lock(&mp_mutex_action);
+ rte_thread_mutex_lock(&mp_mutex_action);
entry = find_action_entry_by_name(name);
if (entry == NULL) {
- pthread_mutex_unlock(&mp_mutex_action);
+ rte_thread_mutex_unlock(&mp_mutex_action);
return;
}
TAILQ_REMOVE(&action_entry_list, entry, next);
- pthread_mutex_unlock(&mp_mutex_action);
+ rte_thread_mutex_unlock(&mp_mutex_action);
free(entry);
}
@@ -328,7 +328,7 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
if (m->type == MP_REP || m->type == MP_IGN) {
struct pending_request *req = NULL;
- pthread_mutex_lock(&pending_requests.lock);
+ rte_thread_mutex_lock(&pending_requests.lock);
pending_req = find_pending_request(s->sun_path, msg->name);
if (pending_req) {
memcpy(pending_req->reply, msg, sizeof(*msg));
@@ -343,18 +343,18 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
pending_req);
} else
RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
- pthread_mutex_unlock(&pending_requests.lock);
+ rte_thread_mutex_unlock(&pending_requests.lock);
if (req != NULL)
trigger_async_action(req);
return;
}
- pthread_mutex_lock(&mp_mutex_action);
+ rte_thread_mutex_lock(&mp_mutex_action);
entry = find_action_entry_by_name(msg->name);
if (entry != NULL)
action = entry->action;
- pthread_mutex_unlock(&mp_mutex_action);
+ rte_thread_mutex_unlock(&mp_mutex_action);
if (!action) {
if (m->type == MP_REQ && !internal_conf->init_complete) {
@@ -527,9 +527,9 @@ async_reply_handle(void *arg)
{
struct pending_request *req;
- pthread_mutex_lock(&pending_requests.lock);
+ rte_thread_mutex_lock(&pending_requests.lock);
req = async_reply_handle_thread_unsafe(arg);
- pthread_mutex_unlock(&pending_requests.lock);
+ rte_thread_mutex_unlock(&pending_requests.lock);
if (req != NULL)
trigger_async_action(req);
@@ -587,7 +587,7 @@ rte_mp_channel_init(void)
{
char path[PATH_MAX];
int dir_fd;
- pthread_t mp_handle_tid;
+ rte_thread_t mp_handle_tid;
const struct internal_config *internal_conf =
eal_get_internal_configuration();
@@ -999,9 +999,9 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
/* for secondary process, send request to the primary process only */
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- pthread_mutex_lock(&pending_requests.lock);
+ rte_thread_mutex_lock(&pending_requests.lock);
ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
- pthread_mutex_unlock(&pending_requests.lock);
+ rte_thread_mutex_unlock(&pending_requests.lock);
goto end;
}
@@ -1022,7 +1022,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
goto close_end;
}
- pthread_mutex_lock(&pending_requests.lock);
+ rte_thread_mutex_lock(&pending_requests.lock);
while ((ent = readdir(mp_dir))) {
char path[PATH_MAX];
@@ -1041,7 +1041,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
ret = 0;
unlock_end:
- pthread_mutex_unlock(&pending_requests.lock);
+ rte_thread_mutex_unlock(&pending_requests.lock);
/* unlock the directory */
flock(dir_fd, LOCK_UN);
@@ -1119,7 +1119,7 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
* of requests to the queue at once, and some of the replies may arrive
* before we add all of the requests to the queue.
*/
- pthread_mutex_lock(&pending_requests.lock);
+ rte_thread_mutex_lock(&pending_requests.lock);
/* we have to ensure that callback gets triggered even if we don't send
* anything, therefore earlier we have allocated a dummy request. fill
@@ -1142,7 +1142,7 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
dummy_used = true;
}
- pthread_mutex_unlock(&pending_requests.lock);
+ rte_thread_mutex_unlock(&pending_requests.lock);
/* if we couldn't send anything, clean up */
if (ret != 0)
@@ -1186,7 +1186,7 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
}
/* finally, unlock the queue */
- pthread_mutex_unlock(&pending_requests.lock);
+ rte_thread_mutex_unlock(&pending_requests.lock);
/* unlock the directory */
flock(dir_fd, LOCK_UN);
@@ -1202,7 +1202,7 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
closedir_fail:
closedir(mp_dir);
unlock_fail:
- pthread_mutex_unlock(&pending_requests.lock);
+ rte_thread_mutex_unlock(&pending_requests.lock);
fail:
free(dummy);
free(param);
@@ -6,7 +6,6 @@
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
-#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <assert.h>
@@ -86,9 +85,9 @@ thread_update_affinity(rte_cpuset_t *cpusetp)
int
rte_thread_set_affinity(rte_cpuset_t *cpusetp)
{
- if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
- cpusetp) != 0) {
- RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+ if (rte_thread_set_affinity_by_id(rte_thread_self(),
+ sizeof(rte_cpuset_t), cpusetp) != 0) {
+ RTE_LOG(ERR, EAL, "rte_thread_set_affinity failed\n");
return -1;
}
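
The *_by_id variants take an explicit thread id plus the cpuset size, mirroring pthread_{set,get}affinity_np(). A sketch of pinning the calling thread (illustrative only):

#include <rte_lcore.h>
#include <rte_thread.h>

static int
pin_self_to_core(unsigned int core)
{
        rte_cpuset_t cpuset;

        CPU_ZERO(&cpuset);
        CPU_SET(core, &cpuset);
        return rte_thread_set_affinity_by_id(rte_thread_self(),
                        sizeof(cpuset), &cpuset);
}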
@@ -169,7 +168,7 @@ __rte_thread_uninit(void)
struct rte_thread_ctrl_params {
void *(*start_routine)(void *);
void *arg;
- pthread_barrier_t configured;
+ rte_thread_barrier_t configured;
};
static void *ctrl_thread_init(void *arg)
@@ -184,9 +183,9 @@ static void *ctrl_thread_init(void *arg)
__rte_thread_init(rte_lcore_id(), cpuset);
- ret = pthread_barrier_wait(&params->configured);
- if (ret == PTHREAD_BARRIER_SERIAL_THREAD) {
- pthread_barrier_destroy(&params->configured);
+ ret = rte_thread_barrier_wait(&params->configured);
+ if (ret == RTE_THREAD_BARRIER_SERIAL_THREAD) {
+ rte_thread_barrier_destroy(&params->configured);
free(params);
}
@@ -194,8 +193,8 @@ static void *ctrl_thread_init(void *arg)
}
int
-rte_ctrl_thread_create(pthread_t *thread, const char *name,
- const pthread_attr_t *attr,
+rte_ctrl_thread_create(rte_thread_t *thread, const char *name,
+ const rte_thread_attr_t *attr,
void *(*start_routine)(void *), void *arg)
{
struct internal_config *internal_conf =
@@ -211,9 +210,9 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
params->start_routine = start_routine;
params->arg = arg;
- pthread_barrier_init(&params->configured, NULL, 2);
+ rte_thread_barrier_init(&params->configured, 2);
- ret = pthread_create(thread, attr, ctrl_thread_init, (void *)params);
+ ret = rte_thread_create(thread, attr, ctrl_thread_init, params);
if (ret != 0) {
free(params);
return -ret;
@@ -226,26 +225,26 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
"Cannot set name for ctrl thread\n");
}
- ret = pthread_setaffinity_np(*thread, sizeof(*cpuset), cpuset);
+ ret = rte_thread_set_affinity_by_id(*thread, sizeof(*cpuset), cpuset);
if (ret)
goto fail;
- ret = pthread_barrier_wait(&params->configured);
- if (ret == PTHREAD_BARRIER_SERIAL_THREAD) {
- pthread_barrier_destroy(&params->configured);
+ ret = rte_thread_barrier_wait(&params->configured);
+ if (ret == RTE_THREAD_BARRIER_SERIAL_THREAD) {
+ rte_thread_barrier_destroy(&params->configured);
free(params);
}
return 0;
fail:
- if (PTHREAD_BARRIER_SERIAL_THREAD ==
- pthread_barrier_wait(&params->configured)) {
- pthread_barrier_destroy(&params->configured);
+ if (RTE_THREAD_BARRIER_SERIAL_THREAD ==
+ rte_thread_barrier_wait(&params->configured)) {
+ rte_thread_barrier_destroy(&params->configured);
free(params);
}
- pthread_cancel(*thread);
- pthread_join(*thread, NULL);
+ rte_thread_cancel(*thread);
+ rte_thread_join(*thread, NULL);
return -ret;
}
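
The barrier handshake above keeps its pthread semantics: exactly one waiter gets RTE_THREAD_BARRIER_SERIAL_THREAD back and therefore owns destroying the barrier and freeing the shared state. Reduced to its core (illustrative only):

#include <stdlib.h>
#include <rte_thread.h>

struct handshake {
        rte_thread_barrier_t configured;
};

static struct handshake *
handshake_create(void)
{
        struct handshake *h = malloc(sizeof(*h));

        /* Two parties: the creator and the new thread. */
        if (h != NULL)
                rte_thread_barrier_init(&h->configured, 2);
        return h;
}

static void
handshake_sync(struct handshake *h)
{
        /* The "serial" waiter tears the barrier down. */
        if (rte_thread_barrier_wait(&h->configured) ==
                        RTE_THREAD_BARRIER_SERIAL_THREAD) {
                rte_thread_barrier_destroy(&h->configured);
                free(h);
        }
}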
@@ -266,7 +265,7 @@ rte_thread_register(void)
rte_errno = EINVAL;
return -1;
}
- if (pthread_getaffinity_np(pthread_self(), sizeof(cpuset),
+ if (rte_thread_get_affinity_by_id(rte_thread_self(), sizeof(cpuset),
&cpuset) != 0)
CPU_ZERO(&cpuset);
lcore_id = eal_lcore_non_eal_allocate();
@@ -359,7 +359,7 @@ __rte_trace_mem_per_thread_alloc(void)
/* Store the thread name */
char *name = header->stream_header.thread_name;
memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
- rte_thread_getname(pthread_self(), name,
+ rte_thread_getname(rte_thread_self(), name,
__RTE_TRACE_EMIT_STRING_LEN_MAX);
trace->lcore_meta[count].mem = header;
@@ -19,7 +19,7 @@
* Structure storing internal configuration (per-lcore)
*/
struct lcore_config {
- pthread_t thread_id; /**< pthread identifier */
+ rte_thread_t thread_id; /**< pthread identifier */
int pipe_main2worker[2]; /**< communication pipe with main */
int pipe_worker2main[2]; /**< communication pipe with main */
@@ -75,10 +75,10 @@ struct mp_request {
TAILQ_HEAD(mp_request_list, mp_request);
static struct {
struct mp_request_list list;
- pthread_mutex_t lock;
+ rte_thread_mutex_t lock;
} mp_request_list = {
.list = TAILQ_HEAD_INITIALIZER(mp_request_list.list),
- .lock = PTHREAD_MUTEX_INITIALIZER
+ .lock = RTE_THREAD_MUTEX_INITIALIZER
};
/**
@@ -303,7 +303,7 @@ handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
int ret;
/* lock access to request */
- pthread_mutex_lock(&mp_request_list.lock);
+ rte_thread_mutex_lock(&mp_request_list.lock);
/* make sure it's not a dupe */
entry = find_request_by_id(m->id);
@@ -389,10 +389,10 @@ handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);
}
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
return 0;
fail:
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
free(entry);
return -1;
}
@@ -411,7 +411,7 @@ handle_sync_response(const struct rte_mp_msg *request,
int i;
/* lock the request */
- pthread_mutex_lock(&mp_request_list.lock);
+ rte_thread_mutex_lock(&mp_request_list.lock);
entry = find_request_by_id(mpreq->id);
if (entry == NULL) {
@@ -541,10 +541,10 @@ handle_sync_response(const struct rte_mp_msg *request,
goto fail;
}
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
return 0;
fail:
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
return -1;
}
@@ -559,7 +559,7 @@ handle_rollback_response(const struct rte_mp_msg *request,
struct mp_request *entry;
/* lock the request */
- pthread_mutex_lock(&mp_request_list.lock);
+ rte_thread_mutex_lock(&mp_request_list.lock);
memset(&msg, 0, sizeof(msg));
@@ -590,10 +590,10 @@ handle_rollback_response(const struct rte_mp_msg *request,
free(entry->alloc_state.ms);
free(entry);
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
return 0;
fail:
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
return -1;
}
@@ -605,7 +605,7 @@ handle_response(const struct rte_mp_msg *msg, const void *peer __rte_unused)
(const struct malloc_mp_req *)msg->param;
struct mp_request *entry;
- pthread_mutex_lock(&mp_request_list.lock);
+ rte_thread_mutex_lock(&mp_request_list.lock);
entry = find_request_by_id(m->id);
if (entry != NULL) {
@@ -618,7 +618,7 @@ handle_response(const struct rte_mp_msg *msg, const void *peer __rte_unused)
pthread_cond_signal(&entry->cond);
}
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
return 0;
}
@@ -708,7 +708,7 @@ request_to_primary(struct malloc_mp_req *user_req)
memset(&msg, 0, sizeof(msg));
memset(&ts, 0, sizeof(ts));
- pthread_mutex_lock(&mp_request_list.lock);
+ rte_thread_mutex_lock(&mp_request_list.lock);
entry = malloc(sizeof(*entry));
if (entry == NULL) {
@@ -769,10 +769,10 @@ request_to_primary(struct malloc_mp_req *user_req)
TAILQ_REMOVE(&mp_request_list.list, entry, next);
free(entry);
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
return ret;
fail:
- pthread_mutex_unlock(&mp_request_list.lock);
+ rte_thread_mutex_unlock(&mp_request_list.lock);
free(entry);
return -1;
}
@@ -80,6 +80,7 @@ sources += files(
'rte_random.c',
'rte_reciprocal.c',
'rte_service.c',
+ 'rte_thread.c',
'rte_version.c',
)
@@ -663,7 +663,7 @@ int
rte_eal_init(int argc, char **argv)
{
int i, fctret, ret;
- pthread_t thread_id;
+ rte_thread_t thread_id;
static uint32_t run_once;
uint32_t has_run = 0;
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
@@ -686,7 +686,7 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- thread_id = pthread_self();
+ thread_id = rte_thread_self();
eal_reset_internal_config(internal_conf);
@@ -850,7 +850,15 @@ rte_eal_init(int argc, char **argv)
eal_check_mem_on_local_socket();
- if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
+ ret = rte_thread_set_priority(rte_thread_self(),
+ internal_conf->thread_priority);
+ if (ret != 0) {
+ rte_eal_init_alert("Cannot set thread priority");
+ rte_errno = ret;
+ return -1;
+ }
+ if (rte_thread_set_affinity_by_id(rte_thread_self(),
+ sizeof(rte_cpuset_t),
&lcore_config[config->main_lcore].cpuset) != 0) {
rte_eal_init_alert("Cannot set affinity");
rte_errno = EINVAL;
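
rte_thread_set_priority() acts on an already-running thread by id; the patch feeds it internal_conf->thread_priority, with a named constant standing in here (illustrative only):

#include <rte_thread.h>

static int
raise_self_priority(void)
{
        return rte_thread_set_priority(rte_thread_self(),
                        RTE_THREAD_PRIORITY_REALTIME_CRITICAL);
}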
@@ -865,6 +873,21 @@ rte_eal_init(int argc, char **argv)
config->main_lcore, thread_id, cpuset,
ret == 0 ? "" : "...");
+ rte_thread_attr_t thread_attr;
+ ret = rte_thread_attr_init(&thread_attr);
+ if (ret != 0) {
+ rte_eal_init_alert("Cannot initialize thread attributes");
+ rte_errno = ret;
+ return -1;
+ }
+ ret = rte_thread_attr_set_priority(&thread_attr,
+ internal_conf->thread_priority);
+ if (ret != 0) {
+ rte_eal_init_alert("Cannot set thread priority attribute");
+ rte_errno = ret;
+ return -1;
+ }
+
RTE_LCORE_FOREACH_WORKER(i) {
/*
@@ -878,8 +901,10 @@ rte_eal_init(int argc, char **argv)
lcore_config[i].state = WAIT;
+ rte_thread_attr_set_affinity(&thread_attr,
+ &lcore_config[i].cpuset);
/* create a thread for each lcore */
- ret = pthread_create(&lcore_config[i].thread_id, NULL,
+ ret = rte_thread_create(&lcore_config[i].thread_id, &thread_attr,
eal_thread_loop, NULL);
if (ret != 0)
rte_panic("Cannot create thread\n");
@@ -889,10 +914,6 @@ rte_eal_init(int argc, char **argv)
"lcore-worker-%d", i);
rte_thread_setname(lcore_config[i].thread_id, thread_name);
- ret = pthread_setaffinity_np(lcore_config[i].thread_id,
- sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
- if (ret != 0)
- rte_panic("Cannot set affinity\n");
}
/*
@@ -37,7 +37,7 @@ struct alarm_entry {
rte_eal_alarm_callback cb_fn;
void *cb_arg;
volatile uint8_t executing;
- volatile pthread_t executing_id;
+ volatile rte_thread_t executing_id;
};
static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER();
@@ -156,7 +156,7 @@ eal_alarm_callback(void *arg __rte_unused)
while (ap != NULL && timespec_cmp(&now, &ap->time) >= 0) {
ap->executing = 1;
- ap->executing_id = pthread_self();
+ ap->executing_id = rte_thread_self();
rte_spinlock_unlock(&alarm_list_lk);
ap->cb_fn(ap->cb_arg);
@@ -263,8 +263,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
* finish. Otherwise we are trying to cancel
* ourselves - mark it by EINPROGRESS.
*/
- if (pthread_equal(ap->executing_id,
- pthread_self()) == 0)
+ if (rte_thread_equal(ap->executing_id,
+ rte_thread_self()) == 0)
executing++;
else
err = EINPROGRESS;
@@ -285,8 +285,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
free(ap);
count++;
ap = ap_prev;
- } else if (pthread_equal(ap->executing_id,
- pthread_self()) == 0) {
+ } else if (rte_thread_equal(ap->executing_id,
+ rte_thread_self()) == 0) {
executing++;
} else {
err = EINPROGRESS;
@@ -52,7 +52,7 @@ static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
static struct rte_intr_source_list intr_sources;
/* interrupt handling thread */
-static pthread_t intr_thread;
+static rte_thread_t intr_thread;
static volatile int kq = -1;
@@ -725,5 +725,5 @@ rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
int rte_thread_is_intr(void)
{
- return pthread_equal(intr_thread, pthread_self());
+ return rte_thread_equal(intr_thread, rte_thread_self());
}
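
rte_thread_equal() keeps pthread_equal() semantics, returning non-zero when the two ids name the same thread, which is why the "== 0" tests above are untouched. Sketch (illustrative only):

#include <rte_thread.h>

static rte_thread_t intr_thread_id;

static int
called_from_intr_thread(void)
{
        return rte_thread_equal(intr_thread_id, rte_thread_self());
}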
@@ -73,11 +73,11 @@ eal_thread_loop(__rte_unused void *arg)
char c;
int n, ret;
unsigned lcore_id;
- pthread_t thread_id;
+ rte_thread_t thread_id;
int m2w, w2m;
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
- thread_id = pthread_self();
+ thread_id = rte_thread_self();
/* retrieve our lcore_id from the configuration structure */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -143,14 +143,14 @@ int rte_sys_gettid(void)
return (int)lwpid;
}
-int rte_thread_setname(pthread_t id, const char *name)
+int rte_thread_setname(rte_thread_t id, const char *name)
{
/* this BSD function returns no error */
pthread_set_name_np(id, name);
return 0;
}
-int rte_thread_getname(pthread_t id, char *name, size_t len)
+int rte_thread_getname(rte_thread_t id, char *name, size_t len)
{
RTE_SET_USED(id);
RTE_SET_USED(name);
@@ -40,6 +40,7 @@ headers += files(
'rte_string_fns.h',
'rte_tailq.h',
'rte_thread.h',
+ 'rte_thread_types.h',
'rte_time.h',
'rte_trace.h',
'rte_trace_point.h',
@@ -370,7 +370,7 @@ rte_lcore_dump(FILE *f);
* @return
* On success, return 0; otherwise return a negative value.
*/
-int rte_thread_setname(pthread_t id, const char *name);
+int rte_thread_setname(rte_thread_t id, const char *name);
/**
* Get thread name.
@@ -387,7 +387,7 @@ int rte_thread_setname(pthread_t id, const char *name);
* On success, return 0; otherwise return a negative value.
*/
__rte_experimental
-int rte_thread_getname(pthread_t id, char *name, size_t len);
+int rte_thread_getname(rte_thread_t id, char *name, size_t len);
/**
* Register current non-EAL thread as a lcore.
@@ -436,8 +436,8 @@ rte_thread_unregister(void);
* corresponding to the error number.
*/
int
-rte_ctrl_thread_create(pthread_t *thread, const char *name,
- const pthread_attr_t *attr,
+rte_ctrl_thread_create(rte_thread_t *thread, const char *name,
+ const rte_thread_attr_t *attr,
void *(*start_routine)(void *), void *arg);
#ifdef __cplusplus
@@ -22,8 +22,6 @@
extern "C" {
#endif
-#include <pthread.h>
-
/**
* Macro to define a per lcore variable "var" of type "type", don't
* use keywords like "static" or "volatile" in type, just prefix the
@@ -958,7 +958,7 @@ int
rte_eal_init(int argc, char **argv)
{
int i, fctret, ret;
- pthread_t thread_id;
+ rte_thread_t thread_id;
static uint32_t run_once;
uint32_t has_run = 0;
const char *p;
@@ -986,7 +986,7 @@ rte_eal_init(int argc, char **argv)
p = strrchr(argv[0], '/');
strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
- thread_id = pthread_self();
+ thread_id = rte_thread_self();
eal_reset_internal_config(internal_conf);
@@ -1214,7 +1214,15 @@ rte_eal_init(int argc, char **argv)
eal_check_mem_on_local_socket();
- if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
+ ret = rte_thread_set_priority(rte_thread_self(),
+ internal_conf->thread_priority);
+ if (ret != 0) {
+ rte_eal_init_alert("Cannot set thread priority");
+ rte_errno = ret;
+ return -1;
+ }
+ if (rte_thread_set_affinity_by_id(rte_thread_self(),
+ sizeof(rte_cpuset_t),
&lcore_config[config->main_lcore].cpuset) != 0) {
rte_eal_init_alert("Cannot set affinity");
rte_errno = EINVAL;
@@ -1228,6 +1236,23 @@ rte_eal_init(int argc, char **argv)
config->main_lcore, (uintptr_t)thread_id, cpuset,
ret == 0 ? "" : "...");
+ rte_thread_attr_t thread_attr;
+ ret = rte_thread_attr_init(&thread_attr);
+ if (ret != 0) {
+ RTE_LOG(DEBUG, EAL, "Cannot initialize thread attributes,"
+ "ret = %d\n", ret);
+ rte_errno = ret;
+ return -1;
+ }
+ ret = rte_thread_attr_set_priority(&thread_attr,
+ internal_conf->thread_priority);
+ if (ret != 0) {
+ RTE_LOG(DEBUG, EAL, "Cannot set thread priority attribute,"
+ "ret = %d\n", ret);
+ rte_errno = ret;
+ return -1;
+ }
+
RTE_LCORE_FOREACH_WORKER(i) {
/*
@@ -1241,9 +1266,11 @@ rte_eal_init(int argc, char **argv)
lcore_config[i].state = WAIT;
+ rte_thread_attr_set_affinity(&thread_attr,
+ &lcore_config[i].cpuset);
/* create a thread for each lcore */
- ret = pthread_create(&lcore_config[i].thread_id, NULL,
- eal_thread_loop, NULL);
+ ret = rte_thread_create(&lcore_config[i].thread_id,
+ &thread_attr, eal_thread_loop, NULL);
if (ret != 0)
rte_panic("Cannot create thread\n");
@@ -1255,11 +1282,6 @@ rte_eal_init(int argc, char **argv)
if (ret != 0)
RTE_LOG(DEBUG, EAL,
"Cannot set name for lcore thread\n");
-
- ret = pthread_setaffinity_np(lcore_config[i].thread_id,
- sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
- if (ret != 0)
- rte_panic("Cannot set affinity\n");
}
/*
@@ -48,7 +48,7 @@ struct alarm_entry {
rte_eal_alarm_callback cb_fn;
void *cb_arg;
volatile uint8_t executing;
- volatile pthread_t executing_id;
+ volatile rte_thread_t executing_id;
};
static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER();
@@ -86,7 +86,7 @@ eal_alarm_callback(void *arg __rte_unused)
(ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec &&
(ap->time.tv_usec * NS_PER_US) <= now.tv_nsec))) {
ap->executing = 1;
- ap->executing_id = pthread_self();
+ ap->executing_id = rte_thread_self();
rte_spinlock_unlock(&alarm_list_lk);
ap->cb_fn(ap->cb_arg);
@@ -207,7 +207,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
/* If calling from other context, mark that alarm is executing
* so loop can spin till it finish. Otherwise we are trying to
* cancel our self - mark it by EINPROGRESS */
- if (pthread_equal(ap->executing_id, pthread_self()) == 0)
+ if (rte_thread_equal(ap->executing_id,
+ rte_thread_self()) == 0)
executing++;
else
err = EINPROGRESS;
@@ -228,7 +229,8 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
free(ap);
count++;
ap = ap_prev;
- } else if (pthread_equal(ap->executing_id, pthread_self()) == 0)
+ } else if (rte_thread_equal(ap->executing_id,
+ rte_thread_self()) == 0)
executing++;
else
err = EINPROGRESS;
@@ -97,7 +97,7 @@ static union intr_pipefds intr_pipe;
static struct rte_intr_source_list intr_sources;
/* interrupt handling thread */
-static pthread_t intr_thread;
+static rte_thread_t intr_thread;
/* VFIO interrupts */
#ifdef VFIO_PRESENT
@@ -1558,5 +1558,5 @@ rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
int rte_thread_is_intr(void)
{
- return pthread_equal(intr_thread, pthread_self());
+ return rte_thread_equal(intr_thread, rte_thread_self());
}
@@ -73,11 +73,11 @@ eal_thread_loop(__rte_unused void *arg)
char c;
int n, ret;
unsigned lcore_id;
- pthread_t thread_id;
+ rte_thread_t thread_id;
int m2w, w2m;
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
- thread_id = pthread_self();
+ thread_id = rte_thread_self();
/* retrieve our lcore_id from the configuration structure */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -148,7 +148,7 @@ int rte_sys_gettid(void)
return (int)syscall(SYS_gettid);
}
-int rte_thread_setname(pthread_t id, const char *name)
+int rte_thread_setname(rte_thread_t id, const char *name)
{
int ret = ENOSYS;
#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
@@ -164,7 +164,7 @@ int rte_thread_setname(pthread_t id, const char *name)
return -ret;
}
-int rte_thread_getname(pthread_t id, char *name, size_t len)
+int rte_thread_getname(rte_thread_t id, char *name, size_t len)
{
int ret = ENOSYS;
#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
@@ -80,7 +80,7 @@ static uint64_t eal_hpet_resolution_hz = 0;
/* Incremented 4 times during one 32bits hpet full count */
static uint32_t eal_hpet_msb;
-static pthread_t msb_inc_thread_id;
+static rte_thread_t msb_inc_thread_id;
/*
* This function runs on a specific thread to update a global variable
@@ -329,6 +329,25 @@ EXPORTS
rte_thread_tls_key_delete
rte_thread_tls_value_get
rte_thread_tls_value_set
+ rte_thread_mutex_lock
+ rte_thread_mutex_unlock
+ rte_thread_mutex_init
+ rte_thread_mutex_destroy
+ rte_thread_create
+ rte_thread_set_affinity_by_id
+ rte_thread_get_affinity_by_id
+ rte_thread_set_priority
+ rte_thread_attr_init
+ rte_thread_attr_set_affinity
+ rte_thread_attr_get_affinity
+ rte_thread_attr_set_priority
+ rte_thread_join
+ rte_thread_self
+ rte_thread_equal
+ rte_thread_barrier_init
+ rte_thread_barrier_wait
+ rte_thread_barrier_destroy
+ rte_thread_cancel
rte_mem_lock
rte_mem_map
@@ -5,5 +5,4 @@ sources += files(
'eal_file.c',
'eal_unix_memory.c',
'eal_unix_timer.c',
- 'rte_thread.c',
)
deleted file mode 100644
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 Mellanox Technologies, Ltd
- */
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_errno.h>
-#include <rte_log.h>
-#include <rte_thread.h>
-
-struct eal_tls_key {
- pthread_key_t thread_index;
-};
-
-int
-rte_thread_tls_key_create(rte_tls_key *key, void (*destructor)(void *))
-{
- int err;
-
- *key = malloc(sizeof(**key));
- if ((*key) == NULL) {
- RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n");
- return -1;
- }
- err = pthread_key_create(&((*key)->thread_index), destructor);
- if (err) {
- RTE_LOG(DEBUG, EAL, "pthread_key_create failed: %s\n",
- strerror(err));
- free(*key);
- return -1;
- }
- return 0;
-}
-
-int
-rte_thread_tls_key_delete(rte_tls_key key)
-{
- int err;
-
- if (!key) {
- RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
- return -1;
- }
- err = pthread_key_delete(key->thread_index);
- if (err) {
- RTE_LOG(DEBUG, EAL, "pthread_key_delete failed: %s\n",
- strerror(err));
- free(key);
- return -1;
- }
- free(key);
- return 0;
-}
-
-int
-rte_thread_tls_value_set(rte_tls_key key, const void *value)
-{
- int err;
-
- if (!key) {
- RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
- return -1;
- }
- err = pthread_setspecific(key->thread_index, value);
- if (err) {
- RTE_LOG(DEBUG, EAL, "pthread_setspecific failed: %s\n",
- strerror(err));
- return -1;
- }
- return 0;
-}
-
-void *
-rte_thread_tls_value_get(rte_tls_key key)
-{
- if (!key) {
- RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
- rte_errno = EINVAL;
- return NULL;
- }
- return pthread_getspecific(key->thread_index);
-}
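
The TLS helpers deleted here keep their signatures (they remain in the version map below); the implementation simply moves so the common and the external-thread builds can share it. Usage is unchanged, e.g. a per-thread counter (illustrative only):

#include <stdlib.h>
#include <rte_thread.h>

static rte_tls_key counter_key;

static int
counter_key_init(void)
{
        /* free() runs on each stored value at thread exit. */
        return rte_thread_tls_key_create(&counter_key, free);
}

static int
counter_bump(void)
{
        int *v = rte_thread_tls_value_get(counter_key);

        if (v == NULL) {
                v = calloc(1, sizeof(*v));
                if (v == NULL)
                        return -1;
                if (rte_thread_tls_value_set(counter_key, v) != 0) {
                        free(v);
                        return -1;
                }
        }
        return ++(*v);
}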
@@ -419,6 +419,27 @@ EXPERIMENTAL {
rte_thread_tls_key_delete;
rte_thread_tls_value_get;
rte_thread_tls_value_set;
+
+ rte_thread_mutex_lock;
+ rte_thread_mutex_unlock;
+ rte_thread_mutex_init;
+ rte_thread_mutex_destroy;
+ rte_thread_create;
+ rte_thread_set_affinity_by_id;
+ rte_thread_get_affinity_by_id;
+ rte_thread_set_priority;
+ rte_thread_attr_init;
+ rte_thread_attr_set_affinity;
+ rte_thread_attr_get_affinity;
+ rte_thread_join;
+ rte_thread_self;
+ rte_thread_equal;
+ rte_thread_barrier_init;
+ rte_thread_barrier_wait;
+ rte_thread_barrier_destroy;
+ rte_thread_cancel;
+ rte_thread_attr_set_priority;
+
};
INTERNAL {
@@ -368,6 +368,28 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ ret = rte_thread_set_priority(rte_thread_self(),
+ internal_conf->thread_priority);
+ if (ret != 0) {
+ rte_eal_init_alert("Cannot set thread priority");
+ rte_errno = ret;
+ return -1;
+ }
+ rte_thread_attr_t thread_attr;
+ ret = rte_thread_attr_init(&thread_attr);
+ if (ret != 0) {
+ rte_eal_init_alert("Cannot initialize thread attributes");
+ rte_errno = ret;
+ return -1;
+ }
+ ret = rte_thread_attr_set_priority(&thread_attr,
+ internal_conf->thread_priority);
+ if (ret != 0) {
+ rte_eal_init_alert("Cannot set thread priority attribute");
+ rte_errno = ret;
+ return -1;
+ }
+
RTE_LCORE_FOREACH_WORKER(i) {
/*
@@ -384,7 +406,9 @@ rte_eal_init(int argc, char **argv)
lcore_config[i].state = WAIT;
/* create a thread for each lcore */
- if (eal_thread_create(&lcore_config[i].thread_id) != 0)
+ ret = rte_thread_create(&lcore_config[i].thread_id,
+ &thread_attr, eal_thread_loop, NULL);
+ if (ret != 0)
rte_panic("Cannot create thread\n");
}
@@ -7,7 +7,7 @@
#include "eal_private.h"
#include "eal_windows.h"
-static pthread_t intr_thread;
+static rte_thread_t intr_thread;
static HANDLE intr_iocp;
@@ -76,7 +76,7 @@ rte_eal_intr_init(void)
int
rte_thread_is_intr(void)
{
- return pthread_equal(intr_thread, pthread_self());
+ return rte_thread_equal(intr_thread, rte_thread_self());
}
int
@@ -94,7 +94,7 @@ eal_intr_thread_schedule(void (*func)(void *arg), void *arg)
handle = OpenThread(THREAD_ALL_ACCESS, FALSE, intr_thread);
if (handle == NULL) {
- RTE_LOG_WIN32_ERR("OpenThread(%llu)", intr_thread);
+ RTE_LOG_WIN32_ERR("OpenThread(%lu)", intr_thread);
return -ENOENT;
}
@@ -60,11 +60,11 @@ eal_thread_loop(void *arg __rte_unused)
char c;
int n, ret;
unsigned int lcore_id;
- pthread_t thread_id;
+ rte_thread_t thread_id;
int m2w, w2m;
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
- thread_id = pthread_self();
+ thread_id = rte_thread_self();
/* retrieve our lcore_id from the configuration structure */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -122,24 +122,6 @@ eal_thread_loop(void *arg __rte_unused)
}
}
-/* function to create threads */
-int
-eal_thread_create(pthread_t *thread)
-{
- HANDLE th;
-
- th = CreateThread(NULL, 0,
- (LPTHREAD_START_ROUTINE)(ULONG_PTR)eal_thread_loop,
- NULL, 0, (LPDWORD)thread);
- if (!th)
- return -1;
-
- SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);
- SetThreadPriority(th, THREAD_PRIORITY_NORMAL);
-
- return 0;
-}
-
/* get current thread ID */
int
rte_sys_gettid(void)
@@ -148,7 +130,7 @@ rte_sys_gettid(void)
}
int
-rte_thread_setname(__rte_unused pthread_t id, __rte_unused const char *name)
+rte_thread_setname(__rte_unused rte_thread_t id, __rte_unused const char *name)
{
/* TODO */
/* This is a stub, not the expected result */
@@ -35,16 +35,6 @@
*/
int eal_create_cpu_map(void);
-/**
- * Create a thread.
- *
- * @param thread
- * The location to store the thread id if successful.
- * @return
- * 0 for success, -1 if the thread is not created.
- */
-int eal_thread_create(pthread_t *thread);
-
/**
* Get system NUMA node number for a socket ID.
*
@@ -7,4 +7,5 @@ headers += files(
'rte_os.h',
'rte_virt2phys.h',
'rte_windows.h',
+ 'rte_windows_thread_types.h',
)
deleted file mode 100644
@@ -1,186 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#ifndef _PTHREAD_H_
-#define _PTHREAD_H_
-
-#include <stdint.h>
-#include <sched.h>
-
-/**
- * This file is required to support the common code in eal_common_proc.c,
- * eal_common_thread.c and common\include\rte_per_lcore.h as Microsoft libc
- * does not contain pthread.h. This may be removed in future releases.
- */
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include <rte_windows.h>
-
-#define PTHREAD_BARRIER_SERIAL_THREAD TRUE
-
-/* defining pthread_t type on Windows since there is no in Microsoft libc*/
-typedef uintptr_t pthread_t;
-
-/* defining pthread_attr_t type on Windows since there is no in Microsoft libc*/
-typedef void *pthread_attr_t;
-
-typedef void *pthread_mutexattr_t;
-
-typedef CRITICAL_SECTION pthread_mutex_t;
-
-typedef SYNCHRONIZATION_BARRIER pthread_barrier_t;
-
-#define pthread_barrier_init(barrier, attr, count) \
- InitializeSynchronizationBarrier(barrier, count, -1)
-#define pthread_barrier_wait(barrier) EnterSynchronizationBarrier(barrier, \
- SYNCHRONIZATION_BARRIER_FLAGS_BLOCK_ONLY)
-#define pthread_barrier_destroy(barrier) \
- DeleteSynchronizationBarrier(barrier)
-#define pthread_cancel(thread) TerminateThread((HANDLE) thread, 0)
-
-/* pthread function overrides */
-#define pthread_self() \
- ((pthread_t)GetCurrentThreadId())
-
-
-static inline int
-pthread_equal(pthread_t t1, pthread_t t2)
-{
- return t1 == t2;
-}
-
-static inline int
-pthread_setaffinity_np(pthread_t threadid, size_t cpuset_size,
- rte_cpuset_t *cpuset)
-{
- DWORD_PTR ret = 0;
- HANDLE thread_handle;
-
- if (cpuset == NULL || cpuset_size == 0)
- return -1;
-
- thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, threadid);
- if (thread_handle == NULL) {
- RTE_LOG_WIN32_ERR("OpenThread()");
- return -1;
- }
-
- ret = SetThreadAffinityMask(thread_handle, *cpuset->_bits);
- if (ret == 0) {
- RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
- goto close_handle;
- }
-
-close_handle:
- if (CloseHandle(thread_handle) == 0) {
- RTE_LOG_WIN32_ERR("CloseHandle()");
- return -1;
- }
- return (ret == 0) ? -1 : 0;
-}
-
-static inline int
-pthread_getaffinity_np(pthread_t threadid, size_t cpuset_size,
- rte_cpuset_t *cpuset)
-{
- /* Workaround for the lack of a GetThreadAffinityMask()
- *API in Windows
- */
- DWORD_PTR prev_affinity_mask;
- HANDLE thread_handle;
- DWORD_PTR ret = 0;
-
- if (cpuset == NULL || cpuset_size == 0)
- return -1;
-
- thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, threadid);
- if (thread_handle == NULL) {
- RTE_LOG_WIN32_ERR("OpenThread()");
- return -1;
- }
-
- /* obtain previous mask by setting dummy mask */
- prev_affinity_mask = SetThreadAffinityMask(thread_handle, 0x1);
- if (prev_affinity_mask == 0) {
- RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
- goto close_handle;
- }
-
- /* set it back! */
- ret = SetThreadAffinityMask(thread_handle, prev_affinity_mask);
- if (ret == 0) {
- RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
- goto close_handle;
- }
-
- memset(cpuset, 0, cpuset_size);
- *cpuset->_bits = prev_affinity_mask;
-
-close_handle:
- if (CloseHandle(thread_handle) == 0) {
- RTE_LOG_WIN32_ERR("SetThreadAffinityMask()");
- return -1;
- }
- return (ret == 0) ? -1 : 0;
-}
-
-static inline int
-pthread_create(void *threadid, const void *threadattr, void *threadfunc,
- void *args)
-{
- RTE_SET_USED(threadattr);
- HANDLE hThread;
- hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)threadfunc,
- args, 0, (LPDWORD)threadid);
- if (hThread) {
- SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);
- SetThreadPriority(hThread, THREAD_PRIORITY_NORMAL);
- }
- return ((hThread != NULL) ? 0 : E_FAIL);
-}
-
-static inline int
-pthread_join(__rte_unused pthread_t thread,
- __rte_unused void **value_ptr)
-{
- return 0;
-}
-
-static inline int
-pthread_mutex_init(pthread_mutex_t *mutex,
- __rte_unused pthread_mutexattr_t *attr)
-{
- InitializeCriticalSection(mutex);
- return 0;
-}
-
-static inline int
-pthread_mutex_lock(pthread_mutex_t *mutex)
-{
- EnterCriticalSection(mutex);
- return 0;
-}
-
-static inline int
-pthread_mutex_unlock(pthread_mutex_t *mutex)
-{
- LeaveCriticalSection(mutex);
- return 0;
-}
-
-static inline int
-pthread_mutex_destroy(pthread_mutex_t *mutex)
-{
- DeleteCriticalSection(mutex);
- return 0;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _PTHREAD_H_ */
@@ -43,7 +43,7 @@ typedef struct _rte_cpuset_s {
(1LL << _WHICH_BIT(b))) != 0LL)
static inline int
-count_cpu(rte_cpuset_t *s)
+count_cpu(const rte_cpuset_t *s)
{
unsigned int _i;
int count = 0;
@@ -19,7 +19,12 @@ sources += files(
'eal_timer.c',
'fnmatch.c',
'getopt.c',
- 'rte_thread.c',
)
+if get_option('use_external_thread_lib')
+ sources += 'librte_eal/common/rte_thread.c'
+else
+ sources += 'librte_eal/windows/rte_thread.c'
+endif
+
dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
@@ -506,7 +506,7 @@ rte_eth_dev_allocate(const char *name)
strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
eth_dev->data->port_id = port_id;
eth_dev->data->mtu = RTE_ETHER_MTU;
- pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
+ rte_thread_mutex_init(&eth_dev->data->flow_ops_mutex);
unlock:
rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
@@ -580,7 +580,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
rte_free(eth_dev->data->hash_mac_addrs);
rte_free(eth_dev->data->dev_private);
- pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
+ rte_thread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
}
@@ -5,7 +5,6 @@
#ifndef _RTE_ETHDEV_CORE_H_
#define _RTE_ETHDEV_CORE_H_
-#include <pthread.h>
#include <sys/types.h>
/**
@@ -183,7 +182,7 @@ struct rte_eth_dev_data {
* Valid if RTE_ETH_DEV_REPRESENTOR in dev_flags.
*/
- pthread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */
+ rte_thread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */
uint64_t reserved_64s[4]; /**< Reserved for future fields */
void *reserved_ptrs[4]; /**< Reserved for future fields */
} __rte_cache_aligned;
@@ -223,14 +223,14 @@ static inline void
fts_enter(struct rte_eth_dev *dev)
{
if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
- pthread_mutex_lock(&dev->data->flow_ops_mutex);
+ rte_thread_mutex_lock(&dev->data->flow_ops_mutex);
}
static inline void
fts_exit(struct rte_eth_dev *dev)
{
if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
- pthread_mutex_unlock(&dev->data->flow_ops_mutex);
+ rte_thread_mutex_unlock(&dev->data->flow_ops_mutex);
}
static int
@@ -121,7 +121,7 @@ struct rte_event_eth_rx_adapter {
/* Count of interrupt vectors in use */
uint32_t num_intr_vec;
/* Thread blocked on Rx interrupts */
- pthread_t rx_intr_thread;
+ rte_thread_t rx_intr_thread;
/* Configuration callback for rte_service configuration */
rte_event_eth_rx_adapter_conf_cb conf_cb;
/* Configuration callback argument */
@@ -1302,12 +1302,12 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
{
int err;
- err = pthread_cancel(rx_adapter->rx_intr_thread);
+ err = rte_thread_cancel(rx_adapter->rx_intr_thread);
if (err)
RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
err);
- err = pthread_join(rx_adapter->rx_intr_thread, NULL);
+ err = rte_thread_join(rx_adapter->rx_intr_thread, NULL);
if (err)
RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
@@ -61,9 +61,9 @@ fdset_shrink_nolock(struct fdset *pfdset)
static void
fdset_shrink(struct fdset *pfdset)
{
- pthread_mutex_lock(&pfdset->fd_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_mutex);
fdset_shrink_nolock(pfdset);
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
}
/**
@@ -126,21 +126,21 @@ fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat)
if (pfdset == NULL || fd == -1)
return -1;
- pthread_mutex_lock(&pfdset->fd_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_mutex);
i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
if (i == -1) {
- pthread_mutex_lock(&pfdset->fd_pooling_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_pooling_mutex);
fdset_shrink_nolock(pfdset);
- pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_pooling_mutex);
i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
if (i == -1) {
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
return -2;
}
}
fdset_add_fd(pfdset, i, fd, rcb, wcb, dat);
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
return 0;
}
@@ -159,7 +159,7 @@ fdset_del(struct fdset *pfdset, int fd)
return NULL;
do {
- pthread_mutex_lock(&pfdset->fd_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_mutex);
i = fdset_find_fd(pfdset, fd);
if (i != -1 && pfdset->fd[i].busy == 0) {
@@ -170,7 +170,7 @@ fdset_del(struct fdset *pfdset, int fd)
pfdset->fd[i].dat = NULL;
i = -1;
}
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
} while (i != -1);
return dat;
@@ -192,10 +192,10 @@ fdset_try_del(struct fdset *pfdset, int fd)
if (pfdset == NULL || fd == -1)
return -2;
- pthread_mutex_lock(&pfdset->fd_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_mutex);
i = fdset_find_fd(pfdset, fd);
if (i != -1 && pfdset->fd[i].busy) {
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
return -1;
}
@@ -205,7 +205,7 @@ fdset_try_del(struct fdset *pfdset, int fd)
pfdset->fd[i].dat = NULL;
}
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
return 0;
}
@@ -244,19 +244,19 @@ fdset_event_dispatch(void *arg)
* might have been updated. It is ok if there is unwanted call
* for new listenfds.
*/
- pthread_mutex_lock(&pfdset->fd_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_mutex);
numfds = pfdset->num;
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
- pthread_mutex_lock(&pfdset->fd_pooling_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_pooling_mutex);
val = poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
- pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_pooling_mutex);
if (val < 0)
continue;
need_shrink = 0;
for (i = 0; i < numfds; i++) {
- pthread_mutex_lock(&pfdset->fd_mutex);
+ rte_thread_mutex_lock(&pfdset->fd_mutex);
pfdentry = &pfdset->fd[i];
fd = pfdentry->fd;
@@ -264,12 +264,12 @@ fdset_event_dispatch(void *arg)
if (fd < 0) {
need_shrink = 1;
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
continue;
}
if (!pfd->revents) {
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
continue;
}
@@ -280,7 +280,7 @@ fdset_event_dispatch(void *arg)
dat = pfdentry->dat;
pfdentry->busy = 1;
- pthread_mutex_unlock(&pfdset->fd_mutex);
+ rte_thread_mutex_unlock(&pfdset->fd_mutex);
if (rcb && pfd->revents & (POLLIN | FDPOLLERR))
rcb(fd, dat, &remove1);
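
The fd_man conversion keeps the existing lock nesting intact: fd_mutex is the outer lock, and fd_pooling_mutex is only ever taken inside it (fdset_add) or alone around poll() (fdset_event_dispatch). A minimal sketch of the one nested acquisition, mirroring fdset_add and assuming the declarations in fd_man.c (demo_shrink_full is hypothetical):

/* fd_pooling_mutex is strictly inner, so the dispatch thread's poll()
 * cannot race the compaction done by fdset_shrink_nolock(). */
static void
demo_shrink_full(struct fdset *pfdset)
{
        rte_thread_mutex_lock(&pfdset->fd_mutex);
        rte_thread_mutex_lock(&pfdset->fd_pooling_mutex);
        fdset_shrink_nolock(pfdset);
        rte_thread_mutex_unlock(&pfdset->fd_pooling_mutex);
        rte_thread_mutex_unlock(&pfdset->fd_mutex);
}
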
@@ -5,7 +5,7 @@
#ifndef _FD_MAN_H_
#define _FD_MAN_H_
#include <stdint.h>
-#include <pthread.h>
+#include <rte_thread.h>
#include <poll.h>
#define MAX_FDS 1024
@@ -23,8 +23,8 @@ struct fdentry {
struct fdset {
struct pollfd rwfds[MAX_FDS];
struct fdentry fd[MAX_FDS];
- pthread_mutex_t fd_mutex;
- pthread_mutex_t fd_pooling_mutex;
+ rte_thread_mutex_t fd_mutex;
+ rte_thread_mutex_t fd_pooling_mutex;
int num; /* current fd number of this fdset */
union pipefds {
@@ -31,7 +31,7 @@ TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
*/
struct vhost_user_socket {
struct vhost_user_connection_list conn_list;
- pthread_mutex_t conn_mutex;
+ rte_thread_mutex_t conn_mutex;
char *path;
int socket_fd;
struct sockaddr_un un;
@@ -73,7 +73,7 @@ struct vhost_user {
struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
struct fdset fdset;
int vsocket_cnt;
- pthread_mutex_t mutex;
+ rte_thread_mutex_t mutex;
};
#define MAX_VIRTIO_BACKLOG 128
@@ -86,12 +86,12 @@ static int vhost_user_start_client(struct vhost_user_socket *vsocket);
static struct vhost_user vhost_user = {
.fdset = {
.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
- .fd_mutex = PTHREAD_MUTEX_INITIALIZER,
- .fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .fd_mutex = RTE_THREAD_MUTEX_INITIALIZER,
+ .fd_pooling_mutex = RTE_THREAD_MUTEX_INITIALIZER,
.num = 0
},
.vsocket_cnt = 0,
- .mutex = PTHREAD_MUTEX_INITIALIZER,
+ .mutex = RTE_THREAD_MUTEX_INITIALIZER,
};
/*
@@ -269,9 +269,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
goto err_cleanup;
}
- pthread_mutex_lock(&vsocket->conn_mutex);
+ rte_thread_mutex_lock(&vsocket->conn_mutex);
TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
- pthread_mutex_unlock(&vsocket->conn_mutex);
+ rte_thread_mutex_unlock(&vsocket->conn_mutex);
fdset_pipe_notify(&vhost_user.fdset);
return;
@@ -324,9 +324,9 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
vhost_user_start_client(vsocket);
}
- pthread_mutex_lock(&vsocket->conn_mutex);
+ rte_thread_mutex_lock(&vsocket->conn_mutex);
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
- pthread_mutex_unlock(&vsocket->conn_mutex);
+ rte_thread_mutex_unlock(&vsocket->conn_mutex);
free(conn);
}
@@ -418,11 +418,11 @@ struct vhost_user_reconnect {
TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
struct vhost_user_reconnect_list {
struct vhost_user_reconnect_tailq_list head;
- pthread_mutex_t mutex;
+ rte_thread_mutex_t mutex;
};
static struct vhost_user_reconnect_list reconn_list;
-static pthread_t reconn_tid;
+static rte_thread_t reconn_tid;
static int
vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
@@ -454,7 +454,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
struct vhost_user_reconnect *reconn, *next;
while (1) {
- pthread_mutex_lock(&reconn_list.mutex);
+ rte_thread_mutex_lock(&reconn_list.mutex);
/*
* An equal implementation of TAILQ_FOREACH_SAFE,
@@ -485,7 +485,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
free(reconn);
}
- pthread_mutex_unlock(&reconn_list.mutex);
+ rte_thread_mutex_unlock(&reconn_list.mutex);
sleep(1);
}
@@ -497,7 +497,7 @@ vhost_user_reconnect_init(void)
{
int ret;
- ret = pthread_mutex_init(&reconn_list.mutex, NULL);
+ ret = rte_thread_mutex_init(&reconn_list.mutex);
if (ret < 0) {
VHOST_LOG_CONFIG(ERR, "failed to initialize mutex");
return ret;
@@ -508,7 +508,7 @@ vhost_user_reconnect_init(void)
vhost_user_client_reconnect, NULL);
if (ret != 0) {
VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread");
- if (pthread_mutex_destroy(&reconn_list.mutex)) {
+ if (rte_thread_mutex_destroy(&reconn_list.mutex)) {
VHOST_LOG_CONFIG(ERR,
"failed to destroy reconnect mutex");
}
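
Unlike pthread_mutex_init(), the rte_thread_mutex_init() used above takes no attribute argument, which is why the NULL parameter disappears in this hunk. A hedged sketch of the init/teardown pairing, with error handling mirroring vhost_user_reconnect_init() (demo_init_teardown is hypothetical):

static int
demo_init_teardown(rte_thread_mutex_t *m)
{
        int ret = rte_thread_mutex_init(m);

        if (ret != 0)
                return ret;
        /* ... use the mutex ... */
        return rte_thread_mutex_destroy(m);
}
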
@@ -552,9 +552,9 @@ vhost_user_start_client(struct vhost_user_socket *vsocket)
reconn->un = vsocket->un;
reconn->fd = fd;
reconn->vsocket = vsocket;
- pthread_mutex_lock(&reconn_list.mutex);
+ rte_thread_mutex_lock(&reconn_list.mutex);
TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
- pthread_mutex_unlock(&reconn_list.mutex);
+ rte_thread_mutex_unlock(&reconn_list.mutex);
return 0;
}
@@ -586,11 +586,11 @@ rte_vhost_driver_attach_vdpa_device(const char *path,
if (dev == NULL || path == NULL)
return -1;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->vdpa_dev = dev;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
@@ -600,11 +600,11 @@ rte_vhost_driver_detach_vdpa_device(const char *path)
{
struct vhost_user_socket *vsocket;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->vdpa_dev = NULL;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
@@ -615,11 +615,11 @@ rte_vhost_driver_get_vdpa_device(const char *path)
struct vhost_user_socket *vsocket;
struct rte_vdpa_device *dev = NULL;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
dev = vsocket->vdpa_dev;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return dev;
}
@@ -629,7 +629,7 @@ rte_vhost_driver_disable_features(const char *path, uint64_t features)
{
struct vhost_user_socket *vsocket;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
/* Note that use_builtin_virtio_net is not affected by this function
@@ -639,7 +639,7 @@ rte_vhost_driver_disable_features(const char *path, uint64_t features)
if (vsocket)
vsocket->features &= ~features;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
@@ -649,7 +649,7 @@ rte_vhost_driver_enable_features(const char *path, uint64_t features)
{
struct vhost_user_socket *vsocket;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket) {
if ((vsocket->supported_features & features) != features) {
@@ -657,12 +657,12 @@ rte_vhost_driver_enable_features(const char *path, uint64_t features)
* trying to enable features the driver doesn't
* support.
*/
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return -1;
}
vsocket->features |= features;
}
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
@@ -672,7 +672,7 @@ rte_vhost_driver_set_features(const char *path, uint64_t features)
{
struct vhost_user_socket *vsocket;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket) {
vsocket->supported_features = features;
@@ -683,7 +683,7 @@ rte_vhost_driver_set_features(const char *path, uint64_t features)
*/
vsocket->use_builtin_virtio_net = false;
}
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
@@ -696,7 +696,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
struct rte_vdpa_device *vdpa_dev;
int ret = 0;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(ERR,
@@ -722,7 +722,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
*features = vsocket->features & vdpa_features;
unlock_exit:
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return ret;
}
@@ -732,11 +732,11 @@ rte_vhost_driver_set_protocol_features(const char *path,
{
struct vhost_user_socket *vsocket;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->protocol_features = protocol_features;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
@@ -749,7 +749,7 @@ rte_vhost_driver_get_protocol_features(const char *path,
struct rte_vdpa_device *vdpa_dev;
int ret = 0;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(ERR,
@@ -777,7 +777,7 @@ rte_vhost_driver_get_protocol_features(const char *path,
& vdpa_protocol_features;
unlock_exit:
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return ret;
}
@@ -789,7 +789,7 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
struct rte_vdpa_device *vdpa_dev;
int ret = 0;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
VHOST_LOG_CONFIG(ERR,
@@ -815,7 +815,7 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
*queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num);
unlock_exit:
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return ret;
}
@@ -847,7 +847,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
if (!path)
return -1;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
VHOST_LOG_CONFIG(ERR,
@@ -867,7 +867,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
goto out;
}
TAILQ_INIT(&vsocket->conn_list);
- ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
+ ret = rte_thread_mutex_init(&vsocket->conn_mutex);
if (ret) {
VHOST_LOG_CONFIG(ERR,
"error: failed to init connection mutex\n");
@@ -962,18 +962,18 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return ret;
out_mutex:
- if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
+ if (rte_thread_mutex_destroy(&vsocket->conn_mutex)) {
VHOST_LOG_CONFIG(ERR,
"error: failed to destroy connection mutex\n");
}
out_free:
vhost_user_socket_mem_free(vsocket);
out:
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return ret;
}
@@ -984,7 +984,7 @@ vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
int found = false;
struct vhost_user_reconnect *reconn, *next;
- pthread_mutex_lock(&reconn_list.mutex);
+ rte_thread_mutex_lock(&reconn_list.mutex);
for (reconn = TAILQ_FIRST(&reconn_list.head);
reconn != NULL; reconn = next) {
@@ -998,7 +998,7 @@ vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
break;
}
}
- pthread_mutex_unlock(&reconn_list.mutex);
+ rte_thread_mutex_unlock(&reconn_list.mutex);
return found;
}
@@ -1016,13 +1016,13 @@ rte_vhost_driver_unregister(const char *path)
return -1;
again:
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
if (!strcmp(vsocket->path, path)) {
- pthread_mutex_lock(&vsocket->conn_mutex);
+ rte_thread_mutex_lock(&vsocket->conn_mutex);
for (conn = TAILQ_FIRST(&vsocket->conn_list);
conn != NULL;
conn = next) {
@@ -1036,9 +1036,10 @@ rte_vhost_driver_unregister(const char *path)
*/
if (fdset_try_del(&vhost_user.fdset,
conn->connfd) == -1) {
- pthread_mutex_unlock(
+ rte_thread_mutex_unlock(
&vsocket->conn_mutex);
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(
+ &vhost_user.mutex);
goto again;
}
@@ -1050,7 +1051,7 @@ rte_vhost_driver_unregister(const char *path)
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
free(conn);
}
- pthread_mutex_unlock(&vsocket->conn_mutex);
+ rte_thread_mutex_unlock(&vsocket->conn_mutex);
if (vsocket->is_server) {
/*
@@ -1060,7 +1061,8 @@ rte_vhost_driver_unregister(const char *path)
*/
if (fdset_try_del(&vhost_user.fdset,
vsocket->socket_fd) == -1) {
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(
+ &vhost_user.mutex);
goto again;
}
@@ -1070,18 +1072,18 @@ rte_vhost_driver_unregister(const char *path)
vhost_user_remove_reconnect(vsocket);
}
- pthread_mutex_destroy(&vsocket->conn_mutex);
+ rte_thread_mutex_destroy(&vsocket->conn_mutex);
vhost_user_socket_mem_free(vsocket);
count = --vhost_user.vsocket_cnt;
vhost_user.vsockets[i] = vhost_user.vsockets[count];
vhost_user.vsockets[count] = NULL;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return 0;
}
}
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return -1;
}
@@ -1095,11 +1097,11 @@ rte_vhost_driver_callback_register(const char *path,
{
struct vhost_user_socket *vsocket;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
vsocket->notify_ops = ops;
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
@@ -1109,9 +1111,9 @@ vhost_driver_callback_get(const char *path)
{
struct vhost_user_socket *vsocket;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
return vsocket ? vsocket->notify_ops : NULL;
}
@@ -1120,11 +1122,11 @@ int
rte_vhost_driver_start(const char *path)
{
struct vhost_user_socket *vsocket;
- static pthread_t fdset_tid;
+ static rte_thread_t fdset_tid;
- pthread_mutex_lock(&vhost_user.mutex);
+ rte_thread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
- pthread_mutex_unlock(&vhost_user.mutex);
+ rte_thread_mutex_unlock(&vhost_user.mutex);
if (!vsocket)
return -1;
@@ -26,7 +26,7 @@
#include "vhost_user.h"
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
-pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
+rte_thread_mutex_t vhost_dev_lock = RTE_THREAD_MUTEX_INITIALIZER;
/* Called with iotlb_lock read-locked */
uint64_t
@@ -646,7 +646,7 @@ vhost_new_device(void)
struct virtio_net *dev;
int i;
- pthread_mutex_lock(&vhost_dev_lock);
+ rte_thread_mutex_lock(&vhost_dev_lock);
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
@@ -655,7 +655,7 @@ vhost_new_device(void)
if (i == MAX_VHOST_DEVICE) {
VHOST_LOG_CONFIG(ERR,
"Failed to find a free slot for new device.\n");
- pthread_mutex_unlock(&vhost_dev_lock);
+ rte_thread_mutex_unlock(&vhost_dev_lock);
return -1;
}
@@ -663,12 +663,12 @@ vhost_new_device(void)
if (dev == NULL) {
VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for new dev.\n");
- pthread_mutex_unlock(&vhost_dev_lock);
+ rte_thread_mutex_unlock(&vhost_dev_lock);
return -1;
}
vhost_devices[i] = dev;
- pthread_mutex_unlock(&vhost_dev_lock);
+ rte_thread_mutex_unlock(&vhost_dev_lock);
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
@@ -32,5 +32,7 @@ option('enable_trace_fp', type: 'boolean', value: false,
description: 'enable fast path trace points.')
option('tests', type: 'boolean', value: true,
description: 'build unit tests')
+option('use_external_thread_lib', type: 'boolean', value: false,
+ description: 'use an external thread library')
option('use_hpet', type: 'boolean', value: false,
description: 'use HPET timer in EAL')