@@ -6,7 +6,7 @@
#ifndef _FLOW_API_H_
#define _FLOW_API_H_
-#include <pthread.h>
+#include <rte_spinlock.h>
#include "ntlog.h"
@@ -110,13 +110,13 @@ struct flow_nic_dev {
struct flow_handle *flow_base;
/* linked list of all FLM flows created on this NIC */
struct flow_handle *flow_base_flm;
- pthread_mutex_t flow_mtx;
+ rte_spinlock_t flow_mtx;
/* NIC backend API */
struct flow_api_backend_s be;
/* linked list of created eth-port devices on this NIC */
struct flow_eth_dev *eth_base;
- pthread_mutex_t mtx;
+ rte_spinlock_t mtx;
/* RSS hashing configuration */
struct nt_eth_rss_conf rss_conf;
@@ -7,6 +7,7 @@
#define __NTDRV_4GA_H__
#include "nt4ga_adapter.h"
+#include <rte_spinlock.h>
typedef struct ntdrv_4ga_s {
uint32_t pciident;
@@ -15,7 +16,7 @@ typedef struct ntdrv_4ga_s {
volatile bool b_shutdown;
rte_thread_t flm_thread;
- pthread_mutex_t stat_lck;
+ rte_spinlock_t stat_lck;
rte_thread_t stat_thread;
rte_thread_t port_event_thread;
} ntdrv_4ga_t;
@@ -7,7 +7,7 @@
#define __NTHW_II2CM_H__
#include "nthw_fpga_model.h"
-#include "pthread.h"
+#include "rte_spinlock.h"
struct nt_i2cm {
nthw_fpga_t *mp_fpga;
@@ -39,7 +39,7 @@ struct nt_i2cm {
nthw_field_t *mp_fld_io_exp_rst;
nthw_field_t *mp_fld_io_exp_int_b;
- pthread_mutex_t i2cmmutex;
+ rte_spinlock_t i2cmmutex;
};
typedef struct nt_i2cm nthw_i2cm_t;
@@ -7,7 +7,7 @@
#define NTHW_RPF_HPP_
#include "nthw_fpga_model.h"
-#include "pthread.h"
+#include <rte_spinlock.h>
struct nthw_rpf {
nthw_fpga_t *mp_fpga;
@@ -28,7 +29,7 @@ struct nthw_rpf {
int m_default_maturing_delay;
bool m_administrative_block; /* used to enforce license expiry */
- pthread_mutex_t rpf_mutex;
+ rte_spinlock_t rpf_mutex;
};
typedef struct nthw_rpf nthw_rpf_t;
@@ -8,6 +8,7 @@
#include "nthw_drv.h"
#include "nthw_register.h"
#include "nthw_rpf.h"
+#include "rte_spinlock.h"
nthw_rpf_t *nthw_rpf_new(void)
{
@@ -65,7 +66,7 @@ int nthw_rpf_init(nthw_rpf_t *p, nthw_fpga_t *p_fpga, int n_instance)
nthw_fpga_get_product_param(p_fpga, NT_RPF_MATURING_DEL_DEFAULT, 0);
/* Initialize mutex */
- pthread_mutex_init(&p->rpf_mutex, NULL);
+ rte_spinlock_init(&p->rpf_mutex);
return 0;
}
@@ -2,6 +2,7 @@
* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2023 Napatech A/S
*/
+#include "rte_spinlock.h"
#include "ntlog.h"
#include "nt_util.h"
@@ -44,7 +45,7 @@ const char *dbg_res_descr[] = {
};
static struct flow_nic_dev *dev_base;
-static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+static rte_spinlock_t base_mtx = RTE_SPINLOCK_INITIALIZER;
/*
* Error handling
@@ -400,7 +401,7 @@ int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
#endif
/* delete all created flows from this device */
- pthread_mutex_lock(&ndev->mtx);
+ rte_spinlock_lock(&ndev->mtx);
struct flow_handle *flow = ndev->flow_base;
@@ -454,7 +455,7 @@ int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found", eth_dev);
- pthread_mutex_unlock(&ndev->mtx);
+ rte_spinlock_unlock(&ndev->mtx);
/* free eth_dev */
free(eth_dev);
@@ -495,15 +496,15 @@ static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e re
static void list_insert_flow_nic(struct flow_nic_dev *ndev)
{
- pthread_mutex_lock(&base_mtx);
+ rte_spinlock_lock(&base_mtx);
ndev->next = dev_base;
dev_base = ndev;
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&base_mtx);
}
static int list_remove_flow_nic(struct flow_nic_dev *ndev)
{
- pthread_mutex_lock(&base_mtx);
+ rte_spinlock_lock(&base_mtx);
struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;
while (nic_dev) {
@@ -514,7 +515,7 @@ static int list_remove_flow_nic(struct flow_nic_dev *ndev)
else
dev_base = nic_dev->next;
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&base_mtx);
return 0;
}
@@ -522,7 +523,7 @@ static int list_remove_flow_nic(struct flow_nic_dev *ndev)
nic_dev = nic_dev->next;
}
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&base_mtx);
return -1;
}
@@ -554,27 +555,27 @@ static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no
"ERROR: Internal array for multiple queues too small for API");
}
- pthread_mutex_lock(&base_mtx);
+ rte_spinlock_lock(&base_mtx);
struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
if (!ndev) {
/* Error - no flow api found on specified adapter */
NT_LOG(ERR, FILTER, "ERROR: no flow interface registered for adapter %d",
adapter_no);
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&base_mtx);
return NULL;
}
if (ndev->ports < ((uint16_t)port_no + 1)) {
NT_LOG(ERR, FILTER, "ERROR: port exceeds supported port range for adapter");
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&base_mtx);
return NULL;
}
if ((alloc_rx_queues - 1) > FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
NT_LOG(ERR, FILTER,
"ERROR: Exceeds supported number of rx queues per eth device");
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&base_mtx);
return NULL;
}
@@ -584,20 +585,19 @@ static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no
if (eth_dev) {
NT_LOG(DBG, FILTER, "Re-opening existing NIC port device: NIC DEV: %i Port %i",
adapter_no, port_no);
- pthread_mutex_unlock(&base_mtx);
flow_delete_eth_dev(eth_dev);
eth_dev = NULL;
}
+ rte_spinlock_lock(&ndev->mtx);
+
eth_dev = calloc(1, sizeof(struct flow_eth_dev));
if (!eth_dev) {
NT_LOG(ERR, FILTER, "ERROR: calloc failed");
- goto err_exit1;
+ goto err_exit0;
}
- pthread_mutex_lock(&ndev->mtx);
-
eth_dev->ndev = ndev;
eth_dev->port = port_no;
eth_dev->port_id = port_id;
@@ -684,15 +684,14 @@ static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no
nic_insert_eth_port_dev(ndev, eth_dev);
- pthread_mutex_unlock(&ndev->mtx);
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&ndev->mtx);
+ rte_spinlock_unlock(&base_mtx);
return eth_dev;
err_exit0:
- pthread_mutex_unlock(&ndev->mtx);
- pthread_mutex_unlock(&base_mtx);
+ rte_spinlock_unlock(&ndev->mtx);
+ rte_spinlock_unlock(&base_mtx);
-err_exit1:
if (eth_dev)
free(eth_dev);
@@ -799,7 +798,7 @@ struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_b
for (int i = 0; i < RES_COUNT; i++)
assert(ndev->res[i].alloc_bm);
- pthread_mutex_init(&ndev->mtx, NULL);
+ rte_spinlock_init(&ndev->mtx);
list_insert_flow_nic(ndev);
return ndev;
@@ -3,12 +3,12 @@
* Copyright(c) 2024 Napatech A/S
*/
-#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "flow_id_table.h"
+#include "rte_spinlock.h"
#define NTNIC_ARRAY_BITS 14
#define NTNIC_ARRAY_SIZE (1 << NTNIC_ARRAY_BITS)
@@ -25,7 +25,7 @@ struct ntnic_id_table_element {
struct ntnic_id_table_data {
struct ntnic_id_table_element *arrays[NTNIC_ARRAY_SIZE];
- pthread_mutex_t mtx;
+ rte_spinlock_t mtx;
uint32_t next_id;
@@ -68,7 +68,7 @@ void *ntnic_id_table_create(void)
{
struct ntnic_id_table_data *handle = calloc(1, sizeof(struct ntnic_id_table_data));
- pthread_mutex_init(&handle->mtx, NULL);
+ rte_spinlock_init(&handle->mtx);
handle->next_id = 1;
return handle;
@@ -81,8 +81,6 @@ void ntnic_id_table_destroy(void *id_table)
for (uint32_t i = 0; i < NTNIC_ARRAY_SIZE; ++i)
free(handle->arrays[i]);
- pthread_mutex_destroy(&handle->mtx);
-
free(id_table);
}
@@ -91,7 +89,7 @@ uint32_t ntnic_id_table_get_id(void *id_table, union flm_handles flm_h, uint8_t
{
struct ntnic_id_table_data *handle = id_table;
- pthread_mutex_lock(&handle->mtx);
+ rte_spinlock_lock(&handle->mtx);
uint32_t new_id = ntnic_id_table_array_pop_free_id(handle);
@@ -103,7 +101,7 @@ uint32_t ntnic_id_table_get_id(void *id_table, union flm_handles flm_h, uint8_t
element->type = type;
memcpy(&element->handle, &flm_h, sizeof(union flm_handles));
- pthread_mutex_unlock(&handle->mtx);
+ rte_spinlock_unlock(&handle->mtx);
return new_id;
}
@@ -112,7 +110,7 @@ void ntnic_id_table_free_id(void *id_table, uint32_t id)
{
struct ntnic_id_table_data *handle = id_table;
- pthread_mutex_lock(&handle->mtx);
+ rte_spinlock_lock(&handle->mtx);
struct ntnic_id_table_element *current_element =
ntnic_id_table_array_find_element(handle, id);
@@ -127,7 +125,7 @@ void ntnic_id_table_free_id(void *id_table, uint32_t id)
if (handle->free_tail == 0)
handle->free_tail = handle->free_head;
- pthread_mutex_unlock(&handle->mtx);
+ rte_spinlock_unlock(&handle->mtx);
}
void ntnic_id_table_find(void *id_table, uint32_t id, union flm_handles *flm_h, uint8_t *caller_id,
@@ -135,7 +133,7 @@ void ntnic_id_table_find(void *id_table, uint32_t id, union flm_handles *flm_h,
{
struct ntnic_id_table_data *handle = id_table;
- pthread_mutex_lock(&handle->mtx);
+ rte_spinlock_lock(&handle->mtx);
struct ntnic_id_table_element *element = ntnic_id_table_array_find_element(handle, id);
@@ -143,5 +141,5 @@ void ntnic_id_table_find(void *id_table, uint32_t id, union flm_handles *flm_h,
*type = element->type;
memcpy(flm_h, &element->handle, sizeof(union flm_handles));
- pthread_mutex_unlock(&handle->mtx);
+ rte_spinlock_unlock(&handle->mtx);
}
@@ -20,6 +21,7 @@
#include "flow_api_profile_inline.h"
#include "ntnic_mod_reg.h"
+#include <rte_spinlock.h>
#include <rte_common.h>
#define FLM_MTR_PROFILE_SIZE 0x100000
@@ -189,7 +191,7 @@ static int flow_mtr_create_meter(struct flow_eth_dev *dev,
(void)policy_id;
struct flm_v25_lrn_data_s *learn_record = NULL;
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
learn_record =
(struct flm_v25_lrn_data_s *)
@@ -238,7 +240,7 @@ static int flow_mtr_create_meter(struct flow_eth_dev *dev,
mtr_stat[mtr_id].flm_id = flm_id;
atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
return 0;
}
@@ -247,7 +249,7 @@ static int flow_mtr_probe_meter(struct flow_eth_dev *dev, uint8_t caller_id, uin
{
struct flm_v25_lrn_data_s *learn_record = NULL;
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
learn_record =
(struct flm_v25_lrn_data_s *)
@@ -278,7 +280,7 @@ static int flow_mtr_probe_meter(struct flow_eth_dev *dev, uint8_t caller_id, uin
flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr);
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
return 0;
}
@@ -287,7 +289,7 @@ static int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint8_t caller_id, u
{
struct flm_v25_lrn_data_s *learn_record = NULL;
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
learn_record =
(struct flm_v25_lrn_data_s *)
@@ -330,7 +332,7 @@ static int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint8_t caller_id, u
flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr);
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
return 0;
}
@@ -340,7 +342,7 @@ static int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint8_t caller_id, uin
{
struct flm_v25_lrn_data_s *learn_record = NULL;
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
learn_record =
(struct flm_v25_lrn_data_s *)
@@ -377,7 +379,7 @@ static int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint8_t caller_id, uin
flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr);
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
return 0;
}
@@ -514,9 +516,9 @@ static void flm_mtr_read_sta_records(struct flow_eth_dev *dev, uint32_t *data, u
uint8_t port;
bool remote_caller = is_remote_caller(caller_id, &port);
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
((struct flow_handle *)flm_h.p)->learn_ignored = 1;
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
struct flm_status_event_s data = {
.flow = flm_h.p,
.learn_ignore = sta_data->lis,
@@ -813,7 +815,7 @@ static uint8_t get_port_from_port_id(const struct flow_nic_dev *ndev, uint32_t p
static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
{
- pthread_mutex_lock(&ndev->flow_mtx);
+ rte_spinlock_lock(&ndev->flow_mtx);
if (ndev->flow_base)
ndev->flow_base->prev = fh;
@@ -822,7 +824,7 @@ static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
fh->prev = NULL;
ndev->flow_base = fh;
- pthread_mutex_unlock(&ndev->flow_mtx);
+ rte_spinlock_unlock(&ndev->flow_mtx);
}
static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
@@ -830,7 +832,7 @@ static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
struct flow_handle *next = fh->next;
struct flow_handle *prev = fh->prev;
- pthread_mutex_lock(&ndev->flow_mtx);
+ rte_spinlock_lock(&ndev->flow_mtx);
if (next && prev) {
prev->next = next;
@@ -847,12 +849,12 @@ static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
ndev->flow_base = NULL;
}
- pthread_mutex_unlock(&ndev->flow_mtx);
+ rte_spinlock_unlock(&ndev->flow_mtx);
}
static void nic_insert_flow_flm(struct flow_nic_dev *ndev, struct flow_handle *fh)
{
- pthread_mutex_lock(&ndev->flow_mtx);
+ rte_spinlock_lock(&ndev->flow_mtx);
if (ndev->flow_base_flm)
ndev->flow_base_flm->prev = fh;
@@ -861,7 +863,7 @@ static void nic_insert_flow_flm(struct flow_nic_dev *ndev, struct flow_handle *f
fh->prev = NULL;
ndev->flow_base_flm = fh;
- pthread_mutex_unlock(&ndev->flow_mtx);
+ rte_spinlock_unlock(&ndev->flow_mtx);
}
static void nic_remove_flow_flm(struct flow_nic_dev *ndev, struct flow_handle *fh_flm)
@@ -869,7 +871,7 @@ static void nic_remove_flow_flm(struct flow_nic_dev *ndev, struct flow_handle *f
struct flow_handle *next = fh_flm->next;
struct flow_handle *prev = fh_flm->prev;
- pthread_mutex_lock(&ndev->flow_mtx);
+ rte_spinlock_lock(&ndev->flow_mtx);
if (next && prev) {
prev->next = next;
@@ -886,7 +888,7 @@ static void nic_remove_flow_flm(struct flow_nic_dev *ndev, struct flow_handle *f
ndev->flow_base_flm = NULL;
}
- pthread_mutex_unlock(&ndev->flow_mtx);
+ rte_spinlock_unlock(&ndev->flow_mtx);
}
static inline struct nic_flow_def *prepare_nic_flow_def(struct nic_flow_def *fd)
@@ -4192,20 +4194,20 @@ struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
struct nic_flow_def *fd = allocate_nic_flow_def();
if (fd == NULL)
- goto err_exit;
+ goto err_exit0;
res = interpret_flow_actions(dev, action, NULL, fd, error, &num_dest_port, &num_queues);
if (res)
- goto err_exit;
+ goto err_exit0;
res = interpret_flow_elements(dev, elem, fd, error, forced_vlan_vid_local, &port_id,
packet_data, packet_mask, &key_def);
if (res)
- goto err_exit;
+ goto err_exit0;
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
/* Translate group IDs */
if (fd->jump_to_group != UINT32_MAX &&
@@ -4239,19 +4241,27 @@ struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
NT_LOG(DBG, FILTER, ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<",
dev, dev->ndev->adapter_no, dev->port, fh, fd);
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
return fh;
err_exit:
- if (fh)
+ if (fh) {
flow_destroy_locked_profile_inline(dev, fh, NULL);
-
- else
+		fh = NULL;
+		fd = NULL;
+	} else {
 		free(fd);
+		fd = NULL;
+	}
+ rte_spinlock_unlock(&dev->ndev->mtx);
- pthread_mutex_unlock(&dev->ndev->mtx);
+err_exit0:
+ if (fd) {
+ free(fd);
+ fd = NULL;
+ }
NT_LOG(ERR, FILTER, "ERR: %s", __func__);
return NULL;
@@ -4312,6 +4322,7 @@ int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle,
(struct hw_db_idx *)fh->db_idxs, fh->db_idx_counter);
free(fh->fd);
+ fh->fd = NULL;
}
if (err) {
@@ -4320,6 +4331,7 @@ int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
}
free(fh);
+ fh = NULL;
#ifdef FLOW_DEBUG
dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
@@ -4337,9 +4349,9 @@ int flow_destroy_profile_inline(struct flow_eth_dev *dev, struct flow_handle *fl
if (flow) {
/* Delete this flow */
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
err = flow_destroy_locked_profile_inline(dev, flow, error);
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
}
return err;
@@ -4427,7 +4439,7 @@ int flow_actions_update_profile_inline(struct flow_eth_dev *dev,
return -1;
}
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
/* Setup new actions */
uint32_t local_idx_counter = 0;
@@ -4534,7 +4546,7 @@ int flow_actions_update_profile_inline(struct flow_eth_dev *dev,
flow->flm_db_idxs[i] = local_idxs[i];
}
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
free(fd);
return 0;
@@ -4543,7 +4555,7 @@ int flow_actions_update_profile_inline(struct flow_eth_dev *dev,
hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle, (struct hw_db_idx *)local_idxs,
local_idx_counter);
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
free(fd);
return -1;
@@ -5280,7 +5292,7 @@ int flow_dev_dump_profile_inline(struct flow_eth_dev *dev,
{
flow_nic_set_error(ERR_SUCCESS, error);
- pthread_mutex_lock(&dev->ndev->mtx);
+ rte_spinlock_lock(&dev->ndev->mtx);
if (flow != NULL) {
if (flow->type == FLOW_HANDLE_TYPE_FLM) {
@@ -5339,7 +5351,7 @@ int flow_dev_dump_profile_inline(struct flow_eth_dev *dev,
}
}
- pthread_mutex_unlock(&dev->ndev->mtx);
+ rte_spinlock_unlock(&dev->ndev->mtx);
return 0;
}
@@ -678,11 +678,13 @@ int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free, uint3
uint32_t address_bufctrl = nthw_register_get_address(p->mp_buf_ctrl);
nthw_rab_bus_id_t bus_id = 1;
struct dma_buf_ptr bc_buf;
- ret = nthw_rac_rab_dma_begin(rac);
+ rte_spinlock_lock(&rac->m_mutex);
+ ret = !rac->m_dma_active ? nthw_rac_rab_dma_begin(rac) : -1;
if (ret == 0) {
nthw_rac_rab_read32_dma(rac, bus_id, address_bufctrl, 2, &bc_buf);
- ret = nthw_rac_rab_dma_commit(rac);
+ ret = rac->m_dma_active ? nthw_rac_rab_dma_commit(rac) : (assert(0), -1);
+ rte_spinlock_unlock(&rac->m_mutex);
if (ret != 0)
return ret;
@@ -692,6 +694,13 @@ int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free, uint3
*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+ } else {
+ rte_spinlock_unlock(&rac->m_mutex);
+ const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+ const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+ NT_LOG(ERR, NTHW,
+ "%s: DMA begin requested, but a DMA transaction is already active",
+ p_adapter_id_str);
}
return ret;
@@ -716,8 +725,10 @@ int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data, uint
*handled_records = 0;
int max_tries = 10000;
- while (*inf_avail == 0 && *sta_avail == 0 && records != 0 && --max_tries > 0)
- if (nthw_rac_rab_dma_begin(rac) == 0) {
+ while (*inf_avail == 0 && *sta_avail == 0 && records != 0 && --max_tries > 0) {
+ rte_spinlock_lock(&rac->m_mutex);
+ int ret = !rac->m_dma_active ? nthw_rac_rab_dma_begin(rac) : -1;
+ if (ret == 0) {
uint32_t dma_free = nthw_rac_rab_get_free(rac);
if (dma_free != RAB_DMA_BUF_CNT) {
@@ -770,7 +781,11 @@ int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data, uint
/* Read buf ctrl */
nthw_rac_rab_read32_dma(rac, bus_id, address_bufctrl, 2, &bc_buf);
- if (nthw_rac_rab_dma_commit(rac) != 0)
+ int ret = rac->m_dma_active ?
+ nthw_rac_rab_dma_commit(rac) :
+ (assert(0), -1);
+ rte_spinlock_unlock(&rac->m_mutex);
+ if (ret != 0)
return -1;
uint32_t bc_mask = bc_buf.size - 1;
@@ -778,8 +793,15 @@ int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data, uint
*lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff;
*inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff;
*sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff;
+ } else {
+ rte_spinlock_unlock(&rac->m_mutex);
+ const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+ const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+ NT_LOG(ERR, NTHW,
+ "%s: DMA begin requested, but a DMA transaction is already active",
+ p_adapter_id_str);
}
-
+ }
return 0;
}
@@ -801,7 +823,8 @@ int flm_nthw_inf_sta_data_update(const struct flm_nthw *p, uint32_t *inf_data,
uint32_t mask;
uint32_t index;
- ret = nthw_rac_rab_dma_begin(rac);
+ rte_spinlock_lock(&rac->m_mutex);
+ ret = !rac->m_dma_active ? nthw_rac_rab_dma_begin(rac) : -1;
if (ret == 0) {
/* Announce the number of words to read from INF_DATA */
@@ -821,7 +844,8 @@ int flm_nthw_inf_sta_data_update(const struct flm_nthw *p, uint32_t *inf_data,
}
nthw_rac_rab_read32_dma(rac, bus_id, address_bufctrl, 2, &bc_buf);
- ret = nthw_rac_rab_dma_commit(rac);
+ ret = rac->m_dma_active ? nthw_rac_rab_dma_commit(rac) : (assert(0), -1);
+ rte_spinlock_unlock(&rac->m_mutex);
if (ret != 0)
return ret;
@@ -847,6 +871,13 @@ int flm_nthw_inf_sta_data_update(const struct flm_nthw *p, uint32_t *inf_data,
*lrn_free = bc_buf.base[index & mask] & 0xffff;
*inf_avail = (bc_buf.base[index & mask] >> 16) & 0xffff;
*sta_avail = bc_buf.base[(index + 1) & mask] & 0xffff;
+ } else {
+ rte_spinlock_unlock(&rac->m_mutex);
+ const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
+ const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
+ NT_LOG(ERR, NTHW,
+ "%s: DMA begin requested, but a DMA transaction is already active",
+ p_adapter_id_str);
}
return ret;
@@ -3,6 +3,7 @@
* Copyright(c) 2023 Napatech A/S
*/
+#include "rte_spinlock.h"
#include "nt_util.h"
#include "ntlog.h"
@@ -10,8 +11,6 @@
#include "nthw_register.h"
#include "nthw_rac.h"
-#include <pthread.h>
-
#define RAB_DMA_WAIT (1000000)
#define RAB_READ (0x01)
@@ -217,7 +216,7 @@ int nthw_rac_init(nthw_rac_t *p, nthw_fpga_t *p_fpga, struct fpga_info_s *p_fpga
}
}
- pthread_mutex_init(&p->m_mutex, NULL);
+ rte_spinlock_init(&p->m_mutex);
return 0;
}
@@ -389,19 +388,6 @@ void nthw_rac_bar0_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_a
int nthw_rac_rab_dma_begin(nthw_rac_t *p)
{
- const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info;
- const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str;
-
- pthread_mutex_lock(&p->m_mutex);
-
- if (p->m_dma_active) {
- pthread_mutex_unlock(&p->m_mutex);
- NT_LOG(ERR, NTHW,
- "%s: DMA begin requested, but a DMA transaction is already active",
- p_adapter_id_str);
- return -1;
- }
-
p->m_dma_active = true;
return 0;
@@ -454,19 +440,11 @@ int nthw_rac_rab_dma_commit(nthw_rac_t *p)
{
int ret;
- if (!p->m_dma_active) {
- /* Expecting mutex not to be locked! */
- assert(0); /* alert developer that something is wrong */
- return -1;
- }
-
nthw_rac_rab_dma_activate(p);
ret = nthw_rac_rab_dma_wait(p);
p->m_dma_active = false;
- pthread_mutex_unlock(&p->m_mutex);
-
return ret;
}
@@ -602,7 +580,7 @@ int nthw_rac_rab_write32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint
return -1;
}
- pthread_mutex_lock(&p->m_mutex);
+ rte_spinlock_lock(&p->m_mutex);
if (p->m_dma_active) {
NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled", p_adapter_id_str);
@@ -748,7 +726,7 @@ int nthw_rac_rab_write32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint
}
exit_unlock_res:
- pthread_mutex_unlock(&p->m_mutex);
+ rte_spinlock_unlock(&p->m_mutex);
return res;
}
@@ -763,7 +741,7 @@ int nthw_rac_rab_read32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint3
uint32_t out_buf_free;
int res = 0;
- pthread_mutex_lock(&p->m_mutex);
+ rte_spinlock_lock(&p->m_mutex);
if (address > (1 << RAB_ADDR_BW)) {
NT_LOG(ERR, NTHW, "%s: RAB: Illegal address: value too large %d - max %d",
@@ -923,7 +901,7 @@ int nthw_rac_rab_read32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint3
}
exit_unlock_res:
- pthread_mutex_unlock(&p->m_mutex);
+ rte_spinlock_unlock(&p->m_mutex);
return res;
}
@@ -935,7 +913,7 @@ int nthw_rac_rab_flush(nthw_rac_t *p)
uint32_t retry;
int res = 0;
- pthread_mutex_lock(&p->m_mutex);
+ rte_spinlock_lock(&p->m_mutex);
/* Set the flush bit */
nthw_rac_reg_write32(p_fpga_info, p->RAC_RAB_BUF_USED_ADDR,
@@ -960,6 +938,6 @@ int nthw_rac_rab_flush(nthw_rac_t *p)
/* Clear flush bit when done */
nthw_rac_reg_write32(p_fpga_info, p->RAC_RAB_BUF_USED_ADDR, 0x0);
- pthread_mutex_unlock(&p->m_mutex);
+ rte_spinlock_unlock(&p->m_mutex);
return res;
}
@@ -16,7 +16,7 @@ struct nthw_rac {
nthw_fpga_t *mp_fpga;
nthw_module_t *mp_mod_rac;
- pthread_mutex_t m_mutex;
+ rte_spinlock_t m_mutex;
int mn_param_rac_rab_interfaces;
int mn_param_rac_rab_ob_update;
@@ -18,6 +18,7 @@
#include <sys/queue.h>
+#include "rte_spinlock.h"
#include "ntlog.h"
#include "ntdrv_4ga.h"
#include "ntos_drv.h"
@@ -236,7 +237,7 @@ static int dpdk_stats_reset(struct pmd_internals *internals, struct ntdrv_4ga_s
if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 || n_intf_no > NUM_ADAPTER_PORTS_MAX)
return -1;
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
/* Rx */
for (i = 0; i < internals->nb_rx_queues; i++) {
@@ -256,7 +257,7 @@ static int dpdk_stats_reset(struct pmd_internals *internals, struct ntdrv_4ga_s
p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
return 0;
}
@@ -1519,9 +1520,9 @@ static int eth_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *sta
return -1;
}
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
nb_xstats = ntnic_xstats_ops->nthw_xstats_get(p_nt4ga_stat, stats, n, if_index);
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
return nb_xstats;
}
@@ -1544,10 +1545,10 @@ static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
return -1;
}
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
nb_xstats =
ntnic_xstats_ops->nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n, if_index);
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
return nb_xstats;
}
@@ -1566,9 +1567,9 @@ static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
return -1;
}
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
ntnic_xstats_ops->nthw_xstats_reset(p_nt4ga_stat, if_index);
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
return dpdk_stats_reset(internals, p_nt_drv, if_index);
}
@@ -1749,14 +1750,14 @@ THREAD_FUNC port_event_thread_fn(void *context)
if (p_nt4ga_stat->flm_stat_ver > 22 && p_nt4ga_stat->mp_stat_structs_flm) {
if (flmdata.lookup != p_nt4ga_stat->mp_stat_structs_flm->load_lps ||
flmdata.access != p_nt4ga_stat->mp_stat_structs_flm->load_aps) {
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
flmdata.lookup = p_nt4ga_stat->mp_stat_structs_flm->load_lps;
flmdata.access = p_nt4ga_stat->mp_stat_structs_flm->load_aps;
flmdata.lookup_maximum =
p_nt4ga_stat->mp_stat_structs_flm->max_lps;
flmdata.access_maximum =
p_nt4ga_stat->mp_stat_structs_flm->max_aps;
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
if (eth_dev && eth_dev->data && eth_dev->data->dev_private) {
rte_eth_dev_callback_process(eth_dev,
@@ -1773,7 +1774,7 @@ THREAD_FUNC port_event_thread_fn(void *context)
if (p_nt4ga_stat->mp_port_load) {
if (portdata.rx_bps != p_nt4ga_stat->mp_port_load[port_no].rx_bps ||
portdata.tx_bps != p_nt4ga_stat->mp_port_load[port_no].tx_bps) {
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
portdata.rx_bps = p_nt4ga_stat->mp_port_load[port_no].rx_bps;
portdata.tx_bps = p_nt4ga_stat->mp_port_load[port_no].tx_bps;
portdata.rx_pps = p_nt4ga_stat->mp_port_load[port_no].rx_pps;
@@ -1786,7 +1787,7 @@ THREAD_FUNC port_event_thread_fn(void *context)
p_nt4ga_stat->mp_port_load[port_no].rx_bps_max;
portdata.tx_bps_maximum =
p_nt4ga_stat->mp_port_load[port_no].tx_bps_max;
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
if (eth_dev && eth_dev->data && eth_dev->data->dev_private) {
rte_eth_dev_callback_process(eth_dev,
@@ -1957,9 +1958,9 @@ THREAD_FUNC adapter_stat_thread_fn(void *context)
/* Check then collect */
{
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
nt4ga_stat_ops->nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
}
}
@@ -2232,7 +2233,7 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev)
}
}
- pthread_mutex_init(&p_nt_drv->stat_lck, NULL);
+ rte_spinlock_init(&p_nt_drv->stat_lck);
res = THREAD_CTRL_CREATE(&p_nt_drv->stat_thread, "nt4ga_stat_thr", adapter_stat_thread_fn,
(void *)p_drv);
@@ -910,7 +910,7 @@ static int poll_statistics(struct pmd_internals *internals)
internals->last_stat_rtc = now_rtc;
- pthread_mutex_lock(&p_nt_drv->stat_lck);
+ rte_spinlock_lock(&p_nt_drv->stat_lck);
/*
* Add the RX statistics increments since last time we polled.
@@ -951,7 +951,7 @@ static int poll_statistics(struct pmd_internals *internals)
/* Globally only once a second */
if ((now_rtc - last_stat_rtc) < rte_tsc_freq) {
rte_spinlock_unlock(&hwlock);
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
return 0;
}
@@ -988,7 +988,7 @@ static int poll_statistics(struct pmd_internals *internals)
}
rte_spinlock_unlock(&hwlock);
- pthread_mutex_unlock(&p_nt_drv->stat_lck);
+ rte_spinlock_unlock(&p_nt_drv->stat_lck);
return 0;
}