@@ -66,6 +66,7 @@ Features
- Port and queue statistics.
- RMON statistics in extended stats.
- Link state information.
+- Flow statistics.
Limitations
~~~~~~~~~~~
@@ -189,6 +189,24 @@ static int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
return -1;
}
+ if (get_flow_filter_ops() != NULL) {
+ struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
+ p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
+ p_nt4ga_stat->mp_stat_structs_flm = calloc(1, sizeof(struct flm_counters_v1));
+
+ if (!p_nt4ga_stat->mp_stat_structs_flm) {
+ NT_LOG_DBGX(ERR, GENERAL, "Cannot allocate mem.");
+ return -1;
+ }
+
+ p_nt4ga_stat->mp_stat_structs_flm->max_aps =
+ nthw_fpga_get_product_param(p_adapter_info->fpga_info.mp_fpga,
+ NT_FLM_LOAD_APS_MAX, 0);
+ p_nt4ga_stat->mp_stat_structs_flm->max_lps =
+ nthw_fpga_get_product_param(p_adapter_info->fpga_info.mp_fpga,
+ NT_FLM_LOAD_LPS_MAX, 0);
+ }
+
p_nt4ga_stat->mp_port_load =
calloc(NUM_ADAPTER_PORTS_MAX, sizeof(struct port_load_counters));
@@ -236,6 +254,7 @@ static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info
return -1;
nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+ struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -542,6 +561,27 @@ static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info
(uint64_t)(((__uint128_t)val * 32ULL) / PORT_LOAD_WINDOWS_SIZE);
}
+ /* Update and get FLM stats */
+ flow_filter_ops->flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+ sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
+ /*
+ * Calculate correct load values:
+ * rpp = nthw_fpga_get_product_param(p_fpga, NT_RPP_PER_PS, 0);
+ * bin = (uint32_t)(((FLM_LOAD_WINDOWS_SIZE * 1000000000000ULL) / (32ULL * rpp)) - 1ULL);
+ * load_aps = ((uint64_t)load_aps * 1000000000000ULL) / (uint64_t)((bin+1) * rpp);
+ * load_lps = ((uint64_t)load_lps * 1000000000000ULL) / (uint64_t)((bin+1) * rpp);
+ *
+ * Simplified it gives:
+ *
+ * load_lps = (load_lps * 32ULL) / FLM_LOAD_WINDOWS_SIZE
+ * load_aps = (load_aps * 32ULL) / FLM_LOAD_WINDOWS_SIZE
+ */
+
+ p_nt4ga_stat->mp_stat_structs_flm->load_aps =
+ (p_nt4ga_stat->mp_stat_structs_flm->load_aps * 32ULL) / FLM_LOAD_WINDOWS_SIZE;
+ p_nt4ga_stat->mp_stat_structs_flm->load_lps =
+ (p_nt4ga_stat->mp_stat_structs_flm->load_lps * 32ULL) / FLM_LOAD_WINDOWS_SIZE;
return 0;
}
@@ -688,6 +688,9 @@ int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index,
uint32_t value);
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value);
+
int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e field,
const uint32_t *value, uint32_t records,
uint32_t *handled_records, uint32_t *inf_word_cnt,
@@ -17,6 +17,7 @@ typedef struct ntdrv_4ga_s {
rte_thread_t flm_thread;
pthread_mutex_t stat_lck;
rte_thread_t stat_thread;
+ rte_thread_t port_event_thread;
} ntdrv_4ga_t;
#endif /* __NTDRV_4GA_H__ */
@@ -59,6 +59,7 @@ sources = files(
'nthw/flow_api/flow_id_table.c',
'nthw/flow_api/hw_mod/hw_mod_backend.c',
'nthw/flow_api/profile_inline/flm_lrn_queue.c',
+ 'nthw/flow_api/profile_inline/flm_evt_queue.c',
'nthw/flow_api/profile_inline/flow_api_profile_inline.c',
'nthw/flow_api/profile_inline/flow_api_hw_db_inline.c',
'nthw/flow_api/flow_backend/flow_backend.c',
@@ -1016,11 +1016,14 @@ int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
{
- (void)ndev;
- (void)data;
- (void)size;
+ const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
+
+ if (profile_inline_ops == NULL)
+ return -1;
+
+ if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
+ return profile_inline_ops->flow_get_flm_stats_profile_inline(ndev, data, size);
- NT_LOG_DBGX(DBG, FILTER, "Not implemented yet");
return -1;
}
@@ -712,6 +712,148 @@ int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int i
return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
}
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value)
+{
+ switch (_VER_) {
+ case 25:
+ switch (field) {
+ case HW_FLM_STAT_LRN_DONE:
+ *value = be->flm.v25.lrn_done->cnt;
+ break;
+
+ case HW_FLM_STAT_LRN_IGNORE:
+ *value = be->flm.v25.lrn_ignore->cnt;
+ break;
+
+ case HW_FLM_STAT_LRN_FAIL:
+ *value = be->flm.v25.lrn_fail->cnt;
+ break;
+
+ case HW_FLM_STAT_UNL_DONE:
+ *value = be->flm.v25.unl_done->cnt;
+ break;
+
+ case HW_FLM_STAT_UNL_IGNORE:
+ *value = be->flm.v25.unl_ignore->cnt;
+ break;
+
+ case HW_FLM_STAT_REL_DONE:
+ *value = be->flm.v25.rel_done->cnt;
+ break;
+
+ case HW_FLM_STAT_REL_IGNORE:
+ *value = be->flm.v25.rel_ignore->cnt;
+ break;
+
+ case HW_FLM_STAT_PRB_DONE:
+ *value = be->flm.v25.prb_done->cnt;
+ break;
+
+ case HW_FLM_STAT_PRB_IGNORE:
+ *value = be->flm.v25.prb_ignore->cnt;
+ break;
+
+ case HW_FLM_STAT_AUL_DONE:
+ *value = be->flm.v25.aul_done->cnt;
+ break;
+
+ case HW_FLM_STAT_AUL_IGNORE:
+ *value = be->flm.v25.aul_ignore->cnt;
+ break;
+
+ case HW_FLM_STAT_AUL_FAIL:
+ *value = be->flm.v25.aul_fail->cnt;
+ break;
+
+ case HW_FLM_STAT_TUL_DONE:
+ *value = be->flm.v25.tul_done->cnt;
+ break;
+
+ case HW_FLM_STAT_FLOWS:
+ *value = be->flm.v25.flows->cnt;
+ break;
+
+ case HW_FLM_LOAD_LPS:
+ *value = be->flm.v25.load_lps->lps;
+ break;
+
+ case HW_FLM_LOAD_APS:
+ *value = be->flm.v25.load_aps->aps;
+ break;
+
+ default: {
+ if (_VER_ < 18)
+ return UNSUP_FIELD;
+
+ switch (field) {
+ case HW_FLM_STAT_STA_DONE:
+ *value = be->flm.v25.sta_done->cnt;
+ break;
+
+ case HW_FLM_STAT_INF_DONE:
+ *value = be->flm.v25.inf_done->cnt;
+ break;
+
+ case HW_FLM_STAT_INF_SKIP:
+ *value = be->flm.v25.inf_skip->cnt;
+ break;
+
+ case HW_FLM_STAT_PCK_HIT:
+ *value = be->flm.v25.pck_hit->cnt;
+ break;
+
+ case HW_FLM_STAT_PCK_MISS:
+ *value = be->flm.v25.pck_miss->cnt;
+ break;
+
+ case HW_FLM_STAT_PCK_UNH:
+ *value = be->flm.v25.pck_unh->cnt;
+ break;
+
+ case HW_FLM_STAT_PCK_DIS:
+ *value = be->flm.v25.pck_dis->cnt;
+ break;
+
+ case HW_FLM_STAT_CSH_HIT:
+ *value = be->flm.v25.csh_hit->cnt;
+ break;
+
+ case HW_FLM_STAT_CSH_MISS:
+ *value = be->flm.v25.csh_miss->cnt;
+ break;
+
+ case HW_FLM_STAT_CSH_UNH:
+ *value = be->flm.v25.csh_unh->cnt;
+ break;
+
+ case HW_FLM_STAT_CUC_START:
+ *value = be->flm.v25.cuc_start->cnt;
+ break;
+
+ case HW_FLM_STAT_CUC_MOVE:
+ *value = be->flm.v25.cuc_move->cnt;
+ break;
+
+ default:
+ return UNSUP_FIELD;
+ }
+ }
+ break;
+ }
+
+ break;
+
+ default:
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e field,
const uint32_t *value, uint32_t records,
new file mode 100644
@@ -0,0 +1,176 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ring.h>
+#include <rte_errno.h>
+
+#include "ntlog.h"
+#include "flm_evt_queue.h"
+
+/* Local queues for flm statistic events */
+static struct rte_ring *info_q_local[MAX_INFO_LCL_QUEUES];
+
+/* Remote queues for flm statistic events */
+static struct rte_ring *info_q_remote[MAX_INFO_RMT_QUEUES];
+
+/* Local queues for flm status records */
+static struct rte_ring *stat_q_local[MAX_STAT_LCL_QUEUES];
+
+/* Remote queues for flm status records */
+static struct rte_ring *stat_q_remote[MAX_STAT_RMT_QUEUES];
+
+
+static struct rte_ring *flm_evt_queue_create(uint8_t port, uint8_t caller)
+{
+ static_assert((FLM_EVT_ELEM_SIZE & ~(size_t)3) == FLM_EVT_ELEM_SIZE,
+ "FLM EVENT struct size");
+ static_assert((FLM_STAT_ELEM_SIZE & ~(size_t)3) == FLM_STAT_ELEM_SIZE,
+ "FLM STAT struct size");
+ char name[20] = "NONE";
+ struct rte_ring *q;
+ uint32_t elem_size = 0;
+ uint32_t queue_size = 0;
+
+ switch (caller) {
+ case FLM_INFO_LOCAL:
+ if (port >= MAX_INFO_LCL_QUEUES) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM statistic event queue cannot be created for port %u. Max supported port is %u",
+ port,
+ MAX_INFO_LCL_QUEUES - 1);
+ return NULL;
+ }
+
+ snprintf(name, 20, "LOCAL_INFO%u", port);
+ elem_size = FLM_EVT_ELEM_SIZE;
+ queue_size = FLM_EVT_QUEUE_SIZE;
+ break;
+
+ case FLM_INFO_REMOTE:
+ if (port >= MAX_INFO_RMT_QUEUES) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM statistic event queue cannot be created for vport %u. Max supported vport is %u",
+ port,
+ MAX_INFO_RMT_QUEUES - 1);
+ return NULL;
+ }
+
+ snprintf(name, 20, "REMOTE_INFO%u", port);
+ elem_size = FLM_EVT_ELEM_SIZE;
+ queue_size = FLM_EVT_QUEUE_SIZE;
+ break;
+
+ case FLM_STAT_LOCAL:
+ if (port >= MAX_STAT_LCL_QUEUES) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM status queue cannot be created for port %u. Max supported port is %u",
+ port,
+ MAX_STAT_LCL_QUEUES - 1);
+ return NULL;
+ }
+
+ snprintf(name, 20, "LOCAL_STAT%u", port);
+ elem_size = FLM_STAT_ELEM_SIZE;
+ queue_size = FLM_STAT_QUEUE_SIZE;
+ break;
+
+ case FLM_STAT_REMOTE:
+ if (port >= MAX_STAT_RMT_QUEUES) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM status queue cannot be created for vport %u. Max supported vport is %u",
+ port,
+ MAX_STAT_RMT_QUEUES - 1);
+ return NULL;
+ }
+
+ snprintf(name, 20, "REMOTE_STAT%u", port);
+ elem_size = FLM_STAT_ELEM_SIZE;
+ queue_size = FLM_STAT_QUEUE_SIZE;
+ break;
+
+ default:
+ NT_LOG(ERR, FILTER, "FLM queue create illegal caller: %u", caller);
+ return NULL;
+ }
+
+ q = rte_ring_create_elem(name,
+ elem_size,
+ queue_size,
+ SOCKET_ID_ANY,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (q == NULL) {
+ NT_LOG(WRN, FILTER, "FLM queues cannot be created due to error %02X", rte_errno);
+ return NULL;
+ }
+
+ switch (caller) {
+ case FLM_INFO_LOCAL:
+ info_q_local[port] = q;
+ break;
+
+ case FLM_INFO_REMOTE:
+ info_q_remote[port] = q;
+ break;
+
+ case FLM_STAT_LOCAL:
+ stat_q_local[port] = q;
+ break;
+
+ case FLM_STAT_REMOTE:
+ stat_q_remote[port] = q;
+ break;
+
+ default:
+ break;
+ }
+
+ return q;
+}
+
+int flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *obj)
+{
+ int ret;
+
+	/* If the queue is not created, then ignore and return */
+ if (!remote) {
+ if (port < MAX_INFO_LCL_QUEUES) {
+ if (info_q_local[port] != NULL) {
+ ret = rte_ring_sc_dequeue_elem(info_q_local[port],
+ obj,
+ FLM_EVT_ELEM_SIZE);
+ return ret;
+ }
+
+ if (flm_evt_queue_create(port, FLM_INFO_LOCAL) != NULL) {
+ /* Recursive call to get data */
+ return flm_inf_queue_get(port, remote, obj);
+ }
+ }
+
+ } else if (port < MAX_INFO_RMT_QUEUES) {
+ if (info_q_remote[port] != NULL) {
+ ret = rte_ring_sc_dequeue_elem(info_q_remote[port],
+ obj,
+ FLM_EVT_ELEM_SIZE);
+ return ret;
+ }
+
+ if (flm_evt_queue_create(port, FLM_INFO_REMOTE) != NULL) {
+ /* Recursive call to get data */
+ return flm_inf_queue_get(port, remote, obj);
+ }
+ }
+
+ return -ENOENT;
+}
new file mode 100644
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Napatech A/S
+ */
+
+#ifndef _FLM_EVT_QUEUE_H_
+#define _FLM_EVT_QUEUE_H_
+
+#include "stdint.h"
+#include "stdbool.h"
+
+struct flm_status_event_s {
+ void *flow;
+ uint32_t learn_ignore : 1;
+ uint32_t learn_failed : 1;
+ uint32_t learn_done : 1;
+};
+
+struct flm_info_event_s {
+ uint64_t bytes;
+ uint64_t packets;
+ uint64_t timestamp;
+ uint64_t id;
+ uint8_t cause;
+};
+
+enum {
+ FLM_INFO_LOCAL,
+ FLM_INFO_REMOTE,
+ FLM_STAT_LOCAL,
+ FLM_STAT_REMOTE,
+};
+
+/* Max number of local queues */
+#define MAX_INFO_LCL_QUEUES 8
+#define MAX_STAT_LCL_QUEUES 8
+
+/* Max number of remote queues */
+#define MAX_INFO_RMT_QUEUES 128
+#define MAX_STAT_RMT_QUEUES 128
+
+/* queue size */
+#define FLM_EVT_QUEUE_SIZE 8192
+#define FLM_STAT_QUEUE_SIZE 8192
+
+/* Event element size */
+#define FLM_EVT_ELEM_SIZE sizeof(struct flm_info_event_s)
+#define FLM_STAT_ELEM_SIZE sizeof(struct flm_status_event_s)
+
+int flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *obj);
+
+#endif /* _FLM_EVT_QUEUE_H_ */
@@ -4462,6 +4462,48 @@ int flow_dev_dump_profile_inline(struct flow_eth_dev *dev,
return 0;
}
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+ const enum hw_flm_e fields[] = {
+ HW_FLM_STAT_FLOWS, HW_FLM_STAT_LRN_DONE, HW_FLM_STAT_LRN_IGNORE,
+ HW_FLM_STAT_LRN_FAIL, HW_FLM_STAT_UNL_DONE, HW_FLM_STAT_UNL_IGNORE,
+ HW_FLM_STAT_AUL_DONE, HW_FLM_STAT_AUL_IGNORE, HW_FLM_STAT_AUL_FAIL,
+ HW_FLM_STAT_TUL_DONE, HW_FLM_STAT_REL_DONE, HW_FLM_STAT_REL_IGNORE,
+ HW_FLM_STAT_PRB_DONE, HW_FLM_STAT_PRB_IGNORE,
+
+ HW_FLM_STAT_STA_DONE, HW_FLM_STAT_INF_DONE, HW_FLM_STAT_INF_SKIP,
+ HW_FLM_STAT_PCK_HIT, HW_FLM_STAT_PCK_MISS, HW_FLM_STAT_PCK_UNH,
+ HW_FLM_STAT_PCK_DIS, HW_FLM_STAT_CSH_HIT, HW_FLM_STAT_CSH_MISS,
+ HW_FLM_STAT_CSH_UNH, HW_FLM_STAT_CUC_START, HW_FLM_STAT_CUC_MOVE,
+
+ HW_FLM_LOAD_LPS, HW_FLM_LOAD_APS,
+ };
+
+ const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+ if (!ndev->flow_mgnt_prepared)
+ return 0;
+
+ if (size < fields_cnt)
+ return -1;
+
+ hw_mod_flm_stat_update(&ndev->be);
+
+ for (uint64_t i = 0; i < fields_cnt; ++i) {
+ uint32_t value = 0;
+ hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+ data[i] = (fields[i] == HW_FLM_STAT_FLOWS || fields[i] == HW_FLM_LOAD_LPS ||
+ fields[i] == HW_FLM_LOAD_APS)
+ ? value
+ : data[i] + value;
+
+ if (ndev->be.flm.ver < 18 && fields[i] == HW_FLM_STAT_PRB_IGNORE)
+ break;
+ }
+
+ return 0;
+}
+
static const struct profile_inline_ops ops = {
/*
* Management
@@ -4478,6 +4520,10 @@ static const struct profile_inline_ops ops = {
.flow_destroy_profile_inline = flow_destroy_profile_inline,
.flow_flush_profile_inline = flow_flush_profile_inline,
.flow_nic_set_hasher_fields_inline = flow_nic_set_hasher_fields_inline,
+ /*
+ * Stats
+ */
+ .flow_get_flm_stats_profile_inline = flow_get_flm_stats_profile_inline,
/*
* NT Flow FLM Meter API
*/
@@ -52,4 +52,10 @@ int flow_nic_set_hasher_fields_inline(struct flow_nic_dev *ndev,
int hsh_idx,
struct nt_eth_rss_conf rss_conf);
+/*
+ * Stats
+ */
+
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size);
+
#endif /* _FLOW_API_PROFILE_INLINE_H_ */
new file mode 100644
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef NTNIC_EVENT_H_
+#define NTNIC_EVENT_H_
+
+#include <rte_ethdev.h>
+
+typedef struct ntnic_flm_load_s {
+ uint64_t lookup;
+ uint64_t lookup_maximum;
+ uint64_t access;
+ uint64_t access_maximum;
+} ntnic_flm_load_t;
+
+typedef struct ntnic_port_load_s {
+ uint64_t rx_pps;
+ uint64_t rx_pps_maximum;
+ uint64_t tx_pps;
+ uint64_t tx_pps_maximum;
+ uint64_t rx_bps;
+ uint64_t rx_bps_maximum;
+ uint64_t tx_bps;
+ uint64_t tx_bps_maximum;
+} ntnic_port_load_t;
+
+struct ntnic_flm_statistic_s {
+ uint64_t bytes;
+ uint64_t packets;
+ uint64_t timestamp;
+ uint64_t id;
+ uint8_t cause;
+};
+
+enum rte_ntnic_event_type {
+ RTE_NTNIC_FLM_LOAD_EVENT = RTE_ETH_EVENT_MAX,
+ RTE_NTNIC_PORT_LOAD_EVENT,
+ RTE_NTNIC_FLM_STATS_EVENT,
+};
+
+#endif /* NTNIC_EVENT_H_ */
@@ -26,6 +26,8 @@
#include "ntnic_vfio.h"
#include "ntnic_mod_reg.h"
#include "nt_util.h"
+#include "profile_inline/flm_evt_queue.h"
+#include "rte_pmd_ntnic.h"
const rte_thread_attr_t thread_attr = { .priority = RTE_THREAD_PRIORITY_NORMAL };
#define THREAD_CREATE(a, b, c) rte_thread_create(a, &thread_attr, b, c)
@@ -1419,6 +1421,7 @@ drv_deinit(struct drv_s *p_drv)
if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
THREAD_JOIN(p_nt_drv->flm_thread);
profile_inline_ops->flm_free_queues();
+ THREAD_JOIN(p_nt_drv->port_event_thread);
}
/* stop adapter */
@@ -1709,6 +1712,123 @@ static const struct eth_dev_ops nthw_eth_dev_ops = {
.rss_hash_conf_get = rss_hash_conf_get,
};
+/*
+ * Port event thread
+ */
+THREAD_FUNC port_event_thread_fn(void *context)
+{
+ struct pmd_internals *internals = (struct pmd_internals *)context;
+ struct drv_s *p_drv = internals->p_drv;
+ ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+ struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+ struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
+
+ nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[internals->port_id];
+ uint8_t port_no = internals->port;
+
+ ntnic_flm_load_t flmdata;
+ ntnic_port_load_t portdata;
+
+ memset(&flmdata, 0, sizeof(flmdata));
+ memset(&portdata, 0, sizeof(portdata));
+
+ while (ndev != NULL && ndev->eth_base == NULL)
+ nt_os_wait_usec(1 * 1000 * 1000);
+
+ while (!p_drv->ntdrv.b_shutdown) {
+ /*
+ * FLM load measurement
+		 * Only send an event if there has been a change.
+ */
+ if (p_nt4ga_stat->flm_stat_ver > 22 && p_nt4ga_stat->mp_stat_structs_flm) {
+ if (flmdata.lookup != p_nt4ga_stat->mp_stat_structs_flm->load_lps ||
+ flmdata.access != p_nt4ga_stat->mp_stat_structs_flm->load_aps) {
+ pthread_mutex_lock(&p_nt_drv->stat_lck);
+ flmdata.lookup = p_nt4ga_stat->mp_stat_structs_flm->load_lps;
+ flmdata.access = p_nt4ga_stat->mp_stat_structs_flm->load_aps;
+ flmdata.lookup_maximum =
+ p_nt4ga_stat->mp_stat_structs_flm->max_lps;
+ flmdata.access_maximum =
+ p_nt4ga_stat->mp_stat_structs_flm->max_aps;
+ pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+ if (eth_dev && eth_dev->data && eth_dev->data->dev_private) {
+ rte_eth_dev_callback_process(eth_dev,
+ (enum rte_eth_event_type)RTE_NTNIC_FLM_LOAD_EVENT,
+ &flmdata);
+ }
+ }
+ }
+
+ /*
+ * Port load measurement
+		 * Only send an event if there has been a change.
+ */
+ if (p_nt4ga_stat->mp_port_load) {
+ if (portdata.rx_bps != p_nt4ga_stat->mp_port_load[port_no].rx_bps ||
+ portdata.tx_bps != p_nt4ga_stat->mp_port_load[port_no].tx_bps) {
+ pthread_mutex_lock(&p_nt_drv->stat_lck);
+ portdata.rx_bps = p_nt4ga_stat->mp_port_load[port_no].rx_bps;
+ portdata.tx_bps = p_nt4ga_stat->mp_port_load[port_no].tx_bps;
+ portdata.rx_pps = p_nt4ga_stat->mp_port_load[port_no].rx_pps;
+ portdata.tx_pps = p_nt4ga_stat->mp_port_load[port_no].tx_pps;
+ portdata.rx_pps_maximum =
+ p_nt4ga_stat->mp_port_load[port_no].rx_pps_max;
+ portdata.tx_pps_maximum =
+ p_nt4ga_stat->mp_port_load[port_no].tx_pps_max;
+ portdata.rx_bps_maximum =
+ p_nt4ga_stat->mp_port_load[port_no].rx_bps_max;
+ portdata.tx_bps_maximum =
+ p_nt4ga_stat->mp_port_load[port_no].tx_bps_max;
+ pthread_mutex_unlock(&p_nt_drv->stat_lck);
+
+ if (eth_dev && eth_dev->data && eth_dev->data->dev_private) {
+ rte_eth_dev_callback_process(eth_dev,
+ (enum rte_eth_event_type)RTE_NTNIC_PORT_LOAD_EVENT,
+ &portdata);
+ }
+ }
+ }
+
+ /* Process events */
+ {
+ int count = 0;
+ bool do_wait = true;
+
+ while (count < 5000) {
+ /* Local FLM statistic events */
+ struct flm_info_event_s data;
+
+ if (flm_inf_queue_get(port_no, FLM_INFO_LOCAL, &data) == 0) {
+ if (eth_dev && eth_dev->data &&
+ eth_dev->data->dev_private) {
+ struct ntnic_flm_statistic_s event_data;
+ event_data.bytes = data.bytes;
+ event_data.packets = data.packets;
+ event_data.cause = data.cause;
+ event_data.id = data.id;
+ event_data.timestamp = data.timestamp;
+ rte_eth_dev_callback_process(eth_dev,
+ (enum rte_eth_event_type)
+ RTE_NTNIC_FLM_STATS_EVENT,
+ &event_data);
+ do_wait = false;
+ }
+ }
+
+ if (do_wait)
+ nt_os_wait_usec(10);
+
+ count++;
+ do_wait = true;
+ }
+ }
+ }
+
+ return THREAD_RETURN;
+}
+
/*
* Adapter flm stat thread
*/
@@ -2235,6 +2355,18 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev)
/* increase initialized ethernet devices - PF */
p_drv->n_eth_dev_init_count++;
+
+ /* Port event thread */
+ if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+ res = THREAD_CTRL_CREATE(&p_nt_drv->port_event_thread, "nt_port_event_thr",
+ port_event_thread_fn, (void *)internals);
+
+ if (res) {
+ NT_LOG(ERR, NTNIC, "%s: error=%d",
+ (pci_dev->name[0] ? pci_dev->name : "NA"), res);
+ return -1;
+ }
+ }
}
return 0;
@@ -290,6 +290,13 @@ struct profile_inline_ops {
int hsh_idx,
struct nt_eth_rss_conf rss_conf);
+ /*
+ * Stats
+ */
+ int (*flow_get_flm_stats_profile_inline)(struct flow_nic_dev *ndev,
+ uint64_t *data,
+ uint64_t size);
+
/*
* NT Flow FLM queue API
*/