[v5,35/80] net/ntnic: add learn flow queue handling

Message ID 20241030213940.3470062-36-sil-plv@napatech.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: Provide flow filter and statistics support

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Serhii Iliushyk Oct. 30, 2024, 9:38 p.m. UTC
From: Danylo Vodopianov <dvo-plv@napatech.com>

Implement a thread for handling the flow learn queue.

When the FPGA runs the inline profile, the driver now creates the learn
queue (flm_setup_queues) and spawns a control thread,
adapter_flm_update_thread_fn, which polls the queue and flushes pending
learn records to the FLM module through hw_mod_flm_lrn_data_set_flush().
On driver deinit the thread is joined and the queue is freed.

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
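For context: the thread added here is the consumer side of the learn queue;
producers use the pre-existing write-buffer helpers declared in
flm_lrn_queue.h. A minimal sketch of that producer path (not part of this
patch) follows; the queue-handle parameter, fill_learn_record() and the
NULL-on-full behaviour are assumptions made for illustration only.

/*
 * Illustrative producer-side sketch (not part of this patch).
 * A flow-programming path reserves one learn record in the ring, fills it,
 * and publishes it for adapter_flm_update_thread_fn() to drain.
 * fill_learn_record() is a hypothetical stand-in for the real record
 * construction.
 */
#include <stdint.h>

#include "flm_lrn_queue.h"

/* Hypothetical: encode a flow into one flm_v25_lrn_data_s worth of words. */
void fill_learn_record(uint32_t *rec, const void *flow_ctx);

int enqueue_learn_record(void *lrn_queue, const void *flow_ctx)
{
	/* Reserve space for one record in the ring (zero-copy enqueue). */
	uint32_t *rec = flm_lrn_queue_get_write_buffer(lrn_queue);

	if (rec == NULL)
		return -1;	/* assumed behaviour when the ring is full */

	fill_learn_record(rec, flow_ctx);

	/*
	 * Publish the record; the FLM update thread picks it up through
	 * flm_lrn_queue_get_read_buffer() and flushes it to hardware with
	 * hw_mod_flm_lrn_data_set_flush().
	 */
	flm_lrn_queue_release_write_buffer(lrn_queue);
	return 0;
}

Because the ring is created with RING_F_MP_HTS_ENQ | RING_F_SC_DEQ, several
such producers may enqueue concurrently while only the new FLM update thread
dequeues.
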
 drivers/net/ntnic/include/hw_mod_backend.h    |  5 +
 drivers/net/ntnic/include/ntdrv_4ga.h         |  1 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c   | 33 +++++++
 .../flow_api/profile_inline/flm_lrn_queue.c   | 42 +++++++++
 .../flow_api/profile_inline/flm_lrn_queue.h   | 11 +++
 .../profile_inline/flow_api_profile_inline.c  | 48 ++++++++++
 drivers/net/ntnic/ntnic_ethdev.c              | 94 +++++++++++++++++++
 drivers/net/ntnic/ntnic_mod_reg.h             |  7 ++
 8 files changed, 241 insertions(+)
  

Patch

diff --git a/drivers/net/ntnic/include/hw_mod_backend.h b/drivers/net/ntnic/include/hw_mod_backend.h
index 13722c30a9..17d5755634 100644
--- a/drivers/net/ntnic/include/hw_mod_backend.h
+++ b/drivers/net/ntnic/include/hw_mod_backend.h
@@ -688,6 +688,11 @@  int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
 int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index,
 	uint32_t value);
 
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e field,
+	const uint32_t *value, uint32_t records,
+	uint32_t *handled_records, uint32_t *inf_word_cnt,
+	uint32_t *sta_word_cnt);
+
 int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be, int start_idx, int count);
 
 struct hsh_func_s {
diff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h
index 8017aa4fc3..8ebdd98db0 100644
--- a/drivers/net/ntnic/include/ntdrv_4ga.h
+++ b/drivers/net/ntnic/include/ntdrv_4ga.h
@@ -14,6 +14,7 @@  typedef struct ntdrv_4ga_s {
 	char *p_drv_name;
 
 	volatile bool b_shutdown;
+	rte_thread_t flm_thread;
 } ntdrv_4ga_t;
 
 #endif	/* __NTDRV_4GA_H__ */
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
index 0a7e90c04f..f4c29b8bde 100644
--- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c
@@ -712,3 +712,36 @@  int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int i
 
 	return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
 }
+
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e field,
+	const uint32_t *value, uint32_t records,
+	uint32_t *handled_records, uint32_t *inf_word_cnt,
+	uint32_t *sta_word_cnt)
+{
+	int ret = 0;
+
+	switch (_VER_) {
+	case 25:
+		switch (field) {
+		case HW_FLM_FLOW_LRN_DATA:
+			ret = be->iface->flm_lrn_data_flush(be->be_dev, &be->flm, value, records,
+					handled_records,
+					(sizeof(struct flm_v25_lrn_data_s) /
+						sizeof(uint32_t)),
+					inf_word_cnt, sta_word_cnt);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.c
index ad7efafe08..6e77c28f93 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.c
@@ -13,8 +13,28 @@ 
 
 #include "flm_lrn_queue.h"
 
+#define QUEUE_SIZE (1 << 13)
+
 #define ELEM_SIZE sizeof(struct flm_v25_lrn_data_s)
 
+void *flm_lrn_queue_create(void)
+{
+	static_assert((ELEM_SIZE & ~(size_t)3) == ELEM_SIZE, "FLM LEARN struct size");
+	struct rte_ring *q = rte_ring_create_elem("RFQ",
+		ELEM_SIZE,
+		QUEUE_SIZE,
+		SOCKET_ID_ANY,
+		RING_F_MP_HTS_ENQ | RING_F_SC_DEQ);
+	assert(q != NULL);
+	return q;
+}
+
+void flm_lrn_queue_free(void *q)
+{
+	if (q)
+		rte_ring_free(q);
+}
+
 uint32_t *flm_lrn_queue_get_write_buffer(void *q)
 {
 	struct rte_ring_zc_data zcd;
@@ -26,3 +46,25 @@  void flm_lrn_queue_release_write_buffer(void *q)
 {
 	rte_ring_enqueue_zc_elem_finish(q, 1);
 }
+
+read_record flm_lrn_queue_get_read_buffer(void *q)
+{
+	struct rte_ring_zc_data zcd;
+	read_record rr;
+
+	if (rte_ring_dequeue_zc_burst_elem_start(q, ELEM_SIZE, QUEUE_SIZE, &zcd, NULL) != 0) {
+		rr.num = zcd.n1;
+		rr.p = zcd.ptr1;
+
+	} else {
+		rr.num = 0;
+		rr.p = NULL;
+	}
+
+	return rr;
+}
+
+void flm_lrn_queue_release_read_buffer(void *q, uint32_t num)
+{
+	rte_ring_dequeue_zc_elem_finish(q, num);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.h
index 8cee0c8e78..40558f4201 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.h
@@ -8,7 +8,18 @@ 
 
 #include <stdint.h>
 
+typedef struct read_record {
+	uint32_t *p;
+	uint32_t num;
+} read_record;
+
+void *flm_lrn_queue_create(void);
+void flm_lrn_queue_free(void *q);
+
 uint32_t *flm_lrn_queue_get_write_buffer(void *q);
 void flm_lrn_queue_release_write_buffer(void *q);
 
+read_record flm_lrn_queue_get_read_buffer(void *q);
+void flm_lrn_queue_release_read_buffer(void *q, uint32_t num);
+
 #endif	/* _FLM_LRN_QUEUE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
index 7a0cb1f9c4..7487b5150e 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
@@ -39,6 +39,48 @@ 
 
 static void *flm_lrn_queue_arr;
 
+static void flm_setup_queues(void)
+{
+	flm_lrn_queue_arr = flm_lrn_queue_create();
+	assert(flm_lrn_queue_arr != NULL);
+}
+
+static void flm_free_queues(void)
+{
+	flm_lrn_queue_free(flm_lrn_queue_arr);
+}
+
+static uint32_t flm_lrn_update(struct flow_eth_dev *dev, uint32_t *inf_word_cnt,
+	uint32_t *sta_word_cnt)
+{
+	read_record r = flm_lrn_queue_get_read_buffer(flm_lrn_queue_arr);
+
+	if (r.num) {
+		uint32_t handled_records = 0;
+
+		if (hw_mod_flm_lrn_data_set_flush(&dev->ndev->be, HW_FLM_FLOW_LRN_DATA, r.p, r.num,
+			&handled_records, inf_word_cnt, sta_word_cnt)) {
+			NT_LOG(ERR, FILTER, "Flow programming failed");
+
+		} else if (handled_records > 0) {
+			flm_lrn_queue_release_read_buffer(flm_lrn_queue_arr, handled_records);
+		}
+	}
+
+	return r.num;
+}
+
+static uint32_t flm_update(struct flow_eth_dev *dev)
+{
+	static uint32_t inf_word_cnt;
+	static uint32_t sta_word_cnt;
+
+	if (flm_lrn_update(dev, &inf_word_cnt, &sta_word_cnt) != 0)
+		return 1;
+
+	return inf_word_cnt + sta_word_cnt;
+}
+
 static int rx_queue_idx_to_hw_id(const struct flow_eth_dev *dev, int id)
 {
 	for (int i = 0; i < dev->num_queues; ++i)
@@ -4214,6 +4256,12 @@  static const struct profile_inline_ops ops = {
 	.flow_create_profile_inline = flow_create_profile_inline,
 	.flow_destroy_profile_inline = flow_destroy_profile_inline,
 	.flow_nic_set_hasher_fields_inline = flow_nic_set_hasher_fields_inline,
+	/*
+	 * NT Flow FLM queue API
+	 */
+	.flm_setup_queues = flm_setup_queues,
+	.flm_free_queues = flm_free_queues,
+	.flm_update = flm_update,
 };
 
 void profile_inline_init(void)
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
index a509a8eb51..bfca8f28b1 100644
--- a/drivers/net/ntnic/ntnic_ethdev.c
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -24,6 +24,11 @@ 
 #include "ntnic_mod_reg.h"
 #include "nt_util.h"
 
+const rte_thread_attr_t thread_attr = { .priority = RTE_THREAD_PRIORITY_NORMAL };
+#define THREAD_CTRL_CREATE(a, b, c, d) rte_thread_create_internal_control(a, b, c, d)
+#define THREAD_JOIN(a) rte_thread_join(a, NULL)
+#define THREAD_FUNC static uint32_t
+#define THREAD_RETURN (0)
 #define HW_MAX_PKT_LEN (10000)
 #define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
 
@@ -120,6 +125,16 @@  store_pdrv(struct drv_s *p_drv)
 	rte_spinlock_unlock(&hwlock);
 }
 
+static void clear_pdrv(struct drv_s *p_drv)
+{
+	if (p_drv->adapter_no > NUM_ADAPTER_MAX)
+		return;
+
+	rte_spinlock_lock(&hwlock);
+	_g_p_drv[p_drv->adapter_no] = NULL;
+	rte_spinlock_unlock(&hwlock);
+}
+
 static struct drv_s *
 get_pdrv_from_pci(struct rte_pci_addr addr)
 {
@@ -1240,6 +1255,13 @@  eth_dev_set_link_down(struct rte_eth_dev *eth_dev)
 static void
 drv_deinit(struct drv_s *p_drv)
 {
+	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
+
+	if (profile_inline_ops == NULL) {
+		NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized");
+		return;
+	}
+
 	const struct adapter_ops *adapter_ops = get_adapter_ops();
 
 	if (adapter_ops == NULL) {
@@ -1251,6 +1273,22 @@  drv_deinit(struct drv_s *p_drv)
 		return;
 
 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
+	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
+
+	/*
+	 * Mark the global pdrv as cleared. Used by some threads to terminate.
+	 * Wait 1 second to give the threads a chance to see the termination.
+	 */
+	clear_pdrv(p_drv);
+	nt_os_wait_usec(1000000);
+
+	/* stop statistics threads */
+	p_drv->ntdrv.b_shutdown = true;
+
+	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		THREAD_JOIN(p_nt_drv->flm_thread);
+		profile_inline_ops->flm_free_queues();
+	}
 
 	/* stop adapter */
 	adapter_ops->deinit(&p_nt_drv->adapter_info);
@@ -1359,6 +1397,43 @@  static const struct eth_dev_ops nthw_eth_dev_ops = {
 	.promiscuous_enable = promiscuous_enable,
 };
 
+/*
+ * Adapter flm stat thread
+ */
+THREAD_FUNC adapter_flm_update_thread_fn(void *context)
+{
+	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
+
+	if (profile_inline_ops == NULL) {
+		NT_LOG(ERR, NTNIC, "%s: profile_inline module uninitialized", __func__);
+		return THREAD_RETURN;
+	}
+
+	struct drv_s *p_drv = context;
+
+	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
+	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
+	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
+	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
+
+	NT_LOG(DBG, NTNIC, "%s: %s: waiting for port configuration",
+		p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (p_flow_nic_dev->eth_base == NULL)
+		nt_os_wait_usec(1 * 1000 * 1000);
+
+	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
+
+	NT_LOG(DBG, NTNIC, "%s: %s: begin", p_adapter_info->mp_adapter_id_str, __func__);
+
+	while (!p_drv->ntdrv.b_shutdown)
+		if (profile_inline_ops->flm_update(dev) == 0)
+			nt_os_wait_usec(10);
+
+	NT_LOG(DBG, NTNIC, "%s: %s: end", p_adapter_info->mp_adapter_id_str, __func__);
+	return THREAD_RETURN;
+}
+
 static int
 nthw_pci_dev_init(struct rte_pci_device *pci_dev)
 {
@@ -1369,6 +1444,13 @@  nthw_pci_dev_init(struct rte_pci_device *pci_dev)
 		/* Return statement is not necessary here to allow traffic processing by SW  */
 	}
 
+	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
+
+	if (profile_inline_ops == NULL) {
+		NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized");
+		/* Return statement is not necessary here to allow traffic processing by SW  */
+	}
+
 	nt_vfio_init();
 	const struct port_ops *port_ops = get_port_ops();
 
@@ -1597,6 +1679,18 @@  nthw_pci_dev_init(struct rte_pci_device *pci_dev)
 		return -1;
 	}
 
+	if (profile_inline_ops != NULL && fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
+		profile_inline_ops->flm_setup_queues();
+		res = THREAD_CTRL_CREATE(&p_nt_drv->flm_thread, "ntnic-nt_flm_update_thr",
+			adapter_flm_update_thread_fn, (void *)p_drv);
+
+		if (res) {
+			NT_LOG_DBGX(ERR, NTNIC, "%s: error=%d",
+				(pci_dev->name[0] ? pci_dev->name : "NA"), res);
+			return -1;
+		}
+	}
+
 	n_phy_ports = fpga_info->n_phy_ports;
 
 	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
diff --git a/drivers/net/ntnic/ntnic_mod_reg.h b/drivers/net/ntnic/ntnic_mod_reg.h
index 1069be2f85..27d6cbef01 100644
--- a/drivers/net/ntnic/ntnic_mod_reg.h
+++ b/drivers/net/ntnic/ntnic_mod_reg.h
@@ -256,6 +256,13 @@  struct profile_inline_ops {
 	int (*flow_nic_set_hasher_fields_inline)(struct flow_nic_dev *ndev,
 		int hsh_idx,
 		struct nt_eth_rss_conf rss_conf);
+
+	/*
+	 * NT Flow FLM queue API
+	 */
+	void (*flm_setup_queues)(void);
+	void (*flm_free_queues)(void);
+	uint32_t (*flm_update)(struct flow_eth_dev *dev);
 };
 
 void register_profile_inline_ops(const struct profile_inline_ops *ops);
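
The read side added by this patch is built on the ring's zero-copy element
API. A standalone sketch of that dequeue pattern is shown below; drain_ring()
and REC_WORDS are illustrative names and sizes, and an initialized EAL is
assumed, so this is a sketch of the pattern rather than driver code.

/*
 * Standalone sketch of the zero-copy dequeue pattern used by
 * flm_lrn_queue_get_read_buffer()/flm_lrn_queue_release_read_buffer().
 */
#include <stdint.h>

#include <rte_ring.h>
#include <rte_ring_elem.h>	/* zero-copy element API (rte_ring_peek_zc.h on some DPDK versions) */

#define REC_WORDS 16				/* example element size in 32-bit words */
#define REC_SIZE  (REC_WORDS * sizeof(uint32_t))

static unsigned int drain_ring(struct rte_ring *r)
{
	struct rte_ring_zc_data zcd;
	unsigned int n;

	/* Peek at up to 64 contiguous elements without copying them out. */
	n = rte_ring_dequeue_zc_burst_elem_start(r, REC_SIZE, 64, &zcd, NULL);
	if (n == 0)
		return 0;

	/*
	 * zcd.ptr1/zcd.n1 describe the first contiguous chunk; if the burst
	 * wrapped around the ring, the remaining n - zcd.n1 elements start at
	 * zcd.ptr2.  Like the driver, this sketch only consumes the first
	 * chunk per call.
	 */
	const uint32_t *records = zcd.ptr1;
	unsigned int consumed = zcd.n1;

	(void)records;	/* hand the records to hardware here */

	/* Release exactly what was consumed; the rest stays in the ring. */
	rte_ring_dequeue_zc_elem_finish(r, consumed);
	return consumed;
}

The finish call may release fewer elements than the start call reserved,
which is how flm_lrn_update() keeps records the hardware did not accept
queued for the next polling iteration.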