@@ -34,6 +34,8 @@ struct flow_eth_dev {
struct flow_nic_dev *ndev;
/* NIC port id */
uint8_t port;
+	/* App-assigned port_id - may be the DPDK port_id */
+ uint32_t port_id;
/* 0th for exception */
struct flow_queue_id_s rx_queue[FLOW_MAX_QUEUES + 1];
@@ -41,6 +43,9 @@ struct flow_eth_dev {
/* VSWITCH has exceptions sent on queue 0 per design */
int num_queues;
+	/* QSL_HSH index if RSS is needed (QSL v6+) */
+ int rss_target_id;
+
struct flow_eth_dev *next;
};
@@ -48,6 +53,8 @@ struct flow_eth_dev {
struct flow_nic_dev {
uint8_t adapter_no; /* physical adapter no in the host system */
uint16_t ports; /* number of in-ports addressable on this NIC */
+ /* flow profile this NIC is initially prepared for */
+ enum flow_eth_dev_profile flow_profile;
struct hw_mod_resource_s res[RES_COUNT];/* raw NIC resource allocation table */
void *km_res_handle;
@@ -73,6 +80,14 @@ struct flow_nic_dev {
extern const char *dbg_res_descr[];
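+/* set bit number 'x' in the byte-array bitmap 'arr' - one bit per resource index */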
+#define flow_nic_set_bit(arr, x) \
+ do { \
+ uint8_t *_temp_arr = (arr); \
+ size_t _temp_x = (x); \
+ _temp_arr[_temp_x / 8] = \
+ (uint8_t)(_temp_arr[_temp_x / 8] | (uint8_t)(1 << (_temp_x % 8))); \
+ } while (0)
+
#define flow_nic_unset_bit(arr, x) \
do { \
size_t _temp_x = (x); \
@@ -85,6 +100,18 @@ extern const char *dbg_res_descr[];
(arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
})
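+/*
+ * Mark resource entry 'index' of 'res_type' as used in the NIC's allocation
+ * bitmap; asserts that the entry was free beforehand.
+ */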
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+ do { \
+ struct flow_nic_dev *_temp_ndev = (_ndev); \
+ typeof(res_type) _temp_res_type = (res_type); \
+ size_t _temp_index = (index); \
+ NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu", \
+ dbg_res_descr[_temp_res_type], _temp_index); \
+ assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, \
+ _temp_index) == 0); \
+ flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+ } while (0)
+
#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
do { \
typeof(res_type) _temp_res_type = (res_type); \
@@ -97,6 +124,9 @@ extern const char *dbg_res_descr[];
#define flow_nic_is_resource_used(_ndev, res_type, index) \
(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ uint32_t alignment);
+
void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx);
int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index);
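+
+/*
+ * Typical resource lifetime (illustrative sketch only - RES_QUEUE used as the example type):
+ *
+ *	int idx = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+ *	if (idx >= 0)
+ *		flow_nic_free_resource(ndev, RES_QUEUE, idx);
+ */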
@@ -41,6 +41,11 @@ enum res_type_e {
RES_INVALID
};
+/*
+ * Flow NIC offload management
+ */
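+/* internal limit on multi-queue output destinations - must be at least FLOW_MAX_QUEUES */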
+#define MAX_OUTPUT_DEST (128)
+
void km_free_ndev_resource_management(void **handle);
void kcc_free_ndev_resource_management(void **handle);
@@ -86,6 +86,7 @@ struct __rte_cache_aligned ntnic_tx_queue {
struct pmd_internals {
const struct rte_pci_device *pci_dev;
+ struct flow_eth_dev *flw_dev;
char name[20];
int n_intf_no;
int lpbk_mode;
@@ -12,11 +12,20 @@
#define FLOW_MAX_QUEUES 128
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+ FLOW_ETH_DEV_PROFILE_INLINE = 0,
+};
+
struct flow_queue_id_s {
int id;
int hw_id;
};
struct flow_eth_dev; /* port device */
+struct flow_handle;
#endif /* _STREAM_BINARY_FLOW_API_H_ */
@@ -7,6 +7,7 @@
#include "flow_api_nic_setup.h"
#include "ntnic_mod_reg.h"
+#include "flow_api.h"
#include "flow_filter.h"
const char *dbg_res_descr[] = {
@@ -35,6 +36,24 @@ const char *dbg_res_descr[] = {
static struct flow_nic_dev *dev_base;
static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * Resources
+ */
+
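+/*
+ * Allocate the first free entry of 'res_type', scanning the allocation bitmap
+ * in steps of 'alignment'. The entry is marked used and given a reference
+ * count of 1. Returns the allocated index, or -1 if the pool is exhausted.
+ */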
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ uint32_t alignment)
+{
+ for (unsigned int i = 0; i < ndev->res[res_type].resource_count; i += alignment) {
+ if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+ flow_nic_mark_resource_used(ndev, res_type, i);
+ ndev->res[res_type].ref[i] = 1;
+ return i;
+ }
+ }
+
+ return -1;
+}
+
void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
{
flow_nic_mark_resource_unused(ndev, res_type, idx);
@@ -55,10 +74,60 @@ int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
return !!ndev->res[res_type].ref[index];/* if 0 resource has been freed */
}
+/*
+ * Nic port/adapter lookup
+ */
+
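+/* the lookups below walk the global dev_base list - callers are expected to hold base_mtx */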
+static struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+ struct flow_nic_dev *nic_dev = dev_base;
+
+ while (nic_dev) {
+ if (nic_dev->adapter_no == adapter_no)
+ break;
+
+ nic_dev = nic_dev->next;
+ }
+
+ if (!nic_dev)
+ return NULL;
+
+ struct flow_eth_dev *dev = nic_dev->eth_base;
+
+ while (dev) {
+ if (port == dev->port)
+ return dev;
+
+ dev = dev->next;
+ }
+
+ return NULL;
+}
+
+static struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+ struct flow_nic_dev *ndev = dev_base;
+
+ while (ndev) {
+ if (adapter_no == ndev->adapter_no)
+ break;
+
+ ndev = ndev->next;
+ }
+
+ return ndev;
+}
+
/*
* Device Management API
*/
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *dev)
+{
+ dev->next = ndev->eth_base;
+ ndev->eth_base = dev;
+}
+
static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev)
{
struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;
@@ -156,16 +225,6 @@ int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif
-#ifndef SCATTER_GATHER
-
- /* free rx queues */
- for (int i = 0; i < eth_dev->num_queues; i++) {
- ndev->be.iface->free_rx_queue(ndev->be.be_dev, eth_dev->rx_queue[i].hw_id);
- flow_nic_deref_resource(ndev, RES_QUEUE, eth_dev->rx_queue[i].id);
- }
-
-#endif
-
/* take eth_dev out of ndev list */
if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found", eth_dev);
@@ -242,6 +301,132 @@ static int list_remove_flow_nic(struct flow_nic_dev *ndev)
return -1;
}
+/*
+ * adapter_no       physical adapter no
+ * port_no          local port no
+ * port_id          app-assigned port_id (may be the DPDK port_id)
+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev
+ * queue_ids        sw/hw queue id pairs assigned to the allocated rx-queues
+ * rss_target_id    out: QSL_HSH index when RSS is set up, otherwise -1
+ * flow_profile     flow profile this eth_dev is prepared for
+ * exception_path   if set, redirect otherwise discarded (unmatched) packets to rx-queue 0
+ */
+static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no, uint32_t port_id,
+ int alloc_rx_queues, struct flow_queue_id_s queue_ids[],
+ int *rss_target_id, enum flow_eth_dev_profile flow_profile,
+ uint32_t exception_path)
+{
+ const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
+
+ if (profile_inline_ops == NULL)
+ NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__);
+
+ int i;
+ struct flow_eth_dev *eth_dev = NULL;
+
+ NT_LOG(DBG, FILTER,
+ "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i",
+ adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+ if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+ assert(0);
+ NT_LOG(ERR, FILTER,
+ "ERROR: Internal array for multiple queues too small for API");
+ }
+
+ pthread_mutex_lock(&base_mtx);
+ struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+ if (!ndev) {
+ /* Error - no flow api found on specified adapter */
+ NT_LOG(ERR, FILTER, "ERROR: no flow interface registered for adapter %d",
+ adapter_no);
+ pthread_mutex_unlock(&base_mtx);
+ return NULL;
+ }
+
+ if (ndev->ports < ((uint16_t)port_no + 1)) {
+ NT_LOG(ERR, FILTER, "ERROR: port exceeds supported port range for adapter");
+ pthread_mutex_unlock(&base_mtx);
+ return NULL;
+ }
+
+ if ((alloc_rx_queues - 1) > FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+ NT_LOG(ERR, FILTER,
+ "ERROR: Exceeds supported number of rx queues per eth device");
+ pthread_mutex_unlock(&base_mtx);
+ return NULL;
+ }
+
+ /* don't accept multiple eth_dev's on same NIC and same port */
+ eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+
+ if (eth_dev) {
+ NT_LOG(DBG, FILTER, "Re-opening existing NIC port device: NIC DEV: %i Port %i",
+ adapter_no, port_no);
+ pthread_mutex_unlock(&base_mtx);
+		flow_delete_eth_dev(eth_dev);
+		eth_dev = NULL;
+		/* base_mtx was released above and is unlocked again further down - re-take it */
+		pthread_mutex_lock(&base_mtx);
+	}
+
+ eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+
+	if (!eth_dev) {
+		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
+		/* err_exit1 does not release base_mtx, so drop it here */
+		pthread_mutex_unlock(&base_mtx);
+		goto err_exit1;
+	}
+
+ pthread_mutex_lock(&ndev->mtx);
+
+ eth_dev->ndev = ndev;
+ eth_dev->port = port_no;
+ eth_dev->port_id = port_id;
+
+ /* Allocate the requested queues in HW for this dev */
+
+ for (i = 0; i < alloc_rx_queues; i++) {
+ eth_dev->rx_queue[i] = queue_ids[i];
+
+ if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE && exception_path)) {
+			/*
+			 * Init QSL UNM (unmatched) - redirects packets that QSL would
+			 * otherwise discard to the exception queue (rx_queue[0])
+			 */
+ if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+ eth_dev->rx_queue[0].hw_id) < 0)
+ goto err_exit0;
+
+ if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 1) < 0)
+ goto err_exit0;
+
+ if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) < 0)
+ goto err_exit0;
+ }
+
+ eth_dev->num_queues++;
+ }
+
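+	/* no RSS set up at this point - report -1 (no QSL_HSH entry) back to the caller */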
+ eth_dev->rss_target_id = -1;
+
+ *rss_target_id = eth_dev->rss_target_id;
+
+ nic_insert_eth_port_dev(ndev, eth_dev);
+
+ pthread_mutex_unlock(&ndev->mtx);
+ pthread_mutex_unlock(&base_mtx);
+ return eth_dev;
+
+err_exit0:
+ pthread_mutex_unlock(&ndev->mtx);
+ pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+ if (eth_dev)
+ free(eth_dev);
+
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+ NT_LOG(DBG, FILTER, "ERR in %s", __func__);
+ return NULL; /* Error exit */
+}
+
struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if,
void *be_dev)
{
@@ -383,6 +568,10 @@ void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
static const struct flow_filter_ops ops = {
.flow_filter_init = flow_filter_init,
.flow_filter_done = flow_filter_done,
+ /*
+ * Device Management API
+ */
+ .flow_get_eth_dev = flow_get_eth_dev,
};
void init_flow_filter(void)
@@ -1355,6 +1355,13 @@ static const struct eth_dev_ops nthw_eth_dev_ops = {
static int
nthw_pci_dev_init(struct rte_pci_device *pci_dev)
{
+ const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops();
+
+ if (flow_filter_ops == NULL) {
+ NT_LOG_DBGX(ERR, NTNIC, "flow_filter module uninitialized");
+		/* No return here - SW can still process traffic without the flow filter */
+ }
+
nt_vfio_init();
const struct port_ops *port_ops = get_port_ops();
@@ -1378,10 +1385,13 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev)
uint32_t n_port_mask = -1; /* All ports enabled by default */
uint32_t nb_rx_queues = 1;
uint32_t nb_tx_queues = 1;
+ uint32_t exception_path = 0;
struct flow_queue_id_s queue_ids[MAX_QUEUES];
int n_phy_ports;
struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
int num_port_speeds = 0;
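+	/* the inline profile is the only flow_eth_dev_profile defined at this point */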
+ enum flow_eth_dev_profile profile = FLOW_ETH_DEV_PROFILE_INLINE;
+
NT_LOG_DBGX(DBG, NTNIC, "Dev %s PF #%i Init : %02x:%02x:%i", pci_dev->name,
pci_dev->addr.function, pci_dev->addr.bus, pci_dev->addr.devid,
pci_dev->addr.function);
@@ -1681,6 +1691,18 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev)
return -1;
}
+ if (flow_filter_ops != NULL) {
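+		/* attach this port to a flow API eth device on adapter 0 (local port n_intf_no) */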
+ internals->flw_dev = flow_filter_ops->flow_get_eth_dev(0, n_intf_no,
+ eth_dev->data->port_id, nb_rx_queues, queue_ids,
+ &internals->txq_scg[0].rss_target_id, profile, exception_path);
+
+ if (!internals->flw_dev) {
+ NT_LOG(ERR, NTNIC,
+ "Error creating port. Resource exhaustion in HW");
+ return -1;
+ }
+ }
+
/* connect structs */
internals->p_drv = p_drv;
eth_dev->data->dev_private = internals;
@@ -118,6 +118,11 @@ const struct flow_backend_ops *get_flow_backend_ops(void)
return flow_backend_ops;
}
+const struct profile_inline_ops *get_profile_inline_ops(void)
+{
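+	/* no inline profile module is available here - always report NULL */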
+ return NULL;
+}
+
static const struct flow_filter_ops *flow_filter_ops;
void register_flow_filter_ops(const struct flow_filter_ops *ops)
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "flow_api.h"
+#include "stream_binary_flow_api.h"
#include "nthw_fpga_model.h"
#include "nthw_platform_drv.h"
#include "nthw_drv.h"
@@ -223,10 +224,23 @@ void register_flow_backend_ops(const struct flow_backend_ops *ops);
const struct flow_backend_ops *get_flow_backend_ops(void);
void flow_backend_init(void);
+const struct profile_inline_ops *get_profile_inline_ops(void);
+
struct flow_filter_ops {
int (*flow_filter_init)(nthw_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
int adapter_no);
int (*flow_filter_done)(struct flow_nic_dev *dev);
+ /*
+ * Device Management API
+ */
+ struct flow_eth_dev *(*flow_get_eth_dev)(uint8_t adapter_no,
+ uint8_t hw_port_no,
+ uint32_t port_id,
+ int alloc_rx_queues,
+ struct flow_queue_id_s queue_ids[],
+ int *rss_target_id,
+ enum flow_eth_dev_profile flow_profile,
+ uint32_t exception_path);
};
void register_flow_filter_ops(const struct flow_filter_ops *ops);