@@ -77,6 +77,9 @@ struct flow_eth_dev {
/* QSL_HSH index if RSS needed QSL v6+ */
int rss_target_id;
+ /* Size of the buffer for the aged-out flow list */
+ uint32_t nb_aging_objects;
+
struct flow_eth_dev *next;
};
@@ -320,6 +320,7 @@ struct flow_handle {
uint32_t flm_teid;
uint8_t flm_rqi;
uint8_t flm_qfi;
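+ /* Scrub profile index programmed into the FLM learn record */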
+ uint8_t flm_scrub_prof;
};
};
};
@@ -1041,12 +1041,6 @@ static int flow_info_get(struct flow_eth_dev *dev, uint8_t caller_id,
struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info,
struct rte_flow_error *error)
{
- (void)dev;
- (void)caller_id;
- (void)port_info;
- (void)queue_info;
- (void)error;
-
const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
if (profile_inline_ops == NULL) {
@@ -1054,20 +1048,14 @@ static int flow_info_get(struct flow_eth_dev *dev, uint8_t caller_id,
return -1;
}
- return 0;
+ return profile_inline_ops->flow_info_get_profile_inline(dev, caller_id, port_info,
+ queue_info, error);
}
static int flow_configure(struct flow_eth_dev *dev, uint8_t caller_id,
const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
const struct rte_flow_queue_attr *queue_attr[], struct rte_flow_error *error)
{
- (void)dev;
- (void)caller_id;
- (void)port_attr;
- (void)queue_attr;
- (void)nb_queue;
- (void)error;
-
const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
if (profile_inline_ops == NULL) {
@@ -1075,7 +1063,8 @@ static int flow_configure(struct flow_eth_dev *dev, uint8_t caller_id,
return -1;
}
- return 0;
+ return profile_inline_ops->flow_configure_profile_inline(dev, caller_id, port_attr,
+ nb_queue, queue_attr, error);
}
int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
@@ -4,12 +4,89 @@
*/
#include <rte_ring.h>
+#include <rte_errno.h>
#include "ntlog.h"
#include "flm_age_queue.h"
/* Queues for flm aged events */
static struct rte_ring *age_queue[MAX_EVT_AGE_QUEUES];
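+/* Per-port aged-event state, cleared whenever a caller's queue is created or freed */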
+static RTE_ATOMIC(uint16_t) age_event[MAX_EVT_AGE_PORTS];
+
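+/* Free the aged-event queue owned by caller_id and clear the port's aged-event state */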
+void flm_age_queue_free(uint8_t port, uint16_t caller_id)
+{
+ struct rte_ring *q = NULL;
+
+ if (port < MAX_EVT_AGE_PORTS)
+ rte_atomic_store_explicit(&age_event[port], 0, rte_memory_order_seq_cst);
+
+ if (caller_id < MAX_EVT_AGE_QUEUES && age_queue[caller_id] != NULL) {
+ q = age_queue[caller_id];
+ age_queue[caller_id] = NULL;
+ }
+
+ if (q != NULL)
+ rte_ring_free(q);
+}
+
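+/* Create the aged-event ring for caller_id, or return the one already created */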
+struct rte_ring *flm_age_queue_create(uint8_t port, uint16_t caller_id, unsigned int count)
+{
+ char name[20];
+ struct rte_ring *q = NULL;
+
+ if (!rte_is_power_of_2(count) || count > RTE_RING_SZ_MASK) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM aged event queue number of elements (%u) is invalid, must be power of 2, and not exceed %u",
+ count,
+ RTE_RING_SZ_MASK);
+ return NULL;
+ }
+
+ if (port >= MAX_EVT_AGE_PORTS) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM aged event queue cannot be created for port %u. Max supported port is %u",
+ port,
+ MAX_EVT_AGE_PORTS - 1);
+ return NULL;
+ }
+
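+ /* Reset the port's aged-event state before creating the queue */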
+ rte_atomic_store_explicit(&age_event[port], 0, rte_memory_order_seq_cst);
+
+ if (caller_id >= MAX_EVT_AGE_QUEUES) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM aged event queue cannot be created for caller_id %u. Max supported caller_id is %u",
+ caller_id,
+ MAX_EVT_AGE_QUEUES - 1);
+ return NULL;
+ }
+
+ if (age_queue[caller_id] != NULL) {
+ NT_LOG(DBG, FILTER, "FLM aged event queue %u already created", caller_id);
+ return age_queue[caller_id];
+ }
+
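+ /* Fixed-size element ring, single producer / single consumer */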
+ snprintf(name, sizeof(name), "AGE_EVENT%u", caller_id);
+ q = rte_ring_create_elem(name,
+ FLM_AGE_ELEM_SIZE,
+ count,
+ SOCKET_ID_ANY,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (q == NULL) {
+ NT_LOG(WRN,
+ FILTER,
+ "FLM aged event queue cannot be created due to error %02X",
+ rte_errno);
+ return NULL;
+ }
+
+ age_queue[caller_id] = q;
+
+ return q;
+}
int flm_age_queue_get(uint16_t caller_id, struct flm_age_event_s *obj)
{
@@ -15,8 +15,13 @@ struct flm_age_event_s {
/* Max number of event queues */
#define MAX_EVT_AGE_QUEUES 256
+/* Max number of event ports */
+#define MAX_EVT_AGE_PORTS 128
+
#define FLM_AGE_ELEM_SIZE sizeof(struct flm_age_event_s)
+void flm_age_queue_free(uint8_t port, uint16_t caller_id);
+struct rte_ring *flm_age_queue_create(uint8_t port, uint16_t caller_id, unsigned int count);
int flm_age_queue_get(uint16_t caller_id, struct flm_age_event_s *obj);
unsigned int flm_age_queue_count(uint16_t caller_id);
unsigned int flm_age_queue_get_size(uint16_t caller_id);
@@ -490,7 +490,7 @@ static int flm_flow_programming(struct flow_handle *fh, uint32_t flm_op)
learn_record->ft = fh->flm_ft;
learn_record->kid = fh->flm_kid;
learn_record->eor = 1;
- learn_record->scrub_prof = 0;
+ learn_record->scrub_prof = fh->flm_scrub_prof;
flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr);
return 0;
@@ -2438,6 +2438,7 @@ static void copy_fd_to_fh_flm(struct flow_handle *fh, const struct nic_flow_def
fh->flm_rpl_ext_ptr = rpl_ext_ptr;
fh->flm_prio = (uint8_t)priority;
fh->flm_ft = (uint8_t)flm_ft;
+ fh->flm_scrub_prof = (uint8_t)flm_scrub;
for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
switch (fd->modify_field[i].select) {
@@ -4554,6 +4555,63 @@ int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
return 0;
}
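+
+/* Report port capabilities for the inline profile; only the aging-object capacity is currently filled in */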
+int flow_info_get_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info, struct rte_flow_error *error)
+{
+ (void)queue_info;
+ (void)caller_id;
+ int res = 0;
+
+ flow_nic_set_error(ERR_SUCCESS, error);
+ memset(port_info, 0, sizeof(struct rte_flow_port_info));
+
+ port_info->max_nb_aging_objects = dev->nb_aging_objects;
+
+ return res;
+}
+
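+/* Apply the rte_flow port configuration for the inline profile: size the aged-flow event queue */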
+int flow_configure_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id,
+ const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ (void)nb_queue;
+ (void)queue_attr;
+ int res = 0;
+
+ flow_nic_set_error(ERR_SUCCESS, error);
+
+ if (port_attr->nb_aging_objects > 0) {
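+ /* A queue from a previous configuration exists; release it before resizing */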
+ if (dev->nb_aging_objects > 0) {
+ flm_age_queue_free(dev->port_id, caller_id);
+ dev->nb_aging_objects = 0;
+ }
+
+ struct rte_ring *age_queue =
+ flm_age_queue_create(dev->port_id, caller_id, port_attr->nb_aging_objects);
+
+ if (age_queue == NULL) {
+ error->message = "Failed to allocate aging objects";
+ goto error_out;
+ }
+
+ dev->nb_aging_objects = port_attr->nb_aging_objects;
+ }
+
+ return res;
+
+error_out:
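+ /* Leave no aged-event queue or per-port event state behind on failure */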
+ error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+ if (port_attr->nb_aging_objects > 0) {
+ flm_age_queue_free(dev->port_id, caller_id);
+ dev->nb_aging_objects = 0;
+ }
+
+ return -1;
+}
+
static const struct profile_inline_ops ops = {
/*
* Management
@@ -4575,6 +4633,8 @@ static const struct profile_inline_ops ops = {
* Stats
*/
.flow_get_flm_stats_profile_inline = flow_get_flm_stats_profile_inline,
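+
+ /*
+ * Configuration
+ */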
+ .flow_info_get_profile_inline = flow_info_get_profile_inline,
+ .flow_configure_profile_inline = flow_configure_profile_inline,
/*
* NT Flow FLM Meter API
*/
@@ -64,4 +64,13 @@ int flow_nic_set_hasher_fields_inline(struct flow_nic_dev *ndev,
int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size);
+int flow_info_get_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info, struct rte_flow_error *error);
+
+int flow_configure_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id,
+ const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error);
+
#endif /* _FLOW_API_PROFILE_INLINE_H_ */
@@ -309,6 +309,15 @@ struct profile_inline_ops {
void (*flm_setup_queues)(void);
void (*flm_free_queues)(void);
uint32_t (*flm_update)(struct flow_eth_dev *dev);
+
+ int (*flow_info_get_profile_inline)(struct flow_eth_dev *dev, uint8_t caller_id,
+ struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error);
+
+ int (*flow_configure_profile_inline)(struct flow_eth_dev *dev, uint8_t caller_id,
+ const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error);
};
void register_profile_inline_ops(const struct profile_inline_ops *ops);