@@ -148,6 +148,14 @@ struct hsh_def_s {
const uint8_t *key; /* Hash key. */
};
/*
 * AGE configuration, see struct rte_flow_action_age
 */
struct age_def_s {
	uint32_t timeout;	/* Aging timeout, already encoded into the FPGA 8-bit SCRUB format */
	void *context;		/* User context returned with the AGE event (may be NULL) */
};
+
/*
* Tunnel encapsulation header definition
*/
@@ -264,6 +272,11 @@ struct nic_flow_def {
* Hash module RSS definitions
*/
struct hsh_def_s hsh;
+
+ /*
+ * AGE action timeout
+ */
+ struct age_def_s age;
};
enum flow_handle_type {
@@ -688,6 +688,9 @@ int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index,
uint32_t value);
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value);
+
int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value);
@@ -695,8 +698,16 @@ int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e f
const uint32_t *value, uint32_t records,
uint32_t *handled_records, uint32_t *inf_word_cnt,
uint32_t *sta_word_cnt);
+int hw_mod_flm_inf_sta_data_update_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *inf_value, uint32_t inf_size,
+ uint32_t *inf_word_cnt, uint32_t *sta_value,
+ uint32_t sta_size, uint32_t *sta_word_cnt);
+uint32_t hw_mod_flm_scrub_timeout_decode(uint32_t t_enc);
+uint32_t hw_mod_flm_scrub_timeout_encode(uint32_t t);
int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index,
+ uint32_t value);
struct hsh_func_s {
COMMON_FUNC_INFO_S;
@@ -129,3 +129,19 @@ void ntnic_id_table_free_id(void *id_table, uint32_t id)
pthread_mutex_unlock(&handle->mtx);
}
+
/*
 * Look up the table element stored under @id and copy out its contents.
 *
 * Fills @flm_h, @caller_id and @type from the element. Access to the table
 * is serialized with the table mutex.
 *
 * NOTE(review): assumes @id is a valid, previously allocated id so that
 * ntnic_id_table_array_find_element() returns a non-NULL element -- confirm
 * with that function's implementation before passing unvalidated ids.
 */
void ntnic_id_table_find(void *id_table, uint32_t id, union flm_handles *flm_h, uint8_t *caller_id,
	uint8_t *type)
{
	struct ntnic_id_table_data *handle = id_table;

	pthread_mutex_lock(&handle->mtx);

	struct ntnic_id_table_element *element = ntnic_id_table_array_find_element(handle, id);

	*caller_id = element->caller_id;
	*type = element->type;
	memcpy(flm_h, &element->handle, sizeof(union flm_handles));

	pthread_mutex_unlock(&handle->mtx);
}
@@ -20,4 +20,7 @@ uint32_t ntnic_id_table_get_id(void *id_table, union flm_handles flm_h, uint8_t
uint8_t type);
void ntnic_id_table_free_id(void *id_table, uint32_t id);
+void ntnic_id_table_find(void *id_table, uint32_t id, union flm_handles *flm_h, uint8_t *caller_id,
+ uint8_t *type);
+
#endif /* FLOW_ID_TABLE_H_ */
@@ -712,6 +712,52 @@ int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int i
return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
}
+
/* Delegate a FLM buffer-control update to the backend interface. */
int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
{
	return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
}
+
/*
 * Read one FLM buffer-control field into *value.
 *
 * Dispatches on the backend version (_VER_); only version 25 is handled.
 * Returns 0 on success, UNSUP_FIELD for an unknown field, UNSUP_VER for an
 * unsupported backend version.
 */
static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be, enum hw_flm_e field,
	uint32_t *value)
{
	int get = 1;	/* Only get supported; consumed by the GET_SET macro */

	switch (_VER_) {
	case 25:
		switch (field) {
		case HW_FLM_BUF_CTRL_LRN_FREE:
			GET_SET(be->flm.v25.buf_ctrl->lrn_free, value);
			break;

		case HW_FLM_BUF_CTRL_INF_AVAIL:
			GET_SET(be->flm.v25.buf_ctrl->inf_avail, value);
			break;

		case HW_FLM_BUF_CTRL_STA_AVAIL:
			GET_SET(be->flm.v25.buf_ctrl->sta_avail, value);
			break;

		default:
			UNSUP_FIELD_LOG;
			return UNSUP_FIELD;
		}

		break;

	default:
		UNSUP_VER_LOG;
		return UNSUP_VER;
	}

	return 0;
}
+
/* Public getter for FLM buffer-control fields (this register set is get-only). */
int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value)
{
	return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
}
+
int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
{
return be->iface->flm_stat_update(be->be_dev, &be->flm);
@@ -887,3 +933,115 @@ int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e f
return ret;
}
+
/*
 * Fetch FLM info (INF) and status (STA) records from the backend.
 *
 * @inf_value/@sta_value receive up to @inf_size/@sta_size 32-bit words; the
 * backend reports progress through *inf_word_cnt and *sta_word_cnt. Only the
 * HW_FLM_FLOW_INF_STA_DATA field on backend version 25 is supported; any
 * other field/version yields UNSUP_FIELD/UNSUP_VER.
 */
int hw_mod_flm_inf_sta_data_update_get(struct flow_api_backend_s *be, enum hw_flm_e field,
	uint32_t *inf_value, uint32_t inf_size,
	uint32_t *inf_word_cnt, uint32_t *sta_value,
	uint32_t sta_size, uint32_t *sta_word_cnt)
{
	switch (_VER_) {
	case 25:
		switch (field) {
		case HW_FLM_FLOW_INF_STA_DATA:
			be->iface->flm_inf_sta_data_update(be->be_dev, &be->flm, inf_value,
				inf_size, inf_word_cnt, sta_value,
				sta_size, sta_word_cnt);
			break;

		default:
			UNSUP_FIELD_LOG;
			return UNSUP_FIELD;
		}

		break;

	default:
		UNSUP_VER_LOG;
		return UNSUP_VER;
	}

	return 0;
}
+
/*
 * SCRUB timeout support functions to encode users' input into FPGA 8-bit time format:
 * Timeout in seconds (2^30 nanoseconds); zero means disabled. Value is:
 *
 * (T[7:3] != 0) ? ((8 + T[2:0]) shift-left (T[7:3] - 1)) : T[2:0]
 *
 * The maximum allowed value is 0xEF (127 years).
 *
 * Note that this represents a lower bound on the timeout, depending on the flow
 * scanner interval and overall load, the timeout can be substantially longer.
 */

/* Decode an 8-bit encoded SCRUB timeout into seconds. */
uint32_t hw_mod_flm_scrub_timeout_decode(uint32_t t_enc)
{
	uint8_t t_bits_2_0 = t_enc & 0x07;
	uint8_t t_bits_7_3 = (t_enc >> 3) & 0x1F;

	/*
	 * Shift as uint32_t: the mantissa (8..15) shifted by up to 28 bits
	 * exceeds INT_MAX, and left-shifting the (implicitly promoted) signed
	 * int past its range is undefined behavior.
	 */
	return t_bits_7_3 != 0 ? ((uint32_t)(8 + t_bits_2_0) << (t_bits_7_3 - 1)) : t_bits_2_0;
}

/*
 * Encode a timeout in seconds into the FPGA 8-bit format.
 *
 * Returns 0 for t == 0 (aging disabled); otherwise the smallest encoding
 * whose decoded value is >= t, clamped to the maximum legal encoding 0xEF.
 * The previous loop bound (t_enc <= 0xEF) could step to 0xF0 -- an invalid
 * encoding whose decode also overflows.
 */
uint32_t hw_mod_flm_scrub_timeout_encode(uint32_t t)
{
	uint32_t t_enc = 0;

	if (t > 0) {
		uint32_t t_dec = 0;

		do {
			t_enc++;
			t_dec = hw_mod_flm_scrub_timeout_decode(t_enc);
		} while (t_enc < 0xEF && t_dec < t);
	}

	return t_enc;
}
+
/*
 * Get or set one field of FLM SCRUB (aging) profile @index.
 *
 * HW_FLM_SCRUB_PRESET_ALL memsets the whole record to *value (set only);
 * the remaining fields use GET_SET on the version-25 register shadow.
 * Returns 0 on success, UNSUP_FIELD/UNSUP_VER otherwise.
 */
static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be, enum hw_flm_e field, int index,
	uint32_t *value, int get)
{
	switch (_VER_) {
	case 25:
		switch (field) {
		case HW_FLM_SCRUB_PRESET_ALL:
			if (get)
				return UNSUP_FIELD;

			memset(&be->flm.v25.scrub[index], (uint8_t)*value,
				sizeof(struct flm_v25_scrub_s));
			break;

		case HW_FLM_SCRUB_T:
			GET_SET(be->flm.v25.scrub[index].t, value);
			break;

		case HW_FLM_SCRUB_R:
			GET_SET(be->flm.v25.scrub[index].r, value);
			break;

		case HW_FLM_SCRUB_DEL:
			GET_SET(be->flm.v25.scrub[index].del, value);
			break;

		case HW_FLM_SCRUB_INF:
			GET_SET(be->flm.v25.scrub[index].inf, value);
			break;

		default:
			return UNSUP_FIELD;
		}

		break;

	default:
		return UNSUP_VER;
	}

	return 0;
}
+
/* Set one field of SCRUB profile @index (public wrapper around the modifier). */
int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index,
	uint32_t value)
{
	return hw_mod_flm_scrub_mod(be, field, index, &value, 0);
}
@@ -13,6 +13,21 @@
static struct rte_ring *age_queue[MAX_EVT_AGE_QUEUES];
static RTE_ATOMIC(uint16_t) age_event[MAX_EVT_AGE_PORTS];
/* Return nonzero when an AGED event is pending for @port (seq_cst atomic load). */
__rte_always_inline int flm_age_event_get(uint8_t port)
{
	return rte_atomic_load_explicit(&age_event[port], rte_memory_order_seq_cst);
}
+
/* Mark an AGED event pending for @port (seq_cst atomic store). */
__rte_always_inline void flm_age_event_set(uint8_t port)
{
	rte_atomic_store_explicit(&age_event[port], 1, rte_memory_order_seq_cst);
}
+
/* Clear the pending AGED event flag for @port (seq_cst atomic store). */
__rte_always_inline void flm_age_event_clear(uint8_t port)
{
	rte_atomic_store_explicit(&age_event[port], 0, rte_memory_order_seq_cst);
}
+
void flm_age_queue_free(uint8_t port, uint16_t caller_id)
{
struct rte_ring *q = NULL;
@@ -88,6 +103,19 @@ struct rte_ring *flm_age_queue_create(uint8_t port, uint16_t caller_id, unsigned
return q;
}
/*
 * Enqueue an AGE event on the caller's queue.
 *
 * Best effort: the event is silently dropped if no queue was created for
 * @caller_id, and dropped with a debug log if the queue is full.
 */
void flm_age_queue_put(uint16_t caller_id, struct flm_age_event_s *obj)
{
	int ret;

	/* If the queue has not been created, then ignore and return */
	if (caller_id < MAX_EVT_AGE_QUEUES && age_queue[caller_id] != NULL) {
		ret = rte_ring_sp_enqueue_elem(age_queue[caller_id], obj, FLM_AGE_ELEM_SIZE);

		if (ret != 0)
			NT_LOG(DBG, FILTER, "FLM aged event queue full");
	}
}
+
int flm_age_queue_get(uint16_t caller_id, struct flm_age_event_s *obj)
{
int ret;
@@ -12,6 +12,14 @@ struct flm_age_event_s {
void *context;
};
+/* Indicates why the flow info record was generated */
+#define INF_DATA_CAUSE_SW_UNLEARN 0
+#define INF_DATA_CAUSE_TIMEOUT_FLOW_DELETED 1
+#define INF_DATA_CAUSE_NA 2
+#define INF_DATA_CAUSE_PERIODIC_FLOW_INFO 3
+#define INF_DATA_CAUSE_SW_PROBE 4
+#define INF_DATA_CAUSE_TIMEOUT_FLOW_KEPT 5
+
/* Max number of event queues */
#define MAX_EVT_AGE_QUEUES 256
@@ -20,8 +28,12 @@ struct flm_age_event_s {
#define FLM_AGE_ELEM_SIZE sizeof(struct flm_age_event_s)
+int flm_age_event_get(uint8_t port);
+void flm_age_event_set(uint8_t port);
+void flm_age_event_clear(uint8_t port);
void flm_age_queue_free(uint8_t port, uint16_t caller_id);
struct rte_ring *flm_age_queue_create(uint8_t port, uint16_t caller_id, unsigned int count);
+void flm_age_queue_put(uint16_t caller_id, struct flm_age_event_s *obj);
int flm_age_queue_get(uint16_t caller_id, struct flm_age_event_s *obj);
unsigned int flm_age_queue_count(uint16_t caller_id);
unsigned int flm_age_queue_get_size(uint16_t caller_id);
@@ -138,6 +138,26 @@ static struct rte_ring *flm_evt_queue_create(uint8_t port, uint8_t caller)
return q;
}
+int flm_sta_queue_put(uint8_t port, bool remote, struct flm_status_event_s *obj)
+{
+ struct rte_ring **stat_q = remote ? stat_q_remote : stat_q_local;
+
+ if (port >= (remote ? MAX_STAT_RMT_QUEUES : MAX_STAT_LCL_QUEUES))
+ return -1;
+
+ if (stat_q[port] == NULL) {
+ if (flm_evt_queue_create(port, remote ? FLM_STAT_REMOTE : FLM_STAT_LOCAL) == NULL)
+ return -1;
+ }
+
+ if (rte_ring_sp_enqueue_elem(stat_q[port], obj, FLM_STAT_ELEM_SIZE) != 0) {
+ NT_LOG(DBG, FILTER, "FLM local status queue full");
+ return -1;
+ }
+
+ return 0;
+}
+
int flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *obj)
{
int ret;
@@ -48,5 +48,6 @@ enum {
#define FLM_STAT_ELEM_SIZE sizeof(struct flm_status_event_s)
int flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *obj);
+int flm_sta_queue_put(uint8_t port, bool remote, struct flm_status_event_s *obj);
#endif /* _FLM_EVT_QUEUE_H_ */
@@ -7,6 +7,7 @@
#include "flow_api_engine.h"
#include "flow_api_hw_db_inline.h"
+#include "flow_api_profile_inline_config.h"
#include "rte_common.h"
#define HW_DB_INLINE_ACTION_SET_NB 512
@@ -57,12 +58,18 @@ struct hw_db_inline_resource_db {
int ref;
} *hsh;
+ struct hw_db_inline_resource_db_scrub {
+ struct hw_db_inline_scrub_data data;
+ int ref;
+ } *scrub;
+
uint32_t nb_cot;
uint32_t nb_qsl;
uint32_t nb_slc_lr;
uint32_t nb_tpe;
uint32_t nb_tpe_ext;
uint32_t nb_hsh;
+ uint32_t nb_scrub;
/* Items */
struct hw_db_inline_resource_db_cat {
@@ -255,6 +262,14 @@ int hw_db_inline_create(struct flow_nic_dev *ndev, void **db_handle)
return -1;
}
+ db->nb_scrub = ndev->be.flm.nb_scrub_profiles;
+ db->scrub = calloc(db->nb_scrub, sizeof(struct hw_db_inline_resource_db_scrub));
+
+ if (db->scrub == NULL) {
+ hw_db_inline_destroy(db);
+ return -1;
+ }
+
*db_handle = db;
/* Preset data */
@@ -276,6 +291,7 @@ void hw_db_inline_destroy(void *db_handle)
free(db->tpe);
free(db->tpe_ext);
free(db->hsh);
+ free(db->scrub);
free(db->cat);
@@ -366,6 +382,11 @@ void hw_db_inline_deref_idxs(struct flow_nic_dev *ndev, void *db_handle, struct
hw_db_inline_hsh_deref(ndev, db_handle, *(struct hw_db_hsh_idx *)&idxs[i]);
break;
+ case HW_DB_IDX_TYPE_FLM_SCRUB:
+ hw_db_inline_scrub_deref(ndev, db_handle,
+ *(struct hw_db_flm_scrub_idx *)&idxs[i]);
+ break;
+
default:
break;
}
@@ -410,9 +431,9 @@ void hw_db_inline_dump(struct flow_nic_dev *ndev, void *db_handle, const struct
else
fprintf(file,
- " COT id %d, QSL id %d, SLC_LR id %d, TPE id %d, HSH id %d\n",
+ " COT id %d, QSL id %d, SLC_LR id %d, TPE id %d, HSH id %d, SCRUB id %d\n",
data->cot.ids, data->qsl.ids, data->slc_lr.ids,
- data->tpe.ids, data->hsh.ids);
+ data->tpe.ids, data->hsh.ids, data->scrub.ids);
break;
}
@@ -577,6 +598,15 @@ void hw_db_inline_dump(struct flow_nic_dev *ndev, void *db_handle, const struct
break;
}
+ case HW_DB_IDX_TYPE_FLM_SCRUB: {
+ const struct hw_db_inline_scrub_data *data = &db->scrub[idxs[i].ids].data;
+ fprintf(file, " FLM_RCP %d\n", idxs[i].id1);
+ fprintf(file, " SCRUB %d\n", idxs[i].ids);
+ fprintf(file, " Timeout: %d, encoded timeout: %d\n",
+ hw_mod_flm_scrub_timeout_decode(data->timeout), data->timeout);
+ break;
+ }
+
case HW_DB_IDX_TYPE_HSH: {
const struct hw_db_inline_hsh_data *data = &db->hsh[idxs[i].ids].data;
fprintf(file, " HSH %d\n", idxs[i].ids);
@@ -690,6 +720,9 @@ const void *hw_db_inline_find_data(struct flow_nic_dev *ndev, void *db_handle,
case HW_DB_IDX_TYPE_HSH:
return &db->hsh[idxs[i].ids].data;
+ case HW_DB_IDX_TYPE_FLM_SCRUB:
+ return &db->scrub[idxs[i].ids].data;
+
default:
return NULL;
}
@@ -1540,7 +1573,7 @@ static int hw_db_inline_action_set_compare(const struct hw_db_inline_action_set_
return data1->cot.raw == data2->cot.raw && data1->qsl.raw == data2->qsl.raw &&
data1->slc_lr.raw == data2->slc_lr.raw && data1->tpe.raw == data2->tpe.raw &&
- data1->hsh.raw == data2->hsh.raw;
+ data1->hsh.raw == data2->hsh.raw && data1->scrub.raw == data2->scrub.raw;
}
struct hw_db_action_set_idx
@@ -2849,3 +2882,106 @@ void hw_db_inline_hsh_deref(struct flow_nic_dev *ndev, void *db_handle, struct h
db->hsh[idx.ids].ref = 0;
}
}
+
+/******************************************************************************/
+/* FLM SCRUB */
+/******************************************************************************/
+
+static int hw_db_inline_scrub_compare(const struct hw_db_inline_scrub_data *data1,
+ const struct hw_db_inline_scrub_data *data2)
+{
+ return data1->timeout == data2->timeout;
+}
+
/*
 * Find or allocate a SCRUB (aging timeout) profile matching @data.
 *
 * Timeout 0 maps to the reserved profile 0 (flow never ages out). Otherwise
 * an existing profile with the same encoded timeout is reused, or the first
 * free slot is programmed and flushed to hardware. On failure idx.error is
 * set and no resources are held.
 *
 * NOTE(review): @data->timeout is expected to be pre-encoded with
 * hw_mod_flm_scrub_timeout_encode() -- confirm with callers.
 */
struct hw_db_flm_scrub_idx hw_db_inline_scrub_add(struct flow_nic_dev *ndev, void *db_handle,
	const struct hw_db_inline_scrub_data *data)
{
	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
	struct hw_db_flm_scrub_idx idx = { .raw = 0 };
	int found = 0;

	idx.type = HW_DB_IDX_TYPE_FLM_SCRUB;

	/* NOTE: scrub id 0 is reserved for "default" timeout 0, i.e. flow will never AGE-out */
	if (data->timeout == 0) {
		idx.ids = 0;
		hw_db_inline_scrub_ref(ndev, db, idx);
		return idx;
	}

	/* Reuse a matching live profile, or remember the first free slot */
	for (uint32_t i = 1; i < db->nb_scrub; ++i) {
		int ref = db->scrub[i].ref;

		if (ref > 0 && hw_db_inline_scrub_compare(data, &db->scrub[i].data)) {
			idx.ids = i;
			hw_db_inline_scrub_ref(ndev, db, idx);
			return idx;
		}

		if (!found && ref <= 0) {
			found = 1;
			idx.ids = i;
		}
	}

	if (!found) {
		idx.error = 1;
		return idx;
	}

	/* Program the new profile: timeout, scanner resolution, DEL/INF flags */
	int res = hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_T, idx.ids, data->timeout);
	res |= hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_R, idx.ids,
		NTNIC_SCANNER_TIMEOUT_RESOLUTION);
	res |= hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_DEL, idx.ids, SCRUB_DEL);
	res |= hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_INF, idx.ids, SCRUB_INF);

	if (res != 0) {
		idx.error = 1;
		return idx;
	}

	db->scrub[idx.ids].ref = 1;
	memcpy(&db->scrub[idx.ids].data, data, sizeof(struct hw_db_inline_scrub_data));
	flow_nic_mark_resource_used(ndev, RES_SCRUB_RCP, idx.ids);

	hw_mod_flm_scrub_flush(&ndev->be, idx.ids, 1);

	return idx;
}
+
+void hw_db_inline_scrub_ref(struct flow_nic_dev *ndev, void *db_handle,
+ struct hw_db_flm_scrub_idx idx)
+{
+ (void)ndev;
+
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+ if (!idx.error)
+ db->scrub[idx.ids].ref += 1;
+}
+
/*
 * Drop one reference to a SCRUB profile and release it when unused.
 *
 * When the count reaches zero the hardware record is cleared and flushed and
 * the resource returned to the pool -- except profile 0, the reserved
 * "never age out" default, which only has its count reset.
 */
void hw_db_inline_scrub_deref(struct flow_nic_dev *ndev, void *db_handle,
	struct hw_db_flm_scrub_idx idx)
{
	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;

	if (idx.error)
		return;

	db->scrub[idx.ids].ref -= 1;

	if (db->scrub[idx.ids].ref <= 0) {
		/* NOTE: scrub id 0 is reserved for "default" timeout 0, which shall not be removed
		 */
		if (idx.ids > 0) {
			hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_T, idx.ids, 0);
			hw_mod_flm_scrub_flush(&ndev->be, idx.ids, 1);

			memset(&db->scrub[idx.ids].data, 0x0,
				sizeof(struct hw_db_inline_scrub_data));
			flow_nic_free_resource(ndev, RES_SCRUB_RCP, idx.ids);
		}

		db->scrub[idx.ids].ref = 0;
	}
}
@@ -117,6 +117,10 @@ struct hw_db_flm_ft {
HW_DB_IDX;
};
/* Index of a FLM SCRUB (aging timeout) profile in the inline resource DB. */
struct hw_db_flm_scrub_idx {
	HW_DB_IDX;
};
+
struct hw_db_km_idx {
HW_DB_IDX;
};
@@ -145,6 +149,7 @@ enum hw_db_idx_type {
HW_DB_IDX_TYPE_FLM_RCP,
HW_DB_IDX_TYPE_KM_RCP,
HW_DB_IDX_TYPE_FLM_FT,
+ HW_DB_IDX_TYPE_FLM_SCRUB,
HW_DB_IDX_TYPE_KM_FT,
HW_DB_IDX_TYPE_HSH,
};
@@ -160,6 +165,43 @@ struct hw_db_inline_match_set_data {
uint8_t priority;
};
/* Collected action-set references for a flow: either a jump or a full set. */
struct hw_db_inline_action_set_data {
	int contains_jump;	/* Nonzero: only the jump member is valid */
	union {
		int jump;	/* Destination group for jump actions */
		struct {
			struct hw_db_cot_idx cot;
			struct hw_db_qsl_idx qsl;
			struct hw_db_slc_lr_idx slc_lr;
			struct hw_db_tpe_idx tpe;
			struct hw_db_hsh_idx hsh;
			struct hw_db_flm_scrub_idx scrub;	/* AGE/SCRUB timeout profile */
		};
	};
};
+
/* KM recipe reference. */
struct hw_db_inline_km_rcp_data {
	uint32_t rcp;	/* KM recipe index */
};
+
/* Lookup data for a KM flow-type entry: category, recipe and action set. */
struct hw_db_inline_km_ft_data {
	struct hw_db_cat_idx cat;
	struct hw_db_km_idx km;
	struct hw_db_action_set_idx action_set;
};
+
/* Lookup data for a FLM flow-type entry. */
struct hw_db_inline_flm_ft_data {
	/* Group zero flows should set jump. */
	/* Group nonzero flows should set group. */
	int is_group_zero;
	union {
		int jump;
		int group;
	};

	struct hw_db_action_set_idx action_set;
};
+
/* Functionality data types */
struct hw_db_inline_cat_data {
uint32_t vlan_mask : 4;
@@ -232,39 +274,8 @@ struct hw_db_inline_hsh_data {
uint8_t key[MAX_RSS_KEY_LEN];
};
-struct hw_db_inline_action_set_data {
- int contains_jump;
- union {
- int jump;
- struct {
- struct hw_db_cot_idx cot;
- struct hw_db_qsl_idx qsl;
- struct hw_db_slc_lr_idx slc_lr;
- struct hw_db_tpe_idx tpe;
- struct hw_db_hsh_idx hsh;
- };
- };
-};
-
-struct hw_db_inline_km_rcp_data {
- uint32_t rcp;
-};
-
-struct hw_db_inline_km_ft_data {
- struct hw_db_cat_idx cat;
- struct hw_db_km_idx km;
- struct hw_db_action_set_idx action_set;
-};
-
-struct hw_db_inline_flm_ft_data {
- /* Group zero flows should set jump. */
- /* Group nonzero flows should set group. */
- int is_group_zero;
- union {
- int jump;
- int group;
- };
- struct hw_db_action_set_idx action_set;
+struct hw_db_inline_scrub_data {
+ uint32_t timeout;
};
/**/
@@ -368,6 +379,13 @@ void hw_db_inline_flm_ft_ref(struct flow_nic_dev *ndev, void *db_handle, struct
void hw_db_inline_flm_ft_deref(struct flow_nic_dev *ndev, void *db_handle,
struct hw_db_flm_ft idx);
+struct hw_db_flm_scrub_idx hw_db_inline_scrub_add(struct flow_nic_dev *ndev, void *db_handle,
+ const struct hw_db_inline_scrub_data *data);
+void hw_db_inline_scrub_ref(struct flow_nic_dev *ndev, void *db_handle,
+ struct hw_db_flm_scrub_idx idx);
+void hw_db_inline_scrub_deref(struct flow_nic_dev *ndev, void *db_handle,
+ struct hw_db_flm_scrub_idx idx);
+
int hw_db_inline_setup_mbr_filter(struct flow_nic_dev *ndev, uint32_t cat_hw_id, uint32_t ft,
uint32_t qsl_hw_id);
@@ -8,6 +8,7 @@
#include "hw_mod_backend.h"
#include "flm_age_queue.h"
+#include "flm_evt_queue.h"
#include "flm_lrn_queue.h"
#include "flow_api.h"
#include "flow_api_engine.h"
@@ -20,6 +21,13 @@
#include "ntnic_mod_reg.h"
#include <rte_common.h>
+#define DMA_BLOCK_SIZE 256
+#define DMA_OVERHEAD 20
+#define WORDS_PER_STA_DATA (sizeof(struct flm_v25_sta_data_s) / sizeof(uint32_t))
+#define MAX_STA_DATA_RECORDS_PER_READ ((DMA_BLOCK_SIZE - DMA_OVERHEAD) / WORDS_PER_STA_DATA)
+#define WORDS_PER_INF_DATA (sizeof(struct flm_v25_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ ((DMA_BLOCK_SIZE - DMA_OVERHEAD) / WORDS_PER_INF_DATA)
+
#define NT_FLM_MISS_FLOW_TYPE 0
#define NT_FLM_UNHANDLED_FLOW_TYPE 1
#define NT_FLM_OP_UNLEARN 0
@@ -71,14 +79,127 @@ static uint32_t flm_lrn_update(struct flow_eth_dev *dev, uint32_t *inf_word_cnt,
return r.num;
}
/*
 * Split a caller_id into a port number and a remote/local indication.
 *
 * caller_ids 0..MAX_VDPA_PORTS map directly to remote (vDPA) ports; higher
 * ids are local ports, offset by MAX_VDPA_PORTS + 1. Returns true for
 * remote callers.
 */
static inline bool is_remote_caller(uint8_t caller_id, uint8_t *port)
{
	if (caller_id < MAX_VDPA_PORTS + 1) {
		*port = caller_id;
		return true;
	}

	*port = caller_id - MAX_VDPA_PORTS - 1;
	return false;
}
+
/*
 * Process info (INF) records received from the FLM.
 *
 * @data holds @records consecutive struct flm_v25_inf_data_s entries. For
 * records referencing a flow handle whose cause is a timeout, an AGE event
 * carrying the flow's user context is queued for the owning caller and the
 * per-port "aged event pending" flag is raised.
 */
static void flm_mtr_read_inf_records(struct flow_eth_dev *dev, uint32_t *data, uint32_t records)
{
	for (uint32_t i = 0; i < records; ++i) {
		struct flm_v25_inf_data_s *inf_data =
			(struct flm_v25_inf_data_s *)&data[i * WORDS_PER_INF_DATA];
		uint8_t caller_id;
		uint8_t type;
		union flm_handles flm_h;
		ntnic_id_table_find(dev->ndev->id_table_handle, inf_data->id, &flm_h, &caller_id,
			&type);

		/* Type 1 records carry a flow handle in flm_h.p (see cast below) */
		if (type == 1) {
			switch (inf_data->cause) {
			case INF_DATA_CAUSE_TIMEOUT_FLOW_DELETED:
			case INF_DATA_CAUSE_TIMEOUT_FLOW_KEPT: {
				struct flow_handle *fh = (struct flow_handle *)flm_h.p;
				struct flm_age_event_s age_event;
				uint8_t port;

				age_event.context = fh->context;

				is_remote_caller(caller_id, &port);

				flm_age_queue_put(caller_id, &age_event);
				flm_age_event_set(port);
			}
			break;

			/* Remaining causes are not aging related - nothing to do */
			case INF_DATA_CAUSE_SW_UNLEARN:
			case INF_DATA_CAUSE_NA:
			case INF_DATA_CAUSE_PERIODIC_FLOW_INFO:
			case INF_DATA_CAUSE_SW_PROBE:
			default:
				break;
			}
		}
	}
}
+
+static void flm_mtr_read_sta_records(struct flow_eth_dev *dev, uint32_t *data, uint32_t records)
+{
+ for (uint32_t i = 0; i < records; ++i) {
+ struct flm_v25_sta_data_s *sta_data =
+ (struct flm_v25_sta_data_s *)&data[i * WORDS_PER_STA_DATA];
+ uint8_t caller_id;
+ uint8_t type;
+ union flm_handles flm_h;
+ ntnic_id_table_find(dev->ndev->id_table_handle, sta_data->id, &flm_h, &caller_id,
+ &type);
+
+ if (type == 1) {
+ uint8_t port;
+ bool remote_caller = is_remote_caller(caller_id, &port);
+
+ pthread_mutex_lock(&dev->ndev->mtx);
+ ((struct flow_handle *)flm_h.p)->learn_ignored = 1;
+ pthread_mutex_unlock(&dev->ndev->mtx);
+ struct flm_status_event_s data = {
+ .flow = flm_h.p,
+ .learn_ignore = sta_data->lis,
+ .learn_failed = sta_data->lfs,
+ };
+
+ flm_sta_queue_put(port, remote_caller, &data);
+ }
+ }
+}
+
/*
 * Poll the FLM for learn/info/status progress.
 *
 * The function-static word counters persist between calls: when a previous
 * poll reported pending INF/STA words, drain up to one DMA block of each
 * record type and dispatch the records; otherwise flush pending learn
 * records, then refresh the counters from the buffer-control registers.
 * Returns nonzero while work is (potentially) still pending.
 *
 * NOTE(review): the static counters make this effectively single-instance;
 * assumes one polling thread across all devices -- confirm.
 */
static uint32_t flm_update(struct flow_eth_dev *dev)
{
	static uint32_t inf_word_cnt;
	static uint32_t sta_word_cnt;

	uint32_t inf_data[DMA_BLOCK_SIZE];
	uint32_t sta_data[DMA_BLOCK_SIZE];

	if (inf_word_cnt >= WORDS_PER_INF_DATA || sta_word_cnt >= WORDS_PER_STA_DATA) {
		/* Cap each record type at what fits in a single DMA read */
		uint32_t inf_records = inf_word_cnt / WORDS_PER_INF_DATA;

		if (inf_records > MAX_INF_DATA_RECORDS_PER_READ)
			inf_records = MAX_INF_DATA_RECORDS_PER_READ;

		uint32_t sta_records = sta_word_cnt / WORDS_PER_STA_DATA;

		if (sta_records > MAX_STA_DATA_RECORDS_PER_READ)
			sta_records = MAX_STA_DATA_RECORDS_PER_READ;

		hw_mod_flm_inf_sta_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_STA_DATA,
			inf_data, inf_records * WORDS_PER_INF_DATA,
			&inf_word_cnt, sta_data,
			sta_records * WORDS_PER_STA_DATA,
			&sta_word_cnt);

		if (inf_records > 0)
			flm_mtr_read_inf_records(dev, inf_data, inf_records);

		if (sta_records > 0)
			flm_mtr_read_sta_records(dev, sta_data, sta_records);

		return 1;
	}

	if (flm_lrn_update(dev, &inf_word_cnt, &sta_word_cnt) != 0)
		return 1;

	/* Refresh available-word counters for the next poll */
	hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL, &inf_word_cnt);
	hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_STA_AVAIL, &sta_word_cnt);

	return inf_word_cnt + sta_word_cnt;
}
@@ -1067,6 +1188,25 @@ static int interpret_flow_actions(const struct flow_eth_dev *dev,
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_AGE", dev);
+
+ if (action[aidx].conf) {
+ struct rte_flow_action_age age_tmp;
+ const struct rte_flow_action_age *age =
+ memcpy_mask_if(&age_tmp, action[aidx].conf,
+ action_mask ? action_mask[aidx].conf : NULL,
+ sizeof(struct rte_flow_action_age));
+ fd->age.timeout = hw_mod_flm_scrub_timeout_encode(age->timeout);
+ fd->age.context = age->context;
+ NT_LOG(DBG, FILTER,
+ "normalized timeout: %u, original timeout: %u, context: %p",
+ hw_mod_flm_scrub_timeout_decode(fd->age.timeout),
+ age->timeout, fd->age.context);
+ }
+
+ break;
+
default:
NT_LOG(ERR, FILTER, "Invalid or unsupported flow action received - %i",
action[aidx].type);
@@ -2466,6 +2606,7 @@ static void copy_fd_to_fh_flm(struct flow_handle *fh, const struct nic_flow_def
break;
}
}
+ fh->context = fd->age.context;
}
static int convert_fh_to_fh_flm(struct flow_handle *fh, const uint32_t *packet_data,
@@ -2722,6 +2863,21 @@ static int setup_flow_flm_actions(struct flow_eth_dev *dev,
return -1;
}
+ /* Setup SCRUB profile */
+ struct hw_db_inline_scrub_data scrub_data = { .timeout = fd->age.timeout };
+ struct hw_db_flm_scrub_idx scrub_idx =
+ hw_db_inline_scrub_add(dev->ndev, dev->ndev->hw_db_handle, &scrub_data);
+ local_idxs[(*local_idx_counter)++] = scrub_idx.raw;
+
+ if (scrub_idx.error) {
+ NT_LOG(ERR, FILTER, "Could not reference FLM SCRUB resource");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return -1;
+ }
+
+ if (flm_scrub)
+ *flm_scrub = scrub_idx.ids;
+
/* Setup Action Set */
struct hw_db_inline_action_set_data action_set_data = {
.contains_jump = 0,
@@ -2730,6 +2886,7 @@ static int setup_flow_flm_actions(struct flow_eth_dev *dev,
.slc_lr = slc_lr_idx,
.tpe = tpe_idx,
.hsh = hsh_idx,
+ .scrub = scrub_idx,
};
struct hw_db_action_set_idx action_set_idx =
hw_db_inline_action_set_add(dev->ndev, dev->ndev->hw_db_handle, &action_set_data);
@@ -2796,6 +2953,7 @@ static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
goto error_out;
}
+ fh->context = fd->age.context;
nic_insert_flow(dev->ndev, fh);
} else if (attr->group > 0) {
@@ -2852,6 +3010,18 @@ static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
*/
int identical_km_entry_ft = -1;
+ /* Setup Action Set */
+
+ /* SCRUB/AGE action is not supported for group 0 */
+ if (fd->age.timeout != 0 || fd->age.context != NULL) {
+ NT_LOG(ERR, FILTER, "Action AGE is not supported for flow in group 0");
+ flow_nic_set_error(ERR_ACTION_AGE_UNSUPPORTED_GROUP_0, error);
+ goto error_out;
+ }
+
+ /* NOTE: SCRUB record 0 is used by default with timeout 0, i.e. flow will never
+ * AGE-out
+ */
struct hw_db_inline_action_set_data action_set_data = { 0 };
(void)action_set_data;
@@ -3348,6 +3518,15 @@ int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+ /* Initialize SCRUB with default index 0, i.e. flow will never AGE-out */
+ if (hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_PRESET_ALL, 0, 0) < 0)
+ goto err_exit0;
+
+ if (hw_mod_flm_scrub_flush(&ndev->be, 0, 1) < 0)
+ goto err_exit0;
+
+ flow_nic_mark_resource_used(ndev, RES_SCRUB_RCP, 0);
+
/* Setup filter using matching all packets violating traffic policing parameters */
flow_nic_mark_resource_used(ndev, RES_CAT_CFN, NT_VIOLATING_MBR_CFN);
flow_nic_mark_resource_used(ndev, RES_QSL_RCP, NT_VIOLATING_MBR_QSL);
@@ -3483,6 +3662,10 @@ int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+ hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_PRESET_ALL, 0, 0);
+ hw_mod_flm_scrub_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_SCRUB_RCP, 0);
+
hw_db_inline_destroy(ndev->hw_db_handle);
#ifdef FLOW_DEBUG
@@ -55,4 +55,23 @@
*/
#define NTNIC_SCANNER_LOAD 0.01
-#endif /* _FLOW_API_PROFILE_INLINE_CONFIG_H_ */
+/*
+ * This define sets the timeout resolution of aged flow scanner (scrubber).
+ *
+ * The timeout resolution feature is provided in order to reduce the number of
+ * write-back operations for flows without attached meter. If the resolution
+ * is disabled (set to 0) and flow timeout is enabled via age action, then a write-back
+ * occurs every time the flow is evicted from the flow cache, essentially causing the
+ * lookup performance to drop to that of a flow with meter. By setting the timeout
+ * resolution (>0), write-back for flows happens only when the difference between
+ * the last recorded time for the flow and the current time exceeds the chosen resolution.
+ *
+ * The parameter value is a power of 2 in units of 2^28 nanoseconds. It means that value 8 sets
+ * the timeout resolution to: 2^8 * 2^28 / 1e9 = 68.7 seconds
+ *
+ * NOTE: This parameter has a significant impact on flow lookup performance, especially
+ * if full scanner timeout resolution (=0) is configured.
+ */
+#define NTNIC_SCANNER_TIMEOUT_RESOLUTION 8
+
+#endif /* _FLOW_API_PROFILE_INLINE_CONFIG_H_ */
@@ -26,6 +26,7 @@
#include "ntnic_vfio.h"
#include "ntnic_mod_reg.h"
#include "nt_util.h"
+#include "profile_inline/flm_age_queue.h"
#include "profile_inline/flm_evt_queue.h"
#include "rte_pmd_ntnic.h"
@@ -1814,6 +1815,21 @@ THREAD_FUNC port_event_thread_fn(void *context)
}
}
+ /* AGED event */
+ /* Note: RTE_FLOW_PORT_FLAG_STRICT_QUEUE flag is not supported so
+ * event is always generated
+ */
+ int aged_event_count = flm_age_event_get(port_no);
+
+ if (aged_event_count > 0 && eth_dev && eth_dev->data &&
+ eth_dev->data->dev_private) {
+ rte_eth_dev_callback_process(eth_dev,
+ RTE_ETH_EVENT_FLOW_AGED,
+ NULL);
+ flm_age_event_clear(port_no);
+ do_wait = false;
+ }
+
if (do_wait)
nt_os_wait_usec(10);