@@ -184,8 +184,11 @@ extern const char *dbg_res_descr[];
int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
uint32_t alignment);
+int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ unsigned int num, uint32_t alignment);
void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx);
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index);
int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index);
#endif
@@ -666,8 +666,16 @@ int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
void hw_mod_qsl_free(struct flow_api_backend_s *be);
int hw_mod_qsl_reset(struct flow_api_backend_s *be);
int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t value);
int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t value);
int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t *value);
int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx, int count);
int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
uint32_t value);
@@ -104,11 +104,52 @@ int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
return -1;
}
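+/* Allocate a contiguous run of "num" resources of res_type, scanning candidate
+ * start indices in steps of "alignment". On success all elements are marked used
+ * with a reference count of 1 and the first index is returned; otherwise -1.
+ */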
+int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ unsigned int num, uint32_t alignment)
+{
+ unsigned int idx_offs;
+
+ /* guard against unsigned wrap-around in the loop bound when num exceeds the pool size */
+ if (num == 0 || num > ndev->res[res_type].resource_count)
+ return -1;
+
+ for (unsigned int res_idx = 0; res_idx < ndev->res[res_type].resource_count - (num - 1);
+ res_idx += alignment) {
+ if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+ for (idx_offs = 1; idx_offs < num; idx_offs++)
+ if (flow_nic_is_resource_used(ndev, res_type, res_idx + idx_offs))
+ break;
+
+ if (idx_offs < num)
+ continue;
+
+ /* found a contiguous run of "num" free res_type elements - allocate them */
+ for (idx_offs = 0; idx_offs < num; idx_offs++) {
+ flow_nic_mark_resource_used(ndev, res_type, res_idx + idx_offs);
+ ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+ }
+
+ return res_idx;
+ }
+ }
+
+ return -1;
+}
+
void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
{
flow_nic_mark_resource_unused(ndev, res_type, idx);
}
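+/* Increment the reference count of an already-allocated resource.
+ * Returns -1 if the count would overflow, otherwise 0.
+ */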
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
+{
+ NT_LOG(DBG, FILTER, "Reference resource %s idx %i (before ref cnt %i)",
+ dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+ assert(flow_nic_is_resource_used(ndev, res_type, index));
+
+ if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+ return -1;
+
+ ndev->res[res_type].ref[index]++;
+ return 0;
+}
+
int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
{
NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)",
@@ -346,6 +387,18 @@ int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+ if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+ for (int i = 0; i < eth_dev->num_queues; ++i) {
+ uint32_t qen_value = 0;
+ uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;
+
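+ /* each QEN entry holds enable bits for 4 queues (entry = id / 4, bit = id % 4); clear this queue's bit */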
+ hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
+ hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
+ qen_value & ~(1U << (queue_id % 4)));
+ hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+ }
+ }
+
#ifdef FLOW_DEBUG
ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif
@@ -546,6 +599,18 @@ static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no
eth_dev->rss_target_id = -1;
+ if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+ for (i = 0; i < eth_dev->num_queues; i++) {
+ uint32_t qen_value = 0;
+ uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id;
+
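+ /* set this queue's enable bit in its QEN entry (4 queues per entry) */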
+ hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value);
+ hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4,
+ qen_value | (1U << (queue_id % 4)));
+ hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+ }
+ }
+
*rss_target_id = eth_dev->rss_target_id;
nic_insert_eth_port_dev(ndev, eth_dev);
@@ -104,6 +104,114 @@ int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count
return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
}
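+/* Read or write a single QSL RCP field for the given recipe index ("get" selects
+ * the direction); the QST and QEN helpers below follow the same pattern.
+ */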
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= be->qsl.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_QSL_RCP_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+ sizeof(struct qsl_v7_rcp_s));
+ break;
+
+ case HW_QSL_RCP_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->qsl.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->qsl.v7.rcp, struct qsl_v7_rcp_s, index, *value,
+ be->qsl.nb_rcp_categories);
+ break;
+
+ case HW_QSL_RCP_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->qsl.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->qsl.v7.rcp, struct qsl_v7_rcp_s, index, *value);
+ break;
+
+ case HW_QSL_RCP_DISCARD:
+ GET_SET(be->qsl.v7.rcp[index].discard, value);
+ break;
+
+ case HW_QSL_RCP_DROP:
+ GET_SET(be->qsl.v7.rcp[index].drop, value);
+ break;
+
+ case HW_QSL_RCP_TBL_LO:
+ GET_SET(be->qsl.v7.rcp[index].tbl_lo, value);
+ break;
+
+ case HW_QSL_RCP_TBL_HI:
+ GET_SET(be->qsl.v7.rcp[index].tbl_hi, value);
+ break;
+
+ case HW_QSL_RCP_TBL_IDX:
+ GET_SET(be->qsl.v7.rcp[index].tbl_idx, value);
+ break;
+
+ case HW_QSL_RCP_TBL_MSK:
+ GET_SET(be->qsl.v7.rcp[index].tbl_msk, value);
+ break;
+
+ case HW_QSL_RCP_LR:
+ GET_SET(be->qsl.v7.rcp[index].lr, value);
+ break;
+
+ case HW_QSL_RCP_TSA:
+ GET_SET(be->qsl.v7.rcp[index].tsa, value);
+ break;
+
+ case HW_QSL_RCP_VLI:
+ GET_SET(be->qsl.v7.rcp[index].vli, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ /* end case 7 */
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t value)
+{
+ return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx, int count)
{
if (count == ALL_ENTRIES)
@@ -117,6 +225,73 @@ int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx, int count
return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
}
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= be->qsl.nb_qst_entries) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_QSL_QST_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+ sizeof(struct qsl_v7_qst_s));
+ break;
+
+ case HW_QSL_QST_QUEUE:
+ GET_SET(be->qsl.v7.qst[index].queue, value);
+ break;
+
+ case HW_QSL_QST_EN:
+ GET_SET(be->qsl.v7.qst[index].en, value);
+ break;
+
+ case HW_QSL_QST_TX_PORT:
+ GET_SET(be->qsl.v7.qst[index].tx_port, value);
+ break;
+
+ case HW_QSL_QST_LRE:
+ GET_SET(be->qsl.v7.qst[index].lre, value);
+ break;
+
+ case HW_QSL_QST_TCI:
+ GET_SET(be->qsl.v7.qst[index].tci, value);
+ break;
+
+ case HW_QSL_QST_VEN:
+ GET_SET(be->qsl.v7.qst[index].ven, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ /* end case 7 */
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t value)
+{
+ return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx, int count)
{
if (count == ALL_ENTRIES)
@@ -130,6 +305,49 @@ int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx, int count
return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
}
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= QSL_QEN_ENTRIES) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_QSL_QEN_EN:
+ GET_SET(be->qsl.v7.qen[index].en, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ /* end case 7 */
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t value)
+{
+ return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index,
+ uint32_t *value)
+{
+ return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx, int count)
{
if (count == ALL_ENTRIES)
@@ -20,12 +20,18 @@ struct hw_db_inline_resource_db {
int ref;
} *cot;
+ struct hw_db_inline_resource_db_qsl {
+ struct hw_db_inline_qsl_data data;
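+ /* first QST entry backing this recipe's table (0 when the recipe has no table) */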
+ int qst_idx;
+ } *qsl;
+
struct hw_db_inline_resource_db_slc_lr {
struct hw_db_inline_slc_lr_data data;
int ref;
} *slc_lr;
uint32_t nb_cot;
+ uint32_t nb_qsl;
uint32_t nb_slc_lr;
/* Items */
@@ -61,6 +67,14 @@ int hw_db_inline_create(struct flow_nic_dev *ndev, void **db_handle)
return -1;
}
+ db->nb_qsl = ndev->be.qsl.nb_rcp_categories;
+ db->qsl = calloc(db->nb_qsl, sizeof(struct hw_db_inline_resource_db_qsl));
+
+ if (db->qsl == NULL) {
+ hw_db_inline_destroy(db);
+ return -1;
+ }
+
db->nb_slc_lr = ndev->be.max_categories;
db->slc_lr = calloc(db->nb_slc_lr, sizeof(struct hw_db_inline_resource_db_slc_lr));
@@ -86,6 +100,7 @@ void hw_db_inline_destroy(void *db_handle)
struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
free(db->cot);
+ free(db->qsl);
free(db->slc_lr);
free(db->cat);
@@ -110,6 +125,10 @@ void hw_db_inline_deref_idxs(struct flow_nic_dev *ndev, void *db_handle, struct
hw_db_inline_cot_deref(ndev, db_handle, *(struct hw_db_cot_idx *)&idxs[i]);
break;
+ case HW_DB_IDX_TYPE_QSL:
+ hw_db_inline_qsl_deref(ndev, db_handle, *(struct hw_db_qsl_idx *)&idxs[i]);
+ break;
+
case HW_DB_IDX_TYPE_SLC_LR:
hw_db_inline_slc_lr_deref(ndev, db_handle,
*(struct hw_db_slc_lr_idx *)&idxs[i]);
@@ -145,6 +164,13 @@ int hw_db_inline_setup_mbr_filter(struct flow_nic_dev *ndev, uint32_t cat_hw_id,
const int offset = ((int)ndev->be.cat.cts_num + 1) / 2;
(void)offset;
+ /* QSL recipe for traffic policing: drop all matching packets */
+ if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DROP, qsl_hw_id, 0x3) < 0)
+ return -1;
+
+ if (hw_mod_qsl_rcp_flush(&ndev->be, qsl_hw_id, 1) < 0)
+ return -1;
+
/* Select and enable QSL recipe */
if (hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 1, qsl_hw_id))
return -1;
@@ -255,6 +281,175 @@ void hw_db_inline_cot_deref(struct flow_nic_dev *ndev __rte_unused, void *db_han
}
}
+/******************************************************************************/
+/* QSL */
+/******************************************************************************/
+
+/* Calculate the queue mask for the QSL TBL_MSK field for a given number of queues.
+ * NOTE: If the number of queues is not a power of two, the mask is computed for
+ * the nearest smaller power of two.
+ */
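+/* e.g. queue_mask(8) == 0x7, queue_mask(5) == 0x3 (a mask covering 4 queues) */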
+static uint32_t queue_mask(uint32_t nr_queues)
+{
+ nr_queues |= nr_queues >> 1;
+ nr_queues |= nr_queues >> 2;
+ nr_queues |= nr_queues >> 4;
+ nr_queues |= nr_queues >> 8;
+ nr_queues |= nr_queues >> 16;
+ return nr_queues >> 1;
+}
+
+static int hw_db_inline_qsl_compare(const struct hw_db_inline_qsl_data *data1,
+ const struct hw_db_inline_qsl_data *data2)
+{
+ if (data1->discard != data2->discard || data1->drop != data2->drop ||
+ data1->table_size != data2->table_size || data1->retransmit != data2->retransmit) {
+ return 0;
+ }
+
+ for (int i = 0; i < HW_DB_INLINE_MAX_QST_PER_QSL; ++i) {
+ if (data1->table[i].queue != data2->table[i].queue ||
+ data1->table[i].queue_en != data2->table[i].queue_en ||
+ data1->table[i].tx_port != data2->table[i].tx_port ||
+ data1->table[i].tx_port_en != data2->table[i].tx_port_en) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+struct hw_db_qsl_idx hw_db_inline_qsl_add(struct flow_nic_dev *ndev, void *db_handle,
+ const struct hw_db_inline_qsl_data *data)
+{
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+ struct hw_db_qsl_idx qsl_idx = { .raw = 0 };
+ uint32_t qst_idx = 0;
+ int res;
+
+ qsl_idx.type = HW_DB_IDX_TYPE_QSL;
+
+ if (data->discard) {
+ qsl_idx.ids = 0;
+ return qsl_idx;
+ }
+
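+ /* recipe 0 is reserved for discard, so search existing recipes from index 1 for a match to reuse */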
+ for (uint32_t i = 1; i < db->nb_qsl; ++i) {
+ if (hw_db_inline_qsl_compare(data, &db->qsl[i].data)) {
+ qsl_idx.ids = i;
+ hw_db_inline_qsl_ref(ndev, db, qsl_idx);
+ return qsl_idx;
+ }
+ }
+
+ res = flow_nic_alloc_resource(ndev, RES_QSL_RCP, 1);
+
+ if (res < 0) {
+ qsl_idx.error = 1;
+ return qsl_idx;
+ }
+
+ qsl_idx.ids = res & 0xff;
+
+ if (data->table_size > 0) {
+ res = flow_nic_alloc_resource_config(ndev, RES_QSL_QST, data->table_size, 1);
+
+ if (res < 0) {
+ flow_nic_deref_resource(ndev, RES_QSL_RCP, qsl_idx.ids);
+ qsl_idx.error = 1;
+ return qsl_idx;
+ }
+
+ qst_idx = (uint32_t)res;
+ }
+
+ memcpy(&db->qsl[qsl_idx.ids].data, data, sizeof(struct hw_db_inline_qsl_data));
+ db->qsl[qsl_idx.ids].qst_idx = qst_idx;
+
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, qsl_idx.ids, 0x0);
+
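+ /* DROP and LR are 2-bit fields; the 0/1 flag times 0x3 sets both bits when enabled */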
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, qsl_idx.ids, data->discard);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DROP, qsl_idx.ids, data->drop * 0x3);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_LR, qsl_idx.ids, data->retransmit * 0x3);
+
+ if (data->table_size == 0) {
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_LO, qsl_idx.ids, 0x0);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_HI, qsl_idx.ids, 0x0);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_IDX, qsl_idx.ids, 0x0);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_MSK, qsl_idx.ids, 0x0);
+
+ } else {
+ const uint32_t table_start = qst_idx;
+ const uint32_t table_end = table_start + data->table_size - 1;
+
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_LO, qsl_idx.ids, table_start);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_HI, qsl_idx.ids, table_end);
+
+ /* Toeplitz hash function uses TBL_IDX and TBL_MSK. */
+ uint32_t msk = queue_mask(table_end - table_start + 1);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_IDX, qsl_idx.ids, table_start);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_MSK, qsl_idx.ids, msk);
+
+ for (uint32_t i = 0; i < data->table_size; ++i) {
+ hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, table_start + i, 0x0);
+
+ hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_TX_PORT, table_start + i,
+ data->table[i].tx_port);
+ hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_LRE, table_start + i,
+ data->table[i].tx_port_en);
+
+ hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_QUEUE, table_start + i,
+ data->table[i].queue);
+ hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_EN, table_start + i,
+ data->table[i].queue_en);
+ }
+
+ hw_mod_qsl_qst_flush(&ndev->be, table_start, data->table_size);
+ }
+
+ hw_mod_qsl_rcp_flush(&ndev->be, qsl_idx.ids, 1);
+
+ return qsl_idx;
+}
+
+void hw_db_inline_qsl_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx)
+{
+ (void)db_handle;
+
+ if (!idx.error && idx.ids != 0)
+ flow_nic_ref_resource(ndev, RES_QSL_RCP, idx.ids);
+}
+
+void hw_db_inline_qsl_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx)
+{
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+ if (idx.error || idx.ids == 0)
+ return;
+
+ if (flow_nic_deref_resource(ndev, RES_QSL_RCP, idx.ids) == 0) {
+ const int table_size = (int)db->qsl[idx.ids].data.table_size;
+
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, idx.ids, 0x0);
+ hw_mod_qsl_rcp_flush(&ndev->be, idx.ids, 1);
+
+ if (table_size > 0) {
+ const int table_start = db->qsl[idx.ids].qst_idx;
+
+ for (int i = 0; i < (int)table_size; ++i) {
+ hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL,
+ table_start + i, 0x0);
+ flow_nic_free_resource(ndev, RES_QSL_QST, table_start + i);
+ }
+
+ hw_mod_qsl_qst_flush(&ndev->be, table_start, table_size);
+ }
+
+ memset(&db->qsl[idx.ids].data, 0x0, sizeof(struct hw_db_inline_qsl_data));
+ db->qsl[idx.ids].qst_idx = 0;
+ }
+}
+
/******************************************************************************/
/* SLC_LR */
/******************************************************************************/
@@ -36,6 +36,10 @@ struct hw_db_cot_idx {
HW_DB_IDX;
};
+struct hw_db_qsl_idx {
+ HW_DB_IDX;
+};
+
struct hw_db_cat_idx {
HW_DB_IDX;
};
@@ -48,6 +52,7 @@ enum hw_db_idx_type {
HW_DB_IDX_TYPE_NONE = 0,
HW_DB_IDX_TYPE_COT,
HW_DB_IDX_TYPE_CAT,
+ HW_DB_IDX_TYPE_QSL,
HW_DB_IDX_TYPE_SLC_LR,
};
@@ -113,6 +118,7 @@ struct hw_db_inline_action_set_data {
int jump;
struct {
struct hw_db_cot_idx cot;
+ struct hw_db_qsl_idx qsl;
};
};
};
@@ -131,6 +137,11 @@ struct hw_db_cot_idx hw_db_inline_cot_add(struct flow_nic_dev *ndev, void *db_ha
void hw_db_inline_cot_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_cot_idx idx);
void hw_db_inline_cot_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_cot_idx idx);
+struct hw_db_qsl_idx hw_db_inline_qsl_add(struct flow_nic_dev *ndev, void *db_handle,
+ const struct hw_db_inline_qsl_data *data);
+void hw_db_inline_qsl_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx);
+void hw_db_inline_qsl_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx);
+
struct hw_db_slc_lr_idx hw_db_inline_slc_lr_add(struct flow_nic_dev *ndev, void *db_handle,
const struct hw_db_inline_slc_lr_data *data);
void hw_db_inline_slc_lr_ref(struct flow_nic_dev *ndev, void *db_handle,
@@ -2276,9 +2276,55 @@ static int convert_fh_to_fh_flm(struct flow_handle *fh, const uint32_t *packet_d
return 0;
}
+
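+/* Build the QSL data for a flow: drop when there are no destinations, otherwise
+ * fill a QST table that round-robins the flow's physical ports and virtual
+ * queues across max(num_dest_port, num_queues) entries.
+ */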
+static void setup_db_qsl_data(struct nic_flow_def *fd, struct hw_db_inline_qsl_data *qsl_data,
+ uint32_t num_dest_port, uint32_t num_queues)
+{
+ memset(qsl_data, 0x0, sizeof(struct hw_db_inline_qsl_data));
+
+ if (fd->dst_num_avail <= 0) {
+ qsl_data->drop = 1;
+
+ } else {
+ assert(fd->dst_num_avail < HW_DB_INLINE_MAX_QST_PER_QSL);
+
+ uint32_t ports[fd->dst_num_avail];
+ uint32_t queues[fd->dst_num_avail];
+
+ uint32_t port_index = 0;
+ uint32_t queue_index = 0;
+ uint32_t max = num_dest_port > num_queues ? num_dest_port : num_queues;
+
+ memset(ports, 0, sizeof(ports));
+ memset(queues, 0, sizeof(queues));
+
+ qsl_data->table_size = max;
+ qsl_data->retransmit = num_dest_port > 0 ? 1 : 0;
+
+ for (int i = 0; i < fd->dst_num_avail; ++i)
+ if (fd->dst_id[i].type == PORT_PHY)
+ ports[port_index++] = fd->dst_id[i].id;
+
+ else if (fd->dst_id[i].type == PORT_VIRT)
+ queues[queue_index++] = fd->dst_id[i].id;
+
+ for (uint32_t i = 0; i < max; ++i) {
+ if (num_dest_port > 0) {
+ qsl_data->table[i].tx_port = ports[i % num_dest_port];
+ qsl_data->table[i].tx_port_en = 1;
+ }
+
+ if (num_queues > 0) {
+ qsl_data->table[i].queue = queues[i % num_queues];
+ qsl_data->table[i].queue_en = 1;
+ }
+ }
+ }
+}
+
static int setup_flow_flm_actions(struct flow_eth_dev *dev,
const struct nic_flow_def *fd,
- const struct hw_db_inline_qsl_data *qsl_data __rte_unused,
+ const struct hw_db_inline_qsl_data *qsl_data,
const struct hw_db_inline_hsh_data *hsh_data __rte_unused,
uint32_t group __rte_unused,
uint32_t local_idxs[],
@@ -2288,6 +2334,17 @@ static int setup_flow_flm_actions(struct flow_eth_dev *dev,
uint32_t *flm_scrub __rte_unused,
struct rte_flow_error *error)
{
+ /* Finalize QSL */
+ struct hw_db_qsl_idx qsl_idx =
+ hw_db_inline_qsl_add(dev->ndev, dev->ndev->hw_db_handle, qsl_data);
+ local_idxs[(*local_idx_counter)++] = qsl_idx.raw;
+
+ if (qsl_idx.error) {
+ NT_LOG(ERR, FILTER, "Could not reference QSL resource");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return -1;
+ }
+
/* Setup SLC LR */
struct hw_db_slc_lr_idx slc_lr_idx = { .raw = 0 };
@@ -2328,6 +2385,7 @@ static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
fh->caller_id = caller_id;
struct hw_db_inline_qsl_data qsl_data;
+ setup_db_qsl_data(fd, &qsl_data, num_dest_port, num_queues);
struct hw_db_inline_hsh_data hsh_data;
@@ -2398,6 +2456,19 @@ static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
goto error_out;
}
+
+ /* Finalize QSL */
+ struct hw_db_qsl_idx qsl_idx =
+ hw_db_inline_qsl_add(dev->ndev, dev->ndev->hw_db_handle,
+ &qsl_data);
+ fh->db_idxs[fh->db_idx_counter++] = qsl_idx.raw;
+ action_set_data.qsl = qsl_idx;
+
+ if (qsl_idx.error) {
+ NT_LOG(ERR, FILTER, "Could not reference QSL resource");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ goto error_out;
+ }
}
/* Setup CAT */
@@ -2469,6 +2540,24 @@ int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
goto err_exit0;
+ /* Initialize QSL recipe 0, used for unmatched flows, to discard */
+ if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+ goto err_exit0;
+
+ if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+ goto err_exit0;
+
+ flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+ /* Initialize the default QST entry at index 0 */
+ if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0, 0x0) < 0)
+ goto err_exit0;
+
+ if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+ goto err_exit0;
+
+ flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
/* SLC LR index 0 is reserved */
flow_nic_mark_resource_used(ndev, RES_SLC_LR_RCP, 0);
@@ -2487,6 +2576,7 @@ int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
/* Setup filter using matching all packets violating traffic policing parameters */
flow_nic_mark_resource_used(ndev, RES_CAT_CFN, NT_VIOLATING_MBR_CFN);
+ flow_nic_mark_resource_used(ndev, RES_QSL_RCP, NT_VIOLATING_MBR_QSL);
if (hw_db_inline_setup_mbr_filter(ndev, NT_VIOLATING_MBR_CFN,
NT_FLM_VIOLATING_MBR_FLOW_TYPE,
@@ -2533,6 +2623,10 @@ int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
hw_mod_cat_cot_flush(&ndev->be, 0, 1);
flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+ hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
hw_mod_slc_lr_rcp_set(&ndev->be, HW_SLC_LR_RCP_PRESET_ALL, 0, 0);
hw_mod_slc_lr_rcp_flush(&ndev->be, 0, 1);
flow_nic_free_resource(ndev, RES_SLC_LR_RCP, 0);