@@ -889,24 +889,40 @@ void hw_mod_tpe_free(struct flow_api_backend_s *be);
int hw_mod_tpe_reset(struct flow_api_backend_s *be);
int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value);
int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value);
int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value);
int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value);
int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t *value);
int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value);
int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value);
int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value);
enum debug_mode_e {
FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
@@ -169,6 +169,82 @@ int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
}
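+/*
+ * Shared pattern for the TPE recipe helpers below: each *_mod() function
+ * bounds-checks the recipe index and dispatches on the backend version.
+ * HW_TPE_PRESET_ALL fills the whole recipe (set only), HW_TPE_FIND and
+ * HW_TPE_COMPARE scan or compare recipe entries (get only), and the
+ * remaining cases read or write a single named field via GET_SET().
+ */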
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.rpp_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_rpp_v0_rcp_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.rpp_rcp, struct tpe_v1_rpp_v0_rcp_s, index,
+ *value, be->tpe.nb_rcp_categories);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.rpp_rcp, struct tpe_v1_rpp_v0_rcp_s, index,
+ *value);
+ break;
+
+ case HW_TPE_RPP_RCP_EXP:
+ GET_SET(be->tpe.v3.rpp_rcp[index].exp, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value)
+{
+ return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
/*
* IFR_RCP
*/
@@ -203,6 +279,90 @@ int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
}
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.ins_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_ins_v1_rcp_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.ins_rcp, struct tpe_v1_ins_v1_rcp_s, index,
+ *value, be->tpe.nb_rcp_categories);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.ins_rcp, struct tpe_v1_ins_v1_rcp_s, index,
+ *value);
+ break;
+
+ case HW_TPE_INS_RCP_DYN:
+ GET_SET(be->tpe.v3.ins_rcp[index].dyn, value);
+ break;
+
+ case HW_TPE_INS_RCP_OFS:
+ GET_SET(be->tpe.v3.ins_rcp[index].ofs, value);
+ break;
+
+ case HW_TPE_INS_RCP_LEN:
+ GET_SET(be->tpe.v3.ins_rcp[index].len, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value)
+{
+ return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
/*
* RPL_RCP
*/
@@ -220,6 +380,102 @@ int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
}
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.rpl_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v3_rpl_v4_rcp_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.rpl_rcp, struct tpe_v3_rpl_v4_rcp_s, index,
+ *value, be->tpe.nb_rcp_categories);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.rpl_rcp, struct tpe_v3_rpl_v4_rcp_s, index,
+ *value);
+ break;
+
+ case HW_TPE_RPL_RCP_DYN:
+ GET_SET(be->tpe.v3.rpl_rcp[index].dyn, value);
+ break;
+
+ case HW_TPE_RPL_RCP_OFS:
+ GET_SET(be->tpe.v3.rpl_rcp[index].ofs, value);
+ break;
+
+ case HW_TPE_RPL_RCP_LEN:
+ GET_SET(be->tpe.v3.rpl_rcp[index].len, value);
+ break;
+
+ case HW_TPE_RPL_RCP_RPL_PTR:
+ GET_SET(be->tpe.v3.rpl_rcp[index].rpl_ptr, value);
+ break;
+
+ case HW_TPE_RPL_RCP_EXT_PRIO:
+ GET_SET(be->tpe.v3.rpl_rcp[index].ext_prio, value);
+ break;
+
+ case HW_TPE_RPL_RCP_ETH_TYPE_WR:
+ GET_SET(be->tpe.v3.rpl_rcp[index].eth_type_wr, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value)
+{
+ return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
/*
* RPL_EXT
*/
@@ -237,6 +493,86 @@ int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx, count);
}
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_rpl_ext_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.rpl_ext[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_rpl_v2_ext_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rpl_ext_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.rpl_ext, struct tpe_v1_rpl_v2_ext_s, index,
+ *value, be->tpe.nb_rpl_ext_categories);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rpl_ext_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.rpl_ext, struct tpe_v1_rpl_v2_ext_s, index,
+ *value);
+ break;
+
+ case HW_TPE_RPL_EXT_RPL_PTR:
+ GET_SET(be->tpe.v3.rpl_ext[index].rpl_ptr, value);
+ break;
+
+ case HW_TPE_RPL_EXT_META_RPL_LEN:
+ GET_SET(be->tpe.v3.rpl_ext[index].meta_rpl_len, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value)
+{
+ return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
/*
* RPL_RPL
*/
@@ -254,6 +590,89 @@ int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx, count);
}
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_rpl_depth) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.rpl_rpl[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_rpl_v2_rpl_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rpl_depth) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.rpl_rpl, struct tpe_v1_rpl_v2_rpl_s, index,
+ *value, be->tpe.nb_rpl_depth);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rpl_depth) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.rpl_rpl, struct tpe_v1_rpl_v2_rpl_s, index,
+ *value);
+ break;
+
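+		/* The replace RAM is accessed one 16-byte line at a time, so
+		 * HW_TPE_RPL_RPL_VALUE transfers four 32-bit words rather than
+		 * a single field.
+		 */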
+ case HW_TPE_RPL_RPL_VALUE:
+			if (get)
+				memcpy(value, be->tpe.v3.rpl_rpl[index].value,
+					sizeof(uint32_t) * 4);
+			else
+				memcpy(be->tpe.v3.rpl_rpl[index].value, value,
+					sizeof(uint32_t) * 4);
+
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t *value)
+{
+ return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
/*
* CPY_RCP
*/
@@ -273,6 +692,96 @@ int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
}
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
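+	/* The CPY table holds one bank of nb_rcp_categories recipes per copy
+	 * writer, addressed as writer * nb_rcp_categories + recipe.
+	 */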
+ const uint32_t cpy_size = be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+
+ if (index >= cpy_size) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.cpy_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_cpy_v1_rcp_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= cpy_size) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.cpy_rcp, struct tpe_v1_cpy_v1_rcp_s, index,
+ *value, cpy_size);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= cpy_size) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.cpy_rcp, struct tpe_v1_cpy_v1_rcp_s, index,
+ *value);
+ break;
+
+ case HW_TPE_CPY_RCP_READER_SELECT:
+ GET_SET(be->tpe.v3.cpy_rcp[index].reader_select, value);
+ break;
+
+ case HW_TPE_CPY_RCP_DYN:
+ GET_SET(be->tpe.v3.cpy_rcp[index].dyn, value);
+ break;
+
+ case HW_TPE_CPY_RCP_OFS:
+ GET_SET(be->tpe.v3.cpy_rcp[index].ofs, value);
+ break;
+
+ case HW_TPE_CPY_RCP_LEN:
+ GET_SET(be->tpe.v3.cpy_rcp[index].len, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value)
+{
+ return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
/*
* HFU_RCP
*/
@@ -290,6 +799,166 @@ int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
}
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.hfu_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_hfu_v1_rcp_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.hfu_rcp, struct tpe_v1_hfu_v1_rcp_s, index,
+ *value, be->tpe.nb_rcp_categories);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.hfu_rcp, struct tpe_v1_hfu_v1_rcp_s, index,
+ *value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_A_WR:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_a_wr, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_a_outer_l4_len, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_a_pos_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_a_pos_ofs, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_a_add_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_a_add_ofs, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_a_sub_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_B_WR:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_b_wr, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_b_pos_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_b_pos_ofs, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_b_add_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_b_add_ofs, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_b_sub_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_C_WR:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_c_wr, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_c_pos_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_c_pos_ofs, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_c_add_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_c_add_ofs, value);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].len_c_sub_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_TTL_WR:
+ GET_SET(be->tpe.v3.hfu_rcp[index].ttl_wr, value);
+ break;
+
+ case HW_TPE_HFU_RCP_TTL_POS_DYN:
+ GET_SET(be->tpe.v3.hfu_rcp[index].ttl_pos_dyn, value);
+ break;
+
+ case HW_TPE_HFU_RCP_TTL_POS_OFS:
+ GET_SET(be->tpe.v3.hfu_rcp[index].ttl_pos_ofs, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value)
+{
+ return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
/*
* CSU_RCP
*/
@@ -306,3 +975,91 @@ int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
}
+
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ uint32_t index, uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ switch (_VER_) {
+ case 3:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ memset(&be->tpe.v3.csu_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_csu_v0_rcp_s));
+ break;
+
+ case HW_TPE_FIND:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ FIND_EQUAL_INDEX(be->tpe.v3.csu_rcp, struct tpe_v1_csu_v0_rcp_s, index,
+ *value, be->tpe.nb_rcp_categories);
+ break;
+
+ case HW_TPE_COMPARE:
+ if (!get) {
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ if (*value >= be->tpe.nb_rcp_categories) {
+ INDEX_TOO_LARGE_LOG;
+ return INDEX_TOO_LARGE;
+ }
+
+ DO_COMPARE_INDEXS(be->tpe.v3.csu_rcp, struct tpe_v1_csu_v0_rcp_s, index,
+ *value);
+ break;
+
+ case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+ GET_SET(be->tpe.v3.csu_rcp[index].ol3_cmd, value);
+ break;
+
+ case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+ GET_SET(be->tpe.v3.csu_rcp[index].ol4_cmd, value);
+ break;
+
+ case HW_TPE_CSU_RCP_INNER_L3_CMD:
+ GET_SET(be->tpe.v3.csu_rcp[index].il3_cmd, value);
+ break;
+
+ case HW_TPE_CSU_RCP_INNER_L4_CMD:
+ GET_SET(be->tpe.v3.csu_rcp[index].il4_cmd, value);
+ break;
+
+ default:
+ UNSUP_FIELD_LOG;
+ return UNSUP_FIELD;
+ }
+
+ break;
+
+ default:
+ UNSUP_VER_LOG;
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+ uint32_t value)
+{
+ return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
@@ -30,6 +30,17 @@ struct hw_db_inline_resource_db {
int ref;
} *slc_lr;
+ struct hw_db_inline_resource_db_tpe {
+ struct hw_db_inline_tpe_data data;
+ int ref;
+ } *tpe;
+
+ struct hw_db_inline_resource_db_tpe_ext {
+ struct hw_db_inline_tpe_ext_data data;
+ int replace_ram_idx;
+ int ref;
+ } *tpe_ext;
+
struct hw_db_inline_resource_db_hsh {
struct hw_db_inline_hsh_data data;
int ref;
@@ -38,6 +49,8 @@ struct hw_db_inline_resource_db {
uint32_t nb_cot;
uint32_t nb_qsl;
uint32_t nb_slc_lr;
+ uint32_t nb_tpe;
+ uint32_t nb_tpe_ext;
uint32_t nb_hsh;
/* Items */
@@ -101,6 +114,22 @@ int hw_db_inline_create(struct flow_nic_dev *ndev, void **db_handle)
return -1;
}
+ db->nb_tpe = ndev->be.tpe.nb_rcp_categories;
+ db->tpe = calloc(db->nb_tpe, sizeof(struct hw_db_inline_resource_db_tpe));
+
+ if (db->tpe == NULL) {
+ hw_db_inline_destroy(db);
+ return -1;
+ }
+
+ db->nb_tpe_ext = ndev->be.tpe.nb_rpl_ext_categories;
+ db->tpe_ext = calloc(db->nb_tpe_ext, sizeof(struct hw_db_inline_resource_db_tpe_ext));
+
+ if (db->tpe_ext == NULL) {
+ hw_db_inline_destroy(db);
+ return -1;
+ }
+
db->nb_cat = ndev->be.cat.nb_cat_funcs;
db->cat = calloc(db->nb_cat, sizeof(struct hw_db_inline_resource_db_cat));
@@ -154,6 +183,8 @@ void hw_db_inline_destroy(void *db_handle)
free(db->cot);
free(db->qsl);
free(db->slc_lr);
+ free(db->tpe);
+ free(db->tpe_ext);
free(db->hsh);
free(db->cat);
@@ -195,6 +226,15 @@ void hw_db_inline_deref_idxs(struct flow_nic_dev *ndev, void *db_handle, struct
*(struct hw_db_slc_lr_idx *)&idxs[i]);
break;
+ case HW_DB_IDX_TYPE_TPE:
+ hw_db_inline_tpe_deref(ndev, db_handle, *(struct hw_db_tpe_idx *)&idxs[i]);
+ break;
+
+ case HW_DB_IDX_TYPE_TPE_EXT:
+ hw_db_inline_tpe_ext_deref(ndev, db_handle,
+ *(struct hw_db_tpe_ext_idx *)&idxs[i]);
+ break;
+
case HW_DB_IDX_TYPE_KM_RCP:
hw_db_inline_km_deref(ndev, db_handle, *(struct hw_db_km_idx *)&idxs[i]);
break;
@@ -240,6 +280,12 @@ const void *hw_db_inline_find_data(struct flow_nic_dev *ndev, void *db_handle,
case HW_DB_IDX_TYPE_SLC_LR:
return &db->slc_lr[idxs[i].ids].data;
+ case HW_DB_IDX_TYPE_TPE:
+ return &db->tpe[idxs[i].ids].data;
+
+ case HW_DB_IDX_TYPE_TPE_EXT:
+ return &db->tpe_ext[idxs[i].ids].data;
+
case HW_DB_IDX_TYPE_KM_RCP:
return &db->km[idxs[i].id1].data;
@@ -652,6 +698,333 @@ void hw_db_inline_slc_lr_deref(struct flow_nic_dev *ndev, void *db_handle,
}
}
+/******************************************************************************/
+/* TPE */
+/******************************************************************************/
+
+static int hw_db_inline_tpe_compare(const struct hw_db_inline_tpe_data *data1,
+ const struct hw_db_inline_tpe_data *data2)
+{
+ for (int i = 0; i < 6; ++i)
+ if (data1->writer[i].en != data2->writer[i].en ||
+ data1->writer[i].reader_select != data2->writer[i].reader_select ||
+ data1->writer[i].dyn != data2->writer[i].dyn ||
+ data1->writer[i].ofs != data2->writer[i].ofs ||
+ data1->writer[i].len != data2->writer[i].len)
+ return 0;
+
+ return data1->insert_len == data2->insert_len && data1->new_outer == data2->new_outer &&
+ data1->calc_eth_type_from_inner_ip == data2->calc_eth_type_from_inner_ip &&
+ data1->ttl_en == data2->ttl_en && data1->ttl_dyn == data2->ttl_dyn &&
+ data1->ttl_ofs == data2->ttl_ofs && data1->len_a_en == data2->len_a_en &&
+ data1->len_a_pos_dyn == data2->len_a_pos_dyn &&
+ data1->len_a_pos_ofs == data2->len_a_pos_ofs &&
+ data1->len_a_add_dyn == data2->len_a_add_dyn &&
+ data1->len_a_add_ofs == data2->len_a_add_ofs &&
+ data1->len_a_sub_dyn == data2->len_a_sub_dyn &&
+ data1->len_b_en == data2->len_b_en &&
+ data1->len_b_pos_dyn == data2->len_b_pos_dyn &&
+ data1->len_b_pos_ofs == data2->len_b_pos_ofs &&
+ data1->len_b_add_dyn == data2->len_b_add_dyn &&
+ data1->len_b_add_ofs == data2->len_b_add_ofs &&
+ data1->len_b_sub_dyn == data2->len_b_sub_dyn &&
+ data1->len_c_en == data2->len_c_en &&
+ data1->len_c_pos_dyn == data2->len_c_pos_dyn &&
+ data1->len_c_pos_ofs == data2->len_c_pos_ofs &&
+ data1->len_c_add_dyn == data2->len_c_add_dyn &&
+ data1->len_c_add_ofs == data2->len_c_add_ofs &&
+ data1->len_c_sub_dyn == data2->len_c_sub_dyn;
+}
+
+struct hw_db_tpe_idx hw_db_inline_tpe_add(struct flow_nic_dev *ndev, void *db_handle,
+ const struct hw_db_inline_tpe_data *data)
+{
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+ struct hw_db_tpe_idx idx = { .raw = 0 };
+ int found = 0;
+
+ idx.type = HW_DB_IDX_TYPE_TPE;
+
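+	/* Entry 0 is reserved; reuse a matching entry if one exists, otherwise
+	 * take the first unreferenced slot.
+	 */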
+ for (uint32_t i = 1; i < db->nb_tpe; ++i) {
+ int ref = db->tpe[i].ref;
+
+ if (ref > 0 && hw_db_inline_tpe_compare(data, &db->tpe[i].data)) {
+ idx.ids = i;
+ hw_db_inline_tpe_ref(ndev, db, idx);
+ return idx;
+ }
+
+ if (!found && ref <= 0) {
+ found = 1;
+ idx.ids = i;
+ }
+ }
+
+ if (!found) {
+ idx.error = 1;
+ return idx;
+ }
+
+ db->tpe[idx.ids].ref = 1;
+ memcpy(&db->tpe[idx.ids].data, data, sizeof(struct hw_db_inline_tpe_data));
+
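+	/* When a tunnel header is to be inserted, the RPP recipe expands the
+	 * packet by insert_len bytes and the INS/RPL recipes describe the
+	 * insertion point and length; the inserted bytes themselves come from
+	 * the replace RAM programmed by hw_db_inline_tpe_ext_add().
+	 */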
+ if (data->insert_len > 0) {
+ hw_mod_tpe_rpp_rcp_set(&ndev->be, HW_TPE_RPP_RCP_EXP, idx.ids, data->insert_len);
+ hw_mod_tpe_rpp_rcp_flush(&ndev->be, idx.ids, 1);
+
+ hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_DYN, idx.ids, 1);
+ hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_OFS, idx.ids, 0);
+ hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_LEN, idx.ids, data->insert_len);
+ hw_mod_tpe_ins_rcp_flush(&ndev->be, idx.ids, 1);
+
+ hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_DYN, idx.ids, 1);
+ hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_OFS, idx.ids, 0);
+ hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_LEN, idx.ids, data->insert_len);
+ hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_RPL_PTR, idx.ids, 0);
+ hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_EXT_PRIO, idx.ids, 1);
+ hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_ETH_TYPE_WR, idx.ids,
+ data->calc_eth_type_from_inner_ip);
+ hw_mod_tpe_rpl_rcp_flush(&ndev->be, idx.ids, 1);
+ }
+
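+	/* Program one CPY recipe per copy writer and clear the recipes of
+	 * unused writers; writer i uses index idx.ids + db->nb_tpe * i in its
+	 * own bank of the CPY table.
+	 */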
+ for (uint32_t i = 0; i < 6; ++i) {
+ if (data->writer[i].en) {
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_READER_SELECT,
+ idx.ids + db->nb_tpe * i,
+ data->writer[i].reader_select);
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_DYN,
+ idx.ids + db->nb_tpe * i, data->writer[i].dyn);
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_OFS,
+ idx.ids + db->nb_tpe * i, data->writer[i].ofs);
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_LEN,
+ idx.ids + db->nb_tpe * i, data->writer[i].len);
+
+ } else {
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_READER_SELECT,
+ idx.ids + db->nb_tpe * i, 0);
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_DYN,
+ idx.ids + db->nb_tpe * i, 0);
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_OFS,
+ idx.ids + db->nb_tpe * i, 0);
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_LEN,
+ idx.ids + db->nb_tpe * i, 0);
+ }
+
+ hw_mod_tpe_cpy_rcp_flush(&ndev->be, idx.ids + db->nb_tpe * i, 1);
+ }
+
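+	/* HFU recipe: rewrite up to three length fields (A/B/C) and the
+	 * TTL/hop limit as described by tpe_data.
+	 */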
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_WR, idx.ids, data->len_a_en);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN, idx.ids,
+ data->new_outer);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_POS_DYN, idx.ids,
+ data->len_a_pos_dyn);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_POS_OFS, idx.ids,
+ data->len_a_pos_ofs);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_ADD_DYN, idx.ids,
+ data->len_a_add_dyn);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_ADD_OFS, idx.ids,
+ data->len_a_add_ofs);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_SUB_DYN, idx.ids,
+ data->len_a_sub_dyn);
+
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_WR, idx.ids, data->len_b_en);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_POS_DYN, idx.ids,
+ data->len_b_pos_dyn);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_POS_OFS, idx.ids,
+ data->len_b_pos_ofs);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_ADD_DYN, idx.ids,
+ data->len_b_add_dyn);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_ADD_OFS, idx.ids,
+ data->len_b_add_ofs);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_SUB_DYN, idx.ids,
+ data->len_b_sub_dyn);
+
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_WR, idx.ids, data->len_c_en);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_POS_DYN, idx.ids,
+ data->len_c_pos_dyn);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_POS_OFS, idx.ids,
+ data->len_c_pos_ofs);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_ADD_DYN, idx.ids,
+ data->len_c_add_dyn);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_ADD_OFS, idx.ids,
+ data->len_c_add_ofs);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_SUB_DYN, idx.ids,
+ data->len_c_sub_dyn);
+
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_WR, idx.ids, data->ttl_en);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_POS_DYN, idx.ids, data->ttl_dyn);
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_POS_OFS, idx.ids, data->ttl_ofs);
+ hw_mod_tpe_hfu_rcp_flush(&ndev->be, idx.ids, 1);
+
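+	/* CSU recipe for the outer and inner L3/L4 checksums; command value 3
+	 * is assumed here to select checksum recalculation.
+	 */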
+ hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_OUTER_L3_CMD, idx.ids, 3);
+ hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_OUTER_L4_CMD, idx.ids, 3);
+ hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_INNER_L3_CMD, idx.ids, 3);
+ hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_INNER_L4_CMD, idx.ids, 3);
+ hw_mod_tpe_csu_rcp_flush(&ndev->be, idx.ids, 1);
+
+ return idx;
+}
+
+void hw_db_inline_tpe_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx)
+{
+ (void)ndev;
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+ if (!idx.error)
+ db->tpe[idx.ids].ref += 1;
+}
+
+void hw_db_inline_tpe_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx)
+{
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+ if (idx.error)
+ return;
+
+ db->tpe[idx.ids].ref -= 1;
+
+ if (db->tpe[idx.ids].ref <= 0) {
+ for (uint32_t i = 0; i < 6; ++i) {
+ hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_PRESET_ALL,
+ idx.ids + db->nb_tpe * i, 0);
+ hw_mod_tpe_cpy_rcp_flush(&ndev->be, idx.ids + db->nb_tpe * i, 1);
+ }
+
+ hw_mod_tpe_rpp_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+ hw_mod_tpe_rpp_rcp_flush(&ndev->be, idx.ids, 1);
+
+ hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+ hw_mod_tpe_ins_rcp_flush(&ndev->be, idx.ids, 1);
+
+ hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+ hw_mod_tpe_rpl_rcp_flush(&ndev->be, idx.ids, 1);
+
+ hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+ hw_mod_tpe_hfu_rcp_flush(&ndev->be, idx.ids, 1);
+
+ hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+ hw_mod_tpe_csu_rcp_flush(&ndev->be, idx.ids, 1);
+
+ memset(&db->tpe[idx.ids].data, 0x0, sizeof(struct hw_db_inline_tpe_data));
+ db->tpe[idx.ids].ref = 0;
+ }
+}
+
+/******************************************************************************/
+/* TPE_EXT */
+/******************************************************************************/
+
+static int hw_db_inline_tpe_ext_compare(const struct hw_db_inline_tpe_ext_data *data1,
+ const struct hw_db_inline_tpe_ext_data *data2)
+{
+ return data1->size == data2->size &&
+ memcmp(data1->hdr8, data2->hdr8, HW_DB_INLINE_MAX_ENCAP_SIZE) == 0;
+}
+
+struct hw_db_tpe_ext_idx hw_db_inline_tpe_ext_add(struct flow_nic_dev *ndev, void *db_handle,
+ const struct hw_db_inline_tpe_ext_data *data)
+{
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+ struct hw_db_tpe_ext_idx idx = { .raw = 0 };
+ int rpl_rpl_length = ((int)data->size + 15) / 16;
+ int found = 0, rpl_rpl_index = 0;
+
+ idx.type = HW_DB_IDX_TYPE_TPE_EXT;
+
+ if (data->size > HW_DB_INLINE_MAX_ENCAP_SIZE) {
+ idx.error = 1;
+ return idx;
+ }
+
+ for (uint32_t i = 1; i < db->nb_tpe_ext; ++i) {
+ int ref = db->tpe_ext[i].ref;
+
+ if (ref > 0 && hw_db_inline_tpe_ext_compare(data, &db->tpe_ext[i].data)) {
+ idx.ids = i;
+ hw_db_inline_tpe_ext_ref(ndev, db, idx);
+ return idx;
+ }
+
+ if (!found && ref <= 0) {
+ found = 1;
+ idx.ids = i;
+ }
+ }
+
+ if (!found) {
+ idx.error = 1;
+ return idx;
+ }
+
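+	/* The replace RAM stores data in 16-byte lines; allocate enough
+	 * consecutive lines to hold the whole encapsulation header.
+	 */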
+ rpl_rpl_index = flow_nic_alloc_resource_config(ndev, RES_TPE_RPL, rpl_rpl_length, 1);
+
+ if (rpl_rpl_index < 0) {
+ idx.error = 1;
+ return idx;
+ }
+
+ db->tpe_ext[idx.ids].ref = 1;
+ db->tpe_ext[idx.ids].replace_ram_idx = rpl_rpl_index;
+ memcpy(&db->tpe_ext[idx.ids].data, data, sizeof(struct hw_db_inline_tpe_ext_data));
+
+ hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_RPL_EXT_RPL_PTR, idx.ids, rpl_rpl_index);
+ hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_RPL_EXT_META_RPL_LEN, idx.ids, data->size);
+ hw_mod_tpe_rpl_ext_flush(&ndev->be, idx.ids, 1);
+
+ for (int i = 0; i < rpl_rpl_length; ++i) {
+ uint32_t rpl_data[4];
+ memcpy(rpl_data, data->hdr32 + i * 4, sizeof(rpl_data));
+ hw_mod_tpe_rpl_rpl_set(&ndev->be, HW_TPE_RPL_RPL_VALUE, rpl_rpl_index + i,
+ rpl_data);
+ }
+
+ hw_mod_tpe_rpl_rpl_flush(&ndev->be, rpl_rpl_index, rpl_rpl_length);
+
+ return idx;
+}
+
+void hw_db_inline_tpe_ext_ref(struct flow_nic_dev *ndev, void *db_handle,
+ struct hw_db_tpe_ext_idx idx)
+{
+ (void)ndev;
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+ if (!idx.error)
+ db->tpe_ext[idx.ids].ref += 1;
+}
+
+void hw_db_inline_tpe_ext_deref(struct flow_nic_dev *ndev, void *db_handle,
+ struct hw_db_tpe_ext_idx idx)
+{
+ struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+ if (idx.error)
+ return;
+
+ db->tpe_ext[idx.ids].ref -= 1;
+
+ if (db->tpe_ext[idx.ids].ref <= 0) {
+ const int rpl_rpl_length = ((int)db->tpe_ext[idx.ids].data.size + 15) / 16;
+ const int rpl_rpl_index = db->tpe_ext[idx.ids].replace_ram_idx;
+
+ hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+ hw_mod_tpe_rpl_ext_flush(&ndev->be, idx.ids, 1);
+
+ for (int i = 0; i < rpl_rpl_length; ++i) {
+ uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+ hw_mod_tpe_rpl_rpl_set(&ndev->be, HW_TPE_RPL_RPL_VALUE, rpl_rpl_index + i,
+ rpl_zero);
+ flow_nic_free_resource(ndev, RES_TPE_RPL, rpl_rpl_index + i);
+ }
+
+ hw_mod_tpe_rpl_rpl_flush(&ndev->be, rpl_rpl_index, rpl_rpl_length);
+
+ memset(&db->tpe_ext[idx.ids].data, 0x0, sizeof(struct hw_db_inline_tpe_ext_data));
+ db->tpe_ext[idx.ids].ref = 0;
+ }
+}
+
+
/******************************************************************************/
/* CAT */
/******************************************************************************/
@@ -52,6 +52,60 @@ struct hw_db_slc_lr_idx {
HW_DB_IDX;
};
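+/* De-duplication key for a TPE recipe: tunnel-header insertion parameters,
+ * up to six field-copy writers and the HFU length/TTL updates.
+ */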
+struct hw_db_inline_tpe_data {
+ uint32_t insert_len : 16;
+ uint32_t new_outer : 1;
+ uint32_t calc_eth_type_from_inner_ip : 1;
+ uint32_t ttl_en : 1;
+ uint32_t ttl_dyn : 5;
+ uint32_t ttl_ofs : 8;
+
+ struct {
+ uint32_t en : 1;
+ uint32_t reader_select : 3;
+ uint32_t dyn : 5;
+ uint32_t ofs : 14;
+ uint32_t len : 5;
+ uint32_t padding : 4;
+ } writer[6];
+
+ uint32_t len_a_en : 1;
+ uint32_t len_a_pos_dyn : 5;
+ uint32_t len_a_pos_ofs : 8;
+ uint32_t len_a_add_dyn : 5;
+ uint32_t len_a_add_ofs : 8;
+ uint32_t len_a_sub_dyn : 5;
+
+ uint32_t len_b_en : 1;
+ uint32_t len_b_pos_dyn : 5;
+ uint32_t len_b_pos_ofs : 8;
+ uint32_t len_b_add_dyn : 5;
+ uint32_t len_b_add_ofs : 8;
+ uint32_t len_b_sub_dyn : 5;
+
+ uint32_t len_c_en : 1;
+ uint32_t len_c_pos_dyn : 5;
+ uint32_t len_c_pos_ofs : 8;
+ uint32_t len_c_add_dyn : 5;
+ uint32_t len_c_add_ofs : 8;
+ uint32_t len_c_sub_dyn : 5;
+};
+
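+/* Raw encapsulation header bytes written to the TPE replace RAM. */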
+struct hw_db_inline_tpe_ext_data {
+ uint32_t size;
+ union {
+ uint8_t hdr8[HW_DB_INLINE_MAX_ENCAP_SIZE];
+ uint32_t hdr32[(HW_DB_INLINE_MAX_ENCAP_SIZE + 3) / 4];
+ };
+};
+
+struct hw_db_tpe_idx {
+ HW_DB_IDX;
+};
+
+struct hw_db_tpe_ext_idx {
+ HW_DB_IDX;
+};
+
struct hw_db_km_idx {
HW_DB_IDX;
};
@@ -70,6 +124,9 @@ enum hw_db_idx_type {
HW_DB_IDX_TYPE_CAT,
HW_DB_IDX_TYPE_QSL,
HW_DB_IDX_TYPE_SLC_LR,
+ HW_DB_IDX_TYPE_TPE,
+ HW_DB_IDX_TYPE_TPE_EXT,
+
HW_DB_IDX_TYPE_KM_RCP,
HW_DB_IDX_TYPE_KM_FT,
HW_DB_IDX_TYPE_HSH,
@@ -138,6 +195,7 @@ struct hw_db_inline_action_set_data {
struct {
struct hw_db_cot_idx cot;
struct hw_db_qsl_idx qsl;
+ struct hw_db_tpe_idx tpe;
struct hw_db_hsh_idx hsh;
};
};
@@ -181,6 +239,18 @@ void hw_db_inline_slc_lr_ref(struct flow_nic_dev *ndev, void *db_handle,
void hw_db_inline_slc_lr_deref(struct flow_nic_dev *ndev, void *db_handle,
struct hw_db_slc_lr_idx idx);
+struct hw_db_tpe_idx hw_db_inline_tpe_add(struct flow_nic_dev *ndev, void *db_handle,
+ const struct hw_db_inline_tpe_data *data);
+void hw_db_inline_tpe_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx);
+void hw_db_inline_tpe_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx);
+
+struct hw_db_tpe_ext_idx hw_db_inline_tpe_ext_add(struct flow_nic_dev *ndev, void *db_handle,
+ const struct hw_db_inline_tpe_ext_data *data);
+void hw_db_inline_tpe_ext_ref(struct flow_nic_dev *ndev, void *db_handle,
+ struct hw_db_tpe_ext_idx idx);
+void hw_db_inline_tpe_ext_deref(struct flow_nic_dev *ndev, void *db_handle,
+ struct hw_db_tpe_ext_idx idx);
+
struct hw_db_hsh_idx hw_db_inline_hsh_add(struct flow_nic_dev *ndev, void *db_handle,
const struct hw_db_inline_hsh_data *data);
void hw_db_inline_hsh_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx);
@@ -18,6 +18,8 @@
#include "ntnic_mod_reg.h"
#include <rte_common.h>
+#define NT_FLM_MISS_FLOW_TYPE 0
+#define NT_FLM_UNHANDLED_FLOW_TYPE 1
#define NT_FLM_OP_UNLEARN 0
#define NT_FLM_OP_LEARN 1
@@ -2419,6 +2421,92 @@ static int setup_flow_flm_actions(struct flow_eth_dev *dev,
}
}
+ /* Setup TPE EXT */
+ if (fd->tun_hdr.len > 0) {
+ assert(fd->tun_hdr.len <= HW_DB_INLINE_MAX_ENCAP_SIZE);
+
+ struct hw_db_inline_tpe_ext_data tpe_ext_data = {
+ .size = fd->tun_hdr.len,
+ };
+
+ memset(tpe_ext_data.hdr8, 0x0, HW_DB_INLINE_MAX_ENCAP_SIZE);
+ memcpy(tpe_ext_data.hdr8, fd->tun_hdr.d.hdr8, (fd->tun_hdr.len + 15) & ~15);
+
+ struct hw_db_tpe_ext_idx tpe_ext_idx =
+ hw_db_inline_tpe_ext_add(dev->ndev, dev->ndev->hw_db_handle,
+ &tpe_ext_data);
+ local_idxs[(*local_idx_counter)++] = tpe_ext_idx.raw;
+
+ if (tpe_ext_idx.error) {
+ NT_LOG(ERR, FILTER, "Could not reference TPE EXT resource");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return -1;
+ }
+
+ if (flm_rpl_ext_ptr)
+ *flm_rpl_ext_ptr = tpe_ext_idx.ids;
+ }
+
+ /* Setup TPE */
+ assert(fd->modify_field_count <= 6);
+
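+	/* ttl_ofs points at the IPv4 TTL (byte 8) or IPv6 hop limit (byte 7)
+	 * of the L3 header selected by ttl_dyn (outer or inner/tunnel).
+	 */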
+ struct hw_db_inline_tpe_data tpe_data = {
+ .insert_len = fd->tun_hdr.len,
+ .new_outer = fd->tun_hdr.new_outer,
+ .calc_eth_type_from_inner_ip =
+ !fd->tun_hdr.new_outer && fd->header_strip_end_dyn == DYN_TUN_L3,
+ .ttl_en = fd->ttl_sub_enable,
+ .ttl_dyn = fd->ttl_sub_outer ? DYN_L3 : DYN_TUN_L3,
+ .ttl_ofs = fd->ttl_sub_ipv4 ? 8 : 7,
+ };
+
+ for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+ tpe_data.writer[i].en = 1;
+ tpe_data.writer[i].reader_select = fd->modify_field[i].select;
+ tpe_data.writer[i].dyn = fd->modify_field[i].dyn;
+ tpe_data.writer[i].ofs = fd->modify_field[i].ofs;
+ tpe_data.writer[i].len = fd->modify_field[i].len;
+ }
+
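+	/* When a new outer header is pushed, length fields A, B and C rewrite
+	 * the outer L4 (UDP) length, the outer L3 length (IPv4 total length at
+	 * offset 2, IPv6 payload length at offset 4) and the GTP-U length,
+	 * compensating for the 4-byte FCS and, for GTP-U, the 8-byte mandatory
+	 * GTP header.
+	 */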
+ if (fd->tun_hdr.new_outer) {
+ const int fcs_length = 4;
+
+ /* L4 length */
+ tpe_data.len_a_en = 1;
+ tpe_data.len_a_pos_dyn = DYN_L4;
+ tpe_data.len_a_pos_ofs = 4;
+ tpe_data.len_a_add_dyn = 18;
+ tpe_data.len_a_add_ofs = (uint32_t)(-fcs_length) & 0xff;
+ tpe_data.len_a_sub_dyn = DYN_L4;
+
+ /* L3 length */
+ tpe_data.len_b_en = 1;
+ tpe_data.len_b_pos_dyn = DYN_L3;
+ tpe_data.len_b_pos_ofs = fd->tun_hdr.ip_version == 4 ? 2 : 4;
+ tpe_data.len_b_add_dyn = 18;
+ tpe_data.len_b_add_ofs = (uint32_t)(-fcs_length) & 0xff;
+ tpe_data.len_b_sub_dyn = DYN_L3;
+
+ /* GTP length */
+ tpe_data.len_c_en = 1;
+ tpe_data.len_c_pos_dyn = DYN_L4_PAYLOAD;
+ tpe_data.len_c_pos_ofs = 2;
+ tpe_data.len_c_add_dyn = 18;
+ tpe_data.len_c_add_ofs = (uint32_t)(-8 - fcs_length) & 0xff;
+ tpe_data.len_c_sub_dyn = DYN_L4_PAYLOAD;
+ }
+
+ struct hw_db_tpe_idx tpe_idx =
+ hw_db_inline_tpe_add(dev->ndev, dev->ndev->hw_db_handle, &tpe_data);
+
+ local_idxs[(*local_idx_counter)++] = tpe_idx.raw;
+
+ if (tpe_idx.error) {
+ NT_LOG(ERR, FILTER, "Could not reference TPE resource");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return -1;
+ }
+
return 0;
}
@@ -2539,6 +2627,30 @@ static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
goto error_out;
}
+
+ /* Setup TPE */
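+			/* Flows handled without FLM only need a TPE recipe when
+			 * TTL/hop limit decrement is requested.
+			 */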
+ if (fd->ttl_sub_enable) {
+ struct hw_db_inline_tpe_data tpe_data = {
+ .insert_len = fd->tun_hdr.len,
+ .new_outer = fd->tun_hdr.new_outer,
+ .calc_eth_type_from_inner_ip = !fd->tun_hdr.new_outer &&
+ fd->header_strip_end_dyn == DYN_TUN_L3,
+ .ttl_en = fd->ttl_sub_enable,
+ .ttl_dyn = fd->ttl_sub_outer ? DYN_L3 : DYN_TUN_L3,
+ .ttl_ofs = fd->ttl_sub_ipv4 ? 8 : 7,
+ };
+ struct hw_db_tpe_idx tpe_idx =
+ hw_db_inline_tpe_add(dev->ndev, dev->ndev->hw_db_handle,
+ &tpe_data);
+ fh->db_idxs[fh->db_idx_counter++] = tpe_idx.raw;
+ action_set_data.tpe = tpe_idx;
+
+ if (tpe_idx.error) {
+ NT_LOG(ERR, FILTER, "Could not reference TPE resource");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ goto error_out;
+ }
+ }
}
/* Setup CAT */
@@ -2843,6 +2955,16 @@ int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
if (!ndev->flow_mgnt_prepared) {
/* Check static arrays are big enough */
assert(ndev->be.tpe.nb_cpy_writers <= MAX_CPY_WRITERS_SUPPORTED);
+ /* KM Flow Type 0 is reserved */
+ flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+ flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+ /* Reserved FLM Flow Types */
+ flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, NT_FLM_MISS_FLOW_TYPE);
+ flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, NT_FLM_UNHANDLED_FLOW_TYPE);
+ flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE,
+ NT_FLM_VIOLATING_MBR_FLOW_TYPE);
+ flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
/* COT is locked to CFN. Don't set color for CFN 0 */
hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
@@ -2868,8 +2990,11 @@ int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
- /* SLC LR index 0 is reserved */
+	/* SLC LR & TPE index 0 are reserved */
flow_nic_mark_resource_used(ndev, RES_SLC_LR_RCP, 0);
+ flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+ flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+ flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0
*/