@@ -654,6 +654,85 @@ struct tpe_func_s {
struct hw_mod_tpe_v3_s v3;
};
};
+enum hw_tpe_e {
+	/* Function selectors (not register fields): whole-module preset/find/compare ops */
+	HW_TPE_PRESET_ALL = 0,
+	HW_TPE_FIND,
+	HW_TPE_COMPARE,
+	/* Control fields: register field indices, starting at FIELD_START_INDEX */
+	HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+	HW_TPE_IFR_RCP_IPV4_EN,
+	HW_TPE_IFR_RCP_IPV4_DF_DROP,
+	HW_TPE_IFR_RCP_IPV6_EN,
+	HW_TPE_IFR_RCP_IPV6_DROP,
+	HW_TPE_IFR_RCP_MTU,
+	HW_TPE_INS_RCP_DYN,
+	HW_TPE_INS_RCP_OFS,
+	HW_TPE_INS_RCP_LEN,
+	HW_TPE_RPL_RCP_DYN,
+	HW_TPE_RPL_RCP_OFS,
+	HW_TPE_RPL_RCP_LEN,
+	HW_TPE_RPL_RCP_RPL_PTR,
+	HW_TPE_RPL_RCP_EXT_PRIO,
+	HW_TPE_RPL_RCP_ETH_TYPE_WR,
+	HW_TPE_RPL_EXT_RPL_PTR,
+	HW_TPE_RPL_EXT_META_RPL_LEN,	/* SW only */
+	HW_TPE_RPL_RPL_VALUE,
+	HW_TPE_CPY_RCP_READER_SELECT,
+	HW_TPE_CPY_RCP_DYN,
+	HW_TPE_CPY_RCP_OFS,
+	HW_TPE_CPY_RCP_LEN,
+	HW_TPE_HFU_RCP_LEN_A_WR,
+	HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+	HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_B_WR,
+	HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+	HW_TPE_HFU_RCP_LEN_C_WR,
+	HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+	HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+	HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+	HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+	HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+	HW_TPE_HFU_RCP_TTL_WR,
+	HW_TPE_HFU_RCP_TTL_POS_DYN,
+	HW_TPE_HFU_RCP_TTL_POS_OFS,
+	HW_TPE_CSU_RCP_OUTER_L3_CMD,
+	HW_TPE_CSU_RCP_OUTER_L4_CMD,
+	HW_TPE_CSU_RCP_INNER_L3_CMD,
+	HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
enum debug_mode_e {
FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
@@ -813,6 +892,7 @@ struct flow_api_backend_s {
struct qsl_func_s qsl;
struct slc_lr_func_s slc_lr;
struct pdb_func_s pdb;
+ struct tpe_func_s tpe;
/* NIC attributes */
unsigned int num_phy_ports;
@@ -56,6 +56,7 @@ sources = files(
'nthw/flow_api/hw_mod/hw_mod_pdb.c',
'nthw/flow_api/hw_mod/hw_mod_qsl.c',
'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+ 'nthw/flow_api/hw_mod/hw_mod_tpe.c',
'nthw/flow_filter/flow_nthw_cat.c',
'nthw/flow_filter/flow_nthw_csu.c',
'nthw/flow_filter/flow_nthw_flm.c',
@@ -319,6 +319,15 @@ struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_b
if (init_resource_elements(ndev, RES_FLM_RCP, ndev->be.flm.nb_categories))
goto err_exit;
+ if (init_resource_elements(ndev, RES_TPE_RCP, ndev->be.tpe.nb_rcp_categories))
+ goto err_exit;
+
+ if (init_resource_elements(ndev, RES_TPE_EXT, ndev->be.tpe.nb_rpl_ext_categories))
+ goto err_exit;
+
+ if (init_resource_elements(ndev, RES_TPE_RPL, ndev->be.tpe.nb_rpl_depth))
+ goto err_exit;
+
if (init_resource_elements(ndev, RES_SCRUB_RCP, ndev->be.flm.nb_scrub_profiles))
goto err_exit;
@@ -27,6 +27,7 @@ static const struct {
hw_mod_slc_lr_present
},
{ "PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset, hw_mod_pdb_present },
+ { "TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset, hw_mod_tpe_present },
};
#define MOD_COUNT (ARRAY_SIZE(module))
new file mode 100644
@@ -0,0 +1,277 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "hw_mod_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_tpe_present(be->be_dev);
+}
+
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+ _VER_ = be->iface->get_tpe_version(be->be_dev);
+ NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_), VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_tpe_categories(be->be_dev);
+
+ if (nb <= 0)
+ return COUNT_ERROR(tpe_categories);
+
+ be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+ nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+
+ if (nb <= 0)
+ return COUNT_ERROR(tpe_ifr_categories);
+
+ be->tpe.nb_ifr_categories = (uint32_t)nb;
+
+ nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+
+ if (nb <= 0)
+ return COUNT_ERROR(tx_cpy_writers);
+
+ be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+ nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+
+ if (nb <= 0)
+ return COUNT_ERROR(tx_rpl_depth);
+
+ be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+ nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+
+ if (nb <= 0)
+ return COUNT_ERROR(tx_rpl_ext_categories);
+
+ be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
+ switch (_VER_) {
+ case 3:
+ if (!callocate_mod((struct common_func_s *)&be->tpe, 10, &be->tpe.v3.rpp_rcp,
+ be->tpe.nb_rcp_categories, sizeof(struct tpe_v1_rpp_v0_rcp_s),
+ &be->tpe.v3.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+ sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s), &be->tpe.v3.ifr_rcp,
+ be->tpe.nb_ifr_categories, sizeof(struct tpe_v2_ifr_v1_rcp_s),
+
+ &be->tpe.v3.ins_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_ins_v1_rcp_s),
+
+ &be->tpe.v3.rpl_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v3_rpl_v4_rcp_s), &be->tpe.v3.rpl_ext,
+ be->tpe.nb_rpl_ext_categories,
+ sizeof(struct tpe_v1_rpl_v2_ext_s), &be->tpe.v3.rpl_rpl,
+ be->tpe.nb_rpl_depth, sizeof(struct tpe_v1_rpl_v2_rpl_s),
+
+ &be->tpe.v3.cpy_rcp,
+ be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_cpy_v1_rcp_s),
+
+ &be->tpe.v3.hfu_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_hfu_v1_rcp_s),
+
+ &be->tpe.v3.csu_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_csu_v0_rcp_s)))
+ return -1;
+
+ break;
+
+ default:
+ return UNSUP_VER;
+ }
+
+ return 0;
+}
+
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+ if (be->tpe.base) {
+ free(be->tpe.base);
+ be->tpe.base = NULL;
+ }
+}
+
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+	int err = 0;
+
+	/* Zero the entire TPE module cache, then flush every table so HW matches it */
+	zero_module_cache((struct common_func_s *)(&be->tpe));
+
+	NT_LOG(DBG, FILTER, "INIT TPE\n");
+	err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+	err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+
+	return err;
+}
+
+/*
+ * RPP_IFR_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_ifr_categories
+ */
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * RPP_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_rcp_categories
+ */
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * IFR_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_ifr_categories
+ */
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_ifr_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * INS_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_rcp_categories
+ */
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * RPL_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_rcp_categories
+ */
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * RPL_EXT - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_rpl_ext_categories
+ */
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_ext_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * RPL_RPL - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_rpl_depth
+ */
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rpl_depth;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * CPY_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_cpy_writers * nb_rcp_categories
+ */
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	const uint32_t cpy_size = be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+
+	if (count == ALL_ENTRIES)
+		count = cpy_size;
+
+	if ((unsigned int)(start_idx + count) > cpy_size)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * HFU_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_rcp_categories
+ */
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}
+
+/*
+ * CSU_RCP - flush cached entries [start_idx, start_idx + count) to HW; capacity nb_rcp_categories
+ */
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+	if (count == ALL_ENTRIES)
+		count = be->tpe.nb_rcp_categories;
+
+	if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+		return INDEX_TOO_LARGE;
+
+	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
+}