@@ -16,6 +16,7 @@
#include "hw_mod_pdb_v9.h"
#include "hw_mod_slc_lr_v2.h"
#include "hw_mod_hsh_v5.h"
+#include "hw_mod_tpe_v3.h"
#define MAX_PHYS_ADAPTERS 8
@@ -114,6 +115,18 @@ struct pdb_func_s {
};
};
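+/* TPE (TX Packet Editor): resource counts and versioned register shadow. */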
+struct tpe_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_rcp_categories;
+ uint32_t nb_ifr_categories;
+ uint32_t nb_cpy_writers;
+ uint32_t nb_rpl_depth;
+ uint32_t nb_rpl_ext_categories;
+ union {
+ struct hw_mod_tpe_v3_s v3;
+ };
+};
+
enum debug_mode_e {
FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
@@ -244,6 +257,20 @@ struct flow_api_backend_ops {
uint32_t (*get_pdb_version)(void *dev);
int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb, int category, int cnt);
int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+ /* TPE */
+ bool (*get_tpe_present)(void *dev);
+ uint32_t (*get_tpe_version)(void *dev);
+ int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
+ int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe, int index, int cnt);
};
struct flow_api_backend_s {
new file mode 100644
@@ -0,0 +1,126 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V3_H_
+#define _HW_MOD_TPE_V3_H_
+
+#include <stdint.h>
+
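+/*
+ * Software shadow structures for the TPE recipe (RCP) tables. Field names
+ * mirror the FPGA register fields written by the corresponding flush helpers.
+ */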
+struct tpe_v1_rpp_v0_rcp_s {
+ uint32_t exp;
+};
+
+struct tpe_v1_ins_v1_rcp_s {
+ uint32_t dyn;
+ uint32_t ofs;
+ uint32_t len;
+};
+
+struct tpe_v3_rpl_v4_rcp_s {
+ uint32_t dyn;
+ uint32_t ofs;
+ uint32_t len;
+ uint32_t rpl_ptr;
+ uint32_t ext_prio;
+ uint32_t eth_type_wr;
+};
+
+struct tpe_v1_rpl_v2_ext_s {
+ uint32_t rpl_ptr;
+ uint32_t meta_rpl_len; /* SW only */
+};
+
+struct tpe_v1_rpl_v2_rpl_s {
+ uint32_t value[4];
+};
+
+struct tpe_v1_cpy_v1_rcp_s {
+ uint32_t reader_select;
+ uint32_t dyn;
+ uint32_t ofs;
+ uint32_t len;
+};
+
+struct tpe_v1_hfu_v1_rcp_s {
+ uint32_t len_a_wr;
+ uint32_t len_a_outer_l4_len;
+ uint32_t len_a_pos_dyn;
+ uint32_t len_a_pos_ofs;
+ uint32_t len_a_add_dyn;
+ uint32_t len_a_add_ofs;
+ uint32_t len_a_sub_dyn;
+
+ uint32_t len_b_wr;
+ uint32_t len_b_pos_dyn;
+ uint32_t len_b_pos_ofs;
+ uint32_t len_b_add_dyn;
+ uint32_t len_b_add_ofs;
+ uint32_t len_b_sub_dyn;
+
+ uint32_t len_c_wr;
+ uint32_t len_c_pos_dyn;
+ uint32_t len_c_pos_ofs;
+ uint32_t len_c_add_dyn;
+ uint32_t len_c_add_ofs;
+ uint32_t len_c_sub_dyn;
+
+ uint32_t ttl_wr;
+ uint32_t ttl_pos_dyn;
+ uint32_t ttl_pos_ofs;
+
+ uint32_t cs_inf;
+ uint32_t l3_prt;
+ uint32_t l3_frag;
+ uint32_t tunnel;
+ uint32_t l4_prt;
+ uint32_t outer_l3_ofs;
+ uint32_t outer_l4_ofs;
+ uint32_t inner_l3_ofs;
+ uint32_t inner_l4_ofs;
+};
+
+struct tpe_v1_csu_v0_rcp_s {
+ uint32_t ol3_cmd;
+ uint32_t ol4_cmd;
+ uint32_t il3_cmd;
+ uint32_t il4_cmd;
+};
+
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+ uint32_t ipv4_en;
+ uint32_t ipv4_df_drop;
+ uint32_t ipv6_en;
+ uint32_t ipv6_drop;
+ uint32_t mtu;
+};
+
+struct tpe_v2_ifr_v1_rcp_s {
+ uint32_t ipv4_en;
+ uint32_t ipv4_df_drop;
+ uint32_t ipv6_en;
+ uint32_t ipv6_drop;
+ uint32_t mtu;
+};
+
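+/*
+ * Top-level TPE v3 shadow: one array per recipe table, assumed to be sized by
+ * the nb_* counts in struct tpe_func_s.
+ */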
+struct hw_mod_tpe_v3_s {
+ struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+ struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+ struct tpe_v3_rpl_v4_rcp_s *rpl_rcp;
+ struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+ struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+ struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+ struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+ struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+ struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+ struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V3_H_ */
@@ -1576,6 +1576,357 @@ static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
return 0;
}
+/*
+ * TPE
+ */
+
+static bool tpe_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL && be->p_rpp_lr_nthw != NULL &&
+ be->p_tx_cpy_nthw != NULL && be->p_tx_ins_nthw != NULL &&
+ be->p_tx_rpl_nthw != NULL;
+}
+
+static uint32_t tpe_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
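+	/*
+	 * Module versions are packed as (major << 16) | minor, so a value of
+	 * e.g. 2 below means major 0, minor 2.
+	 */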
+ const uint32_t csu_version =
+ (uint32_t)((nthw_module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+ (nthw_module_get_minor_version(be->p_csu_nthw->m_csu) & 0xffff));
+
+ const uint32_t hfu_version =
+ (uint32_t)((nthw_module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+ (nthw_module_get_minor_version(be->p_hfu_nthw->m_hfu) & 0xffff));
+
+ const uint32_t rpp_lr_version =
+ (uint32_t)((nthw_module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr) << 16) |
+ (nthw_module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) & 0xffff));
+
+ const uint32_t tx_cpy_version =
+ (uint32_t)((nthw_module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy) << 16) |
+ (nthw_module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) & 0xffff));
+
+ const uint32_t tx_ins_version =
+ (uint32_t)((nthw_module_get_major_version(be->p_tx_ins_nthw->m_tx_ins) << 16) |
+ (nthw_module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) & 0xffff));
+
+ const uint32_t tx_rpl_version =
+ (uint32_t)((nthw_module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl) << 16) |
+ (nthw_module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) & 0xffff));
+
+	/*
+	 * We have to support both the 9563-55-28 and 9563-55-30 FPGA images,
+	 * so accept either INS ver 0.1 with RPL ver 0.2, or INS ver 0.2 with RPL ver 0.4.
+	 */
+ if (csu_version == 0 && hfu_version == 2 && rpp_lr_version >= 1 && tx_cpy_version == 2 &&
+ ((tx_ins_version == 1 && tx_rpl_version == 2) ||
+ (tx_ins_version == 2 && tx_rpl_version == 4))) {
+ return 3;
+ }
+
+ if (csu_version == 0 && hfu_version == 2 && rpp_lr_version >= 1 && tx_cpy_version == 4 &&
+ ((tx_ins_version == 1 && tx_rpl_version == 2) ||
+ (tx_ins_version == 2 && tx_rpl_version == 4))) {
+ return 3;
+ }
+
+ assert(false);
+ return 0;
+}
+
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+ if (rpp_lr->ver >= 1) {
+ rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+ rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw, rpp_lr->v3.rpp_rcp[index + i].exp);
+ rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+ }
+ }
+
+ CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+ return 0;
+}
+
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr, int index, int cnt)
+{
+ int res = 0;
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
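+	/* IFR recipes in RPP_LR are only available from TPE v2 onwards. */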
+ if (rpp_lr->ver >= 2) {
+ rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+ rpp_lr_nthw_ifr_rcp_ipv4_en(be->p_rpp_lr_nthw,
+ rpp_lr->v3.rpp_ifr_rcp[index + i].ipv4_en);
+ rpp_lr_nthw_ifr_rcp_ipv4_df_drop(be->p_rpp_lr_nthw,
+ rpp_lr->v3.rpp_ifr_rcp[index + i]
+ .ipv4_df_drop);
+ rpp_lr_nthw_ifr_rcp_ipv6_en(be->p_rpp_lr_nthw,
+ rpp_lr->v3.rpp_ifr_rcp[index + i].ipv6_en);
+ rpp_lr_nthw_ifr_rcp_ipv6_drop(be->p_rpp_lr_nthw,
+ rpp_lr->v3.rpp_ifr_rcp[index + i].ipv6_drop);
+ rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+ rpp_lr->v3.rpp_ifr_rcp[index + i].mtu);
+ rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+ }
+
+ } else {
+ res = -1;
+ }
+
+ CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+ return res;
+}
+
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr, int index, int cnt)
+{
+ int res = 0;
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+ if (ifr->ver >= 2) {
+ ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+ ifr_nthw_rcp_ipv4_en(be->p_ifr_nthw, ifr->v3.ifr_rcp[index + i].ipv4_en);
+ ifr_nthw_rcp_ipv4_df_drop(be->p_ifr_nthw,
+ ifr->v3.ifr_rcp[index + i].ipv4_df_drop);
+ ifr_nthw_rcp_ipv6_en(be->p_ifr_nthw, ifr->v3.ifr_rcp[index + i].ipv6_en);
+ ifr_nthw_rcp_ipv6_drop(be->p_ifr_nthw,
+ ifr->v3.ifr_rcp[index + i].ipv6_drop);
+ ifr_nthw_rcp_mtu(be->p_ifr_nthw, ifr->v3.ifr_rcp[index + i].mtu);
+ ifr_nthw_rcp_flush(be->p_ifr_nthw);
+ }
+
+ } else {
+ res = -1;
+ }
+
+ CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+ return res;
+}
+
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+ if (tx_ins->ver >= 1) {
+ tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+ tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw, tx_ins->v3.ins_rcp[index + i].dyn);
+ tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw, tx_ins->v3.ins_rcp[index + i].ofs);
+ tx_ins_nthw_rcp_len(be->p_tx_ins_nthw, tx_ins->v3.ins_rcp[index + i].len);
+ tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+ }
+ }
+
+ CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+ return 0;
+}
+
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+ if (tx_rpl->ver >= 1) {
+ tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+ tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw, tx_rpl->v3.rpl_rcp[index + i].dyn);
+ tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw, tx_rpl->v3.rpl_rcp[index + i].ofs);
+ tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw, tx_rpl->v3.rpl_rcp[index + i].len);
+ tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+ tx_rpl->v3.rpl_rcp[index + i].rpl_ptr);
+ tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+ tx_rpl->v3.rpl_rcp[index + i].ext_prio);
+
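+			/* The ETH_TYPE_WR field only exists in the v3 (RPL v0.4) recipe layout. */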
+ if (tx_rpl->ver >= 3) {
+ tx_rpl_nthw_rcp_eth_type_wr(be->p_tx_rpl_nthw,
+ tx_rpl->v3.rpl_rcp[index + i]
+ .eth_type_wr);
+ }
+
+ tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+ }
+ }
+
+ CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+ return 0;
+}
+
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+ if (tx_rpl->ver >= 1) {
+ tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+ tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+ tx_rpl->v3.rpl_ext[index + i].rpl_ptr);
+ tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+ }
+ }
+
+ CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+ return 0;
+}
+
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+ if (tx_rpl->ver >= 1) {
+ tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+ tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+ tx_rpl->v3.rpl_rpl[index + i].value);
+ tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+ }
+ }
+
+ CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+ return 0;
+}
+
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
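+	/* Sentinel: (unsigned)-1 forces a writer (re)select on the first iteration. */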
+ unsigned int wr_index = -1;
+
+ CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+ if (tx_cpy->ver >= 1) {
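+		/*
+		 * CPY recipes are banked per writer: entry (index + i) addresses
+		 * writer (index + i) / nb_rcp_categories, recipe slot
+		 * (index + i) % nb_rcp_categories within that writer.
+		 */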
+ for (int i = 0; i < cnt; i++) {
+ if (wr_index != (index + i) / tx_cpy->nb_rcp_categories) {
+ wr_index = (index + i) / tx_cpy->nb_rcp_categories;
+ tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index, 1);
+ }
+
+ tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+ (index + i) % tx_cpy->nb_rcp_categories);
+ tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v3.cpy_rcp[index + i]
+ .reader_select);
+ tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v3.cpy_rcp[index + i].dyn);
+ tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v3.cpy_rcp[index + i].ofs);
+ tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v3.cpy_rcp[index + i].len);
+ tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+ }
+ }
+
+ CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+ return 0;
+}
+
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+ if (hfu->ver >= 1) {
+ hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+ hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].len_a_wr);
+ hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_a_outer_l4_len);
+ hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_a_pos_dyn);
+ hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_a_pos_ofs);
+ hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_a_add_dyn);
+ hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_a_add_ofs);
+ hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_a_sub_dyn);
+ hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].len_b_wr);
+ hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_b_pos_dyn);
+ hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_b_pos_ofs);
+ hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_b_add_dyn);
+ hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_b_add_ofs);
+ hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_b_sub_dyn);
+ hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].len_c_wr);
+ hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_c_pos_dyn);
+ hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_c_pos_ofs);
+ hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_c_add_dyn);
+ hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_c_add_ofs);
+ hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].len_c_sub_dyn);
+ hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].ttl_wr);
+ hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].ttl_pos_dyn);
+ hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+ hfu->v3.hfu_rcp[index + i].ttl_pos_ofs);
+ hfu_nthw_rcp_flush(be->p_hfu_nthw);
+ }
+ }
+
+ CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+ return 0;
+}
+
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+ if (csu->ver >= 1) {
+ csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+
+ for (int i = 0; i < cnt; i++) {
+ csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+ csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+ csu->v3.csu_rcp[index + i].ol3_cmd);
+ csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+ csu->v3.csu_rcp[index + i].ol4_cmd);
+ csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+ csu->v3.csu_rcp[index + i].il3_cmd);
+ csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+ csu->v3.csu_rcp[index + i].il4_cmd);
+ csu_nthw_rcp_flush(be->p_csu_nthw);
+ }
+ }
+
+ CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+ return 0;
+}
+
/*
* DBS
*/
@@ -1705,6 +2056,19 @@ const struct flow_api_backend_ops flow_be_iface = {
pdb_get_version,
pdb_rcp_flush,
pdb_config_flush,
+
+ tpe_get_present,
+ tpe_get_version,
+ tpe_rpp_rcp_flush,
+ tpe_rpp_ifr_rcp_flush,
+ tpe_ifr_rcp_flush,
+ tpe_ins_rcp_flush,
+ tpe_rpl_rcp_flush,
+ tpe_rpl_ext_flush,
+ tpe_rpl_rpl_flush,
+ tpe_cpy_rcp_flush,
+ tpe_hfu_rcp_flush,
+ tpe_csu_rcp_flush,
};
const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
@@ -12,6 +12,11 @@
#include "flow_nthw_csu.h"
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode)
+{
+ nthw_module_set_debug_mode(p->m_csu, n_debug_mode);
+}
+
struct csu_nthw *csu_nthw_new(void)
{
struct csu_nthw *p = malloc(sizeof(struct csu_nthw));
@@ -60,3 +65,77 @@ int csu_nthw_init(struct csu_nthw *p, nthw_fpga_t *p_fpga, int n_instance)
return 0;
}
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_ctrl_adr, val);
+}
+
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+ /*
+ * Select L3 calc method for outer layer3.
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ */
+ nthw_field_set_val32(p->mp_rcp_data_ol3_cmd, val);
+}
+
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+ /*
+ * Select L4 calc method for outer layer4.
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+ * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP,
+	 *    otherwise GOOD checksum.
+ */
+ nthw_field_set_val32(p->mp_rcp_data_ol4_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val)
+{
+ /*
+ * Select L3 calc method for inner layer3 (tunneled).
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ */
+ nthw_field_set_val32(p->mp_rcp_data_il3_cmd, val);
+}
+
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val)
+{
+ /*
+ * Select L4 calc method for inner layer4 (tunneled).
+ * 0: Do not touch checksum field.
+ * 1: Check, but do not touch checksum field.
+ * 2: Insert checksum header value for BAD checksum.
+ * 3: Insert checksum header value for GOOD checksum.
+ * 4: Set UDP checksum value of ZERO for both IPv4/IPv6, set good checksum for TCP.
+ * 5: Set UDP checksum value of ZERO for IPv4, set good checksum for TCP.
+	 * 6: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4/IPv6 and UDP,
+	 *    otherwise GOOD checksum.
+	 * 7: Set UDP checksum value of ZERO for outer tunnel when tunnel is IPv4 and UDP,
+	 *    otherwise GOOD checksum.
+ */
+ nthw_field_set_val32(p->mp_rcp_data_il4_cmd, val);
+}
+
+void csu_nthw_rcp_flush(const struct csu_nthw *p)
+{
+ nthw_register_flush(p->mp_rcp_ctrl, 1);
+ nthw_register_flush(p->mp_rcp_data, 1);
+}
@@ -31,5 +31,14 @@ void csu_nthw_delete(struct csu_nthw *p);
int csu_nthw_init(struct csu_nthw *p, nthw_fpga_t *p_fpga, int n_instance);
int csu_nthw_setup(struct csu_nthw *p, int n_idx, int n_idx_cnt);
+void csu_nthw_set_debug_mode(struct csu_nthw *p, unsigned int n_debug_mode);
+
+void csu_nthw_rcp_select(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_cnt(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_outer_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l3_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_inner_l4_cmd(const struct csu_nthw *p, uint32_t val);
+void csu_nthw_rcp_flush(const struct csu_nthw *p);
#endif /* _FLOW_NTHW_CSU_H_ */
@@ -12,6 +12,11 @@
#include "flow_nthw_hfu.h"
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode)
+{
+ nthw_module_set_debug_mode(p->m_hfu, n_debug_mode);
+}
+
struct hfu_nthw *hfu_nthw_new(void)
{
struct hfu_nthw *p = malloc(sizeof(struct hfu_nthw));
@@ -97,3 +102,129 @@ int hfu_nthw_init(struct hfu_nthw *p, nthw_fpga_t *p_fpga, int n_instance)
return 0;
}
+
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_addr, val);
+}
+
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_a_wr, val);
+}
+
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_a_ol4len, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_a_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_a_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_a_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_a_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_a_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_b_wr, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_b_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_b_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_b_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_b_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_b_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_c_wr, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_c_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_c_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_c_add_dyn, val);
+}
+
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_c_add_ofs, val);
+}
+
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len_c_sub_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_ttl_wr, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_ttl_pos_dyn, val);
+}
+
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_ttl_pos_ofs, val);
+}
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p)
+{
+ nthw_register_flush(p->mp_rcp_ctrl, 1);
+ nthw_register_flush(p->mp_rcp_data, 1);
+}
@@ -50,5 +50,35 @@ void hfu_nthw_delete(struct hfu_nthw *p);
int hfu_nthw_init(struct hfu_nthw *p, nthw_fpga_t *p_fpga, int n_instance);
int hfu_nthw_setup(struct hfu_nthw *p, int n_idx, int n_idx_cnt);
+void hfu_nthw_set_debug_mode(struct hfu_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void hfu_nthw_rcp_select(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_cnt(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_len_a_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_ol4len(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_a_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_b_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_add_ofs(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_len_c_sub_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_wr(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_dyn(const struct hfu_nthw *p, uint32_t val);
+void hfu_nthw_rcp_ttl_pos_ofs(const struct hfu_nthw *p, uint32_t val);
+
+void hfu_nthw_rcp_flush(const struct hfu_nthw *p);
#endif /* __FLOW_NTHW_HFU_H__ */
@@ -12,6 +12,11 @@
#include "flow_nthw_ifr.h"
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode)
+{
+ nthw_module_set_debug_mode(p->m_ifr, n_debug_mode);
+}
+
struct ifr_nthw *ifr_nthw_new(void)
{
struct ifr_nthw *p = malloc(sizeof(struct ifr_nthw));
@@ -66,3 +71,53 @@ int ifr_nthw_init(struct ifr_nthw *p, nthw_fpga_t *p_fpga, int n_instance)
return 0;
}
+
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val)
+{
+ assert(p->mp_rcp_addr);
+ nthw_field_set_val32(p->mp_rcp_addr, val);
+}
+
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val)
+{
+ assert(p->mp_rcp_cnt);
+ nthw_field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void ifr_nthw_rcp_ipv4_en(const struct ifr_nthw *p, uint32_t val)
+{
+ if (p->mp_rcp_data_ipv4_en)
+ nthw_field_set_val32(p->mp_rcp_data_ipv4_en, val);
+}
+
+void ifr_nthw_rcp_ipv4_df_drop(const struct ifr_nthw *p, uint32_t val)
+{
+ if (p->mp_rcp_data_ipv4_df_drop)
+ nthw_field_set_val32(p->mp_rcp_data_ipv4_df_drop, val);
+}
+
+void ifr_nthw_rcp_ipv6_en(const struct ifr_nthw *p, uint32_t val)
+{
+ if (p->mp_rcp_data_ipv6_en)
+ nthw_field_set_val32(p->mp_rcp_data_ipv6_en, val);
+}
+
+void ifr_nthw_rcp_ipv6_drop(const struct ifr_nthw *p, uint32_t val)
+{
+ if (p->mp_rcp_data_ipv6_drop)
+ nthw_field_set_val32(p->mp_rcp_data_ipv6_drop, val);
+}
+
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val)
+{
+ assert(p->mp_rcp_data_mtu);
+ nthw_field_set_val32(p->mp_rcp_data_mtu, val);
+}
+
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p)
+{
+ assert(p->mp_rcp_ctrl);
+ assert(p->mp_rcp_data);
+ nthw_register_flush(p->mp_rcp_ctrl, 1);
+ nthw_register_flush(p->mp_rcp_data, 1);
+}
@@ -39,5 +39,16 @@ struct ifr_nthw *ifr_nthw_new(void);
int ifr_nthw_init(struct ifr_nthw *p, nthw_fpga_t *p_fpga, int n_instance);
int ifr_nthw_setup(struct ifr_nthw *p, int n_idx, int n_idx_cnt);
+void ifr_nthw_set_debug_mode(struct ifr_nthw *p, unsigned int n_debug_mode);
+
+/* IFR */
+void ifr_nthw_rcp_select(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_cnt(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_ipv4_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_ipv4_df_drop(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_ipv6_en(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_ipv6_drop(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_mtu(const struct ifr_nthw *p, uint32_t val);
+void ifr_nthw_rcp_flush(const struct ifr_nthw *p);
#endif /* __FLOW_NTHW_IFR_H__ */
@@ -12,6 +12,11 @@
#include "flow_nthw_rpp_lr.h"
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode)
+{
+ nthw_module_set_debug_mode(p->m_rpp_lr, n_debug_mode);
+}
+
struct rpp_lr_nthw *rpp_lr_nthw_new(void)
{
struct rpp_lr_nthw *p = malloc(sizeof(struct rpp_lr_nthw));
@@ -74,3 +79,79 @@ int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nthw_fpga_t *p_fpga, int n_instance)
return 0;
}
+
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ assert(p->mp_rcp_addr);
+ nthw_field_set_val32(p->mp_rcp_addr, val);
+}
+
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ assert(p->mp_rcp_cnt);
+ nthw_field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ assert(p->mp_rcp_data_exp);
+ nthw_field_set_val32(p->mp_rcp_data_exp, val);
+}
+
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p)
+{
+ assert(p->mp_rcp_ctrl);
+ assert(p->mp_rcp_data);
+ nthw_register_flush(p->mp_rcp_ctrl, 1);
+ nthw_register_flush(p->mp_rcp_data, 1);
+}
+
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ assert(p->mp_ifr_rcp_addr);
+ nthw_field_set_val32(p->mp_ifr_rcp_addr, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ assert(p->mp_ifr_rcp_cnt);
+ nthw_field_set_val32(p->mp_ifr_rcp_cnt, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_ipv4_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ if (p->mp_ifr_rcp_data_ipv4_en)
+ nthw_field_set_val32(p->mp_ifr_rcp_data_ipv4_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_ipv4_df_drop(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ if (p->mp_ifr_rcp_data_ipv4_df_drop)
+ nthw_field_set_val32(p->mp_ifr_rcp_data_ipv4_df_drop, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_ipv6_en(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ if (p->mp_ifr_rcp_data_ipv6_en)
+ nthw_field_set_val32(p->mp_ifr_rcp_data_ipv6_en, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_ipv6_drop(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ if (p->mp_ifr_rcp_data_ipv6_drop)
+ nthw_field_set_val32(p->mp_ifr_rcp_data_ipv6_drop, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val)
+{
+ assert(p->mp_ifr_rcp_data_mtu);
+ nthw_field_set_val32(p->mp_ifr_rcp_data_mtu, val);
+}
+
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p)
+{
+ assert(p->mp_ifr_rcp_ctrl);
+ assert(p->mp_ifr_rcp_data);
+ nthw_register_flush(p->mp_ifr_rcp_ctrl, 1);
+ nthw_register_flush(p->mp_ifr_rcp_data, 1);
+}
@@ -40,5 +40,22 @@ void rpp_lr_nthw_delete(struct rpp_lr_nthw *p);
int rpp_lr_nthw_init(struct rpp_lr_nthw *p, nthw_fpga_t *p_fpga, int n_instance);
int rpp_lr_nthw_setup(struct rpp_lr_nthw *p, int n_idx, int n_idx_cnt);
+void rpp_lr_nthw_set_debug_mode(struct rpp_lr_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void rpp_lr_nthw_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_exp(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_rcp_flush(const struct rpp_lr_nthw *p);
+
+/* RCP IFR */
+void rpp_lr_nthw_ifr_rcp_select(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_cnt(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_ipv4_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_ipv4_df_drop(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_ipv6_en(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_ipv6_drop(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_mtu(const struct rpp_lr_nthw *p, uint32_t val);
+void rpp_lr_nthw_ifr_rcp_flush(const struct rpp_lr_nthw *p);
#endif /* __FLOW_NTHW_RPP_LR_H__ */
@@ -12,6 +12,11 @@
#include "flow_nthw_tx_cpy.h"
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode)
+{
+ nthw_module_set_debug_mode(p->m_tx_cpy, n_debug_mode);
+}
+
struct tx_cpy_nthw *tx_cpy_nthw_new(void)
{
struct tx_cpy_nthw *p = malloc(sizeof(struct tx_cpy_nthw));
@@ -337,3 +342,47 @@ int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nthw_fpga_t *p_fpga, int n_instance)
return 0;
}
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val)
+{
+ assert(index < p->m_writers_cnt);
+ nthw_field_set_val32(p->m_writers[index].mp_writer_ctrl_addr, val);
+}
+
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val)
+{
+ assert(index < p->m_writers_cnt);
+ nthw_field_set_val32(p->m_writers[index].mp_writer_ctrl_cnt, val);
+}
+
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+ uint32_t val)
+{
+ assert(index < p->m_writers_cnt);
+ nthw_field_set_val32(p->m_writers[index].mp_writer_data_reader_select, val);
+}
+
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val)
+{
+ assert(index < p->m_writers_cnt);
+ nthw_field_set_val32(p->m_writers[index].mp_writer_data_dyn, val);
+}
+
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val)
+{
+ assert(index < p->m_writers_cnt);
+ nthw_field_set_val32(p->m_writers[index].mp_writer_data_ofs, val);
+}
+
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val)
+{
+ assert(index < p->m_writers_cnt);
+ nthw_field_set_val32(p->m_writers[index].mp_writer_data_len, val);
+}
+
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index)
+{
+ assert(index < p->m_writers_cnt);
+ nthw_register_flush(p->m_writers[index].mp_writer_ctrl, 1);
+ nthw_register_flush(p->m_writers[index].mp_writer_data, 1);
+}
@@ -45,5 +45,15 @@ void tx_cpy_nthw_delete(struct tx_cpy_nthw *p);
int tx_cpy_nthw_init(struct tx_cpy_nthw *p, nthw_fpga_t *p_fpga, int n_instance);
int tx_cpy_nthw_setup(struct tx_cpy_nthw *p, int n_idx, int n_idx_cnt);
+void tx_cpy_nthw_set_debug_mode(struct tx_cpy_nthw *p, unsigned int n_debug_mode);
+
+void tx_cpy_nthw_writer_select(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val);
+void tx_cpy_nthw_writer_cnt(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val);
+void tx_cpy_nthw_writer_reader_select(const struct tx_cpy_nthw *p, unsigned int index,
+ uint32_t val);
+void tx_cpy_nthw_writer_dyn(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val);
+void tx_cpy_nthw_writer_ofs(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val);
+void tx_cpy_nthw_writer_len(const struct tx_cpy_nthw *p, unsigned int index, uint32_t val);
+void tx_cpy_nthw_writer_flush(const struct tx_cpy_nthw *p, unsigned int index);
#endif /* __FLOW_NTHW_TX_CPY_H__ */
@@ -12,6 +12,11 @@
#include "flow_nthw_tx_ins.h"
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode)
+{
+ nthw_module_set_debug_mode(p->m_tx_ins, n_debug_mode);
+}
+
struct tx_ins_nthw *tx_ins_nthw_new(void)
{
struct tx_ins_nthw *p = malloc(sizeof(struct tx_ins_nthw));
@@ -60,3 +65,34 @@ int tx_ins_nthw_init(struct tx_ins_nthw *p, nthw_fpga_t *p_fpga, int n_instance)
return 0;
}
+
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_addr, val);
+}
+
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_cnt, val);
+}
+
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p)
+{
+ nthw_register_flush(p->mp_rcp_ctrl, 1);
+ nthw_register_flush(p->mp_rcp_data, 1);
+}
@@ -31,5 +31,14 @@ void tx_ins_nthw_delete(struct tx_ins_nthw *p);
int tx_ins_nthw_init(struct tx_ins_nthw *p, nthw_fpga_t *p_fpga, int n_instance);
int tx_ins_nthw_setup(struct tx_ins_nthw *p, int n_idx, int n_idx_cnt);
+void tx_ins_nthw_set_debug_mode(struct tx_ins_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_ins_nthw_rcp_select(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_cnt(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_dyn(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_ofs(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_len(const struct tx_ins_nthw *p, uint32_t val);
+void tx_ins_nthw_rcp_flush(const struct tx_ins_nthw *p);
#endif /* __FLOW_NTHW_TX_INS_H__ */
@@ -12,6 +12,11 @@
#include "flow_nthw_tx_rpl.h"
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode)
+{
+ nthw_module_set_debug_mode(p->m_tx_rpl, n_debug_mode);
+}
+
struct tx_rpl_nthw *tx_rpl_nthw_new(void)
{
struct tx_rpl_nthw *p = malloc(sizeof(struct tx_rpl_nthw));
@@ -76,3 +81,92 @@ int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nthw_fpga_t *p_fpga, int n_instance)
return 0;
}
+
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_dyn, val);
+}
+
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_ofs, val);
+}
+
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_len, val);
+}
+
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rcp_data_ext_prio, val);
+}
+
+void tx_rpl_nthw_rcp_eth_type_wr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ assert(p->mp_rcp_data_eth_type_wr);
+ nthw_field_set_val32(p->mp_rcp_data_eth_type_wr, val);
+}
+
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p)
+{
+ nthw_register_flush(p->mp_rcp_ctrl, 1);
+ nthw_register_flush(p->mp_rcp_data, 1);
+}
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_ext_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_ext_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_ext_data_rpl_ptr, val);
+}
+
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p)
+{
+ nthw_register_flush(p->mp_ext_ctrl, 1);
+ nthw_register_flush(p->mp_ext_data, 1);
+}
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rpl_ctrl_addr, val);
+}
+
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val)
+{
+ nthw_field_set_val32(p->mp_rpl_ctrl_cnt, val);
+}
+
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val)
+{
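+	/* Each RPL line holds four 32-bit words (128 bits) of replacement data. */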
+ nthw_field_set_val(p->mp_rpl_data_value, val, 4);
+}
+
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p)
+{
+ nthw_register_flush(p->mp_rpl_ctrl, 1);
+ nthw_register_flush(p->mp_rpl_data, 1);
+}
@@ -48,5 +48,27 @@ void tx_rpl_nthw_delete(struct tx_rpl_nthw *p);
int tx_rpl_nthw_init(struct tx_rpl_nthw *p, nthw_fpga_t *p_fpga, int n_instance);
int tx_rpl_nthw_setup(struct tx_rpl_nthw *p, int n_idx, int n_idx_cnt);
+void tx_rpl_nthw_set_debug_mode(struct tx_rpl_nthw *p, unsigned int n_debug_mode);
+
+/* RCP */
+void tx_rpl_nthw_rcp_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_dyn(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ofs(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_len(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_ext_prio(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_eth_type_wr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rcp_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_ext_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_rpl_ptr(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_ext_flush(const struct tx_rpl_nthw *p);
+
+void tx_rpl_nthw_rpl_select(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_cnt(const struct tx_rpl_nthw *p, uint32_t val);
+void tx_rpl_nthw_rpl_value(const struct tx_rpl_nthw *p, const uint32_t *val);
+void tx_rpl_nthw_rpl_flush(const struct tx_rpl_nthw *p);
#endif /* __FLOW_NTHW_TX_RPL_H__ */