@@ -285,6 +285,19 @@ dbs_initialize_virt_queue_structs(void *avail_struct_addr, void *used_struct_add
flgs);
}
+static uint16_t dbs_qsize_log2(uint16_t qsize)
+{
+ uint32_t qs = 0;
+
+ while (qsize) {
+ qsize = qsize >> 1;
+ ++qs;
+ }
+
+ --qs;
+ return qs;
+}
+
static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
uint32_t index,
uint16_t start_idx,
@@ -300,7 +313,29 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
{
(void)header;
(void)desc_struct_phys_addr;
- (void)used_struct_phys_addr;
+ uint32_t qs = dbs_qsize_log2(queue_size);
+ uint32_t int_enable;
+ uint32_t vec;
+ uint32_t istk;
+
+ /*
+ * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+ * DBS_RX_QUEUES entries.
+ * Notice: We always start out with interrupts disabled (by setting the
+ * "irq_vector" argument to -1). Queues that require interrupts will have
+ * it enabled at a later time (after we have enabled vfio interrupts in
+ * the kernel).
+ */
+ int_enable = 0;
+ vec = 0;
+ istk = 0;
+ NT_LOG_DBGX(DBG, NTNIC, "set_rx_uw_data int=0 irq_vector=%i\n", irq_vector);
+
+ if (set_rx_uw_data(p_nthw_dbs, index,
+ (uint64_t)used_struct_phys_addr,
+ host_id, qs, 0, int_enable, vec, istk) != 0) {
+ return NULL;
+ }
/*
* 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
@@ -366,7 +401,28 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
{
(void)header;
(void)desc_struct_phys_addr;
- (void)used_struct_phys_addr;
+ uint32_t int_enable;
+ uint32_t vec;
+ uint32_t istk;
+ uint32_t qs = dbs_qsize_log2(queue_size);
+
+ /*
+ * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+ * DBS_TX_QUEUES entries.
+ * Notice: We always start out with interrupts disabled (by setting the
+ * "irq_vector" argument to -1). Queues that require interrupts will have
+ * it enabled at a later time (after we have enabled vfio interrupts in the
+ * kernel).
+ */
+ int_enable = 0;
+ vec = 0;
+ istk = 0;
+
+ if (set_tx_uw_data(p_nthw_dbs, index,
+ (uint64_t)used_struct_phys_addr,
+ host_id, qs, 0, int_enable, vec, istk, in_order) != 0) {
+ return NULL;
+ }
/*
* 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
@@ -33,6 +33,29 @@ struct nthw_dbs_tx_am_data_s {
uint32_t int_enable;
};
+/* DBS_RX_UW_DATA */
+struct nthw_dbs_rx_uw_data_s {
+ uint64_t guest_physical_address;
+ uint32_t host_id;
+ uint32_t queue_size;
+ uint32_t packed;
+ uint32_t int_enable;
+ uint32_t vec;
+ uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+struct nthw_dbs_tx_uw_data_s {
+ uint64_t guest_physical_address;
+ uint32_t host_id;
+ uint32_t queue_size;
+ uint32_t packed;
+ uint32_t int_enable;
+ uint32_t vec;
+ uint32_t istk;
+ uint32_t in_order;
+};
+
/* DBS_TX_QP_DATA */
struct nthw_dbs_tx_qp_data_s {
uint32_t virtual_port;
@@ -121,6 +144,33 @@ struct nthw_dbs_s {
nthw_field_t *mp_fld_tx_avail_monitor_data_packed;
nthw_field_t *mp_fld_tx_avail_monitor_data_int;
+ nthw_register_t *mp_reg_rx_used_writer_control;
+ nthw_field_t *mp_fld_rx_used_writer_control_adr;
+ nthw_field_t *mp_fld_rx_used_writer_control_cnt;
+
+ nthw_register_t *mp_reg_rx_used_writer_data;
+ nthw_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+ nthw_field_t *mp_fld_rx_used_writer_data_host_id;
+ nthw_field_t *mp_fld_rx_used_writer_data_queue_size;
+ nthw_field_t *mp_fld_rx_used_writer_data_packed;
+ nthw_field_t *mp_fld_rx_used_writer_data_int;
+ nthw_field_t *mp_fld_rx_used_writer_data_vec;
+ nthw_field_t *mp_fld_rx_used_writer_data_istk;
+
+ nthw_register_t *mp_reg_tx_used_writer_control;
+ nthw_field_t *mp_fld_tx_used_writer_control_adr;
+ nthw_field_t *mp_fld_tx_used_writer_control_cnt;
+
+ nthw_register_t *mp_reg_tx_used_writer_data;
+ nthw_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+ nthw_field_t *mp_fld_tx_used_writer_data_host_id;
+ nthw_field_t *mp_fld_tx_used_writer_data_queue_size;
+ nthw_field_t *mp_fld_tx_used_writer_data_packed;
+ nthw_field_t *mp_fld_tx_used_writer_data_int;
+ nthw_field_t *mp_fld_tx_used_writer_data_vec;
+ nthw_field_t *mp_fld_tx_used_writer_data_istk;
+ nthw_field_t *mp_fld_tx_used_writer_data_in_order;
+
nthw_register_t *mp_reg_tx_queue_property_control;
nthw_field_t *mp_fld_tx_queue_property_control_adr;
nthw_field_t *mp_fld_tx_queue_property_control_cnt;
@@ -129,8 +179,10 @@ struct nthw_dbs_s {
nthw_field_t *mp_fld_tx_queue_property_data_v_port;
struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+ struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+ struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
};
@@ -174,6 +226,25 @@ int set_tx_am_data(nthw_dbs_t *p,
uint32_t host_id,
uint32_t packed,
uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk,
+ uint32_t in_order);
int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
#endif /* _NTNIC_DBS_H_ */
@@ -11,6 +11,27 @@
static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk,
+ uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
static void set_shadow_rx_am_data(nthw_dbs_t *p,
uint32_t index,
uint64_t guest_physical_address,
@@ -199,6 +220,52 @@ int dbs_init(nthw_dbs_t *p, nthw_fpga_t *p_fpga, int n_instance)
p->mp_fld_tx_avail_monitor_data_int =
nthw_register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+ p->mp_reg_rx_used_writer_control = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+ p->mp_fld_rx_used_writer_control_adr =
+ nthw_register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+ p->mp_fld_rx_used_writer_control_cnt =
+ nthw_register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+ p->mp_reg_rx_used_writer_data = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+ p->mp_fld_rx_used_writer_data_guest_physical_address =
+ nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+ p->mp_fld_rx_used_writer_data_host_id =
+ nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+ p->mp_fld_rx_used_writer_data_queue_size =
+ nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+ p->mp_fld_rx_used_writer_data_packed =
+ nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+ p->mp_fld_rx_used_writer_data_int =
+ nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+ p->mp_fld_rx_used_writer_data_vec =
+ nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+ p->mp_fld_rx_used_writer_data_istk =
+ nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+ p->mp_reg_tx_used_writer_control = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+ p->mp_fld_tx_used_writer_control_adr =
+ nthw_register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+ p->mp_fld_tx_used_writer_control_cnt =
+ nthw_register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+ p->mp_reg_tx_used_writer_data = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+ p->mp_fld_tx_used_writer_data_guest_physical_address =
+ nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+ p->mp_fld_tx_used_writer_data_host_id =
+ nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+ p->mp_fld_tx_used_writer_data_queue_size =
+ nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+ p->mp_fld_tx_used_writer_data_packed =
+ nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+ p->mp_fld_tx_used_writer_data_int =
+ nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+ p->mp_fld_tx_used_writer_data_vec =
+ nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+ p->mp_fld_tx_used_writer_data_istk =
+ nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+ p->mp_fld_tx_used_writer_data_in_order =
+ nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
p->mp_reg_tx_queue_property_control =
nthw_module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
p->mp_fld_tx_queue_property_control_adr =
@@ -247,6 +314,9 @@ void dbs_reset(nthw_dbs_t *p)
for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
flush_rx_am_data(p, i);
+
+ set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+ flush_rx_uw_data(p, i);
}
/* Reset TX memory banks and shado */
@@ -254,6 +324,9 @@ void dbs_reset(nthw_dbs_t *p)
set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
flush_tx_am_data(p, i);
+ set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+ flush_tx_uw_data(p, i);
+
set_shadow_tx_qp_data(p, i, 0);
flush_tx_qp_data(p, i);
}
@@ -491,6 +564,249 @@ int set_tx_am_data(nthw_dbs_t *p,
return 0;
}
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+ nthw_register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+static void set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+ uint64_t guest_physical_address)
+{
+ p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+ p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+ p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+ p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index, uint32_t int_enable)
+{
+ p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+ p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+ p->m_rx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_rx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk)
+{
+ set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+ set_shadow_rx_uw_data_host_id(p, index, host_id);
+ set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+ set_shadow_rx_uw_data_packed(p, index, packed);
+ set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+ set_shadow_rx_uw_data_vec(p, index, vec);
+ set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+ (uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address, 2);
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+ p->m_rx_uw_shadow[index].host_id);
+
+ if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+ (1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+
+ } else {
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+ p->m_rx_uw_shadow[index].queue_size);
+ }
+
+ if (p->mp_fld_rx_used_writer_data_packed) {
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+ p->m_rx_uw_shadow[index].packed);
+ }
+
+ if (p->mp_fld_rx_used_writer_data_int) {
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_data_int,
+ p->m_rx_uw_shadow[index].int_enable);
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+ p->m_rx_uw_shadow[index].vec);
+ nthw_field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+ p->m_rx_uw_shadow[index].istk);
+ }
+
+ set_rx_uw_data_index(p, index);
+ nthw_register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+int set_rx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk)
+{
+ if (!p->mp_reg_rx_used_writer_data)
+ return -ENOTSUP;
+
+ set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size, packed,
+ int_enable, vec, istk);
+ flush_rx_uw_data(p, index);
+ return 0;
+}
+
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+ nthw_register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+static void set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+ uint64_t guest_physical_address)
+{
+ p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+ p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+ p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+ p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index, uint32_t int_enable)
+{
+ p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+ p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+ p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index, uint32_t in_order)
+{
+ p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+static void set_shadow_tx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk,
+ uint32_t in_order)
+{
+ set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+ set_shadow_tx_uw_data_host_id(p, index, host_id);
+ set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+ set_shadow_tx_uw_data_packed(p, index, packed);
+ set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+ set_shadow_tx_uw_data_vec(p, index, vec);
+ set_shadow_tx_uw_data_istk(p, index, istk);
+ set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+ (uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address, 2);
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+ p->m_tx_uw_shadow[index].host_id);
+
+ if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+ (1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+
+ } else {
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+ p->m_tx_uw_shadow[index].queue_size);
+ }
+
+ if (p->mp_fld_tx_used_writer_data_packed) {
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+ p->m_tx_uw_shadow[index].packed);
+ }
+
+ if (p->mp_fld_tx_used_writer_data_int) {
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_int,
+ p->m_tx_uw_shadow[index].int_enable);
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+ p->m_tx_uw_shadow[index].vec);
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+ p->m_tx_uw_shadow[index].istk);
+ }
+
+ if (p->mp_fld_tx_used_writer_data_in_order) {
+ nthw_field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+ p->m_tx_uw_shadow[index].in_order);
+ }
+
+ set_tx_uw_data_index(p, index);
+ nthw_register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+int set_tx_uw_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t packed,
+ uint32_t int_enable,
+ uint32_t vec,
+ uint32_t istk,
+ uint32_t in_order)
+{
+ if (!p->mp_reg_tx_used_writer_data)
+ return -ENOTSUP;
+
+ set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size, packed,
+ int_enable, vec, istk, in_order);
+ flush_tx_uw_data(p, index);
+ return 0;
+}
+
static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
{
nthw_field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
@@ -45,6 +45,17 @@
#define DBS_RX_PTR_PTR (0x7f834481UL)
#define DBS_RX_PTR_QUEUE (0x4f3fa6d1UL)
#define DBS_RX_PTR_VALID (0xbcc5ec4dUL)
+#define DBS_RX_UW_CTRL (0x31afc0deUL)
+#define DBS_RX_UW_CTRL_ADR (0x2ee4a2c9UL)
+#define DBS_RX_UW_CTRL_CNT (0x3eec3b18UL)
+#define DBS_RX_UW_DATA (0x9e7e42c7UL)
+#define DBS_RX_UW_DATA_GPA (0x9193d52cUL)
+#define DBS_RX_UW_DATA_HID (0x71a5cf86UL)
+#define DBS_RX_UW_DATA_INT (0x22912312UL)
+#define DBS_RX_UW_DATA_ISTK (0xd469a7ddUL)
+#define DBS_RX_UW_DATA_PCKED (0xef15c665UL)
+#define DBS_RX_UW_DATA_QS (0x7d422f44UL)
+#define DBS_RX_UW_DATA_VEC (0x55cc9b53UL)
#define DBS_STATUS (0xb5f35220UL)
#define DBS_STATUS_OK (0xcf09a30fUL)
#define DBS_TX_AM_CTRL (0xd6d29b9UL)
@@ -94,6 +105,18 @@
#define DBS_TX_QP_CTRL_CNT (0x942b1855UL)
#define DBS_TX_QP_DATA (0x7a2a262bUL)
#define DBS_TX_QP_DATA_VPORT (0xda741d67UL)
+#define DBS_TX_UW_CTRL (0x3cb1b099UL)
+#define DBS_TX_UW_CTRL_ADR (0xd626e97fUL)
+#define DBS_TX_UW_CTRL_CNT (0xc62e70aeUL)
+#define DBS_TX_UW_DATA (0x93603280UL)
+#define DBS_TX_UW_DATA_GPA (0x69519e9aUL)
+#define DBS_TX_UW_DATA_HID (0x89678430UL)
+#define DBS_TX_UW_DATA_INO (0x5036a148UL)
+#define DBS_TX_UW_DATA_INT (0xda5368a4UL)
+#define DBS_TX_UW_DATA_ISTK (0xf693732fUL)
+#define DBS_TX_UW_DATA_PCKED (0xbc84af81UL)
+#define DBS_TX_UW_DATA_QS (0xdda7f099UL)
+#define DBS_TX_UW_DATA_VEC (0xad0ed0e5UL)
#endif /* _NTHW_FPGA_REG_DEFS_DBS_ */