@@ -104,6 +104,8 @@ struct nthw_virt_queue {
* 1: Napatech DVIO0 descriptor (12 bytes).
*/
void *avail_struct_phys_addr;
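+ /* Physical addresses of the used ring and the descriptor table,
+  * as supplied by the caller and recorded at queue setup time.
+  */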
+ void *used_struct_phys_addr;
+ void *desc_struct_phys_addr;
};
static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
@@ -311,13 +313,21 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
uint32_t vq_type,
int irq_vector)
{
- (void)header;
- (void)desc_struct_phys_addr;
uint32_t qs = dbs_qsize_log2(queue_size);
uint32_t int_enable;
uint32_t vec;
uint32_t istk;
+ /*
+ * Setup DBS module - DSF00094
+ * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+ * DBS_RX_QUEUES entries.
+ */
+ if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr, host_id, qs, header,
+ 0) != 0) {
+ return NULL;
+ }
+
/*
* 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
* DBS_RX_QUEUES entries.
@@ -375,6 +385,8 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
rxvq[index].host_id = host_id;
rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+ rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+ rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
rxvq[index].vq_type = vq_type;
rxvq[index].in_order = 0; /* not used */
rxvq[index].irq_vector = irq_vector;
@@ -399,13 +411,21 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
int irq_vector,
uint32_t in_order)
{
- (void)header;
- (void)desc_struct_phys_addr;
uint32_t int_enable;
uint32_t vec;
uint32_t istk;
uint32_t qs = dbs_qsize_log2(queue_size);
+ /*
+ * Setup DBS module - DSF00094
+ * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+ * DBS_TX_QUEUES entries.
+ */
+ if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr, host_id, qs, port,
+ header, 0) != 0) {
+ return NULL;
+ }
+
/*
* 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
* DBS_TX_QUEUES entries.
@@ -468,6 +488,8 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
txvq[index].port = port;
txvq[index].virtual_port = virtual_port;
txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+ txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+ txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
txvq[index].vq_type = vq_type;
txvq[index].in_order = in_order;
txvq[index].irq_vector = irq_vector;
@@ -56,6 +56,25 @@ struct nthw_dbs_tx_uw_data_s {
uint32_t in_order;
};
+/* DBS_RX_DR_DATA */
+struct nthw_dbs_rx_dr_data_s {
+ uint64_t guest_physical_address;
+ uint32_t host_id;
+ uint32_t queue_size;
+ uint32_t header;
+ uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA */
+struct nthw_dbs_tx_dr_data_s {
+ uint64_t guest_physical_address;
+ uint32_t host_id;
+ uint32_t queue_size;
+ uint32_t header;
+ uint32_t port;
+ uint32_t packed;
+};
+
/* DBS_TX_QP_DATA */
struct nthw_dbs_tx_qp_data_s {
uint32_t virtual_port;
@@ -171,6 +190,29 @@ struct nthw_dbs_s {
nthw_field_t *mp_fld_tx_used_writer_data_istk;
nthw_field_t *mp_fld_tx_used_writer_data_in_order;
+ nthw_register_t *mp_reg_rx_descriptor_reader_control;
+ nthw_field_t *mp_fld_rx_descriptor_reader_control_adr;
+ nthw_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+ nthw_register_t *mp_reg_rx_descriptor_reader_data;
+ nthw_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+ nthw_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+ nthw_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+ nthw_field_t *mp_fld_rx_descriptor_reader_data_header;
+ nthw_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+ nthw_register_t *mp_reg_tx_descriptor_reader_control;
+ nthw_field_t *mp_fld_tx_descriptor_reader_control_adr;
+ nthw_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+ nthw_register_t *mp_reg_tx_descriptor_reader_data;
+ nthw_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+ nthw_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+ nthw_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+ nthw_field_t *mp_fld_tx_descriptor_reader_data_port;
+ nthw_field_t *mp_fld_tx_descriptor_reader_data_header;
+ nthw_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
nthw_register_t *mp_reg_tx_queue_property_control;
nthw_field_t *mp_fld_tx_queue_property_control_adr;
nthw_field_t *mp_fld_tx_queue_property_control_cnt;
@@ -180,9 +222,11 @@ struct nthw_dbs_s {
struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+ struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+ struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
};
@@ -245,6 +289,21 @@ int set_tx_uw_data(nthw_dbs_t *p,
uint32_t vec,
uint32_t istk,
uint32_t in_order);
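+/*
+ * Program one entry of the RX/TX descriptor reader (DR) tables.
+ * guest_physical_address is the base of the descriptor table, queue_size is
+ * passed as log2(entries) (see dbs_qsize_log2()), and packed is only written
+ * to hardware when the optional PCKED field exists in the FPGA.
+ */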
+int set_rx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t header,
+ uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t port,
+ uint32_t header,
+ uint32_t packed);
int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
#endif /* _NTNIC_DBS_H_ */
@@ -11,6 +11,23 @@
static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t port,
+ uint32_t header,
+ uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t header,
+ uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
static void set_shadow_tx_uw_data(nthw_dbs_t *p,
uint32_t index,
uint64_t guest_physical_address,
@@ -266,6 +283,54 @@ int dbs_init(nthw_dbs_t *p, nthw_fpga_t *p_fpga, int n_instance)
p->mp_fld_tx_used_writer_data_in_order =
nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+ p->mp_reg_rx_descriptor_reader_control =
+ nthw_module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+ p->mp_fld_rx_descriptor_reader_control_adr =
+ nthw_register_get_field(p->mp_reg_rx_descriptor_reader_control,
+ DBS_RX_DR_CTRL_ADR);
+ p->mp_fld_rx_descriptor_reader_control_cnt =
+ nthw_register_get_field(p->mp_reg_rx_descriptor_reader_control,
+ DBS_RX_DR_CTRL_CNT);
+
+ p->mp_reg_rx_descriptor_reader_data =
+ nthw_module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+ p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+ nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+ p->mp_fld_rx_descriptor_reader_data_host_id =
+ nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+ p->mp_fld_rx_descriptor_reader_data_queue_size =
+ nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+ p->mp_fld_rx_descriptor_reader_data_header =
+ nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+ p->mp_fld_rx_descriptor_reader_data_packed =
+ nthw_register_query_field(p->mp_reg_rx_descriptor_reader_data,
+ DBS_RX_DR_DATA_PCKED);
+
+ p->mp_reg_tx_descriptor_reader_control =
+ nthw_module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+ p->mp_fld_tx_descriptor_reader_control_adr =
+ nthw_register_get_field(p->mp_reg_tx_descriptor_reader_control,
+ DBS_TX_DR_CTRL_ADR);
+ p->mp_fld_tx_descriptor_reader_control_cnt =
+ nthw_register_get_field(p->mp_reg_tx_descriptor_reader_control,
+ DBS_TX_DR_CTRL_CNT);
+
+ p->mp_reg_tx_descriptor_reader_data =
+ nthw_module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+ p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+ nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+ p->mp_fld_tx_descriptor_reader_data_host_id =
+ nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+ p->mp_fld_tx_descriptor_reader_data_queue_size =
+ nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+ p->mp_fld_tx_descriptor_reader_data_header =
+ nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+ p->mp_fld_tx_descriptor_reader_data_port =
+ nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+ p->mp_fld_tx_descriptor_reader_data_packed =
+ nthw_register_query_field(p->mp_reg_tx_descriptor_reader_data,
+ DBS_TX_DR_DATA_PCKED);
+
p->mp_reg_tx_queue_property_control =
nthw_module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
p->mp_fld_tx_queue_property_control_adr =
@@ -317,6 +382,9 @@ void dbs_reset(nthw_dbs_t *p)
set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
flush_rx_uw_data(p, i);
+
+ set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+ flush_rx_dr_data(p, i);
}
	/* Reset TX memory banks and shadow */
@@ -327,6 +395,9 @@ void dbs_reset(nthw_dbs_t *p)
set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
flush_tx_uw_data(p, i);
+ set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+ flush_tx_dr_data(p, i);
+
set_shadow_tx_qp_data(p, i, 0);
flush_tx_qp_data(p, i);
}
@@ -807,6 +878,202 @@ int set_tx_uw_data(nthw_dbs_t *p,
return 0;
}
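+/* Select which RX_DR_DATA table entry the next data-register flush targets. */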
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+ nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+ nthw_register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+static void set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+ uint64_t guest_physical_address)
+{
+ p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+ p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+ p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index, uint32_t header)
+{
+ p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+ p->m_rx_dr_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t header,
+ uint32_t packed)
+{
+ set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+ set_shadow_rx_dr_data_host_id(p, index, host_id);
+ set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+ set_shadow_rx_dr_data_header(p, index, header);
+ set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
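+/*
+ * Write the shadow RX_DR_DATA entry for a given index to hardware. The 64-bit
+ * guest physical address is written as two 32-bit words, the queue size
+ * encoding depends on the DBS module version (entries - 1 vs. log2), the
+ * optional PCKED field is only written when present, and the control register
+ * selects the target index before the data register is flushed.
+ */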
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+ (uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address, 2);
+ nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+ p->m_rx_dr_shadow[index].host_id);
+
+ if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+ nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+ (1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+
+ } else {
+ nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+ p->m_rx_dr_shadow[index].queue_size);
+ }
+
+ nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+ p->m_rx_dr_shadow[index].header);
+
+ if (p->mp_fld_rx_descriptor_reader_data_packed) {
+ nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+ p->m_rx_dr_shadow[index].packed);
+ }
+
+ set_rx_dr_data_index(p, index);
+ nthw_register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
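+/*
+ * Update the RX_DR shadow entry and flush it; returns -ENOTSUP when the FPGA
+ * does not expose the RX descriptor reader registers.
+ */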
+int set_rx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t header,
+ uint32_t packed)
+{
+ if (!p->mp_reg_rx_descriptor_reader_data)
+ return -ENOTSUP;
+
+ set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size, header,
+ packed);
+ flush_rx_dr_data(p, index);
+ return 0;
+}
+
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+ nthw_register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+static void set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+ uint64_t guest_physical_address)
+{
+ p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+ p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+ p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index, uint32_t header)
+{
+ p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+ p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+ p->m_tx_dr_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t port,
+ uint32_t header,
+ uint32_t packed)
+{
+ set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+ set_shadow_tx_dr_data_host_id(p, index, host_id);
+ set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+ set_shadow_tx_dr_data_header(p, index, header);
+ set_shadow_tx_dr_data_port(p, index, port);
+ set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
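+/*
+ * TX counterpart of flush_rx_dr_data(): writes the shadow TX_DR_DATA entry,
+ * including the additional PORT field, to the selected table index.
+ */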
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+ nthw_field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+ (uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address, 2);
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+ p->m_tx_dr_shadow[index].host_id);
+
+ if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+ (1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+
+ } else {
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+ p->m_tx_dr_shadow[index].queue_size);
+ }
+
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+ p->m_tx_dr_shadow[index].header);
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+ p->m_tx_dr_shadow[index].port);
+
+ if (p->mp_fld_tx_descriptor_reader_data_packed) {
+ nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+ p->m_tx_dr_shadow[index].packed);
+ }
+
+ set_tx_dr_data_index(p, index);
+ nthw_register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
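+/* TX counterpart of set_rx_dr_data(); -ENOTSUP when TX_DR registers are absent. */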
+int set_tx_dr_data(nthw_dbs_t *p,
+ uint32_t index,
+ uint64_t guest_physical_address,
+ uint32_t host_id,
+ uint32_t queue_size,
+ uint32_t port,
+ uint32_t header,
+ uint32_t packed)
+{
+ if (!p->mp_reg_tx_descriptor_reader_data)
+ return -ENOTSUP;
+
+ set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size, port, header,
+ packed);
+ flush_tx_dr_data(p, index);
+ return 0;
+}
+
static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
{
nthw_field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
@@ -30,6 +30,15 @@
#define DBS_RX_CONTROL_QE (0x3e928d3UL)
#define DBS_RX_CONTROL_UWE (0xb490e8dbUL)
#define DBS_RX_CONTROL_UWS (0x40445d8aUL)
+#define DBS_RX_DR_CTRL (0xa0cbc617UL)
+#define DBS_RX_DR_CTRL_ADR (0xa7b57286UL)
+#define DBS_RX_DR_CTRL_CNT (0xb7bdeb57UL)
+#define DBS_RX_DR_DATA (0xf1a440eUL)
+#define DBS_RX_DR_DATA_GPA (0x18c20563UL)
+#define DBS_RX_DR_DATA_HDR (0xb98ed4d5UL)
+#define DBS_RX_DR_DATA_HID (0xf8f41fc9UL)
+#define DBS_RX_DR_DATA_PCKED (0x1e27ce2aUL)
+#define DBS_RX_DR_DATA_QS (0xffb980ddUL)
#define DBS_RX_IDLE (0x93c723bfUL)
#define DBS_RX_IDLE_BUSY (0x8e043b5bUL)
#define DBS_RX_IDLE_IDLE (0x9dba27ccUL)
@@ -74,6 +83,16 @@
#define DBS_TX_CONTROL_QE (0xa30cf70eUL)
#define DBS_TX_CONTROL_UWE (0x4c52a36dUL)
#define DBS_TX_CONTROL_UWS (0xb886163cUL)
+#define DBS_TX_DR_CTRL (0xadd5b650UL)
+#define DBS_TX_DR_CTRL_ADR (0x5f773930UL)
+#define DBS_TX_DR_CTRL_CNT (0x4f7fa0e1UL)
+#define DBS_TX_DR_DATA (0x2043449UL)
+#define DBS_TX_DR_DATA_GPA (0xe0004ed5UL)
+#define DBS_TX_DR_DATA_HDR (0x414c9f63UL)
+#define DBS_TX_DR_DATA_HID (0x36547fUL)
+#define DBS_TX_DR_DATA_PCKED (0x4db6a7ceUL)
+#define DBS_TX_DR_DATA_PORT (0xf306968cUL)
+#define DBS_TX_DR_DATA_QS (0x5f5c5f00UL)
#define DBS_TX_IDLE (0xf0171685UL)
#define DBS_TX_IDLE_BUSY (0x61399ebbUL)
#define DBS_TX_IDLE_IDLE (0x7287822cUL)