diff mbox series

[RFC,06/29] net/qdma: add qdma access library

Message ID 20220706075219.517046-7-aman.kumar@vvdntech.in (mailing list archive)
State New
Delegated to: Thomas Monjalon
Headers show
Series cover letter for net/qdma PMD | expand

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Aman Kumar July 6, 2022, 7:51 a.m. UTC
Add the QDMA hardware access library.
Modify the qdma meson.build to compile the newly added files.

Signed-off-by: Aman Kumar <aman.kumar@vvdntech.in>
---
 drivers/net/qdma/meson.build                  |   14 +
 .../eqdma_soft_access/eqdma_soft_access.c     | 5832 ++++++++++++
 .../eqdma_soft_access/eqdma_soft_access.h     |  294 +
 .../eqdma_soft_access/eqdma_soft_reg.h        | 1211 +++
 .../eqdma_soft_access/eqdma_soft_reg_dump.c   | 3908 ++++++++
 .../net/qdma/qdma_access/qdma_access_common.c | 1271 +++
 .../net/qdma/qdma_access/qdma_access_common.h |  888 ++
 .../net/qdma/qdma_access/qdma_access_errors.h |   60 +
 .../net/qdma/qdma_access/qdma_access_export.h |  243 +
 .../qdma/qdma_access/qdma_access_version.h    |   24 +
 drivers/net/qdma/qdma_access/qdma_list.c      |   51 +
 drivers/net/qdma/qdma_access/qdma_list.h      |  109 +
 .../net/qdma/qdma_access/qdma_mbox_protocol.c | 2107 +++++
 .../net/qdma/qdma_access/qdma_mbox_protocol.h |  681 ++
 drivers/net/qdma/qdma_access/qdma_platform.c  |  224 +
 drivers/net/qdma/qdma_access/qdma_platform.h  |  156 +
 .../net/qdma/qdma_access/qdma_platform_env.h  |   32 +
 drivers/net/qdma/qdma_access/qdma_reg_dump.h  |   77 +
 .../net/qdma/qdma_access/qdma_resource_mgmt.c |  787 ++
 .../net/qdma/qdma_access/qdma_resource_mgmt.h |  201 +
 .../qdma_s80_hard_access.c                    | 5851 ++++++++++++
 .../qdma_s80_hard_access.h                    |  266 +
 .../qdma_s80_hard_access/qdma_s80_hard_reg.h  | 2031 +++++
 .../qdma_s80_hard_reg_dump.c                  | 7999 +++++++++++++++++
 .../qdma_soft_access/qdma_soft_access.c       | 6106 +++++++++++++
 .../qdma_soft_access/qdma_soft_access.h       |  280 +
 .../qdma_soft_access/qdma_soft_reg.h          |  570 ++
 27 files changed, 41273 insertions(+)
 create mode 100644 drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c
 create mode 100644 drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.h
 create mode 100644 drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg.h
 create mode 100644 drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_access_common.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_access_common.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_access_errors.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_access_export.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_access_version.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_list.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_list.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_mbox_protocol.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_mbox_protocol.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_platform.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_platform.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_platform_env.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_reg_dump.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_resource_mgmt.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_resource_mgmt.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_s80_hard_access/qdma_s80_hard_access.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_s80_hard_access/qdma_s80_hard_access.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_s80_hard_access/qdma_s80_hard_reg.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_s80_hard_access/qdma_s80_hard_reg_dump.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_soft_access/qdma_soft_access.c
 create mode 100644 drivers/net/qdma/qdma_access/qdma_soft_access/qdma_soft_access.h
 create mode 100644 drivers/net/qdma/qdma_access/qdma_soft_access/qdma_soft_reg.h
diff mbox series

Patch

diff --git a/drivers/net/qdma/meson.build b/drivers/net/qdma/meson.build
index f0df5ef0d9..99076e1ebf 100644
--- a/drivers/net/qdma/meson.build
+++ b/drivers/net/qdma/meson.build
@@ -12,8 +12,22 @@  if (not dpdk_conf.has('RTE_ARCH_X86_64'))
 endif
 
 includes += include_directories('.')
+includes += include_directories('qdma_access')
+includes += include_directories('qdma_access/qdma_soft_access')
+includes += include_directories('qdma_access/eqdma_soft_access')
+includes += include_directories('qdma_access/qdma_s80_hard_access')
 
 sources = files(
         'qdma_ethdev.c',
         'qdma_common.c',
+        'qdma_access/eqdma_soft_access/eqdma_soft_access.c',
+        'qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c',
+        'qdma_access/qdma_s80_hard_access/qdma_s80_hard_access.c',
+        'qdma_access/qdma_s80_hard_access/qdma_s80_hard_reg_dump.c',
+        'qdma_access/qdma_soft_access/qdma_soft_access.c',
+        'qdma_access/qdma_list.c',
+        'qdma_access/qdma_resource_mgmt.c',
+        'qdma_access/qdma_mbox_protocol.c',
+        'qdma_access/qdma_access_common.c',
+        'qdma_access/qdma_platform.c',
 )
diff --git a/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c
new file mode 100644
index 0000000000..38e0a7488d
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.c
@@ -0,0 +1,5832 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#include "eqdma_soft_access.h"
+#include "eqdma_soft_reg.h"
+#include "qdma_reg_dump.h"
+
+#ifdef ENABLE_WPP_TRACING
+#include "eqdma_soft_access.tmh"
+#endif
+
+/** EQDMA Context array size */
+#define EQDMA_SW_CONTEXT_NUM_WORDS           8
+#define EQDMA_HW_CONTEXT_NUM_WORDS           2
+#define EQDMA_PFETCH_CONTEXT_NUM_WORDS       2
+#define EQDMA_CR_CONTEXT_NUM_WORDS           1
+#define EQDMA_CMPT_CONTEXT_NUM_WORDS         6
+#define EQDMA_IND_INTR_CONTEXT_NUM_WORDS     4
+
+#define EQDMA_VF_USER_BAR_ID                 2
+
+#define EQDMA_REG_GROUP_1_START_ADDR	0x000
+#define EQDMA_REG_GROUP_2_START_ADDR	0x804
+#define EQDMA_REG_GROUP_3_START_ADDR	0xB00
+#define EQDMA_REG_GROUP_4_START_ADDR	0x5014
+
+#define EQDMA_TOTAL_LEAF_ERROR_AGGREGATORS 9
+#define EQDMA_GLBL_TRQ_ERR_ALL_MASK	0XB3
+#define EQDMA_GLBL_DSC_ERR_ALL_MASK	0X1F9037E
+#define EQDMA_C2H_ERR_ALL_MASK		0X3F6DF
+#define EQDMA_C2H_FATAL_ERR_ALL_MASK	0X1FDF1B
+#define EQDMA_H2C_ERR_ALL_MASK		0X3F
+#define EQDMA_SBE_ERR_ALL_MASK		0XFFFFFFFF
+#define EQDMA_DBE_ERR_ALL_MASK		0XFFFFFFFF
+
+/* H2C Throttle settings */
+#define EQDMA_H2C_THROT_DATA_THRESH       0x5000
+#define EQDMA_THROT_EN_DATA               1
+#define EQDMA_THROT_EN_REQ                0
+#define EQDMA_H2C_THROT_REQ_THRESH        0xC0
+
+
+/** Auxiliary Bitmasks for fields spanning multiple words */
+#define EQDMA_SW_CTXT_PASID_GET_H_MASK              GENMASK(21, 12)
+#define EQDMA_SW_CTXT_PASID_GET_L_MASK              GENMASK(11, 0)
+#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_H_MASK    GENMASK_ULL(63, 53)
+#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_M_MASK    GENMASK_ULL(52, 21)
+#define EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_L_MASK    GENMASK_ULL(20, 0)
+#define EQDMA_CMPL_CTXT_PASID_GET_H_MASK            GENMASK(21, 9)
+#define EQDMA_CMPL_CTXT_PASID_GET_L_MASK            GENMASK(8, 0)
+#define EQDMA_INTR_CTXT_PASID_GET_H_MASK            GENMASK(21, 9)
+#define EQDMA_INTR_CTXT_PASID_GET_L_MASK            GENMASK(8, 0)
+
+
+#define EQDMA_OFFSET_GLBL2_PF_BARLITE_EXT	0x10C
+
+#define QDMA_OFFSET_GLBL2_PF_BARLITE_INT	0x104
+#define QDMA_GLBL2_PF3_BAR_MAP_MASK		GENMASK(23, 18)
+#define QDMA_GLBL2_PF2_BAR_MAP_MASK		GENMASK(17, 12)
+#define QDMA_GLBL2_PF1_BAR_MAP_MASK		GENMASK(11, 6)
+#define QDMA_GLBL2_PF0_BAR_MAP_MASK		GENMASK(5, 0)
+
+#define EQDMA_GLBL2_DBG_MODE_EN_MASK		BIT(4)
+#define EQDMA_GLBL2_DESC_ENG_MODE_MASK		GENMASK(3, 2)
+#define EQDMA_GLBL2_FLR_PRESENT_MASK		BIT(1)
+#define EQDMA_GLBL2_MAILBOX_EN_MASK		BIT(0)
+
+static void eqdma_hw_st_h2c_err_process(void *dev_hndl);
+static void eqdma_hw_st_c2h_err_process(void *dev_hndl);
+static void eqdma_hw_desc_err_process(void *dev_hndl);
+static void eqdma_hw_trq_err_process(void *dev_hndl);
+static void eqdma_hw_ram_sbe_err_process(void *dev_hndl);
+static void eqdma_hw_ram_dbe_err_process(void *dev_hndl);
+
+static struct eqdma_hw_err_info eqdma_err_info[EQDMA_ERRS_ALL] = {
+	/* Descriptor errors */
+	{
+		EQDMA_DSC_ERR_POISON,
+		"Poison error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_POISON_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_UR_CA,
+		"Unsupported request or completer aborted error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_UR_CA_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_BCNT,
+		"Unexpected Byte count in completion error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_BCNT_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_PARAM,
+		"Parameter mismatch error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_PARAM_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_ADDR,
+		"Address mismatch error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_ADDR_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_TAG,
+		"Unexpected tag error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_TAG_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_FLR,
+		"FLR error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_FLR_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_TIMEOUT,
+		"Timed out error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_TIMEOUT_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_DAT_POISON,
+		"Poison data error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_DAT_POISON_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_FLR_CANCEL,
+		"Descriptor fetch cancelled due to FLR error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_FLR_CANCEL_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_DMA,
+		"DMA engine error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_DMA_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_DSC,
+		"Invalid PIDX update error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_DSC_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_RQ_CANCEL,
+		"Descriptor fetch cancelled due to disable register status error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_RQ_CANCEL_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_DBE,
+		"UNC_ERR_RAM_DBE error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_DBE_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_SBE,
+		"UNC_ERR_RAM_SBE error",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		GLBL_DSC_ERR_STS_SBE_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+	{
+		EQDMA_DSC_ERR_ALL,
+		"All Descriptor errors",
+		EQDMA_GLBL_DSC_ERR_MSK_ADDR,
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		EQDMA_GLBL_DSC_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_DSC_MASK,
+		&eqdma_hw_desc_err_process
+	},
+
+	/* TRQ errors */
+	{
+		EQDMA_TRQ_ERR_CSR_UNMAPPED,
+		"Access targeted unmapped register space via CSR pathway error",
+		EQDMA_GLBL_TRQ_ERR_MSK_ADDR,
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		GLBL_TRQ_ERR_STS_CSR_UNMAPPED_MASK,
+		GLBL_ERR_STAT_ERR_TRQ_MASK,
+		&eqdma_hw_trq_err_process
+	},
+	{
+		EQDMA_TRQ_ERR_VF_ACCESS,
+		"VF attempted to access Global register space or Function map",
+		EQDMA_GLBL_TRQ_ERR_MSK_ADDR,
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		GLBL_TRQ_ERR_STS_VF_ACCESS_ERR_MASK,
+		GLBL_ERR_STAT_ERR_TRQ_MASK,
+		&eqdma_hw_trq_err_process
+	},
+	{
+		EQDMA_TRQ_ERR_TCP_CSR_TIMEOUT,
+		"Timeout on request to dma internal csr register",
+		EQDMA_GLBL_TRQ_ERR_MSK_ADDR,
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		GLBL_TRQ_ERR_STS_TCP_CSR_TIMEOUT_MASK,
+		GLBL_ERR_STAT_ERR_TRQ_MASK,
+		&eqdma_hw_trq_err_process
+	},
+	{
+		EQDMA_TRQ_ERR_QSPC_UNMAPPED,
+		"Access targeted unmapped register via queue space pathway",
+		EQDMA_GLBL_TRQ_ERR_MSK_ADDR,
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		GLBL_TRQ_ERR_STS_QSPC_UNMAPPED_MASK,
+		GLBL_ERR_STAT_ERR_TRQ_MASK,
+		&eqdma_hw_trq_err_process
+	},
+	{
+		EQDMA_TRQ_ERR_QID_RANGE,
+		"Qid range error",
+		EQDMA_GLBL_TRQ_ERR_MSK_ADDR,
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		GLBL_TRQ_ERR_STS_QID_RANGE_MASK,
+		GLBL_ERR_STAT_ERR_TRQ_MASK,
+		&eqdma_hw_trq_err_process
+	},
+	{
+		EQDMA_TRQ_ERR_TCP_QSPC_TIMEOUT,
+		"Timeout on request to dma internal queue space register",
+		EQDMA_GLBL_TRQ_ERR_MSK_ADDR,
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		GLBL_TRQ_ERR_STS_TCP_QSPC_TIMEOUT_MASK,
+		GLBL_ERR_STAT_ERR_TRQ_MASK,
+		&eqdma_hw_trq_err_process
+	},
+	{
+		EQDMA_TRQ_ERR_ALL,
+		"All TRQ errors",
+		EQDMA_GLBL_TRQ_ERR_MSK_ADDR,
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		EQDMA_GLBL_TRQ_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_TRQ_MASK,
+		&eqdma_hw_trq_err_process
+	},
+
+	/* C2H Errors */
+	{
+		EQDMA_ST_C2H_ERR_MTY_MISMATCH,
+		"MTY mismatch error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_MTY_MISMATCH_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_LEN_MISMATCH,
+		"Packet length mismatch error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_LEN_MISMATCH_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_SH_CMPT_DSC,
+		"A Shared CMPT queue has encountered a descriptor error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_SH_CMPT_DSC_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_QID_MISMATCH,
+		"Qid mismatch error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_QID_MISMATCH_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_DESC_RSP_ERR,
+		"Descriptor error bit set",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_DESC_RSP_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
+		"Data parity error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_ENG_WPL_DATA_PAR_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_MSI_INT_FAIL,
+		"MSI got a fail response error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_MSI_INT_FAIL_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_ERR_DESC_CNT,
+		"Descriptor count error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_ERR_DESC_CNT_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
+		"Port id in packet and pfetch ctxt mismatch error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_PORT_ID_CTXT_MISMATCH_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_CMPT_INV_Q_ERR,
+		"Writeback on invalid queue error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_WRB_INV_Q_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_CMPT_QFULL_ERR,
+		"Completion queue gets full error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_WRB_QFULL_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_CMPT_CIDX_ERR,
+		"Bad CIDX update by the software error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_WRB_CIDX_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_CMPT_PRTY_ERR,
+		"C2H completion Parity error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_WRB_PRTY_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_AVL_RING_DSC,
+		"Available ring fetch returns descriptor with error",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_AVL_RING_DSC_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_HDR_ECC_UNC,
+		"multi-bit ecc error on c2h packet header",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_HDR_ECC_UNC_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_HDR_ECC_COR,
+		"single-bit ecc error on c2h packet header",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		C2H_ERR_STAT_HDR_ECC_COR_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_C2H_ERR_ALL,
+		"All C2h errors",
+		EQDMA_C2H_ERR_MASK_ADDR,
+		EQDMA_C2H_ERR_STAT_ADDR,
+		EQDMA_C2H_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+
+	/* C2H fatal errors */
+	{
+		EQDMA_ST_FATAL_ERR_MTY_MISMATCH,
+		"Fatal MTY mismatch error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_MTY_MISMATCH_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_LEN_MISMATCH,
+		"Fatal Len mismatch error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_LEN_MISMATCH_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_QID_MISMATCH,
+		"Fatal Qid mismatch error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_QID_MISMATCH_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_TIMER_FIFO_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_PFCH_LL_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_CMPT_CTXT_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_WRB_CTXT_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_PFCH_CTXT_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_DESC_REQ_FIFO_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_INT_CTXT_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_WRB_COAL_DATA_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_CMPT_FIFO_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_CMPT_FIFO_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_QID_FIFO_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_PLD_FIFO_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_WPL_DATA_PAR,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_WPL_DATA_PAR_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_AVL_RING_FIFO_RAM_RDBE,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_AVL_RING_FIFO_RAM_RDBE_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_HDR_ECC_UNC,
+		"RAM double bit fatal error",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		C2H_FATAL_ERR_STAT_HDR_ECC_UNC_ERR_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+	{
+		EQDMA_ST_FATAL_ERR_ALL,
+		"All fatal errors",
+		EQDMA_C2H_FATAL_ERR_MASK_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		EQDMA_C2H_FATAL_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK,
+		&eqdma_hw_st_c2h_err_process
+	},
+
+	/* H2C St errors */
+	{
+		EQDMA_ST_H2C_ERR_ZERO_LEN_DESC,
+		"Zero length descriptor error",
+		EQDMA_H2C_ERR_MASK_ADDR,
+		EQDMA_H2C_ERR_STAT_ADDR,
+		H2C_ERR_STAT_ZERO_LEN_DS_MASK,
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK,
+		&eqdma_hw_st_h2c_err_process
+	},
+	{
+		EQDMA_ST_H2C_ERR_SDI_MRKR_REQ_MOP,
+		"A non-EOP descriptor received",
+		EQDMA_H2C_ERR_MASK_ADDR,
+		EQDMA_H2C_ERR_STAT_ADDR,
+		H2C_ERR_STAT_SDI_MRKR_REQ_MOP_ERR_MASK,
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK,
+		&eqdma_hw_st_h2c_err_process
+	},
+	{
+		EQDMA_ST_H2C_ERR_NO_DMA_DSC,
+		"No DMA descriptor received error",
+		EQDMA_H2C_ERR_MASK_ADDR,
+		EQDMA_H2C_ERR_STAT_ADDR,
+		H2C_ERR_STAT_NO_DMA_DS_MASK,
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK,
+		&eqdma_hw_st_h2c_err_process
+	},
+	{
+		EQDMA_ST_H2C_ERR_SBE,
+		"Single bit error detected on H2C-ST data error",
+		EQDMA_H2C_ERR_MASK_ADDR,
+		EQDMA_H2C_ERR_STAT_ADDR,
+		H2C_ERR_STAT_SBE_MASK,
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK,
+		&eqdma_hw_st_h2c_err_process
+	},
+	{
+		EQDMA_ST_H2C_ERR_DBE,
+		"Double bit error detected on H2C-ST data error",
+		EQDMA_H2C_ERR_MASK_ADDR,
+		EQDMA_H2C_ERR_STAT_ADDR,
+		H2C_ERR_STAT_DBE_MASK,
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK,
+		&eqdma_hw_st_h2c_err_process
+	},
+	{
+		EQDMA_ST_H2C_ERR_PAR,
+		"Internal data parity error",
+		EQDMA_H2C_ERR_MASK_ADDR,
+		EQDMA_H2C_ERR_STAT_ADDR,
+		H2C_ERR_STAT_PAR_ERR_MASK,
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK,
+		&eqdma_hw_st_h2c_err_process
+	},
+	{
+		EQDMA_ST_H2C_ERR_ALL,
+		"All H2C errors",
+		EQDMA_H2C_ERR_MASK_ADDR,
+		EQDMA_H2C_ERR_STAT_ADDR,
+		EQDMA_H2C_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK,
+		&eqdma_hw_st_h2c_err_process
+	},
+
+	/* SBE errors */
+	{
+		EQDMA_SBE_1_ERR_RC_RRQ_EVEN_RAM,
+		"RC RRQ Even RAM single bit ECC error.",
+		EQDMA_RAM_SBE_MSK_1_A_ADDR,
+		EQDMA_RAM_SBE_STS_1_A_ADDR,
+		RAM_SBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_1_ERR_TAG_ODD_RAM,
+		"Tag Odd Ram single bit ECC error.",
+		EQDMA_RAM_SBE_MSK_1_A_ADDR,
+		EQDMA_RAM_SBE_STS_1_A_ADDR,
+		RAM_SBE_STS_1_A_TAG_ODD_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_1_ERR_TAG_EVEN_RAM,
+		"Tag Even Ram single bit ECC error.",
+		EQDMA_RAM_SBE_MSK_1_A_ADDR,
+		EQDMA_RAM_SBE_STS_1_A_ADDR,
+		RAM_SBE_STS_1_A_TAG_EVEN_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_0,
+		"Pfch Ctxt CAM RAM 0 single bit ECC error.",
+		EQDMA_RAM_SBE_MSK_1_A_ADDR,
+		EQDMA_RAM_SBE_STS_1_A_ADDR,
+		RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_1,
+		"Pfch Ctxt CAM RAM 1 single bit ECC error.",
+		EQDMA_RAM_SBE_MSK_1_A_ADDR,
+		EQDMA_RAM_SBE_STS_1_A_ADDR,
+		RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_1_ERR_ALL,
+		"All SBE Errors.",
+		EQDMA_RAM_SBE_MSK_1_A_ADDR,
+		EQDMA_RAM_SBE_STS_1_A_ADDR,
+		EQDMA_SBE_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_H2C0_DAT,
+		"H2C MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_H2C0_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_H2C1_DAT,
+		"H2C MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_H2C1_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_H2C2_DAT,
+		"H2C MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_H2C2_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_H2C3_DAT,
+		"H2C MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_H2C3_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_C2H0_DAT,
+		"C2H MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_C2H0_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_C2H1_DAT,
+		"C2H MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_C2H1_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+{
+		EQDMA_SBE_ERR_MI_C2H2_DAT,
+		"C2H MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_C2H2_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_C2H3_DAT,
+		"C2H MM data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_C2H3_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_H2C_RD_BRG_DAT,
+		"Bridge master read single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_H2C_RD_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_H2C_WR_BRG_DAT,
+		"Bridge master write single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_H2C_WR_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_C2H_RD_BRG_DAT,
+		"Bridge slave read data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_C2H_RD_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_C2H_WR_BRG_DAT,
+		"Bridge slave write data buffer single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_C2H_WR_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_FUNC_MAP,
+		"Function map RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_FUNC_MAP_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_DSC_HW_CTXT,
+		"Descriptor engine hardware context RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_DSC_HW_CTXT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_DSC_CRD_RCV,
+		"Descriptor engine receive credit context RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_DSC_CRD_RCV_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_DSC_SW_CTXT,
+		"Descriptor engine software context RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_DSC_SW_CTXT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_DSC_CPLI,
+		"Descriptor engine fetch completion information RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_DSC_CPLI_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_DSC_CPLD,
+		"Descriptor engine fetch completion data RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_DSC_CPLD_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_MI_TL_SLV_FIFO_RAM,
+		"TL Slavle FIFO RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_TIMER_FIFO_RAM,
+		"Timer fifo RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_TIMER_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_QID_FIFO_RAM,
+		"C2H ST QID FIFO RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_QID_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_WRB_COAL_DATA_RAM,
+		"Writeback Coalescing RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_WRB_COAL_DATA_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_INT_CTXT_RAM,
+		"Interrupt context RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_INT_CTXT_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_DESC_REQ_FIFO_RAM,
+		"C2H ST descriptor request RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_DESC_REQ_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_PFCH_CTXT_RAM,
+		"C2H ST prefetch RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_PFCH_CTXT_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_WRB_CTXT_RAM,
+		"C2H ST completion context RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_WRB_CTXT_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_PFCH_LL_RAM,
+		"C2H ST prefetch list RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_PFCH_LL_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_PEND_FIFO_RAM,
+		"Pend FIFO RAM single bit ECC error",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_PEND_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_RC_RRQ_ODD_RAM,
+		"RC RRQ Odd RAM single bit ECC error.",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		RAM_SBE_STS_A_RC_RRQ_ODD_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+	{
+		EQDMA_SBE_ERR_ALL,
+		"All SBE errors",
+		EQDMA_RAM_SBE_MSK_A_ADDR,
+		EQDMA_RAM_SBE_STS_A_ADDR,
+		EQDMA_SBE_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK,
+		&eqdma_hw_ram_sbe_err_process
+	},
+
+
+	/* DBE errors */
+	{
+		EQDMA_DBE_1_ERR_RC_RRQ_EVEN_RAM,
+		"RC RRQ Odd RAM double bit ECC error.",
+		EQDMA_RAM_DBE_MSK_1_A_ADDR,
+		EQDMA_RAM_DBE_STS_1_A_ADDR,
+		RAM_DBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_1_ERR_TAG_ODD_RAM,
+		"Tag Odd Ram double bit ECC error.",
+		EQDMA_RAM_DBE_MSK_1_A_ADDR,
+		EQDMA_RAM_DBE_STS_1_A_ADDR,
+		RAM_DBE_STS_1_A_TAG_ODD_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_1_ERR_TAG_EVEN_RAM,
+		"Tag Even Ram double bit ECC error.",
+		EQDMA_RAM_DBE_MSK_1_A_ADDR,
+		EQDMA_RAM_DBE_STS_1_A_ADDR,
+		RAM_DBE_STS_1_A_TAG_EVEN_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_0,
+		"Pfch Ctxt CAM RAM 0 double bit ECC error.",
+		EQDMA_RAM_DBE_MSK_1_A_ADDR,
+		EQDMA_RAM_DBE_STS_1_A_ADDR,
+		RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_1,
+		"Pfch Ctxt CAM RAM double bit ECC error.",
+		EQDMA_RAM_DBE_MSK_1_A_ADDR,
+		EQDMA_RAM_DBE_STS_1_A_ADDR,
+		RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_1_ERR_ALL,
+		"All DBE errors",
+		EQDMA_RAM_DBE_MSK_1_A_ADDR,
+		EQDMA_RAM_DBE_STS_1_A_ADDR,
+		EQDMA_DBE_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_H2C0_DAT,
+		"H2C MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_H2C0_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_H2C1_DAT,
+		"H2C MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_H2C1_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_H2C2_DAT,
+		"H2C MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_H2C2_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_H2C3_DAT,
+		"H2C MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_H2C3_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_C2H0_DAT,
+		"C2H MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_C2H0_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_C2H1_DAT,
+		"C2H MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_C2H1_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_C2H2_DAT,
+		"C2H MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_C2H2_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_C2H3_DAT,
+		"C2H MM data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_C2H3_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_H2C_RD_BRG_DAT,
+		"Bridge master read double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_H2C_RD_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_H2C_WR_BRG_DAT,
+		"Bridge master write double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_H2C_WR_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_C2H_RD_BRG_DAT,
+		"Bridge slave read data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_C2H_RD_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_C2H_WR_BRG_DAT,
+		"Bridge slave write data buffer double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_C2H_WR_BRG_DAT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_FUNC_MAP,
+		"Function map RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_FUNC_MAP_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_DSC_HW_CTXT,
+		"Descriptor engine hardware context RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_DSC_HW_CTXT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_DSC_CRD_RCV,
+		"Descriptor engine receive credit context RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_DSC_CRD_RCV_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_DSC_SW_CTXT,
+		"Descriptor engine software context RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_DSC_SW_CTXT_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_DSC_CPLI,
+		"Descriptor engine fetch completion information RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_DSC_CPLI_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_DSC_CPLD,
+		"Descriptor engine fetch completion data RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_DSC_CPLD_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_MI_TL_SLV_FIFO_RAM,
+		"TL Slave FIFO RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_TIMER_FIFO_RAM,
+		"Timer fifo RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_TIMER_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_QID_FIFO_RAM,
+		"C2H ST QID FIFO RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_QID_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_WRB_COAL_DATA_RAM,
+		"Writeback Coalescing RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_WRB_COAL_DATA_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_INT_CTXT_RAM,
+		"Interrupt context RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_INT_CTXT_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_DESC_REQ_FIFO_RAM,
+		"C2H ST descriptor request RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_DESC_REQ_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_PFCH_CTXT_RAM,
+		"C2H ST prefetch RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_PFCH_CTXT_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_WRB_CTXT_RAM,
+		"C2H ST completion context RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_WRB_CTXT_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_PFCH_LL_RAM,
+		"C2H ST prefetch list RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_PFCH_LL_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_PEND_FIFO_RAM,
+		"Pend FIFO RAM double bit ECC error",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_PEND_FIFO_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_RC_RRQ_ODD_RAM,
+		"RC RRQ Odd RAM double bit ECC error.",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		RAM_DBE_STS_A_RC_RRQ_ODD_RAM_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	},
+	{
+		EQDMA_DBE_ERR_ALL,
+		"All DBE errors",
+		EQDMA_RAM_DBE_MSK_A_ADDR,
+		EQDMA_RAM_DBE_STS_A_ADDR,
+		EQDMA_DBE_ERR_ALL_MASK,
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK,
+		&eqdma_hw_ram_dbe_err_process
+	}
+};
+
+/* One "all errors" aggregate identifier per leaf error register group;
+ * used to iterate every hardware error aggregator in one pass.
+ */
+static int32_t all_eqdma_hw_errs[EQDMA_TOTAL_LEAF_ERROR_AGGREGATORS] = {
+	EQDMA_DSC_ERR_ALL,
+	EQDMA_TRQ_ERR_ALL,
+	EQDMA_ST_C2H_ERR_ALL,
+	EQDMA_ST_FATAL_ERR_ALL,
+	EQDMA_ST_H2C_ERR_ALL,
+	EQDMA_SBE_1_ERR_ALL,
+	EQDMA_SBE_ERR_ALL,
+	EQDMA_DBE_1_ERR_ALL,
+	EQDMA_DBE_ERR_ALL
+};
+
+/* Display names for the software descriptor context dump.
+ * NOTE: entry order must stay in sync with the assignment order in
+ * eqdma_fill_sw_ctxt(), which fills .value positionally.
+ */
+static struct qctx_entry eqdma_sw_ctxt_entries[] = {
+	{"PIDX", 0},
+	{"IRQ Arm", 0},
+	{"Function Id", 0},
+	{"Queue Enable", 0},
+	{"Fetch Credit Enable", 0},
+	{"Write back/Intr Check", 0},
+	{"Write back/Intr Interval", 0},
+	{"Address Translation", 0},
+	{"Fetch Max", 0},
+	{"Ring Size", 0},
+	{"Descriptor Size", 0},
+	{"Bypass Enable", 0},
+	{"MM Channel", 0},
+	{"Writeback Enable", 0},
+	{"Interrupt Enable", 0},
+	{"Port Id", 0},
+	{"Interrupt No Last", 0},
+	{"Error", 0},
+	{"Writeback Error Sent", 0},
+	{"IRQ Request", 0},
+	{"Marker Disable", 0},
+	{"Is Memory Mapped", 0},
+	{"Descriptor Ring Base Addr (Low)", 0},
+	{"Descriptor Ring Base Addr (High)", 0},
+	{"Interrupt Vector/Ring Index", 0},
+	{"Interrupt Aggregation", 0},
+	{"Disable Interrupt with VF", 0},
+	{"Pack descriptor output interface", 0},
+	{"Irq Bypass", 0},
+};
+
+/* Display names for the hardware descriptor context dump.
+ * NOTE: entry order must stay in sync with eqdma_fill_hw_ctxt().
+ */
+static struct qctx_entry eqdma_hw_ctxt_entries[] = {
+	{"CIDX", 0},
+	{"Credits Consumed", 0},
+	{"Descriptors Pending", 0},
+	{"Queue Invalid No Desc Pending", 0},
+	{"Eviction Pending", 0},
+	{"Fetch Pending", 0},
+};
+
+/* Display name for the (single-field) credit context dump;
+ * filled by eqdma_fill_credit_ctxt().
+ */
+static struct qctx_entry eqdma_credit_ctxt_entries[] = {
+	{"Credit", 0},
+};
+
+/* Display names for the completion (CMPT) context dump.
+ * NOTE: entry order must stay in sync with eqdma_fill_cmpt_ctxt(),
+ * which fills .value positionally.
+ */
+static struct qctx_entry eqdma_cmpt_ctxt_entries[] = {
+	{"Enable Status Desc Update", 0},
+	{"Enable Interrupt", 0},
+	{"Trigger Mode", 0},
+	{"Function Id", 0},
+	{"Counter Index", 0},
+	{"Timer Index", 0},
+	{"Interrupt State", 0},
+	{"Color", 0},
+	{"Ring Size", 0},
+	{"Base Addr High (L)[37:6]", 0},
+	{"Base Addr High(H)[63:38]", 0},
+	{"Descriptor Size", 0},
+	{"PIDX", 0},
+	{"CIDX", 0},
+	{"Valid", 0},
+	{"Error", 0},
+	{"Trigger Pending", 0},
+	{"Timer Running", 0},
+	{"Full Update", 0},
+	{"Over Flow Check Disable", 0},
+	{"Address Translation", 0},
+	{"Interrupt Vector/Ring Index", 0},
+	{"Interrupt Aggregation", 0},
+	/* Fixed typo "Insterrupt"; matches the spelling used in
+	 * eqdma_sw_ctxt_entries[].
+	 */
+	{"Disable Interrupt with VF", 0},
+	{"c2h Direction", 0},
+	{"Base Addr Low[5:2]", 0},
+	{"Shared Completion Queue", 0},
+};
+
+/* Display names for the C2H prefetch context dump.
+ * NOTE: entry order must stay in sync with eqdma_fill_pfetch_ctxt().
+ */
+static struct qctx_entry eqdma_c2h_pftch_ctxt_entries[] = {
+	{"Bypass", 0},
+	{"Buffer Size Index", 0},
+	{"Port Id", 0},
+	{"Variable Descriptor", 0},
+	{"Number of descriptors prefetched", 0},
+	{"Error", 0},
+	{"Prefetch Enable", 0},
+	{"In Prefetch", 0},
+	{"Software Credit", 0},
+	{"Valid", 0},
+};
+
+/* Display names for the indirect interrupt context dump.
+ * NOTE: entry order must stay in sync with eqdma_fill_intr_ctxt().
+ */
+static struct qctx_entry eqdma_ind_intr_ctxt_entries[] = {
+	{"valid", 0},
+	{"vec", 0},
+	{"int_st", 0},
+	{"color", 0},
+	{"baddr_4k (Low)", 0},
+	{"baddr_4k (High)", 0},
+	{"page_size", 0},
+	{"pidx", 0},
+	{"at", 0},
+	{"Function Id", 0},
+};
+
+static int eqdma_indirect_reg_invalidate(void *dev_hndl,
+		enum ind_ctxt_cmd_sel sel, uint16_t hw_qid);
+static int eqdma_indirect_reg_clear(void *dev_hndl,
+		enum ind_ctxt_cmd_sel sel, uint16_t hw_qid);
+static int eqdma_indirect_reg_read(void *dev_hndl, enum ind_ctxt_cmd_sel sel,
+		uint16_t hw_qid, uint32_t cnt, uint32_t *data);
+static int eqdma_indirect_reg_write(void *dev_hndl, enum ind_ctxt_cmd_sel sel,
+		uint16_t hw_qid, uint32_t *data, uint16_t cnt);
+
+/*
+ * eqdma_get_config_num_regs() - return the number of EQDMA config
+ * registers; thin public wrapper over eqdma_config_num_regs_get().
+ */
+uint32_t eqdma_get_config_num_regs(void)
+{
+	uint32_t num_regs = eqdma_config_num_regs_get();
+
+	return num_regs;
+}
+
+/*
+ * eqdma_get_config_regs() - return the EQDMA config register table;
+ * thin public wrapper over eqdma_config_regs_get().
+ */
+struct xreg_info *eqdma_get_config_regs(void)
+{
+	struct xreg_info *cfg_regs = eqdma_config_regs_get();
+
+	return cfg_regs;
+}
+
+/*
+ * eqdma_reg_dump_buf_len() - buffer size in bytes needed to dump all
+ * config registers: one line per register plus one extra line.
+ */
+uint32_t eqdma_reg_dump_buf_len(void)
+{
+	return (eqdma_config_num_regs_get() + 1) * REG_DUMP_SIZE_PER_LINE;
+}
+
+int eqdma_context_buf_len(uint8_t st,
+		enum qdma_dev_q_type q_type, uint32_t *buflen)
+{
+	int len = 0;
+
+	if (q_type == QDMA_DEV_Q_TYPE_CMPT) {
+		len += (((sizeof(eqdma_cmpt_ctxt_entries) /
+			sizeof(eqdma_cmpt_ctxt_entries[0])) + 1) *
+			REG_DUMP_SIZE_PER_LINE);
+	} else {
+		len += (((sizeof(eqdma_sw_ctxt_entries) /
+				sizeof(eqdma_sw_ctxt_entries[0])) + 1) *
+				REG_DUMP_SIZE_PER_LINE);
+
+		len += (((sizeof(eqdma_hw_ctxt_entries) /
+			sizeof(eqdma_hw_ctxt_entries[0])) + 1) *
+			REG_DUMP_SIZE_PER_LINE);
+
+		len += (((sizeof(eqdma_credit_ctxt_entries) /
+			sizeof(eqdma_credit_ctxt_entries[0])) + 1) *
+			REG_DUMP_SIZE_PER_LINE);
+
+		if (st && q_type == QDMA_DEV_Q_TYPE_C2H) {
+			len += (((sizeof(eqdma_cmpt_ctxt_entries) /
+				sizeof(eqdma_cmpt_ctxt_entries[0])) + 1) *
+				REG_DUMP_SIZE_PER_LINE);
+
+			len += (((sizeof(eqdma_c2h_pftch_ctxt_entries) /
+				sizeof(eqdma_c2h_pftch_ctxt_entries[0])) + 1) *
+				REG_DUMP_SIZE_PER_LINE);
+		}
+	}
+
+	*buflen = len;
+	return 0;
+}
+
+/*
+ * eqdma_intr_context_buf_len() - buffer length needed to dump the
+ * indirect interrupt context: one line per entry plus a header line.
+ */
+static uint32_t eqdma_intr_context_buf_len(void)
+{
+	return (uint32_t)(((sizeof(eqdma_ind_intr_ctxt_entries) /
+			sizeof(eqdma_ind_intr_ctxt_entries[0])) + 1) *
+			REG_DUMP_SIZE_PER_LINE);
+}
+
+/*
+ * eqdma_indirect_reg_invalidate() - issue an indirect context INVALIDATE
+ * command for the given queue/selector and wait for the busy bit to clear.
+ *
+ * Return: QDMA_SUCCESS on completion,
+ *	-QDMA_ERR_HWACC_BUSY_TIMEOUT if the command did not complete in time
+ */
+static int eqdma_indirect_reg_invalidate(void *dev_hndl,
+		enum ind_ctxt_cmd_sel sel, uint16_t hw_qid)
+{
+	int rv = QDMA_SUCCESS;
+	union qdma_ind_ctxt_cmd cmd;
+
+	qdma_reg_access_lock(dev_hndl);
+
+	/* Build and issue the invalidate command */
+	cmd.word = 0;
+	cmd.bits.qid = hw_qid;
+	cmd.bits.op = QDMA_CTXT_CMD_INV;
+	cmd.bits.sel = sel;
+	qdma_reg_write(dev_hndl, EQDMA_IND_CTXT_CMD_ADDR, cmd.word);
+
+	/* Poll until the busy bit deasserts or the poll times out */
+	if (hw_monitor_reg(dev_hndl, EQDMA_IND_CTXT_CMD_ADDR,
+			IND_CTXT_CMD_BUSY_MASK, 0,
+			QDMA_REG_POLL_DFLT_INTERVAL_US,
+			QDMA_REG_POLL_DFLT_TIMEOUT_US)) {
+		qdma_log_error("%s: hw_monitor_reg failed with err:%d\n",
+						__func__,
+					   -QDMA_ERR_HWACC_BUSY_TIMEOUT);
+		rv = -QDMA_ERR_HWACC_BUSY_TIMEOUT;
+	}
+
+	qdma_reg_access_release(dev_hndl);
+
+	return rv;
+}
+
+/*
+ * eqdma_indirect_reg_clear() - issue an indirect context CLEAR command
+ * for the given queue/selector and wait for the busy bit to clear.
+ *
+ * Return: QDMA_SUCCESS on completion,
+ *	-QDMA_ERR_HWACC_BUSY_TIMEOUT if the command did not complete in time
+ */
+static int eqdma_indirect_reg_clear(void *dev_hndl,
+		enum ind_ctxt_cmd_sel sel, uint16_t hw_qid)
+{
+	int rv = QDMA_SUCCESS;
+	union qdma_ind_ctxt_cmd cmd;
+
+	qdma_reg_access_lock(dev_hndl);
+
+	/* Build and issue the clear command */
+	cmd.word = 0;
+	cmd.bits.qid = hw_qid;
+	cmd.bits.op = QDMA_CTXT_CMD_CLR;
+	cmd.bits.sel = sel;
+	qdma_reg_write(dev_hndl, EQDMA_IND_CTXT_CMD_ADDR, cmd.word);
+
+	/* Poll until the busy bit deasserts or the poll times out */
+	if (hw_monitor_reg(dev_hndl, EQDMA_IND_CTXT_CMD_ADDR,
+			IND_CTXT_CMD_BUSY_MASK, 0,
+			QDMA_REG_POLL_DFLT_INTERVAL_US,
+			QDMA_REG_POLL_DFLT_TIMEOUT_US)) {
+		qdma_log_error("%s: hw_monitor_reg failed with err:%d\n",
+						__func__,
+					   -QDMA_ERR_HWACC_BUSY_TIMEOUT);
+		rv = -QDMA_ERR_HWACC_BUSY_TIMEOUT;
+	}
+
+	qdma_reg_access_release(dev_hndl);
+
+	return rv;
+}
+
+/*
+ * eqdma_indirect_reg_read() - issue an indirect context READ command,
+ * wait for completion, then copy @cnt words out of the context data
+ * registers into @data.
+ *
+ * Return: QDMA_SUCCESS on completion,
+ *	-QDMA_ERR_HWACC_BUSY_TIMEOUT if the command did not complete in time
+ */
+static int eqdma_indirect_reg_read(void *dev_hndl, enum ind_ctxt_cmd_sel sel,
+		uint16_t hw_qid, uint32_t cnt, uint32_t *data)
+{
+	union qdma_ind_ctxt_cmd cmd;
+	uint32_t i;
+	uint32_t reg = EQDMA_IND_CTXT_DATA_ADDR;
+	int rv = QDMA_SUCCESS;
+
+	qdma_reg_access_lock(dev_hndl);
+
+	/* Build and issue the read command */
+	cmd.word = 0;
+	cmd.bits.qid = hw_qid;
+	cmd.bits.op = QDMA_CTXT_CMD_RD;
+	cmd.bits.sel = sel;
+	qdma_reg_write(dev_hndl, EQDMA_IND_CTXT_CMD_ADDR, cmd.word);
+
+	/* Poll until the busy bit deasserts or the poll times out */
+	if (hw_monitor_reg(dev_hndl, EQDMA_IND_CTXT_CMD_ADDR,
+			IND_CTXT_CMD_BUSY_MASK, 0,
+			QDMA_REG_POLL_DFLT_INTERVAL_US,
+			QDMA_REG_POLL_DFLT_TIMEOUT_US)) {
+		qdma_log_error("%s: hw_monitor_reg failed with err:%d\n",
+						__func__,
+					   -QDMA_ERR_HWACC_BUSY_TIMEOUT);
+		rv = -QDMA_ERR_HWACC_BUSY_TIMEOUT;
+	} else {
+		/* Command completed: latch the context data registers */
+		for (i = 0; i < cnt; i++, reg += sizeof(uint32_t))
+			data[i] = qdma_reg_read(dev_hndl, reg);
+	}
+
+	qdma_reg_access_release(dev_hndl);
+
+	return rv;
+}
+
+/*
+ * eqdma_indirect_reg_write() - stage context data plus an all-ones
+ * update mask, write them together with a WRITE command to the indirect
+ * context registers, and wait for the busy bit to clear.
+ *
+ * Return: QDMA_SUCCESS on completion,
+ *	-QDMA_ERR_HWACC_BUSY_TIMEOUT if the command did not complete in time
+ */
+static int eqdma_indirect_reg_write(void *dev_hndl, enum ind_ctxt_cmd_sel sel,
+		uint16_t hw_qid, uint32_t *data, uint16_t cnt)
+{
+	struct qdma_indirect_ctxt_regs regs;
+	uint32_t *src = (uint32_t *)&regs;
+	uint32_t i, reg_addr = EQDMA_IND_CTXT_DATA_ADDR;
+	int rv = QDMA_SUCCESS;
+
+	qdma_reg_access_lock(dev_hndl);
+
+	/* Stage the context data (zero-padded) and an all-ones mask */
+	for (i = 0; i < QDMA_IND_CTXT_DATA_NUM_REGS; i++) {
+		regs.qdma_ind_ctxt_data[i] = (i < cnt) ? data[i] : 0;
+		regs.qdma_ind_ctxt_mask[i] = 0xFFFFFFFF;
+	}
+
+	regs.cmd.word = 0;
+	regs.cmd.bits.qid = hw_qid;
+	regs.cmd.bits.op = QDMA_CTXT_CMD_WR;
+	regs.cmd.bits.sel = sel;
+
+	/* Push data, mask and command registers to hardware in one sweep */
+	for (i = 0; i < ((2 * QDMA_IND_CTXT_DATA_NUM_REGS) + 1);
+		 i++, reg_addr += sizeof(uint32_t))
+		qdma_reg_write(dev_hndl, reg_addr, src[i]);
+
+	/* Poll until the busy bit deasserts or the poll times out */
+	if (hw_monitor_reg(dev_hndl, EQDMA_IND_CTXT_CMD_ADDR,
+			IND_CTXT_CMD_BUSY_MASK, 0,
+			QDMA_REG_POLL_DFLT_INTERVAL_US,
+			QDMA_REG_POLL_DFLT_TIMEOUT_US)) {
+		qdma_log_error("%s: hw_monitor_reg failed with err:%d\n",
+						__func__,
+					   -QDMA_ERR_HWACC_BUSY_TIMEOUT);
+		rv = -QDMA_ERR_HWACC_BUSY_TIMEOUT;
+	}
+
+	qdma_reg_access_release(dev_hndl);
+
+	return rv;
+}
+
+/*
+ * eqdma_fill_sw_ctxt() - copy each field of the decoded software context
+ * into eqdma_sw_ctxt_entries[] for dumping.
+ *
+ * NOTE: the assignment order below must match the name order declared in
+ * eqdma_sw_ctxt_entries[]; values are stored positionally via i++.
+ */
+static void eqdma_fill_sw_ctxt(struct qdma_descq_sw_ctxt *sw_ctxt)
+{
+	int i = 0;
+
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->pidx;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->irq_arm;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->fnc_id;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->qen;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->frcd_en;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->wbi_chk;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->wbi_intvl_en;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->at;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->fetch_max;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->rngsz_idx;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->desc_sz;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->bypass;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->mm_chn;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->wbk_en;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->irq_en;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->port_id;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->irq_no_last;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->err;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->err_wb_sent;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->irq_req;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->mrkr_dis;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->is_mm;
+	/* 64-bit ring base address is split into low/high 32-bit entries */
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->ring_bs_addr & 0xFFFFFFFF;
+	eqdma_sw_ctxt_entries[i++].value =
+		(sw_ctxt->ring_bs_addr >> 32) & 0xFFFFFFFF;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->vec;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->intr_aggr;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->dis_intr_on_vf;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->pack_byp_out;
+	eqdma_sw_ctxt_entries[i++].value = sw_ctxt->irq_byp;
+}
+
+/*
+ * eqdma_fill_cmpt_ctxt() - copy each field of the decoded completion
+ * context into eqdma_cmpt_ctxt_entries[] for dumping.
+ *
+ * NOTE: the assignment order below must match the name order declared in
+ * eqdma_cmpt_ctxt_entries[]; values are stored positionally via i++.
+ */
+static void eqdma_fill_cmpt_ctxt(struct qdma_descq_cmpt_ctxt *cmpt_ctxt)
+{
+	int i = 0;
+
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->en_stat_desc;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->en_int;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->trig_mode;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->fnc_id;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->counter_idx;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->timer_idx;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->in_st;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->color;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->ringsz_idx;
+	/* Base address is split into the high (L)/(H) and low fields
+	 * selected by the BADDR mask macros.
+	 */
+	eqdma_cmpt_ctxt_entries[i++].value =
+		(uint32_t)FIELD_GET(EQDMA_COMPL_CTXT_BADDR_HIGH_L_MASK,
+				    cmpt_ctxt->bs_addr);
+	eqdma_cmpt_ctxt_entries[i++].value =
+		(uint32_t)FIELD_GET(EQDMA_COMPL_CTXT_BADDR_HIGH_H_MASK,
+				    cmpt_ctxt->bs_addr);
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->desc_sz;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->pidx;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->cidx;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->valid;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->err;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->user_trig_pend;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->timer_running;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->full_upd;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->ovf_chk_dis;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->at;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->vec;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->int_aggr;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->dis_intr_on_vf;
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->dir_c2h;
+	eqdma_cmpt_ctxt_entries[i++].value =
+		(uint32_t)FIELD_GET(EQDMA_COMPL_CTXT_BADDR_LOW_MASK,
+				    cmpt_ctxt->bs_addr);
+	eqdma_cmpt_ctxt_entries[i++].value = cmpt_ctxt->sh_cmpt;
+}
+
+/*
+ * eqdma_fill_hw_ctxt() - copy each field of the decoded hardware context
+ * into eqdma_hw_ctxt_entries[] for dumping.
+ *
+ * NOTE: the assignment order below must match the name order declared in
+ * eqdma_hw_ctxt_entries[].
+ */
+static void eqdma_fill_hw_ctxt(struct qdma_descq_hw_ctxt *hw_ctxt)
+{
+	int i = 0;
+
+	eqdma_hw_ctxt_entries[i++].value = hw_ctxt->cidx;
+	eqdma_hw_ctxt_entries[i++].value = hw_ctxt->crd_use;
+	eqdma_hw_ctxt_entries[i++].value = hw_ctxt->dsc_pend;
+	eqdma_hw_ctxt_entries[i++].value = hw_ctxt->idl_stp_b;
+	eqdma_hw_ctxt_entries[i++].value = hw_ctxt->evt_pnd;
+	eqdma_hw_ctxt_entries[i++].value = hw_ctxt->fetch_pnd;
+}
+
+/*
+ * eqdma_fill_credit_ctxt() - stage the single credit context value into
+ * eqdma_credit_ctxt_entries[] for dumping.
+ */
+static void eqdma_fill_credit_ctxt(struct qdma_descq_credit_ctxt *cr_ctxt)
+{
+	struct qctx_entry *entry = &eqdma_credit_ctxt_entries[0];
+
+	entry->value = cr_ctxt->credit;
+}
+
+/*
+ * eqdma_fill_pfetch_ctxt() - copy each field of the decoded prefetch
+ * context into eqdma_c2h_pftch_ctxt_entries[] for dumping.
+ *
+ * NOTE: the assignment order below must match the name order declared in
+ * eqdma_c2h_pftch_ctxt_entries[].
+ */
+static void eqdma_fill_pfetch_ctxt(struct qdma_descq_prefetch_ctxt
+		*pfetch_ctxt)
+{
+	int i = 0;
+
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->bypass;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->bufsz_idx;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->port_id;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->var_desc;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->num_pftch;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->err;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->pfch_en;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->pfch;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->sw_crdt;
+	eqdma_c2h_pftch_ctxt_entries[i++].value = pfetch_ctxt->valid;
+}
+
+/*
+ * eqdma_fill_intr_ctxt() - copy each field of the decoded indirect
+ * interrupt context into eqdma_ind_intr_ctxt_entries[] for dumping.
+ *
+ * NOTE: the assignment order below must match the name order declared in
+ * eqdma_ind_intr_ctxt_entries[].
+ */
+static void eqdma_fill_intr_ctxt(struct qdma_indirect_intr_ctxt *intr_ctxt)
+{
+	int i = 0;
+
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->valid;
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->vec;
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->int_st;
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->color;
+	/* 64-bit 4K-aligned base address split into low/high entries */
+	eqdma_ind_intr_ctxt_entries[i++].value =
+			intr_ctxt->baddr_4k & 0xFFFFFFFF;
+	eqdma_ind_intr_ctxt_entries[i++].value =
+			(intr_ctxt->baddr_4k >> 32) & 0xFFFFFFFF;
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->page_size;
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->pidx;
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->at;
+	eqdma_ind_intr_ctxt_entries[i++].value = intr_ctxt->func_id;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_set_default_global_csr() - function to set the global CSR register to
+ * default values. The value can be modified later by using the set/get csr
+ * functions
+ *
+ * @dev_hndl:	device handle
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_set_default_global_csr(void *dev_hndl)
+{
+	/* Default values */
+	uint32_t cfg_val = 0, reg_val = 0;
+	/* Default CSR tables; entry 0 of the ring-size table is 2049 while
+	 * the rest ascend - NOTE(review): confirm intended layout against
+	 * the hardware CSR documentation.
+	 */
+	uint32_t rng_sz[QDMA_NUM_RING_SIZES] = {2049, 65, 129, 193, 257, 385,
+		513, 769, 1025, 1537, 3073, 4097, 6145, 8193, 12289, 16385};
+	uint32_t tmr_cnt[QDMA_NUM_C2H_TIMERS] = {1, 2, 4, 5, 8, 10, 15, 20, 25,
+		30, 50, 75, 100, 125, 150, 200};
+	uint32_t cnt_th[QDMA_NUM_C2H_COUNTERS] = {2, 4, 8, 16, 24, 32, 48, 64,
+		80, 96, 112, 128, 144, 160, 176, 192};
+	uint32_t buf_sz[QDMA_NUM_C2H_BUFFER_SIZES] = {4096, 256, 512, 1024,
+		2048, 3968, 4096, 4096, 4096, 4096, 4096, 4096, 8192, 9018,
+		16384, 65535};
+	struct qdma_dev_attributes dev_cap;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Capabilities decide which CSR groups below are programmed */
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	/* Configuring CSR registers */
+	/* Global ring sizes */
+	qdma_write_csr_values(dev_hndl, EQDMA_GLBL_RNG_SZ_1_ADDR, 0,
+			QDMA_NUM_RING_SIZES, rng_sz);
+
+	if (dev_cap.st_en || dev_cap.mm_cmpt_en) {
+		/* Counter thresholds */
+		qdma_write_csr_values(dev_hndl, EQDMA_C2H_CNT_TH_ADDR, 0,
+				QDMA_NUM_C2H_COUNTERS, cnt_th);
+
+		/* Timer Counters */
+		qdma_write_csr_values(dev_hndl, EQDMA_C2H_TIMER_CNT_ADDR, 0,
+				QDMA_NUM_C2H_TIMERS, tmr_cnt);
+
+
+		/* Descriptor fetch max and writeback accumulation interval */
+		reg_val =
+			FIELD_SET(GLBL_DSC_CFG_MAXFETCH_MASK,
+					DEFAULT_MAX_DSC_FETCH) |
+			FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK,
+					DEFAULT_WRB_INT);
+		qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR, reg_val);
+	}
+
+	if (dev_cap.st_en) {
+		/* Buffer Sizes */
+		qdma_write_csr_values(dev_hndl, EQDMA_C2H_BUF_SZ_ADDR, 0,
+				QDMA_NUM_C2H_BUFFER_SIZES, buf_sz);
+
+		/* Prefetch Configuration */
+
+		cfg_val = qdma_reg_read(dev_hndl,
+				EQDMA_C2H_PFCH_CACHE_DEPTH_ADDR);
+
+		reg_val =
+			FIELD_SET(C2H_PFCH_CFG_FL_TH_MASK,
+					DEFAULT_PFCH_STOP_THRESH);
+		qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_ADDR, reg_val);
+
+		/* Derive queue counts from half the cache depth.
+		 * NOTE(review): assumes cache depth >= 6 so the eviction
+		 * threshold (depth/2 - 2) stays positive - confirm.
+		 */
+		reg_val = FIELD_SET(C2H_PFCH_CFG_1_QCNT_MASK, (cfg_val >> 1)) |
+				  FIELD_SET(C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK,
+						((cfg_val >> 1) - 2));
+		qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_1_ADDR, reg_val);
+
+		reg_val = FIELD_SET(C2H_PFCH_CFG_2_NUM_MASK,
+					DEFAULT_PFCH_NUM_ENTRIES_PER_Q);
+
+		qdma_reg_write(dev_hndl, EQDMA_C2H_PFCH_CFG_2_ADDR, reg_val);
+
+		/* C2H interrupt timer tick */
+		qdma_reg_write(dev_hndl, EQDMA_C2H_INT_TIMER_TICK_ADDR,
+				DEFAULT_C2H_INTR_TIMER_TICK);
+
+		/* C2h Completion Coalesce Configuration; max buffer size is
+		 * taken from the coalesce buffer depth register read above.
+		 */
+		cfg_val = qdma_reg_read(dev_hndl,
+				EQDMA_C2H_WRB_COAL_BUF_DEPTH_ADDR);
+		reg_val =
+			FIELD_SET(C2H_WRB_COAL_CFG_TICK_CNT_MASK,
+					DEFAULT_CMPT_COAL_TIMER_CNT) |
+			FIELD_SET(C2H_WRB_COAL_CFG_TICK_VAL_MASK,
+					DEFAULT_CMPT_COAL_TIMER_TICK) |
+			FIELD_SET(C2H_WRB_COAL_CFG_MAX_BUF_SZ_MASK, cfg_val);
+		qdma_reg_write(dev_hndl, EQDMA_C2H_WRB_COAL_CFG_ADDR, reg_val);
+
+		/* H2C throttle Configuration */
+
+		reg_val =
+			FIELD_SET(H2C_REQ_THROT_PCIE_DATA_THRESH_MASK,
+					EQDMA_H2C_THROT_DATA_THRESH) |
+			FIELD_SET(H2C_REQ_THROT_PCIE_EN_DATA_MASK,
+					EQDMA_THROT_EN_DATA) |
+			FIELD_SET(H2C_REQ_THROT_PCIE_MASK,
+					EQDMA_H2C_THROT_REQ_THRESH) |
+			FIELD_SET(H2C_REQ_THROT_PCIE_EN_REQ_MASK,
+					EQDMA_THROT_EN_REQ);
+		qdma_reg_write(dev_hndl, EQDMA_H2C_REQ_THROT_PCIE_ADDR,
+			reg_val);
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/*
+ * dump_eqdma_context() - Helper function to dump queue context into string
+ *
+ * @queue_context: captured queue context to decode (must not be NULL)
+ * @st:		non-zero when the queue operates in streaming (ST) mode
+ * @q_type:	queue type (H2C, C2H or CMPT)
+ * @buf:	output buffer receiving the formatted dump
+ * @buf_sz:	size of @buf in bytes
+ *
+ * return len - length of the string copied into buffer
+ * (negative QDMA error code on invalid argument or insufficient buffer)
+ */
+static int dump_eqdma_context(struct qdma_descq_context *queue_context,
+		uint8_t st,	enum qdma_dev_q_type q_type,
+		char *buf, int buf_sz)
+{
+	int i = 0;
+	int n;
+	int len = 0;
+	int rv;
+	char banner[DEBGFS_LINE_SZ];
+
+	if (queue_context == NULL) {
+		qdma_log_error("%s: queue_context is NULL, err:%d\n",
+						__func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/*
+	 * Decode the raw hardware context words into the per-field
+	 * name/value entry tables that the loops below print from.
+	 * Only the contexts relevant to the queue type are decoded.
+	 */
+	if (q_type == QDMA_DEV_Q_TYPE_CMPT) {
+		eqdma_fill_cmpt_ctxt(&queue_context->cmpt_ctxt);
+	} else if (q_type == QDMA_DEV_Q_TYPE_H2C) {
+		eqdma_fill_sw_ctxt(&queue_context->sw_ctxt);
+		eqdma_fill_hw_ctxt(&queue_context->hw_ctxt);
+		eqdma_fill_credit_ctxt(&queue_context->cr_ctxt);
+	} else if (q_type == QDMA_DEV_Q_TYPE_C2H) {
+		eqdma_fill_sw_ctxt(&queue_context->sw_ctxt);
+		eqdma_fill_hw_ctxt(&queue_context->hw_ctxt);
+		eqdma_fill_credit_ctxt(&queue_context->cr_ctxt);
+		if (st) {
+			eqdma_fill_pfetch_ctxt(&queue_context->pfetch_ctxt);
+			eqdma_fill_cmpt_ctxt(&queue_context->cmpt_ctxt);
+		}
+	}
+
+	if (q_type != QDMA_DEV_Q_TYPE_CMPT) {
+		/* Build a "----" separator line used as a section banner */
+		for (i = 0; i < DEBGFS_LINE_SZ - 5; i++) {
+			rv = QDMA_SNPRINTF_S(banner + i,
+				(DEBGFS_LINE_SZ - i),
+				sizeof("-"), "-");
+			if (rv < 0 || rv > (int)sizeof("-")) {
+				qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+		}
+
+		/* SW context dump */
+		n = sizeof(eqdma_sw_ctxt_entries) /
+				sizeof((eqdma_sw_ctxt_entries)[0]);
+		for (i = 0; i < n; i++) {
+			if (len >= buf_sz ||
+			    ((len + DEBGFS_LINE_SZ) >= buf_sz))
+				goto INSUF_BUF_EXIT;
+
+			/* First entry: emit the banner/title header lines */
+			if (i == 0) {
+				if ((len + (3 * DEBGFS_LINE_SZ)) >= buf_sz)
+					goto INSUF_BUF_EXIT;
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+						     DEBGFS_LINE_SZ, "\n%s",
+						     banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%40s", "SW Context");
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s\n", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+			}
+
+			/* One line per field: name, hex value, decimal value */
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ,
+				"%-47s %#-10x %u\n",
+				eqdma_sw_ctxt_entries[i].name,
+				eqdma_sw_ctxt_entries[i].value,
+				eqdma_sw_ctxt_entries[i].value);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+		}
+
+		/* HW context dump */
+		n = sizeof(eqdma_hw_ctxt_entries) /
+				sizeof((eqdma_hw_ctxt_entries)[0]);
+		for (i = 0; i < n; i++) {
+			if (len >= buf_sz ||
+			    ((len + DEBGFS_LINE_SZ) >= buf_sz))
+				goto INSUF_BUF_EXIT;
+
+			if (i == 0) {
+				if ((len + (3 * DEBGFS_LINE_SZ)) >= buf_sz)
+					goto INSUF_BUF_EXIT;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%40s", "HW Context");
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s\n", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+			}
+
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ,
+				"%-47s %#-10x %u\n",
+				eqdma_hw_ctxt_entries[i].name,
+				eqdma_hw_ctxt_entries[i].value,
+				eqdma_hw_ctxt_entries[i].value);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+		}
+
+		/* Credit context dump */
+		n = sizeof(eqdma_credit_ctxt_entries) /
+			sizeof((eqdma_credit_ctxt_entries)[0]);
+		for (i = 0; i < n; i++) {
+			if (len >= buf_sz ||
+				((len + DEBGFS_LINE_SZ) >= buf_sz))
+				goto INSUF_BUF_EXIT;
+
+			if (i == 0) {
+				if ((len + (3 * DEBGFS_LINE_SZ)) >= buf_sz)
+					goto INSUF_BUF_EXIT;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%40s",
+					"Credit Context");
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s\n", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+			}
+
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ,
+				"%-47s %#-10x %u\n",
+				eqdma_credit_ctxt_entries[i].name,
+				eqdma_credit_ctxt_entries[i].value,
+				eqdma_credit_ctxt_entries[i].value);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+		}
+	}
+
+	/* Completion context applies to CMPT queues and ST C2H queues */
+	if (q_type == QDMA_DEV_Q_TYPE_CMPT ||
+			(st && q_type == QDMA_DEV_Q_TYPE_C2H)) {
+		/* Completion context dump */
+		n = sizeof(eqdma_cmpt_ctxt_entries) /
+				sizeof((eqdma_cmpt_ctxt_entries)[0]);
+		for (i = 0; i < n; i++) {
+			if (len >= buf_sz ||
+			    ((len + DEBGFS_LINE_SZ) >= buf_sz))
+				goto INSUF_BUF_EXIT;
+
+			if (i == 0) {
+				if ((len + (3 * DEBGFS_LINE_SZ)) >= buf_sz)
+					goto INSUF_BUF_EXIT;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error
+						("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%40s",
+					"Completion Context");
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error
+						("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s\n", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error
+						("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+			}
+
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ,
+				"%-47s %#-10x %u\n",
+				eqdma_cmpt_ctxt_entries[i].name,
+				eqdma_cmpt_ctxt_entries[i].value,
+				eqdma_cmpt_ctxt_entries[i].value);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error
+					("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+		}
+	}
+
+	/* Prefetch context exists only for ST C2H queues */
+	if (st && q_type == QDMA_DEV_Q_TYPE_C2H) {
+		/* Prefetch context dump */
+		n = sizeof(eqdma_c2h_pftch_ctxt_entries) /
+			sizeof(eqdma_c2h_pftch_ctxt_entries[0]);
+		for (i = 0; i < n; i++) {
+			if (len >= buf_sz ||
+				((len + DEBGFS_LINE_SZ) >= buf_sz))
+				goto INSUF_BUF_EXIT;
+
+			if (i == 0) {
+				if ((len + (3 * DEBGFS_LINE_SZ)) >= buf_sz)
+					goto INSUF_BUF_EXIT;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error
+						("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%40s",
+					"Prefetch Context");
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error
+						("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+
+				rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+					DEBGFS_LINE_SZ, "\n%s\n", banner);
+				if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+					qdma_log_error
+						("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+						__LINE__, __func__,
+						rv);
+					goto INSUF_BUF_EXIT;
+				}
+				len += rv;
+			}
+
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ,
+				"%-47s %#-10x %u\n",
+				eqdma_c2h_pftch_ctxt_entries[i].name,
+				eqdma_c2h_pftch_ctxt_entries[i].value,
+				eqdma_c2h_pftch_ctxt_entries[i].value);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error
+					("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+		}
+	}
+
+	return len;
+
+	/*
+	 * Buffer exhausted mid-dump: overwrite the tail of the buffer with a
+	 * truncation notice and report the failure to the caller.
+	 * NOTE(review): the destination-size argument here is buf_sz even
+	 * though only DEBGFS_LINE_SZ bytes remain at that offset; the copy
+	 * length is capped to DEBGFS_LINE_SZ so no overflow occurs — confirm
+	 * against the QDMA_SNPRINTF_S contract.
+	 */
+INSUF_BUF_EXIT:
+	if (buf_sz > DEBGFS_LINE_SZ) {
+		rv = QDMA_SNPRINTF_S((buf + buf_sz - DEBGFS_LINE_SZ),
+			buf_sz, DEBGFS_LINE_SZ,
+			"\n\nInsufficient buffer size, partial context dump\n");
+		if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+			qdma_log_error
+				("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+				__LINE__, __func__,
+				rv);
+		}
+	}
+
+	qdma_log_error("%s: Insufficient buffer size, err:%d\n",
+		__func__, -QDMA_ERR_NO_MEM);
+
+	return -QDMA_ERR_NO_MEM;
+}
+
+/*
+ * dump_eqdma_intr_context() - Helper function to dump interrupt context into
+ * string
+ *
+ * @intr_ctx:	captured indirect interrupt context to decode
+ * @ring_index:	interrupt ring index printed in the section title
+ * @buf:	output buffer receiving the formatted dump
+ * @buf_sz:	size of @buf in bytes
+ *
+ * return len - length of the string copied into buffer
+ * (negative QDMA error code on insufficient buffer)
+ */
+static int dump_eqdma_intr_context(struct qdma_indirect_intr_ctxt *intr_ctx,
+		int ring_index,
+		char *buf, int buf_sz)
+{
+	int i = 0;
+	int n;
+	int len = 0;
+	int rv;
+	char banner[DEBGFS_LINE_SZ];
+
+	/* Decode the raw context words into the name/value entry table */
+	eqdma_fill_intr_ctxt(intr_ctx);
+
+	/* Build a "----" separator line used as a section banner */
+	for (i = 0; i < DEBGFS_LINE_SZ - 5; i++) {
+		rv = QDMA_SNPRINTF_S(banner + i,
+			(DEBGFS_LINE_SZ - i),
+			sizeof("-"), "-");
+		if (rv < 0 || rv > (int)sizeof("-")) {
+			qdma_log_error
+				("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+				__LINE__, __func__,
+				rv);
+			goto INSUF_BUF_EXIT;
+		}
+	}
+
+	/* Interrupt context dump */
+	n = sizeof(eqdma_ind_intr_ctxt_entries) /
+			sizeof((eqdma_ind_intr_ctxt_entries)[0]);
+	for (i = 0; i < n; i++) {
+		if (len >= buf_sz || ((len + DEBGFS_LINE_SZ) >= buf_sz))
+			goto INSUF_BUF_EXIT;
+
+		/* First entry: emit the banner/title header lines */
+		if (i == 0) {
+			if ((len + (3 * DEBGFS_LINE_SZ)) >= buf_sz)
+				goto INSUF_BUF_EXIT;
+
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ, "\n%s", banner);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error
+					("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ, "\n%50s %d",
+				"Interrupt Context for ring#", ring_index);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error
+					("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+
+			rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len),
+				DEBGFS_LINE_SZ, "\n%s\n", banner);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error
+					("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				goto INSUF_BUF_EXIT;
+			}
+			len += rv;
+		}
+
+		/* One line per field: name, hex value, decimal value */
+		rv = QDMA_SNPRINTF_S(buf + len, (buf_sz - len), DEBGFS_LINE_SZ,
+			"%-47s %#-10x %u\n",
+			eqdma_ind_intr_ctxt_entries[i].name,
+			eqdma_ind_intr_ctxt_entries[i].value,
+			eqdma_ind_intr_ctxt_entries[i].value);
+		if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+			qdma_log_error
+				("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+				__LINE__, __func__,
+				rv);
+			goto INSUF_BUF_EXIT;
+		}
+		len += rv;
+	}
+
+	return len;
+
+	/* Buffer exhausted: append a truncation notice and report failure */
+INSUF_BUF_EXIT:
+	if (buf_sz > DEBGFS_LINE_SZ) {
+		rv = QDMA_SNPRINTF_S((buf + buf_sz - DEBGFS_LINE_SZ),
+			buf_sz, DEBGFS_LINE_SZ,
+			"\n\nInsufficient buffer size, partial context dump\n");
+		if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+			qdma_log_error
+				("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+				__LINE__, __func__,
+				rv);
+		}
+	}
+
+	qdma_log_error("%s: Insufficient buffer size, err:%d\n",
+		__func__, -QDMA_ERR_NO_MEM);
+
+	return -QDMA_ERR_NO_MEM;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_get_version() - Function to get the eqdma version
+ *
+ * @dev_hndl:	device handle
+ * @is_vf:	Whether PF or VF
+ * @version_info:	pointer to hold the version info
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_get_version(void *dev_hndl, uint8_t is_vf,
+		struct qdma_hw_version_info *version_info)
+{
+	uint32_t version_reg;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* VFs read the version from their own register space */
+	if (is_vf)
+		version_reg = qdma_reg_read(dev_hndl,
+				EQDMA_OFFSET_VF_VERSION);
+	else
+		version_reg = qdma_reg_read(dev_hndl,
+				EQDMA_GLBL2_MISC_CAP_ADDR);
+
+	qdma_fetch_version_details(is_vf, version_reg, version_info);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_sw_context_write() - create sw context and program it
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the SW context data structure
+ *
+ * Packs the SW descriptor context fields into the 8 indirect context data
+ * words (W0..W7) and programs them through the indirect register interface.
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_sw_context_write(void *dev_hndl, uint8_t c2h,
+			 uint16_t hw_qid,
+			 const struct qdma_descq_sw_ctxt *ctxt)
+{
+	uint32_t sw_ctxt[EQDMA_SW_CONTEXT_NUM_WORDS] = {0};
+	uint16_t num_words_count = 0;
+	uint32_t pasid_l, pasid_h;
+	uint32_t virtio_desc_base_l, virtio_desc_base_m, virtio_desc_base_h;
+	enum ind_ctxt_cmd_sel sel = c2h ?
+			QDMA_CTXT_SEL_SW_C2H : QDMA_CTXT_SEL_SW_H2C;
+
+	/* Input args check */
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_handle=%p sw_ctxt=%p NULL, err:%d\n",
+					   __func__, dev_hndl, ctxt,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Split wide fields that straddle 32-bit word boundaries */
+	pasid_l =
+		FIELD_GET(EQDMA_SW_CTXT_PASID_GET_L_MASK, ctxt->pasid);
+	pasid_h =
+		FIELD_GET(EQDMA_SW_CTXT_PASID_GET_H_MASK, ctxt->pasid);
+
+	virtio_desc_base_l = (uint32_t)FIELD_GET
+		(EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_L_MASK,
+		ctxt->virtio_dsc_base);
+	virtio_desc_base_m = (uint32_t)FIELD_GET
+		(EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_M_MASK,
+		ctxt->virtio_dsc_base);
+	virtio_desc_base_h = (uint32_t)FIELD_GET
+		(EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_H_MASK,
+		ctxt->virtio_dsc_base);
+
+	/* W0: producer index, interrupt arm, function id */
+	sw_ctxt[num_words_count++] =
+		FIELD_SET(SW_IND_CTXT_DATA_W0_PIDX_MASK, ctxt->pidx) |
+		FIELD_SET(SW_IND_CTXT_DATA_W0_IRQ_ARM_MASK, ctxt->irq_arm) |
+		FIELD_SET(SW_IND_CTXT_DATA_W0_FNC_MASK, ctxt->fnc_id);
+
+	qdma_log_debug("%s: pidx=%x, irq_arm=%x, fnc_id=%x\n",
+			 __func__, ctxt->pidx, ctxt->irq_arm, ctxt->fnc_id);
+
+	/* W1: queue enable and mode/feature flag bits */
+	sw_ctxt[num_words_count++] =
+		FIELD_SET(SW_IND_CTXT_DATA_W1_QEN_MASK, ctxt->qen) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_FCRD_EN_MASK, ctxt->frcd_en) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_WBI_CHK_MASK, ctxt->wbi_chk) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_WBI_INTVL_EN_MASK,
+				  ctxt->wbi_intvl_en) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_AT_MASK, ctxt->at) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_FETCH_MAX_MASK, ctxt->fetch_max) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_RNG_SZ_MASK, ctxt->rngsz_idx) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_DSC_SZ_MASK, ctxt->desc_sz) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_BYPASS_MASK, ctxt->bypass) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_MM_CHN_MASK, ctxt->mm_chn) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_WBK_EN_MASK, ctxt->wbk_en) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_IRQ_EN_MASK, ctxt->irq_en) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_PORT_ID_MASK, ctxt->port_id) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_IRQ_NO_LAST_MASK,
+			ctxt->irq_no_last) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_ERR_MASK, ctxt->err) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_ERR_WB_SENT_MASK,
+			ctxt->err_wb_sent) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_IRQ_REQ_MASK, ctxt->irq_req) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_MRKR_DIS_MASK, ctxt->mrkr_dis) |
+		FIELD_SET(SW_IND_CTXT_DATA_W1_IS_MM_MASK, ctxt->is_mm);
+
+	qdma_log_debug("%s: qen=%x, frcd_en=%x, wbi_chk=%x, wbi_intvl_en=%x\n",
+			 __func__, ctxt->qen, ctxt->frcd_en, ctxt->wbi_chk,
+			ctxt->wbi_intvl_en);
+
+	qdma_log_debug("%s: at=%x, fetch_max=%x, rngsz_idx=%x, desc_sz=%x\n",
+			__func__, ctxt->at, ctxt->fetch_max, ctxt->rngsz_idx,
+			ctxt->desc_sz);
+
+	qdma_log_debug("%s: bypass=%x, mm_chn=%x, wbk_en=%x, irq_en=%x\n",
+			__func__, ctxt->bypass, ctxt->mm_chn, ctxt->wbk_en,
+			ctxt->irq_en);
+
+	qdma_log_debug("%s: port_id=%x, irq_no_last=%x,err=%x",
+			__func__, ctxt->port_id, ctxt->irq_no_last, ctxt->err);
+	qdma_log_debug(", err_wb_sent=%x\n", ctxt->err_wb_sent);
+
+	qdma_log_debug("%s: irq_req=%x, mrkr_dis=%x, is_mm=%x\n",
+			__func__, ctxt->irq_req, ctxt->mrkr_dis, ctxt->is_mm);
+
+	/* W2/W3: 64-bit descriptor ring base address (low/high halves) */
+	sw_ctxt[num_words_count++] = ctxt->ring_bs_addr & 0xffffffff;
+	sw_ctxt[num_words_count++] = (ctxt->ring_bs_addr >> 32) & 0xffffffff;
+
+	/* W4: interrupt vector/aggregation, virtio enable, PASID low bits */
+	sw_ctxt[num_words_count++] =
+		FIELD_SET(SW_IND_CTXT_DATA_W4_VEC_MASK, ctxt->vec) |
+		FIELD_SET(SW_IND_CTXT_DATA_W4_INT_AGGR_MASK, ctxt->intr_aggr) |
+		FIELD_SET(SW_IND_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK,
+				ctxt->dis_intr_on_vf) |
+		FIELD_SET(SW_IND_CTXT_DATA_W4_VIRTIO_EN_MASK,
+				ctxt->virtio_en) |
+		FIELD_SET(SW_IND_CTXT_DATA_W4_PACK_BYP_OUT_MASK,
+				ctxt->pack_byp_out) |
+		FIELD_SET(SW_IND_CTXT_DATA_W4_IRQ_BYP_MASK, ctxt->irq_byp) |
+		FIELD_SET(SW_IND_CTXT_DATA_W4_HOST_ID_MASK, ctxt->host_id) |
+		FIELD_SET(SW_IND_CTXT_DATA_W4_PASID_L_MASK, pasid_l);
+
+	/* W5: PASID high bits/enable, virtio descriptor base low bits */
+	sw_ctxt[num_words_count++] =
+		FIELD_SET(SW_IND_CTXT_DATA_W5_PASID_H_MASK, pasid_h) |
+		FIELD_SET(SW_IND_CTXT_DATA_W5_PASID_EN_MASK, ctxt->pasid_en) |
+		FIELD_SET(SW_IND_CTXT_DATA_W5_VIRTIO_DSC_BASE_L_MASK,
+				virtio_desc_base_l);
+
+	/* W6/W7: virtio descriptor base middle and high bits */
+	sw_ctxt[num_words_count++] =
+		FIELD_SET(SW_IND_CTXT_DATA_W6_VIRTIO_DSC_BASE_M_MASK,
+				virtio_desc_base_m);
+
+	sw_ctxt[num_words_count++] =
+		FIELD_SET(SW_IND_CTXT_DATA_W7_VIRTIO_DSC_BASE_H_MASK,
+				virtio_desc_base_h);
+
+
+	qdma_log_debug("%s: vec=%x, intr_aggr=%x\n",
+			__func__, ctxt->vec, ctxt->intr_aggr);
+
+	return eqdma_indirect_reg_write(dev_hndl, sel, hw_qid,
+			sw_ctxt, num_words_count);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_sw_context_read() - read sw context
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the output context data
+ *
+ * Reads the 8 indirect SW context words (W0..W7) and unpacks them into
+ * @ctxt. Field placement mirrors eqdma_sw_context_write().
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_sw_context_read(void *dev_hndl, uint8_t c2h,
+			 uint16_t hw_qid,
+			 struct qdma_descq_sw_ctxt *ctxt)
+{
+	int rv = QDMA_SUCCESS;
+	uint32_t sw_ctxt[EQDMA_SW_CONTEXT_NUM_WORDS] = {0};
+	uint32_t pasid_l, pasid_h;
+	uint32_t virtio_desc_base_l, virtio_desc_base_m, virtio_desc_base_h;
+	enum ind_ctxt_cmd_sel sel = c2h ?
+			QDMA_CTXT_SEL_SW_C2H : QDMA_CTXT_SEL_SW_H2C;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_handle=%p sw_ctxt=%p NULL, err:%d\n",
+					   __func__, dev_hndl, ctxt,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = eqdma_indirect_reg_read(dev_hndl, sel, hw_qid,
+			EQDMA_SW_CONTEXT_NUM_WORDS, sw_ctxt);
+	if (rv < 0)
+		return rv;
+
+	/* W0: producer index, interrupt arm, function id */
+	ctxt->pidx = FIELD_GET(SW_IND_CTXT_DATA_W0_PIDX_MASK, sw_ctxt[0]);
+	ctxt->irq_arm =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W0_IRQ_ARM_MASK,
+				sw_ctxt[0]));
+	ctxt->fnc_id =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W0_FNC_MASK,
+				sw_ctxt[0]));
+
+	qdma_log_debug("%s: pidx=%x, irq_arm=%x, fnc_id=%x",
+			 __func__, ctxt->pidx, ctxt->irq_arm, ctxt->fnc_id);
+
+	/* W1: queue enable and mode/feature flag bits */
+	ctxt->qen = FIELD_GET(SW_IND_CTXT_DATA_W1_QEN_MASK, sw_ctxt[1]);
+	ctxt->frcd_en = FIELD_GET(SW_IND_CTXT_DATA_W1_FCRD_EN_MASK, sw_ctxt[1]);
+	ctxt->wbi_chk = FIELD_GET(SW_IND_CTXT_DATA_W1_WBI_CHK_MASK, sw_ctxt[1]);
+	ctxt->wbi_intvl_en =
+		FIELD_GET(SW_IND_CTXT_DATA_W1_WBI_INTVL_EN_MASK, sw_ctxt[1]);
+	ctxt->at = FIELD_GET(SW_IND_CTXT_DATA_W1_AT_MASK, sw_ctxt[1]);
+	ctxt->fetch_max =
+		(uint8_t)FIELD_GET(SW_IND_CTXT_DATA_W1_FETCH_MAX_MASK,
+				sw_ctxt[1]);
+	ctxt->rngsz_idx =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_RNG_SZ_MASK,
+				sw_ctxt[1]));
+	ctxt->desc_sz =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_DSC_SZ_MASK,
+				sw_ctxt[1]));
+	ctxt->bypass =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_BYPASS_MASK,
+				sw_ctxt[1]));
+	ctxt->mm_chn =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_MM_CHN_MASK,
+				sw_ctxt[1]));
+	ctxt->wbk_en =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_WBK_EN_MASK,
+				sw_ctxt[1]));
+	ctxt->irq_en =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_IRQ_EN_MASK,
+				sw_ctxt[1]));
+	ctxt->port_id =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_PORT_ID_MASK,
+				sw_ctxt[1]));
+	ctxt->irq_no_last =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_IRQ_NO_LAST_MASK,
+			sw_ctxt[1]));
+	ctxt->err =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_ERR_MASK, sw_ctxt[1]));
+	ctxt->err_wb_sent =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_ERR_WB_SENT_MASK,
+			sw_ctxt[1]));
+	ctxt->irq_req =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_IRQ_REQ_MASK,
+				sw_ctxt[1]));
+	ctxt->mrkr_dis =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_MRKR_DIS_MASK,
+				sw_ctxt[1]));
+	ctxt->is_mm =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W1_IS_MM_MASK,
+				sw_ctxt[1]));
+
+	qdma_log_debug("%s: qen=%x, frcd_en=%x, wbi_chk=%x, wbi_intvl_en=%x\n",
+			 __func__, ctxt->qen, ctxt->frcd_en, ctxt->wbi_chk,
+			ctxt->wbi_intvl_en);
+	qdma_log_debug("%s: at=%x, fetch_max=%x, rngsz_idx=%x, desc_sz=%x\n",
+			__func__, ctxt->at, ctxt->fetch_max, ctxt->rngsz_idx,
+			ctxt->desc_sz);
+	qdma_log_debug("%s: bypass=%x, mm_chn=%x, wbk_en=%x, irq_en=%x\n",
+			__func__, ctxt->bypass, ctxt->mm_chn, ctxt->wbk_en,
+			ctxt->irq_en);
+	qdma_log_debug("%s: port_id=%x, irq_no_last=%x,",
+			__func__, ctxt->port_id, ctxt->irq_no_last);
+	qdma_log_debug(" err=%x, err_wb_sent=%x\n",
+			ctxt->err, ctxt->err_wb_sent);
+	qdma_log_debug("%s: irq_req=%x, mrkr_dis=%x, is_mm=%x\n",
+			__func__, ctxt->irq_req, ctxt->mrkr_dis, ctxt->is_mm);
+
+	/* W2/W3: 64-bit descriptor ring base address */
+	ctxt->ring_bs_addr = ((uint64_t)sw_ctxt[3] << 32) | (sw_ctxt[2]);
+
+	/* W4: interrupt vector/aggregation, virtio enable, PASID low bits */
+	ctxt->vec = FIELD_GET(SW_IND_CTXT_DATA_W4_VEC_MASK, sw_ctxt[4]);
+	ctxt->intr_aggr = (uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W4_INT_AGGR_MASK,
+			sw_ctxt[4]));
+	ctxt->dis_intr_on_vf =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK,
+				sw_ctxt[4]));
+	ctxt->virtio_en =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W4_VIRTIO_EN_MASK,
+				sw_ctxt[4]));
+	ctxt->pack_byp_out =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W4_PACK_BYP_OUT_MASK,
+				sw_ctxt[4]));
+	ctxt->irq_byp =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W4_IRQ_BYP_MASK,
+				sw_ctxt[4]));
+	ctxt->host_id =
+		(uint8_t)(FIELD_GET(SW_IND_CTXT_DATA_W4_HOST_ID_MASK,
+				sw_ctxt[4]));
+	pasid_l = FIELD_GET(SW_IND_CTXT_DATA_W4_PASID_L_MASK, sw_ctxt[4]);
+
+	/* W5..W7: PASID high bits and virtio descriptor base L/M/H parts */
+	pasid_h = FIELD_GET(SW_IND_CTXT_DATA_W5_PASID_H_MASK, sw_ctxt[5]);
+	ctxt->pasid_en = (uint8_t)FIELD_GET(SW_IND_CTXT_DATA_W5_PASID_EN_MASK,
+			sw_ctxt[5]);
+	virtio_desc_base_l =
+		FIELD_GET(SW_IND_CTXT_DATA_W5_VIRTIO_DSC_BASE_L_MASK,
+				sw_ctxt[5]);
+	virtio_desc_base_m =
+		FIELD_GET(SW_IND_CTXT_DATA_W6_VIRTIO_DSC_BASE_M_MASK,
+				sw_ctxt[6]);
+
+	/*
+	 * Fix: the W7 field must be extracted from word 7, not word 6.
+	 * The write path packs virtio_desc_base_h into sw_ctxt[7], so
+	 * reading it from sw_ctxt[6] corrupted the reassembled
+	 * virtio_dsc_base value.
+	 */
+	virtio_desc_base_h =
+		FIELD_GET(SW_IND_CTXT_DATA_W7_VIRTIO_DSC_BASE_H_MASK,
+				sw_ctxt[7]);
+
+	ctxt->pasid =
+			FIELD_SET(EQDMA_SW_CTXT_PASID_GET_L_MASK, pasid_l) |
+			FIELD_SET(EQDMA_SW_CTXT_PASID_GET_H_MASK, pasid_h);
+
+	ctxt->virtio_dsc_base =
+			FIELD_SET(EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_L_MASK,
+					(uint64_t)virtio_desc_base_l) |
+			FIELD_SET(EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_M_MASK,
+					(uint64_t)virtio_desc_base_m) |
+			FIELD_SET(EQDMA_SW_CTXT_VIRTIO_DSC_BASE_GET_H_MASK,
+					(uint64_t)virtio_desc_base_h);
+
+	qdma_log_debug("%s: vec=%x, intr_aggr=%x\n",
+			__func__, ctxt->vec, ctxt->intr_aggr);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_sw_context_clear() - clear sw context
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_sw_context_clear(void *dev_hndl, uint8_t c2h,
+			  uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Select the SW context bank matching the queue direction */
+	return eqdma_indirect_reg_clear(dev_hndl,
+			c2h ? QDMA_CTXT_SEL_SW_C2H : QDMA_CTXT_SEL_SW_H2C,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_sw_context_invalidate() - invalidate sw context
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_sw_context_invalidate(void *dev_hndl, uint8_t c2h,
+		uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Select the SW context bank matching the queue direction */
+	return eqdma_indirect_reg_invalidate(dev_hndl,
+			c2h ? QDMA_CTXT_SEL_SW_C2H : QDMA_CTXT_SEL_SW_H2C,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_sw_ctx_conf() - configure SW context
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the context data
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *
+ * Dispatches to the read/write/clear/invalidate SW context helper
+ * selected by @access_type.
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_sw_ctx_conf(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
+				struct qdma_descq_sw_ctxt *ctxt,
+				enum qdma_hw_access_type access_type)
+{
+	switch (access_type) {
+	case QDMA_HW_ACCESS_READ:
+		return eqdma_sw_context_read(dev_hndl, c2h, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_WRITE:
+		return eqdma_sw_context_write(dev_hndl, c2h, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_CLEAR:
+		return eqdma_sw_context_clear(dev_hndl, c2h, hw_qid);
+	case QDMA_HW_ACCESS_INVALIDATE:
+		return eqdma_sw_context_invalidate(dev_hndl, c2h, hw_qid);
+	default:
+		qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+						__func__,
+						access_type,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_pfetch_context_write() - create prefetch context and program it
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the prefetch context data structure
+ *
+ * Packs the prefetch context fields into the 2 indirect context data words
+ * (W0/W1) and programs them through the indirect register interface.
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_pfetch_context_write(void *dev_hndl, uint16_t hw_qid,
+		const struct qdma_descq_prefetch_ctxt *ctxt)
+{
+	uint32_t pfetch_ctxt[EQDMA_PFETCH_CONTEXT_NUM_WORDS] = {0};
+	enum ind_ctxt_cmd_sel sel = QDMA_CTXT_SEL_PFTCH;
+	uint32_t sw_crdt_l, sw_crdt_h;
+	uint16_t num_words_count = 0;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_handle or pfetch ctxt NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* sw_crdt straddles the W0/W1 boundary; split into low/high parts */
+	sw_crdt_l =
+		FIELD_GET(QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK, ctxt->sw_crdt);
+	sw_crdt_h =
+		FIELD_GET(QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK, ctxt->sw_crdt);
+
+	qdma_log_debug("%s: sw_crdt_l=%u, sw_crdt_h=%u, hw_qid=%u\n",
+			 __func__, sw_crdt_l, sw_crdt_h, hw_qid);
+
+	/* W0: bypass/buffer size/port/prefetch controls, sw credit low */
+	pfetch_ctxt[num_words_count++] =
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_BYPASS_MASK, ctxt->bypass) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_BUF_SZ_IDX_MASK,
+				ctxt->bufsz_idx) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_PORT_ID_MASK, ctxt->port_id) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_NUM_PFCH_MASK,
+				ctxt->num_pftch) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_VAR_DESC_MASK,
+				ctxt->var_desc) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_ERR_MASK, ctxt->err) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_PFCH_EN_MASK, ctxt->pfch_en) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_PFCH_MASK, ctxt->pfch) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W0_SW_CRDT_L_MASK, sw_crdt_l);
+
+	qdma_log_debug("%s: bypass=%x, bufsz_idx=%x, port_id=%x\n",
+			__func__, ctxt->bypass, ctxt->bufsz_idx, ctxt->port_id);
+	qdma_log_debug("%s: err=%x, pfch_en=%x, pfch=%x, ctxt->valid=%x\n",
+			__func__, ctxt->err, ctxt->pfch_en, ctxt->pfch,
+			ctxt->valid);
+
+	/* W1: sw credit high bits and the context valid bit */
+	pfetch_ctxt[num_words_count++] =
+		FIELD_SET(PREFETCH_CTXT_DATA_W1_SW_CRDT_H_MASK, sw_crdt_h) |
+		FIELD_SET(PREFETCH_CTXT_DATA_W1_VALID_MASK, ctxt->valid);
+
+	return eqdma_indirect_reg_write(dev_hndl, sel, hw_qid,
+			pfetch_ctxt, num_words_count);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_pfetch_context_read() - read prefetch context
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the output context data
+ *
+ * Reads the 2 indirect prefetch context words (W0/W1) and unpacks them
+ * into @ctxt. Field placement mirrors eqdma_pfetch_context_write().
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_pfetch_context_read(void *dev_hndl, uint16_t hw_qid,
+		struct qdma_descq_prefetch_ctxt *ctxt)
+{
+	int rv = QDMA_SUCCESS;
+	uint32_t pfetch_ctxt[EQDMA_PFETCH_CONTEXT_NUM_WORDS] = {0};
+	enum ind_ctxt_cmd_sel sel = QDMA_CTXT_SEL_PFTCH;
+	uint32_t sw_crdt_l, sw_crdt_h;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_handle or pfetch ctxt NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = eqdma_indirect_reg_read(dev_hndl, sel, hw_qid,
+			EQDMA_PFETCH_CONTEXT_NUM_WORDS, pfetch_ctxt);
+	if (rv < 0)
+		return rv;
+
+	/* W0: bypass/buffer size/port/prefetch controls, sw credit low */
+	ctxt->bypass =
+		FIELD_GET(PREFETCH_CTXT_DATA_W0_BYPASS_MASK, pfetch_ctxt[0]);
+	ctxt->bufsz_idx =
+		FIELD_GET(PREFETCH_CTXT_DATA_W0_BUF_SZ_IDX_MASK,
+			pfetch_ctxt[0]);
+	ctxt->num_pftch = (uint16_t)FIELD_GET
+			(PREFETCH_CTXT_DATA_W0_NUM_PFCH_MASK, pfetch_ctxt[0]);
+	ctxt->port_id =
+		FIELD_GET(PREFETCH_CTXT_DATA_W0_PORT_ID_MASK, pfetch_ctxt[0]);
+	ctxt->var_desc = (uint8_t)
+		FIELD_GET(PREFETCH_CTXT_DATA_W0_VAR_DESC_MASK,
+				pfetch_ctxt[0]);
+	ctxt->err =
+		(uint8_t)(FIELD_GET(PREFETCH_CTXT_DATA_W0_ERR_MASK,
+			pfetch_ctxt[0]));
+	ctxt->pfch_en =
+		(uint8_t)(FIELD_GET(PREFETCH_CTXT_DATA_W0_PFCH_EN_MASK,
+			pfetch_ctxt[0]));
+	ctxt->pfch =
+		(uint8_t)(FIELD_GET(PREFETCH_CTXT_DATA_W0_PFCH_MASK,
+				pfetch_ctxt[0]));
+	sw_crdt_l =
+		FIELD_GET(PREFETCH_CTXT_DATA_W0_SW_CRDT_L_MASK, pfetch_ctxt[0]);
+
+	/* W1: sw credit high bits and the context valid bit */
+	sw_crdt_h =
+		FIELD_GET(PREFETCH_CTXT_DATA_W1_SW_CRDT_H_MASK, pfetch_ctxt[1]);
+	ctxt->valid =
+		(uint8_t)(FIELD_GET(PREFETCH_CTXT_DATA_W1_VALID_MASK,
+			pfetch_ctxt[1]));
+
+	/* Reassemble the software credit value from its two word parts */
+	ctxt->sw_crdt =
+		FIELD_SET(QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK, sw_crdt_l) |
+		FIELD_SET(QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK, sw_crdt_h);
+
+	qdma_log_debug("%s: sw_crdt_l=%u, sw_crdt_h=%u, hw_qid=%u\n",
+			 __func__, sw_crdt_l, sw_crdt_h, hw_qid);
+	qdma_log_debug("%s: bypass=%x, bufsz_idx=%x, port_id=%x\n",
+			__func__, ctxt->bypass, ctxt->bufsz_idx, ctxt->port_id);
+	qdma_log_debug("%s: err=%x, pfch_en=%x, pfch=%x, ctxt->valid=%x\n",
+			__func__, ctxt->err, ctxt->pfch_en, ctxt->pfch,
+			ctxt->valid);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_pfetch_context_clear() - clear prefetch context
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_pfetch_context_clear(void *dev_hndl, uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Prefetch context uses the dedicated PFTCH selector */
+	return eqdma_indirect_reg_clear(dev_hndl, QDMA_CTXT_SEL_PFTCH,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_pfetch_context_invalidate() - invalidate prefetch context
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_pfetch_context_invalidate(void *dev_hndl, uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Prefetch context uses the dedicated PFTCH selector */
+	return eqdma_indirect_reg_invalidate(dev_hndl, QDMA_CTXT_SEL_PFTCH,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_pfetch_ctx_conf() - dispatch a prefetch-context access
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to context data (used for READ/WRITE only)
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_pfetch_ctx_conf(void *dev_hndl, uint16_t hw_qid,
+				struct qdma_descq_prefetch_ctxt *ctxt,
+				enum qdma_hw_access_type access_type)
+{
+	/* Route to the matching low-level helper; each one validates
+	 * dev_hndl (and ctxt where applicable) itself.
+	 */
+	switch (access_type) {
+	case QDMA_HW_ACCESS_READ:
+		return eqdma_pfetch_context_read(dev_hndl, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_WRITE:
+		return eqdma_pfetch_context_write(dev_hndl, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_CLEAR:
+		return eqdma_pfetch_context_clear(dev_hndl, hw_qid);
+	case QDMA_HW_ACCESS_INVALIDATE:
+		return eqdma_pfetch_context_invalidate(dev_hndl, hw_qid);
+	default:
+		qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+						__func__,
+						access_type,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_cmpt_context_write() - create completion context and program it
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the cmpt context data structure
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_cmpt_context_write(void *dev_hndl, uint16_t hw_qid,
+			   const struct qdma_descq_cmpt_ctxt *ctxt)
+{
+	uint32_t cmpt_ctxt[EQDMA_CMPT_CONTEXT_NUM_WORDS] = {0};
+	uint16_t num_words_count = 0;
+	uint32_t baddr4_high_l, baddr4_high_h,
+			baddr4_low, pidx_l, pidx_h, pasid_l, pasid_h;
+	enum ind_ctxt_cmd_sel sel = QDMA_CTXT_SEL_CMPT;
+
+	/* Input args check */
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_handle or cmpt ctxt NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Trigger mode must be one of the defined update-trigger modes */
+	if (ctxt->trig_mode > QDMA_CMPT_UPDATE_TRIG_MODE_TMR_CNTR) {
+		qdma_log_error("%s: trig_mode(%d) > (%d) is invalid, err:%d\n",
+					__func__,
+					ctxt->trig_mode,
+					QDMA_CMPT_UPDATE_TRIG_MODE_TMR_CNTR,
+					-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Split the 64-bit base address into the three pieces the context
+	 * words carry: high bits land in W1/W2, low bits in W5.
+	 */
+	baddr4_high_l = (uint32_t)FIELD_GET(EQDMA_COMPL_CTXT_BADDR_HIGH_L_MASK,
+			ctxt->bs_addr);
+	baddr4_high_h = (uint32_t)FIELD_GET(EQDMA_COMPL_CTXT_BADDR_HIGH_H_MASK,
+			ctxt->bs_addr);
+	baddr4_low = (uint32_t)FIELD_GET(EQDMA_COMPL_CTXT_BADDR_LOW_MASK,
+			ctxt->bs_addr);
+
+	/* pidx straddles the W2/W3 boundary; split into low/high parts */
+	pidx_l = FIELD_GET(QDMA_COMPL_CTXT_PIDX_GET_L_MASK, ctxt->pidx);
+	pidx_h = FIELD_GET(QDMA_COMPL_CTXT_PIDX_GET_H_MASK, ctxt->pidx);
+
+	/* pasid straddles the W4/W5 boundary; split into low/high parts */
+	pasid_l =
+		FIELD_GET(EQDMA_CMPL_CTXT_PASID_GET_L_MASK, ctxt->pasid);
+	pasid_h =
+		FIELD_GET(EQDMA_CMPL_CTXT_PASID_GET_H_MASK, ctxt->pasid);
+
+	/* W0: status-descriptor/interrupt enables, trigger mode, function
+	 * id, counter/timer indices, state flags and ring-size index.
+	 */
+	cmpt_ctxt[num_words_count++] =
+		FIELD_SET(CMPL_CTXT_DATA_W0_EN_STAT_DESC_MASK,
+				ctxt->en_stat_desc) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_EN_INT_MASK, ctxt->en_int) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_TRIG_MODE_MASK, ctxt->trig_mode) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_FNC_ID_MASK, ctxt->fnc_id) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_CNTER_IX_MASK,
+				ctxt->counter_idx) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_TIMER_IX_MASK, ctxt->timer_idx) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_INT_ST_MASK, ctxt->in_st) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_COLOR_MASK, ctxt->color) |
+		FIELD_SET(CMPL_CTXT_DATA_W0_QSIZE_IX_MASK, ctxt->ringsz_idx);
+
+	/* W1: base address high bits (low half) */
+	cmpt_ctxt[num_words_count++] =
+		FIELD_SET(CMPL_CTXT_DATA_W1_BADDR4_HIGH_L_MASK, baddr4_high_l);
+
+	/* W2: base address high bits (high half), descriptor size, pidx low */
+	cmpt_ctxt[num_words_count++] =
+		FIELD_SET(CMPL_CTXT_DATA_W2_BADDR4_HIGH_H_MASK, baddr4_high_h) |
+		FIELD_SET(CMPL_CTXT_DATA_W2_DESC_SIZE_MASK, ctxt->desc_sz) |
+		FIELD_SET(CMPL_CTXT_DATA_W2_PIDX_L_MASK, pidx_l);
+
+	/* W3: pidx high, cidx, valid/err/user-trigger-pending flags */
+	cmpt_ctxt[num_words_count++] =
+		FIELD_SET(CMPL_CTXT_DATA_W3_PIDX_H_MASK, pidx_h) |
+		FIELD_SET(CMPL_CTXT_DATA_W3_CIDX_MASK, ctxt->cidx) |
+		FIELD_SET(CMPL_CTXT_DATA_W3_VALID_MASK, ctxt->valid) |
+		FIELD_SET(CMPL_CTXT_DATA_W3_ERR_MASK, ctxt->err) |
+		FIELD_SET(CMPL_CTXT_DATA_W3_USER_TRIG_PEND_MASK,
+				ctxt->user_trig_pend);
+
+	/* W4: timer/update/overflow flags, AT, vector, interrupt
+	 * aggregation, host id and pasid low bits.
+	 */
+	cmpt_ctxt[num_words_count++] =
+		FIELD_SET(CMPL_CTXT_DATA_W4_TIMER_RUNNING_MASK,
+				ctxt->timer_running) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_FULL_UPD_MASK, ctxt->full_upd) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_OVF_CHK_DIS_MASK,
+				ctxt->ovf_chk_dis) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_AT_MASK, ctxt->at) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_VEC_MASK, ctxt->vec) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_INT_AGGR_MASK, ctxt->int_aggr) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK,
+				ctxt->dis_intr_on_vf) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_VIO_MASK, ctxt->vio) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_DIR_C2H_MASK, ctxt->dir_c2h) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_HOST_ID_MASK, ctxt->host_id) |
+		FIELD_SET(CMPL_CTXT_DATA_W4_PASID_L_MASK, pasid_l);
+
+	/* W5: pasid high bits + enable, base address low bits, vio_eop,
+	 * shared-completion flag.
+	 */
+	cmpt_ctxt[num_words_count++] =
+		FIELD_SET(CMPL_CTXT_DATA_W5_PASID_H_MASK, pasid_h) |
+		FIELD_SET(CMPL_CTXT_DATA_W5_PASID_EN_MASK,
+				ctxt->pasid_en) |
+		FIELD_SET(CMPL_CTXT_DATA_W5_BADDR4_LOW_MASK,
+				baddr4_low) |
+		FIELD_SET(CMPL_CTXT_DATA_W5_VIO_EOP_MASK, ctxt->vio_eop) |
+		FIELD_SET(CMPL_CTXT_DATA_W5_SH_CMPT_MASK, ctxt->sh_cmpt);
+
+	/* Program all packed words through the indirect context interface */
+	return eqdma_indirect_reg_write(dev_hndl, sel, hw_qid,
+			cmpt_ctxt, num_words_count);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_cmpt_context_read() - read completion context
+ *
+ * Inverse of eqdma_cmpt_context_write(): fetches the raw context words and
+ * unpacks every field, reassembling the values that are split across words
+ * (base address, pidx, pasid).
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the context data
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_cmpt_context_read(void *dev_hndl, uint16_t hw_qid,
+			   struct qdma_descq_cmpt_ctxt *ctxt)
+{
+	int rv = QDMA_SUCCESS;
+	uint32_t cmpt_ctxt[EQDMA_CMPT_CONTEXT_NUM_WORDS] = {0};
+	enum ind_ctxt_cmd_sel sel = QDMA_CTXT_SEL_CMPT;
+	uint32_t baddr4_high_l, baddr4_high_h, baddr4_low,
+			pidx_l, pidx_h, pasid_l, pasid_h;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_handle or cmpt ctxt NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = eqdma_indirect_reg_read(dev_hndl, sel, hw_qid,
+			EQDMA_CMPT_CONTEXT_NUM_WORDS, cmpt_ctxt);
+	if (rv < 0)
+		return rv;
+
+	/* W0: enables, trigger mode, function id, indices and flags */
+	ctxt->en_stat_desc =
+		FIELD_GET(CMPL_CTXT_DATA_W0_EN_STAT_DESC_MASK, cmpt_ctxt[0]);
+	ctxt->en_int = FIELD_GET(CMPL_CTXT_DATA_W0_EN_INT_MASK, cmpt_ctxt[0]);
+	ctxt->trig_mode =
+		FIELD_GET(CMPL_CTXT_DATA_W0_TRIG_MODE_MASK, cmpt_ctxt[0]);
+	ctxt->fnc_id =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_FNC_ID_MASK,
+			cmpt_ctxt[0]));
+	ctxt->counter_idx =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_CNTER_IX_MASK,
+			cmpt_ctxt[0]));
+	ctxt->timer_idx =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_TIMER_IX_MASK,
+			cmpt_ctxt[0]));
+	ctxt->in_st =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_INT_ST_MASK,
+			cmpt_ctxt[0]));
+	ctxt->color =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_COLOR_MASK,
+			cmpt_ctxt[0]));
+	ctxt->ringsz_idx =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W0_QSIZE_IX_MASK,
+			cmpt_ctxt[0]));
+
+	/* W1/W2: base-address high halves, descriptor size, pidx low */
+	baddr4_high_l = FIELD_GET(CMPL_CTXT_DATA_W1_BADDR4_HIGH_L_MASK,
+			cmpt_ctxt[1]);
+
+	baddr4_high_h = FIELD_GET(CMPL_CTXT_DATA_W2_BADDR4_HIGH_H_MASK,
+			cmpt_ctxt[2]);
+	ctxt->desc_sz =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W2_DESC_SIZE_MASK,
+			cmpt_ctxt[2]));
+	pidx_l = FIELD_GET(CMPL_CTXT_DATA_W2_PIDX_L_MASK, cmpt_ctxt[2]);
+
+	/* W3: pidx high, cidx, valid/err/user-trigger-pending flags */
+	pidx_h = FIELD_GET(CMPL_CTXT_DATA_W3_PIDX_H_MASK, cmpt_ctxt[3]);
+	ctxt->cidx =
+		(uint16_t)(FIELD_GET(CMPL_CTXT_DATA_W3_CIDX_MASK,
+			cmpt_ctxt[3]));
+	ctxt->valid =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W3_VALID_MASK,
+			cmpt_ctxt[3]));
+	ctxt->err =
+		(uint8_t)(FIELD_GET(CMPL_CTXT_DATA_W3_ERR_MASK,
+			cmpt_ctxt[3]));
+	ctxt->user_trig_pend = (uint8_t)
+		(FIELD_GET(CMPL_CTXT_DATA_W3_USER_TRIG_PEND_MASK,
+			cmpt_ctxt[3]));
+
+	/* W4: timer/update/overflow flags, AT, vector, host id, pasid low */
+	ctxt->timer_running =
+		FIELD_GET(CMPL_CTXT_DATA_W4_TIMER_RUNNING_MASK, cmpt_ctxt[4]);
+	ctxt->full_upd =
+		FIELD_GET(CMPL_CTXT_DATA_W4_FULL_UPD_MASK, cmpt_ctxt[4]);
+	ctxt->ovf_chk_dis =
+		FIELD_GET(CMPL_CTXT_DATA_W4_OVF_CHK_DIS_MASK, cmpt_ctxt[4]);
+	ctxt->at = FIELD_GET(CMPL_CTXT_DATA_W4_AT_MASK, cmpt_ctxt[4]);
+	ctxt->vec = FIELD_GET(CMPL_CTXT_DATA_W4_VEC_MASK, cmpt_ctxt[4]);
+	ctxt->int_aggr = (uint8_t)
+		(FIELD_GET(CMPL_CTXT_DATA_W4_INT_AGGR_MASK, cmpt_ctxt[4]));
+	ctxt->dis_intr_on_vf = (uint8_t)
+		FIELD_GET(CMPL_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK,
+				cmpt_ctxt[4]);
+	ctxt->vio = (uint8_t)FIELD_GET(CMPL_CTXT_DATA_W4_VIO_MASK,
+			cmpt_ctxt[4]);
+	ctxt->dir_c2h = (uint8_t)FIELD_GET(CMPL_CTXT_DATA_W4_DIR_C2H_MASK,
+			cmpt_ctxt[4]);
+	ctxt->host_id = (uint8_t)FIELD_GET(CMPL_CTXT_DATA_W4_HOST_ID_MASK,
+			cmpt_ctxt[4]);
+	pasid_l = FIELD_GET(CMPL_CTXT_DATA_W4_PASID_L_MASK, cmpt_ctxt[4]);
+
+	/* W5: pasid high + enable, base-address low, vio_eop, sh_cmpt */
+	pasid_h = (uint32_t)FIELD_GET(CMPL_CTXT_DATA_W5_PASID_H_MASK,
+			cmpt_ctxt[5]);
+	ctxt->pasid_en = (uint8_t)FIELD_GET(CMPL_CTXT_DATA_W5_PASID_EN_MASK,
+			cmpt_ctxt[5]);
+	/* NOTE(review): baddr4_low is declared uint32_t but narrowed through
+	 * (uint8_t) here — this assumes the BADDR4_LOW field is at most
+	 * 8 bits wide; confirm against the register map.
+	 */
+	baddr4_low = (uint8_t)FIELD_GET
+			(CMPL_CTXT_DATA_W5_BADDR4_LOW_MASK, cmpt_ctxt[5]);
+	ctxt->vio_eop = (uint8_t)FIELD_GET(CMPL_CTXT_DATA_W5_VIO_EOP_MASK,
+			cmpt_ctxt[5]);
+	ctxt->sh_cmpt = (uint8_t)FIELD_GET(CMPL_CTXT_DATA_W5_SH_CMPT_MASK,
+			cmpt_ctxt[5]);
+
+	/* Reassemble the fields that were split across context words */
+	ctxt->bs_addr =
+		FIELD_SET(EQDMA_COMPL_CTXT_BADDR_HIGH_L_MASK,
+				(uint64_t)baddr4_high_l) |
+		FIELD_SET(EQDMA_COMPL_CTXT_BADDR_HIGH_H_MASK,
+				(uint64_t)baddr4_high_h) |
+		FIELD_SET(EQDMA_COMPL_CTXT_BADDR_LOW_MASK,
+				(uint64_t)baddr4_low);
+
+	ctxt->pasid =
+		FIELD_SET(EQDMA_CMPL_CTXT_PASID_GET_L_MASK, pasid_l) |
+		FIELD_SET(EQDMA_CMPL_CTXT_PASID_GET_H_MASK,
+				(uint64_t)pasid_h);
+
+	ctxt->pidx =
+		FIELD_SET(QDMA_COMPL_CTXT_PIDX_GET_L_MASK, pidx_l) |
+		FIELD_SET(QDMA_COMPL_CTXT_PIDX_GET_H_MASK, pidx_h);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_cmpt_context_clear() - clear the completion context of a queue
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_cmpt_context_clear(void *dev_hndl, uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Issue the indirect clear command on the completion selector */
+	return eqdma_indirect_reg_clear(dev_hndl, QDMA_CTXT_SEL_CMPT,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_cmpt_context_invalidate() - invalidate the completion context of a
+ * queue
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_cmpt_context_invalidate(void *dev_hndl, uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Issue the indirect invalidate command on the completion selector */
+	return eqdma_indirect_reg_invalidate(dev_hndl, QDMA_CTXT_SEL_CMPT,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_cmpt_ctx_conf() - dispatch a completion-context access
+ *
+ * @dev_hndl:	device handle
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to context data (used for READ/WRITE only)
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_cmpt_ctx_conf(void *dev_hndl, uint16_t hw_qid,
+			struct qdma_descq_cmpt_ctxt *ctxt,
+			enum qdma_hw_access_type access_type)
+{
+	/* Route to the matching low-level helper; each one validates
+	 * its own arguments.
+	 */
+	switch (access_type) {
+	case QDMA_HW_ACCESS_READ:
+		return eqdma_cmpt_context_read(dev_hndl, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_WRITE:
+		return eqdma_cmpt_context_write(dev_hndl, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_CLEAR:
+		return eqdma_cmpt_context_clear(dev_hndl, hw_qid);
+	case QDMA_HW_ACCESS_INVALIDATE:
+		return eqdma_cmpt_context_invalidate(dev_hndl, hw_qid);
+	default:
+		qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+						__func__,
+						access_type,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_context_read() - read hardware context
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the output context data
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_hw_context_read(void *dev_hndl, uint8_t c2h,
+			 uint16_t hw_qid, struct qdma_descq_hw_ctxt *ctxt)
+{
+	int rv = QDMA_SUCCESS;
+	uint32_t hw_ctxt[EQDMA_HW_CONTEXT_NUM_WORDS] = {0};
+	/* Direction selects which HW context (C2H or H2C) is addressed */
+	enum ind_ctxt_cmd_sel sel = c2h ? QDMA_CTXT_SEL_HW_C2H :
+			QDMA_CTXT_SEL_HW_H2C;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_handle or hw_ctxt NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = eqdma_indirect_reg_read(dev_hndl, sel, hw_qid,
+			EQDMA_HW_CONTEXT_NUM_WORDS, hw_ctxt);
+	if (rv < 0)
+		return rv;
+
+	/* Word 0: consumer index and credits currently in use */
+	ctxt->cidx = FIELD_GET(HW_IND_CTXT_DATA_W0_CIDX_MASK, hw_ctxt[0]);
+	ctxt->crd_use =
+		(uint16_t)(FIELD_GET(HW_IND_CTXT_DATA_W0_CRD_USE_MASK,
+					hw_ctxt[0]));
+
+	/* Word 1: pending/idle/event state flags */
+	ctxt->dsc_pend =
+		(uint8_t)(FIELD_GET(HW_IND_CTXT_DATA_W1_DSC_PND_MASK,
+					hw_ctxt[1]));
+	ctxt->idl_stp_b =
+		(uint8_t)(FIELD_GET(HW_IND_CTXT_DATA_W1_IDL_STP_B_MASK,
+			hw_ctxt[1]));
+	ctxt->evt_pnd =
+		(uint8_t)(FIELD_GET(HW_IND_CTXT_DATA_W1_EVT_PND_MASK,
+			hw_ctxt[1]));
+	/* NOTE(review): fetch_pnd is extracted with the same DSC_PND mask
+	 * used for dsc_pend above, so both fields always read identical.
+	 * Verify against the register map whether a dedicated FETCH_PND
+	 * mask was intended here.
+	 */
+	ctxt->fetch_pnd = (uint8_t)
+		(FIELD_GET(HW_IND_CTXT_DATA_W1_DSC_PND_MASK, hw_ctxt[1]));
+
+	qdma_log_debug("%s: cidx=%u, crd_use=%u, dsc_pend=%x\n",
+			__func__, ctxt->cidx, ctxt->crd_use, ctxt->dsc_pend);
+	qdma_log_debug("%s: idl_stp_b=%x, evt_pnd=%x, fetch_pnd=%x\n",
+			__func__, ctxt->idl_stp_b, ctxt->evt_pnd,
+			ctxt->fetch_pnd);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_context_clear() - clear the hardware context of a queue
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_hw_context_clear(void *dev_hndl, uint8_t c2h,
+			  uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Direction selects which HW context (C2H or H2C) is cleared */
+	return eqdma_indirect_reg_clear(dev_hndl,
+			c2h ? QDMA_CTXT_SEL_HW_C2H : QDMA_CTXT_SEL_HW_H2C,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_context_invalidate() - invalidate the hardware context of a queue
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_hw_context_invalidate(void *dev_hndl, uint8_t c2h,
+				   uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Direction selects which HW context (C2H or H2C) is invalidated */
+	return eqdma_indirect_reg_invalidate(dev_hndl,
+			c2h ? QDMA_CTXT_SEL_HW_C2H : QDMA_CTXT_SEL_HW_H2C,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_ctx_conf() - dispatch a HW-context access
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue (must be 0 or 1)
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to context data (used for READ only)
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *		QDMA_HW_ACCESS_WRITE Not supported
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_hw_ctx_conf(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
+				struct qdma_descq_hw_ctxt *ctxt,
+				enum qdma_hw_access_type access_type)
+{
+	/* Only H2C (0) and C2H (1) directions exist */
+	if (c2h > 1) {
+		qdma_log_error("%s: c2h(%d) invalid, err:%d\n",
+						__func__,
+						c2h,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	switch (access_type) {
+	case QDMA_HW_ACCESS_READ:
+		return eqdma_hw_context_read(dev_hndl, c2h, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_CLEAR:
+		return eqdma_hw_context_clear(dev_hndl, c2h, hw_qid);
+	case QDMA_HW_ACCESS_INVALIDATE:
+		return eqdma_hw_context_invalidate(dev_hndl, c2h, hw_qid);
+	case QDMA_HW_ACCESS_WRITE:
+	default:
+		/* Writes to the HW context are not supported */
+		qdma_log_error("%s: access_type=%d is invalid, err:%d\n",
+					   __func__, access_type,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_credit_context_read() - read credit context
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the context data
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_credit_context_read(void *dev_hndl, uint8_t c2h,
+			 uint16_t hw_qid,
+			 struct qdma_descq_credit_ctxt *ctxt)
+{
+	uint32_t cr_ctxt[EQDMA_CR_CONTEXT_NUM_WORDS] = {0};
+	int rv;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_hndl=%p credit_ctxt=%p, err:%d\n",
+						__func__, dev_hndl, ctxt,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Direction selects which credit context (C2H or H2C) is read */
+	rv = eqdma_indirect_reg_read(dev_hndl,
+			c2h ? QDMA_CTXT_SEL_CR_C2H : QDMA_CTXT_SEL_CR_H2C,
+			hw_qid, EQDMA_CR_CONTEXT_NUM_WORDS, cr_ctxt);
+	if (rv < 0)
+		return rv;
+
+	/* The credit context holds a single credit-count field */
+	ctxt->credit = FIELD_GET(CRED_CTXT_DATA_W0_CREDT_MASK, cr_ctxt[0]);
+
+	qdma_log_debug("%s: credit=%u\n", __func__, ctxt->credit);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_credit_context_clear() - clear the credit context of a queue
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_credit_context_clear(void *dev_hndl, uint8_t c2h,
+			  uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Direction selects which credit context (C2H or H2C) is cleared */
+	return eqdma_indirect_reg_clear(dev_hndl,
+			c2h ? QDMA_CTXT_SEL_CR_C2H : QDMA_CTXT_SEL_CR_H2C,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_credit_context_invalidate() - invalidate the credit context of a
+ * queue
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue
+ * @hw_qid:	hardware qid of the queue
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_credit_context_invalidate(void *dev_hndl, uint8_t c2h,
+				   uint16_t hw_qid)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Direction selects which credit context (C2H or H2C) to drop */
+	return eqdma_indirect_reg_invalidate(dev_hndl,
+			c2h ? QDMA_CTXT_SEL_CR_C2H : QDMA_CTXT_SEL_CR_H2C,
+			hw_qid);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_credit_ctx_conf() - dispatch a credit-context access
+ *
+ * @dev_hndl:	device handle
+ * @c2h:	is c2h queue (must be 0 or 1)
+ * @hw_qid:	hardware qid of the queue
+ * @ctxt:	pointer to the context data (used for READ only)
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *		QDMA_HW_ACCESS_WRITE Not supported
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_credit_ctx_conf(void *dev_hndl, uint8_t c2h,
+		uint16_t hw_qid, struct qdma_descq_credit_ctxt *ctxt,
+		enum qdma_hw_access_type access_type)
+{
+	/* Only H2C (0) and C2H (1) directions exist */
+	if (c2h > 1) {
+		qdma_log_error("%s: c2h(%d) invalid, err:%d\n",
+						__func__,
+						c2h,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	switch (access_type) {
+	case QDMA_HW_ACCESS_READ:
+		return eqdma_credit_context_read(dev_hndl, c2h, hw_qid, ctxt);
+	case QDMA_HW_ACCESS_CLEAR:
+		return eqdma_credit_context_clear(dev_hndl, c2h, hw_qid);
+	case QDMA_HW_ACCESS_INVALIDATE:
+		return eqdma_credit_context_invalidate(dev_hndl, c2h, hw_qid);
+	case QDMA_HW_ACCESS_WRITE:
+	default:
+		/* Writes to the credit context are not supported */
+		qdma_log_error("%s: Invalid access type=%d, err:%d\n",
+					   __func__, access_type,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+}
+
+
+/*****************************************************************************/
+/**
+ * eqdma_indirect_intr_context_write() - create indirect interrupt context
+ *					and program it
+ *
+ * @dev_hndl:   device handle
+ * @ring_index: indirect interrupt ring index
+ * @ctxt:	pointer to the interrupt context data structure
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_indirect_intr_context_write(void *dev_hndl,
+		uint16_t ring_index, const struct qdma_indirect_intr_ctxt *ctxt)
+{
+	uint32_t intr_ctxt[EQDMA_IND_INTR_CONTEXT_NUM_WORDS] = {0};
+	enum ind_ctxt_cmd_sel sel = QDMA_CTXT_SEL_INT_COAL;
+	uint32_t baddr_l, baddr_m, baddr_h, pasid_l, pasid_h;
+	uint16_t num_words_count = 0;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_hndl=%p intr_ctxt=%p, err:%d\n",
+						__func__, dev_hndl, ctxt,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Split the 4K-aligned base address into the three pieces the
+	 * context words carry (low in W0, middle in W1, high in W2).
+	 */
+	baddr_l = (uint32_t)FIELD_GET(QDMA_INTR_CTXT_BADDR_GET_L_MASK,
+			ctxt->baddr_4k);
+	baddr_m = (uint32_t)FIELD_GET(QDMA_INTR_CTXT_BADDR_GET_M_MASK,
+			ctxt->baddr_4k);
+	baddr_h = (uint32_t)FIELD_GET(QDMA_INTR_CTXT_BADDR_GET_H_MASK,
+			ctxt->baddr_4k);
+
+	/* pasid straddles the W2/W3 boundary; split into low/high parts */
+	pasid_l =
+		FIELD_GET(EQDMA_INTR_CTXT_PASID_GET_L_MASK, ctxt->pasid);
+	pasid_h =
+		FIELD_GET(EQDMA_INTR_CTXT_PASID_GET_H_MASK, ctxt->pasid);
+
+	/* W0: valid/vector/state/color flags and base-address low bits */
+	intr_ctxt[num_words_count++] =
+		FIELD_SET(INTR_CTXT_DATA_W0_VALID_MASK, ctxt->valid) |
+		FIELD_SET(INTR_CTXT_DATA_W0_VEC_MASK, ctxt->vec) |
+		FIELD_SET(INTR_CTXT_DATA_W0_INT_ST_MASK, ctxt->int_st) |
+		FIELD_SET(INTR_CTXT_DATA_W0_COLOR_MASK, ctxt->color) |
+		FIELD_SET(INTR_CTXT_DATA_W0_BADDR_4K_L_MASK, baddr_l);
+
+	/* W1: base-address middle bits */
+	intr_ctxt[num_words_count++] =
+		FIELD_SET(INTR_CTXT_DATA_W1_BADDR_4K_M_MASK, baddr_m);
+
+	/* W2: base-address high bits, page size, pidx, AT, host id,
+	 * pasid low bits.
+	 */
+	intr_ctxt[num_words_count++] =
+		FIELD_SET(INTR_CTXT_DATA_W2_BADDR_4K_H_MASK, baddr_h) |
+		FIELD_SET(INTR_CTXT_DATA_W2_PAGE_SIZE_MASK, ctxt->page_size) |
+		FIELD_SET(INTR_CTXT_DATA_W2_PIDX_MASK, ctxt->pidx) |
+		FIELD_SET(INTR_CTXT_DATA_W2_AT_MASK, ctxt->at) |
+		FIELD_SET(INTR_CTXT_DATA_W2_HOST_ID_MASK, ctxt->host_id) |
+		FIELD_SET(INTR_CTXT_DATA_W2_PASID_L_MASK, pasid_l);
+
+	/* W3: pasid high bits + enable, owning function id */
+	intr_ctxt[num_words_count++] =
+		FIELD_SET(INTR_CTXT_DATA_W3_PASID_H_MASK, pasid_h) |
+		FIELD_SET(INTR_CTXT_DATA_W3_PASID_EN_MASK, ctxt->pasid_en) |
+		FIELD_SET(INTR_CTXT_DATA_W3_FUNC_MASK, ctxt->func_id);
+
+	/* Program all packed words through the indirect context interface */
+	return eqdma_indirect_reg_write(dev_hndl, sel, ring_index,
+			intr_ctxt, num_words_count);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_indirect_intr_context_read() - read indirect interrupt context
+ *
+ * Inverse of eqdma_indirect_intr_context_write(): fetches the raw context
+ * words and unpacks every field, reassembling the base address and pasid
+ * values that are split across words.
+ *
+ * @dev_hndl:	device handle
+ * @ring_index:	indirect interrupt ring index
+ * @ctxt:	pointer to the output context data
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_indirect_intr_context_read(void *dev_hndl,
+		uint16_t ring_index, struct qdma_indirect_intr_ctxt *ctxt)
+{
+	int rv = QDMA_SUCCESS;
+	uint32_t intr_ctxt[EQDMA_IND_INTR_CONTEXT_NUM_WORDS] = {0};
+	enum ind_ctxt_cmd_sel sel = QDMA_CTXT_SEL_INT_COAL;
+	uint64_t baddr_l, baddr_m, baddr_h, pasid_l, pasid_h;
+
+	if (!dev_hndl || !ctxt) {
+		qdma_log_error("%s: dev_hndl=%p intr_ctxt=%p, err:%d\n",
+						__func__, dev_hndl, ctxt,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = eqdma_indirect_reg_read(dev_hndl, sel, ring_index,
+			EQDMA_IND_INTR_CONTEXT_NUM_WORDS, intr_ctxt);
+	if (rv < 0)
+		return rv;
+
+	/* W0: valid/vector/state/color flags and base-address low bits */
+	ctxt->valid = FIELD_GET(INTR_CTXT_DATA_W0_VALID_MASK, intr_ctxt[0]);
+	ctxt->vec = FIELD_GET(INTR_CTXT_DATA_W0_VEC_MASK, intr_ctxt[0]);
+	ctxt->int_st =
+		(uint8_t)(FIELD_GET(INTR_CTXT_DATA_W0_INT_ST_MASK,
+			intr_ctxt[0]));
+	ctxt->color =
+		(uint8_t)(FIELD_GET(INTR_CTXT_DATA_W0_COLOR_MASK,
+			intr_ctxt[0]));
+	baddr_l = FIELD_GET(INTR_CTXT_DATA_W0_BADDR_4K_L_MASK, intr_ctxt[0]);
+
+	/* W1: base-address middle bits */
+	baddr_m = FIELD_GET(INTR_CTXT_DATA_W1_BADDR_4K_M_MASK, intr_ctxt[1]);
+
+	/* W2: base-address high bits, page size, pidx, AT, host id,
+	 * pasid low bits.
+	 */
+	baddr_h = FIELD_GET(INTR_CTXT_DATA_W2_BADDR_4K_H_MASK, intr_ctxt[2]);
+	ctxt->page_size =
+		FIELD_GET(INTR_CTXT_DATA_W2_PAGE_SIZE_MASK, intr_ctxt[2]);
+	ctxt->pidx =
+		(uint16_t)(FIELD_GET(INTR_CTXT_DATA_W2_PIDX_MASK,
+			intr_ctxt[2]));
+	ctxt->at =
+		(uint8_t)(FIELD_GET(INTR_CTXT_DATA_W2_AT_MASK, intr_ctxt[2]));
+	ctxt->host_id = (uint8_t)(FIELD_GET(INTR_CTXT_DATA_W2_HOST_ID_MASK,
+			intr_ctxt[2]));
+	/* Fix: do not narrow pasid_l through (uint8_t). The variable is
+	 * uint64_t and the write path packs the untruncated value into
+	 * INTR_CTXT_DATA_W2_PASID_L_MASK, so a uint8_t cast here would
+	 * silently drop any PASID low bits above bit 7 on readback.
+	 */
+	pasid_l = FIELD_GET(INTR_CTXT_DATA_W2_PASID_L_MASK, intr_ctxt[2]);
+
+	/* W3: pasid high bits + enable, owning function id */
+	pasid_h = FIELD_GET(INTR_CTXT_DATA_W3_PASID_H_MASK, intr_ctxt[3]);
+	ctxt->pasid_en = (uint8_t)FIELD_GET(INTR_CTXT_DATA_W3_PASID_EN_MASK,
+			intr_ctxt[3]);
+
+	ctxt->func_id = (uint16_t)FIELD_GET(INTR_CTXT_DATA_W3_FUNC_MASK,
+			intr_ctxt[3]);
+
+	/* Reassemble the fields that were split across context words */
+	ctxt->baddr_4k =
+		FIELD_SET(QDMA_INTR_CTXT_BADDR_GET_L_MASK, baddr_l) |
+		FIELD_SET(QDMA_INTR_CTXT_BADDR_GET_M_MASK, baddr_m) |
+		FIELD_SET(QDMA_INTR_CTXT_BADDR_GET_H_MASK, baddr_h);
+
+	ctxt->pasid =
+		FIELD_SET(EQDMA_INTR_CTXT_PASID_GET_L_MASK, pasid_l) |
+		FIELD_SET(EQDMA_INTR_CTXT_PASID_GET_H_MASK, pasid_h);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_indirect_intr_context_clear() - clear indirect interrupt context
+ *
+ * @dev_hndl:	device handle
+ * @ring_index:	indirect interrupt ring index
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_indirect_intr_context_clear(void *dev_hndl,
+		uint16_t ring_index)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Issue the indirect clear command on the interrupt-coalesce
+	 * selector for this ring.
+	 */
+	return eqdma_indirect_reg_clear(dev_hndl, QDMA_CTXT_SEL_INT_COAL,
+			ring_index);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_indirect_intr_context_invalidate() - invalidate indirect interrupt
+ * context
+ *
+ * @dev_hndl:	device handle
+ * @ring_index:	indirect interrupt ring index
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_indirect_intr_context_invalidate(void *dev_hndl,
+					  uint16_t ring_index)
+{
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Issue the indirect invalidate command on the interrupt-coalesce
+	 * selector for this ring.
+	 */
+	return eqdma_indirect_reg_invalidate(dev_hndl, QDMA_CTXT_SEL_INT_COAL,
+			ring_index);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_indirect_intr_ctx_conf() - dispatch an indirect-interrupt-context
+ * access
+ *
+ * @dev_hndl:	device handle
+ * @ring_index:	indirect interrupt ring index
+ * @ctxt:	pointer to context data (used for READ/WRITE only)
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_indirect_intr_ctx_conf(void *dev_hndl, uint16_t ring_index,
+				struct qdma_indirect_intr_ctxt *ctxt,
+				enum qdma_hw_access_type access_type)
+{
+	/* Route to the matching low-level helper; each one validates
+	 * its own arguments.
+	 */
+	switch (access_type) {
+	case QDMA_HW_ACCESS_READ:
+		return eqdma_indirect_intr_context_read(dev_hndl, ring_index,
+							ctxt);
+	case QDMA_HW_ACCESS_WRITE:
+		return eqdma_indirect_intr_context_write(dev_hndl, ring_index,
+							ctxt);
+	case QDMA_HW_ACCESS_CLEAR:
+		return eqdma_indirect_intr_context_clear(dev_hndl,
+							ring_index);
+	case QDMA_HW_ACCESS_INVALIDATE:
+		return eqdma_indirect_intr_context_invalidate(dev_hndl,
+								ring_index);
+	default:
+		qdma_log_error("%s: access_type=%d is invalid, err:%d\n",
+					   __func__, access_type,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_dump_config_regs() - Function to get qdma config register dump in a
+ * buffer
+ *
+ * @dev_hndl:   device handle
+ * @is_vf:      Whether PF or VF (VF is rejected; use the VF API instead)
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int eqdma_dump_config_regs(void *dev_hndl, uint8_t is_vf,
+		char *buf, uint32_t buflen)
+{
+	uint32_t i = 0, j = 0;
+	struct xreg_info *reg_info;
+	uint32_t num_regs = eqdma_config_num_regs_get();
+	uint32_t len = 0, val = 0;
+	int rv = QDMA_SUCCESS;
+	int mask;
+	char name[DEBGFS_GEN_NAME_SZ] = "";
+	struct qdma_dev_attributes dev_cap;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (buflen < eqdma_reg_dump_buf_len()) {
+		qdma_log_error("%s: Buffer too small, err:%d\n",
+					__func__, -QDMA_ERR_NO_MEM);
+		return -QDMA_ERR_NO_MEM;
+	}
+
+	if (is_vf) {
+		qdma_log_error("%s: Wrong API used for VF, err:%d\n",
+				__func__,
+				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	reg_info = eqdma_config_regs_get();
+
+	/* The capability mask depends only on dev_cap, which does not
+	 * change during the dump, so compute it once instead of on every
+	 * loop iteration.
+	 */
+	mask = get_capability_mask(dev_cap.mm_en,
+				   dev_cap.st_en,
+				   dev_cap.mm_cmpt_en,
+				   dev_cap.mailbox_en);
+
+	for (i = 0; i < num_regs; i++) {
+		/* Skip registers not applicable to this device's modes */
+		if ((mask & reg_info[i].mode) == 0)
+			continue;
+
+		/* If Debug Mode not enabled and the current register
+		 * is debug register, skip reading it.
+		 */
+		if (dev_cap.debug_mode == 0 &&
+				reg_info[i].is_debug_reg == 1)
+			continue;
+
+		/* Dump each repeat of the register at 4-byte stride */
+		for (j = 0; j < reg_info[i].repeat; j++) {
+			rv = QDMA_SNPRINTF_S(name, DEBGFS_GEN_NAME_SZ,
+					DEBGFS_GEN_NAME_SZ,
+					"%s", reg_info[i].name);
+			if (rv < 0 || rv > DEBGFS_GEN_NAME_SZ) {
+				qdma_log_error
+					("%d:%s QDMA_SNPRINTF_S() failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				return -QDMA_ERR_NO_MEM;
+			}
+			val = qdma_reg_read(dev_hndl,
+					(reg_info[i].addr + (j * 4)));
+			rv = dump_reg(buf + len, buflen - len,
+					(reg_info[i].addr + (j * 4)),
+						name, val);
+			if (rv < 0) {
+				qdma_log_error
+				("%s Buff too small, err:%d\n",
+				__func__,
+				-QDMA_ERR_NO_MEM);
+				return -QDMA_ERR_NO_MEM;
+			}
+			len += rv;
+		}
+	}
+
+	return len;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_dump_queue_context() - Function to get qdma queue context dump
+ * in a buffer
+ *
+ * @dev_hndl:   device handle
+ * @st:			Queue Mode(ST or MM)
+ * @q_type:		Queue type(H2C/C2H/CMPT)
+ * @ctxt_data:	Queue Context to be dumped (already read by the caller)
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int eqdma_dump_queue_context(void *dev_hndl,
+		uint8_t st,
+		enum qdma_dev_q_type q_type,
+		struct qdma_descq_context *ctxt_data,
+		char *buf, uint32_t buflen)
+{
+	int rv = 0;
+	uint32_t req_buflen = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!ctxt_data) {
+		qdma_log_error("%s: ctxt_data is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!buf) {
+		qdma_log_error("%s: buf is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (q_type >= QDMA_DEV_Q_TYPE_MAX) {
+		qdma_log_error("%s: invalid q_type, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Required buffer size depends on queue mode and type */
+	rv = eqdma_context_buf_len(st, q_type, &req_buflen);
+	if (rv != QDMA_SUCCESS)
+		return rv;
+
+	if (buflen < req_buflen) {
+		qdma_log_error("%s: Too small buffer(%d), reqd(%d), err:%d\n",
+			__func__, buflen, req_buflen, -QDMA_ERR_NO_MEM);
+		return -QDMA_ERR_NO_MEM;
+	}
+
+	rv = dump_eqdma_context(ctxt_data, st, q_type,
+				buf, buflen);
+
+	return rv;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_dump_intr_context() - Function to get qdma interrupt context dump
+ * in a buffer
+ *
+ * @dev_hndl:   device handle
+ * @intr_ctx:	Interrupt Context to dump (already read by the caller)
+ * @ring_index: Ring index
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int eqdma_dump_intr_context(void *dev_hndl,
+		struct qdma_indirect_intr_ctxt *intr_ctx,
+		int ring_index,
+		char *buf, uint32_t buflen)
+{
+	uint32_t min_buflen;
+
+	/* Validate every input before formatting anything */
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!intr_ctx) {
+		qdma_log_error("%s: intr_ctx is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!buf) {
+		qdma_log_error("%s: buf is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Caller's buffer must hold the full interrupt context dump */
+	min_buflen = eqdma_intr_context_buf_len();
+	if (buflen < min_buflen) {
+		qdma_log_error("%s: Too small buffer(%d), reqd(%d), err:%d\n",
+			__func__, buflen, min_buflen, -QDMA_ERR_NO_MEM);
+		return -QDMA_ERR_NO_MEM;
+	}
+
+	return dump_eqdma_intr_context(intr_ctx, ring_index, buf, buflen);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_read_dump_queue_context() - Function to read and dump the queue
+ * context in a buffer
+ *
+ * @dev_hndl:   device handle
+ * @qid_hw:     queue id
+ * @st:			Queue Mode(ST or MM)
+ * @q_type:		Queue type(H2C/C2H/CMPT)
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int eqdma_read_dump_queue_context(void *dev_hndl,
+		uint16_t qid_hw,
+		uint8_t st,
+		enum qdma_dev_q_type q_type,
+		char *buf, uint32_t buflen)
+{
+	int rv = QDMA_SUCCESS;
+	uint32_t req_buflen = 0;
+	struct qdma_descq_context context;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!buf) {
+		qdma_log_error("%s: buf is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (q_type >= QDMA_DEV_Q_TYPE_MAX) {
+		qdma_log_error("%s: Not supported for q_type, err = %d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = eqdma_context_buf_len(st, q_type, &req_buflen);
+	if (rv != QDMA_SUCCESS)
+		return rv;
+
+	if (buflen < req_buflen) {
+		qdma_log_error("%s: Too small buffer(%d), reqd(%d), err:%d\n",
+			__func__, buflen, req_buflen, -QDMA_ERR_NO_MEM);
+		return -QDMA_ERR_NO_MEM;
+	}
+
+	qdma_memset(&context, 0, sizeof(struct qdma_descq_context));
+
+	/* SW, HW and credit contexts exist only for H2C/C2H queues,
+	 * not for CMPT-only queues
+	 */
+	if (q_type != QDMA_DEV_Q_TYPE_CMPT) {
+		rv = eqdma_sw_ctx_conf(dev_hndl, (uint8_t)q_type, qid_hw,
+				&context.sw_ctxt, QDMA_HW_ACCESS_READ);
+		if (rv < 0) {
+			qdma_log_error
+			("%s: Failed to read sw context, err = %d",
+					__func__, rv);
+			return rv;
+		}
+
+		rv = eqdma_hw_ctx_conf(dev_hndl, (uint8_t)q_type, qid_hw,
+				&context.hw_ctxt, QDMA_HW_ACCESS_READ);
+		if (rv < 0) {
+			qdma_log_error
+			("%s: Failed to read hw context, err = %d",
+					__func__, rv);
+			return rv;
+		}
+
+		rv = eqdma_credit_ctx_conf(dev_hndl, (uint8_t)q_type,
+				qid_hw, &context.cr_ctxt,
+				QDMA_HW_ACCESS_READ);
+		if (rv < 0) {
+			qdma_log_error
+			("%s: Failed to read credit context, err = %d",
+					__func__, rv);
+			return rv;
+		}
+
+		/* prefetch context exists only for ST C2H queues */
+		if (st && q_type == QDMA_DEV_Q_TYPE_C2H) {
+			rv = eqdma_pfetch_ctx_conf(dev_hndl,
+					qid_hw,
+					&context.pfetch_ctxt,
+					QDMA_HW_ACCESS_READ);
+			if (rv < 0) {
+				qdma_log_error
+			("%s: Failed to read pftech context, err = %d",
+						__func__, rv);
+				return rv;
+			}
+		}
+	}
+
+	/* completion context applies to ST C2H and MM CMPT queues */
+	if ((st && q_type == QDMA_DEV_Q_TYPE_C2H) ||
+			(!st && q_type == QDMA_DEV_Q_TYPE_CMPT)) {
+		rv = eqdma_cmpt_ctx_conf(dev_hndl, qid_hw,
+						&context.cmpt_ctxt,
+						 QDMA_HW_ACCESS_READ);
+		if (rv < 0) {
+			qdma_log_error
+			("%s: Failed to read cmpt context, err = %d",
+					__func__, rv);
+			return rv;
+		}
+	}
+
+
+	rv = dump_eqdma_context(&context, st, q_type,
+				buf, buflen);
+
+	return rv;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_get_user_bar() - Function to get the AXI Master Lite(user bar) number
+ *
+ * @dev_hndl:	device handle
+ * @is_vf:	Whether PF or VF (selects which BAR-map register to read)
+ * @func_id:	function id of the PF
+ * @user_bar:	pointer to hold the AXI Master Lite bar number
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_get_user_bar(void *dev_hndl, uint8_t is_vf,
+		uint8_t func_id, uint8_t *user_bar)
+{
+	uint32_t bar_map;
+	uint8_t idx;
+	uint32_t reg_addr = (is_vf) ?  EQDMA_OFFSET_VF_USER_BAR :
+			EQDMA_OFFSET_GLBL2_PF_BARLITE_EXT;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+					__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!user_bar) {
+		qdma_log_error("%s: AXI Master Lite bar is NULL, err:%d\n",
+					__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Each function owns a 6-bit one-hot BAR map within the register */
+	bar_map = qdma_reg_read(dev_hndl, reg_addr);
+	bar_map = (bar_map >> (6 * func_id)) & 0x3F;
+
+	/* Report the lowest BAR whose bit is set in the map */
+	for (idx = 0; idx < QDMA_BAR_NUM; idx++) {
+		if (bar_map & (1 << idx)) {
+			*user_bar = idx;
+			return QDMA_SUCCESS;
+		}
+	}
+
+	*user_bar = 0;
+	qdma_log_error("%s: Bar not found, err:%d\n",
+				__func__,
+				-QDMA_ERR_HWACC_BAR_NOT_FOUND);
+	return -QDMA_ERR_HWACC_BAR_NOT_FOUND;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_ram_sbe_err_process() - Function to dump SBE error debug information
+ *
+ * @dev_hndl: device handle
+ *
+ * Return: void
+ *****************************************************************************/
+static void eqdma_hw_ram_sbe_err_process(void *dev_hndl)
+{
+	/* Log both single-bit-error status registers (NULL buf => log only) */
+	eqdma_dump_reg_info(dev_hndl, EQDMA_RAM_SBE_STS_A_ADDR,
+						1, NULL, 0);
+	eqdma_dump_reg_info(dev_hndl, EQDMA_RAM_SBE_STS_1_A_ADDR,
+						1, NULL, 0);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_ram_dbe_err_process() - Function to dump DBE error debug information
+ *
+ * @dev_hndl: device handle
+ *
+ * Return: void
+ *****************************************************************************/
+static void eqdma_hw_ram_dbe_err_process(void *dev_hndl)
+{
+	/* Log both double-bit-error status registers (NULL buf => log only) */
+	eqdma_dump_reg_info(dev_hndl, EQDMA_RAM_DBE_STS_A_ADDR,
+						1, NULL, 0);
+	eqdma_dump_reg_info(dev_hndl, EQDMA_RAM_DBE_STS_1_A_ADDR,
+						1, NULL, 0);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_desc_err_process() - Function to dump Descriptor Error information
+ *
+ * @dev_hndl: device handle
+ *
+ * Return: void
+ *****************************************************************************/
+static void eqdma_hw_desc_err_process(void *dev_hndl)
+{
+	/* Status, log and debug registers of the descriptor engine */
+	static const uint32_t err_regs[] = {
+		EQDMA_GLBL_DSC_ERR_STS_ADDR,
+		EQDMA_GLBL_DSC_ERR_LOG0_ADDR,
+		EQDMA_GLBL_DSC_ERR_LOG1_ADDR,
+		EQDMA_GLBL_DSC_DBG_DAT0_ADDR,
+		EQDMA_GLBL_DSC_DBG_DAT1_ADDR,
+		EQDMA_GLBL_DSC_ERR_LOG2_ADDR
+	};
+	unsigned int idx;
+
+	for (idx = 0; idx < sizeof(err_regs) / sizeof(err_regs[0]); idx++)
+		eqdma_dump_reg_info(dev_hndl, err_regs[idx], 1, NULL, 0);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_trq_err_process() - Function to dump Target Access Error information
+ *
+ * @dev_hndl: device handle
+ *
+ * Return: void
+ *****************************************************************************/
+static void eqdma_hw_trq_err_process(void *dev_hndl)
+{
+	/* Target-request error status and log registers */
+	static const uint32_t err_regs[] = {
+		EQDMA_GLBL_TRQ_ERR_STS_ADDR,
+		EQDMA_GLBL_TRQ_ERR_LOG_ADDR
+	};
+	unsigned int idx;
+
+	for (idx = 0; idx < sizeof(err_regs) / sizeof(err_regs[0]); idx++)
+		eqdma_dump_reg_info(dev_hndl, err_regs[idx], 1, NULL, 0);
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_st_h2c_err_process() - Function to dump ST H2C Error information
+ *
+ * @dev_hndl: device handle
+ *
+ * Return: void
+ *****************************************************************************/
+static void eqdma_hw_st_h2c_err_process(void *dev_hndl)
+{
+	/* Status, first-error QID and debug registers of the ST H2C engine */
+	static const uint32_t err_regs[] = {
+		EQDMA_H2C_ERR_STAT_ADDR,
+		EQDMA_H2C_FIRST_ERR_QID_ADDR,
+		EQDMA_H2C_DBG_REG0_ADDR,
+		EQDMA_H2C_DBG_REG1_ADDR,
+		EQDMA_H2C_DBG_REG2_ADDR,
+		EQDMA_H2C_DBG_REG3_ADDR,
+		EQDMA_H2C_DBG_REG4_ADDR
+	};
+	unsigned int idx;
+
+	for (idx = 0; idx < sizeof(err_regs) / sizeof(err_regs[0]); idx++)
+		eqdma_dump_reg_info(dev_hndl, err_regs[idx], 1, NULL, 0);
+}
+
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_st_c2h_err_process() - Function to dump ST C2H Error information
+ *
+ * @dev_hndl: device handle
+ *
+ * Return: void
+ *****************************************************************************/
+static void eqdma_hw_st_c2h_err_process(void *dev_hndl)
+{
+	/* Status, first-error QID and DMA-engine debug registers (ST C2H) */
+	static const uint32_t err_regs[] = {
+		EQDMA_C2H_ERR_STAT_ADDR,
+		EQDMA_C2H_FATAL_ERR_STAT_ADDR,
+		EQDMA_C2H_FIRST_ERR_QID_ADDR,
+		EQDMA_C2H_STAT_S_AXIS_C2H_ACCEPTED_ADDR,
+		EQDMA_C2H_STAT_S_AXIS_WRB_ACCEPTED_ADDR,
+		EQDMA_C2H_STAT_DESC_RSP_PKT_ACCEPTED_ADDR,
+		EQDMA_C2H_STAT_AXIS_PKG_CMP_ADDR,
+		EQDMA_C2H_STAT_DBG_DMA_ENG_0_ADDR,
+		EQDMA_C2H_STAT_DBG_DMA_ENG_1_ADDR,
+		EQDMA_C2H_STAT_DBG_DMA_ENG_2_ADDR,
+		EQDMA_C2H_STAT_DBG_DMA_ENG_3_ADDR,
+		EQDMA_C2H_STAT_DESC_RSP_DROP_ACCEPTED_ADDR,
+		EQDMA_C2H_STAT_DESC_RSP_ERR_ACCEPTED_ADDR
+	};
+	unsigned int idx;
+
+	for (idx = 0; idx < sizeof(err_regs) / sizeof(err_regs[0]); idx++)
+		eqdma_dump_reg_info(dev_hndl, err_regs[idx], 1, NULL, 0);
+}
+
+
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_get_error_name() - Function to get the error in string format
+ *
+ * @err_idx: error index
+ *
+ * Return: string - success and NULL on failure
+ *****************************************************************************/
+const char *eqdma_hw_get_error_name(uint32_t err_idx)
+{
+	/* Any index at or beyond EQDMA_ERRS_ALL is outside the table */
+	if (err_idx < EQDMA_ERRS_ALL)
+		return eqdma_err_info[(enum eqdma_error_idx)err_idx].err_name;
+
+	qdma_log_error("%s: err_idx=%d is invalid, returning NULL\n",
+			__func__, (enum eqdma_error_idx)err_idx);
+	return NULL;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_error_process() - Function to find the error that got
+ * triggered and call the handler qdma_hw_error_handler of that
+ * particular error.
+ *
+ * @dev_hndl: device handle
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_hw_error_process(void *dev_hndl)
+{
+	uint32_t glbl_err_stat = 0, err_stat = 0;
+	uint32_t bit = 0, i = 0;
+	int32_t idx = 0;
+	struct qdma_dev_attributes dev_cap;
+	/* First error index of each leaf error aggregator; used to locate
+	 * the aggregator's status register in eqdma_err_info[]
+	 */
+	uint32_t hw_err_position[EQDMA_TOTAL_LEAF_ERROR_AGGREGATORS] = {
+		EQDMA_DSC_ERR_POISON,
+		EQDMA_TRQ_ERR_CSR_UNMAPPED,
+		EQDMA_ST_C2H_ERR_MTY_MISMATCH,
+		EQDMA_ST_FATAL_ERR_MTY_MISMATCH,
+		EQDMA_ST_H2C_ERR_ZERO_LEN_DESC,
+		EQDMA_SBE_1_ERR_RC_RRQ_EVEN_RAM,
+		EQDMA_SBE_ERR_MI_H2C0_DAT,
+		EQDMA_DBE_1_ERR_RC_RRQ_EVEN_RAM,
+		EQDMA_DBE_ERR_MI_H2C0_DAT
+	};
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	glbl_err_stat = qdma_reg_read(dev_hndl, EQDMA_GLBL_ERR_STAT_ADDR);
+
+	if (!glbl_err_stat)
+		return QDMA_HW_ERR_NOT_DETECTED;
+
+
+	qdma_log_info("%s: Global Err Reg(0x%x) = 0x%x\n",
+				  __func__, EQDMA_GLBL_ERR_STAT_ADDR,
+				  glbl_err_stat);
+
+	for (i = 0; i < EQDMA_TOTAL_LEAF_ERROR_AGGREGATORS; i++) {
+		bit = hw_err_position[i];
+
+		/* ST-only aggregators are absent in MM-only designs */
+		if (!dev_cap.st_en && (bit == EQDMA_ST_C2H_ERR_MTY_MISMATCH ||
+				bit == EQDMA_ST_FATAL_ERR_MTY_MISMATCH ||
+				bit == EQDMA_ST_H2C_ERR_ZERO_LEN_DESC))
+			continue;
+
+		err_stat = qdma_reg_read(dev_hndl,
+				eqdma_err_info[bit].stat_reg_addr);
+		if (err_stat) {
+			qdma_log_info("addr = 0x%08x val = 0x%08x",
+					eqdma_err_info[bit].stat_reg_addr,
+					err_stat);
+
+			/* per-aggregator dump handler (e.g. SBE/DBE dump) */
+			eqdma_err_info[bit].eqdma_hw_err_process(dev_hndl);
+			for (idx = bit; idx < all_eqdma_hw_errs[i]; idx++) {
+				/* call the platform specific handler */
+				if (err_stat &
+				eqdma_err_info[idx].leaf_err_mask)
+					qdma_log_error("%s detected %s\n",
+						__func__,
+						eqdma_hw_get_error_name(idx));
+			}
+			/* write the read value back to clear the leaf bits */
+			qdma_reg_write(dev_hndl,
+					eqdma_err_info[bit].stat_reg_addr,
+					err_stat);
+		}
+	}
+
+	/* Write 1 to the global status register to clear the bits */
+	qdma_reg_write(dev_hndl, EQDMA_GLBL_ERR_STAT_ADDR, glbl_err_stat);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_hw_error_enable() - Function to enable all or a specific error
+ *
+ * @dev_hndl: device handle
+ * @err_idx: error index (EQDMA_ERRS_ALL enables every supported error)
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_hw_error_enable(void *dev_hndl, uint32_t err_idx)
+{
+	uint32_t idx = 0, i = 0;
+	uint32_t reg_val = 0;
+	struct qdma_dev_attributes dev_cap;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* EQDMA_ERRS_ALL itself is valid; only indices beyond it are not */
+	if (err_idx > EQDMA_ERRS_ALL) {
+		qdma_log_error("%s: err_idx=%d is invalid, err:%d\n",
+				__func__, (enum eqdma_error_idx)err_idx,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	if (err_idx == EQDMA_ERRS_ALL) {
+		for (i = 0; i < EQDMA_TOTAL_LEAF_ERROR_AGGREGATORS; i++) {
+			idx = all_eqdma_hw_errs[i];
+
+			/* Don't access streaming registers in
+			 * MM only bitstreams
+			 */
+			if (!dev_cap.st_en) {
+				if (idx == EQDMA_ST_C2H_ERR_ALL ||
+					idx == EQDMA_ST_FATAL_ERR_ALL ||
+					idx == EQDMA_ST_H2C_ERR_ALL)
+					continue;
+			}
+
+			/* unmask every leaf error of this aggregator */
+			reg_val = eqdma_err_info[idx].leaf_err_mask;
+			qdma_reg_write(dev_hndl,
+				eqdma_err_info[idx].mask_reg_addr, reg_val);
+
+			/* and enable the aggregator in the global mask */
+			reg_val = qdma_reg_read(dev_hndl,
+					EQDMA_GLBL_ERR_MASK_ADDR);
+			reg_val |= FIELD_SET
+				(eqdma_err_info[idx].global_err_mask, 1);
+			qdma_reg_write(dev_hndl, EQDMA_GLBL_ERR_MASK_ADDR,
+					reg_val);
+		}
+
+	} else {
+		/* Don't access streaming registers in MM only bitstreams
+		 *  QDMA_C2H_ERR_MTY_MISMATCH to QDMA_H2C_ERR_ALL are all
+		 *  ST errors
+		 */
+		if (!dev_cap.st_en) {
+			if (err_idx >= EQDMA_ST_C2H_ERR_MTY_MISMATCH &&
+					err_idx <= EQDMA_ST_H2C_ERR_ALL)
+				return QDMA_SUCCESS;
+		}
+
+		/* read-modify-write: enable just the requested leaf error */
+		reg_val = qdma_reg_read(dev_hndl,
+				eqdma_err_info[err_idx].mask_reg_addr);
+		reg_val |= FIELD_SET(eqdma_err_info[err_idx].leaf_err_mask, 1);
+		qdma_reg_write(dev_hndl,
+				eqdma_err_info[err_idx].mask_reg_addr, reg_val);
+
+		reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL_ERR_MASK_ADDR);
+		reg_val |=
+			FIELD_SET(eqdma_err_info[err_idx].global_err_mask, 1);
+		qdma_reg_write(dev_hndl, EQDMA_GLBL_ERR_MASK_ADDR, reg_val);
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_get_device_attributes() - Function to get the qdma device
+ * attributes
+ *
+ * @dev_hndl:	device handle
+ * @dev_info:	pointer to hold the device info
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_get_device_attributes(void *dev_hndl,
+		struct qdma_dev_attributes *dev_info)
+{
+	uint8_t pf_count = 0;
+	uint32_t val = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+	if (!dev_info) {
+		qdma_log_error("%s: dev_info is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* number of PFs: one BAR-map field per PF in the BARLITE register */
+	val = qdma_reg_read(dev_hndl, QDMA_OFFSET_GLBL2_PF_BARLITE_INT);
+	pf_count += FIELD_GET(QDMA_GLBL2_PF0_BAR_MAP_MASK, val) ? 1 : 0;
+	pf_count += FIELD_GET(QDMA_GLBL2_PF1_BAR_MAP_MASK, val) ? 1 : 0;
+	pf_count += FIELD_GET(QDMA_GLBL2_PF2_BAR_MAP_MASK, val) ? 1 : 0;
+	pf_count += FIELD_GET(QDMA_GLBL2_PF3_BAR_MAP_MASK, val) ? 1 : 0;
+	dev_info->num_pfs = pf_count;
+
+	/* Number of Qs */
+	val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_CHANNEL_CAP_ADDR);
+	dev_info->num_qs =
+			FIELD_GET(GLBL2_CHANNEL_CAP_MULTIQ_MAX_MASK, val);
+
+	/* misc capabilities: mailbox, FLR, debug mode, descriptor engine */
+	val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_MISC_CAP_ADDR);
+	dev_info->mailbox_en = FIELD_GET(EQDMA_GLBL2_MAILBOX_EN_MASK, val);
+	dev_info->flr_present = FIELD_GET(EQDMA_GLBL2_FLR_PRESENT_MASK, val);
+	dev_info->mm_cmpt_en  = 0;
+	dev_info->debug_mode = FIELD_GET(EQDMA_GLBL2_DBG_MODE_EN_MASK, val);
+	dev_info->desc_eng_mode =
+		FIELD_GET(EQDMA_GLBL2_DESC_ENG_MODE_MASK, val);
+
+	/* ST/MM enabled only when both C2H and H2C directions report it */
+	val = qdma_reg_read(dev_hndl, EQDMA_GLBL2_CHANNEL_MDMA_ADDR);
+	dev_info->st_en =
+		(FIELD_GET(GLBL2_CHANNEL_MDMA_C2H_ST_MASK, val) &&
+		 FIELD_GET(GLBL2_CHANNEL_MDMA_H2C_ST_MASK, val)) ? 1 : 0;
+	dev_info->mm_en =
+		(FIELD_GET(GLBL2_CHANNEL_MDMA_C2H_ENG_MASK, val) &&
+		 FIELD_GET(GLBL2_CHANNEL_MDMA_H2C_ENG_MASK, val)) ? 1 : 0;
+
+	/* num of mm channels */
+	/* TODO : Register not yet defined for this. Hard coding it to 1.*/
+	dev_info->mm_channel_max = 1;
+
+	/* fixed EQDMA feature set */
+	dev_info->qid2vec_ctx = 0;
+	dev_info->cmpt_ovf_chk_dis = 1;
+	dev_info->mailbox_intr = 1;
+	dev_info->sw_desc_64b = 1;
+	dev_info->cmpt_desc_64b = 1;
+	dev_info->dynamic_bar = 1;
+	dev_info->legacy_intr = 1;
+	dev_info->cmpt_trig_count_timer = 1;
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_init_ctxt_memory() - function to initialize the context memory
+ *
+ * Clears the indirect context (SW/HW/credit/CMPT/PFTCH) of every queue and
+ * the FMAP context of every PF. Compiled to a no-op (after the handle check)
+ * unless ENABLE_INIT_CTXT_MEMORY is defined.
+ *
+ * @dev_hndl: device handle
+ *
+ * Return: returns the platform specific error code
+ *****************************************************************************/
+int eqdma_init_ctxt_memory(void *dev_hndl)
+{
+#ifdef ENABLE_INIT_CTXT_MEMORY
+	uint32_t data[QDMA_REG_IND_CTXT_REG_COUNT];
+	uint16_t i = 0;
+	struct qdma_dev_attributes dev_info;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+					__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_memset(data, 0, sizeof(uint32_t) * QDMA_REG_IND_CTXT_REG_COUNT);
+	eqdma_get_device_attributes(dev_hndl, &dev_info);
+
+	/* clear every context selector for every queue in the design */
+	for (; i < dev_info.num_qs; i++) {
+		int sel = QDMA_CTXT_SEL_SW_C2H;
+		int rv;
+
+		for (; sel <= QDMA_CTXT_SEL_PFTCH; sel++) {
+			/** if the st mode(h2c/c2h) not enabled
+			 *  in the design, then skip the PFTCH
+			 *  and CMPT context setup
+			 */
+			if (dev_info.st_en == 0 &&
+				(sel == QDMA_CTXT_SEL_PFTCH ||
+				sel == QDMA_CTXT_SEL_CMPT)) {
+				qdma_log_debug("%s: ST context is skipped:",
+					__func__);
+				qdma_log_debug("sel = %d\n", sel);
+				continue;
+			}
+
+			rv = eqdma_indirect_reg_clear(dev_hndl,
+					(enum ind_ctxt_cmd_sel)sel, i);
+			if (rv < 0)
+				return rv;
+		}
+	}
+
+	/* fmap */
+	for (i = 0; i < dev_info.num_pfs; i++)
+		eqdma_indirect_reg_clear(dev_hndl,
+				QDMA_CTXT_SEL_FMAP, i);
+
+#else
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+					__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+#endif
+	return QDMA_SUCCESS;
+}
+
+
+/* Look up @reg_addr in the EQDMA config register table and store its index
+ * in *@reg_entry. Returns 0 on success; on a miss, sets *@reg_entry to -1
+ * and returns -QDMA_ERR_INV_PARAM. The last table entry is never matched
+ * (it terminates the list).
+ */
+static int get_reg_entry(uint32_t reg_addr, int *reg_entry)
+{
+	struct xreg_info *regs = eqdma_config_regs_get();
+	uint32_t total = eqdma_config_num_regs_get();
+	uint32_t idx;
+
+	for (idx = 0; idx < total - 1; idx++) {
+		if (regs[idx].addr == reg_addr) {
+			*reg_entry = idx;
+			return 0;
+		}
+	}
+
+	qdma_log_error("%s: 0x%08x is missing register list, err:%d\n",
+				__func__,
+				reg_addr,
+				-QDMA_ERR_INV_PARAM);
+	*reg_entry = -1;
+	return -QDMA_ERR_INV_PARAM;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_dump_config_reg_list() - Dump the registers
+ *
+ * @dev_hndl:		device handle
+ * @total_regs :	Max registers to read
+ * @reg_list :		array of reg addr and reg values
+ * @buf :		pointer to buffer to be filled
+ * @buflen :		Length of the buffer
+ *
+ * Return: returns the platform specific error code
+ *****************************************************************************/
+int eqdma_dump_config_reg_list(void *dev_hndl, uint32_t total_regs,
+		struct qdma_reg_data *reg_list, char *buf, uint32_t buflen)
+{
+	uint32_t j = 0, len = 0;
+	uint32_t reg_count = 0;
+	int reg_data_entry;
+	int rv = 0;
+	char name[DEBGFS_GEN_NAME_SZ] = "";
+	struct xreg_info *reg_info = eqdma_config_regs_get();
+	struct qdma_dev_attributes dev_cap;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!buf) {
+		qdma_log_error("%s: buf is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	for (reg_count = 0;
+			(reg_count < total_regs);) {
+		/* Map the captured address to its register-table entry first,
+		 * so the debug-register check below indexes the register
+		 * table and not the captured value list.
+		 */
+		rv = get_reg_entry(reg_list[reg_count].reg_addr,
+					&reg_data_entry);
+		if (rv < 0) {
+			qdma_log_error("%s: register missing in list, err:%d\n",
+						   __func__,
+						   -QDMA_ERR_INV_PARAM);
+			return rv;
+		}
+
+		/* If Debug Mode not enabled and the current register
+		 * is debug register, skip dumping it. Advance past its
+		 * captured entries to guarantee forward progress (the
+		 * previous code continued without incrementing reg_count
+		 * and indexed reg_info[] with reg_count, looping forever
+		 * whenever this branch was taken).
+		 */
+		if (dev_cap.debug_mode == 0 &&
+				reg_info[reg_data_entry].is_debug_reg == 1) {
+			reg_count += reg_info[reg_data_entry].repeat;
+			continue;
+		}
+
+		/* One dump line per array element of the register */
+		for (j = 0; j < reg_info[reg_data_entry].repeat; j++) {
+			rv = QDMA_SNPRINTF_S(name, DEBGFS_GEN_NAME_SZ,
+					DEBGFS_GEN_NAME_SZ,
+					"%s_%d",
+					reg_info[reg_data_entry].name, j);
+			if (rv < 0 || rv > DEBGFS_GEN_NAME_SZ) {
+				qdma_log_error
+					("%d:%s snprintf failed, err:%d\n",
+					__LINE__, __func__,
+					rv);
+				return -QDMA_ERR_NO_MEM;
+			}
+			rv = dump_reg(buf + len, buflen - len,
+				(reg_info[reg_data_entry].addr + (j * 4)),
+					name,
+					reg_list[reg_count + j].reg_val);
+			if (rv < 0) {
+				qdma_log_error
+				("%s Buff too small, err:%d\n",
+				__func__,
+				-QDMA_ERR_NO_MEM);
+				return -QDMA_ERR_NO_MEM;
+			}
+			len += rv;
+		}
+		reg_count += j;
+	}
+
+	return len;
+}
+
+
+/*****************************************************************************/
+/**
+ * eqdma_read_reg_list() - read the register values
+ *
+ * @dev_hndl:		device handle
+ * @is_vf:		Whether PF or VF (VF only; a PF caller gets an error)
+ * @reg_rd_group:	register group to read (QDMA_REG_READ_GROUP_1..4)
+ * @total_regs :	output: number of registers actually read
+ * @reg_list :		array of reg addr and reg values
+ *
+ * Return: returns the platform specific error code
+ *****************************************************************************/
+int eqdma_read_reg_list(void *dev_hndl, uint8_t is_vf,
+		uint16_t reg_rd_group,
+		uint16_t *total_regs,
+		struct qdma_reg_data *reg_list)
+{
+	uint16_t reg_count = 0, i = 0, j = 0;
+	struct xreg_info *reg_info;
+	uint32_t num_regs = eqdma_config_num_regs_get();
+	struct xreg_info *eqdma_config_regs = eqdma_config_regs_get();
+	struct qdma_dev_attributes dev_cap;
+	uint32_t reg_start_addr = 0;
+	int reg_index = 0;
+	int rv = 0;
+
+	/* this path serves VF mailbox requests only */
+	if (!is_vf) {
+		qdma_log_error("%s: not supported for PF, err:%d\n",
+				__func__,
+				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!reg_list) {
+		qdma_log_error("%s: reg_list is NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	/* translate the requested group into its first register address */
+	switch (reg_rd_group) {
+	case QDMA_REG_READ_GROUP_1:
+			reg_start_addr = EQDMA_REG_GROUP_1_START_ADDR;
+			break;
+	case QDMA_REG_READ_GROUP_2:
+			reg_start_addr = EQDMA_REG_GROUP_2_START_ADDR;
+			break;
+	case QDMA_REG_READ_GROUP_3:
+			reg_start_addr = EQDMA_REG_GROUP_3_START_ADDR;
+			break;
+	case QDMA_REG_READ_GROUP_4:
+			reg_start_addr = EQDMA_REG_GROUP_4_START_ADDR;
+			break;
+	default:
+		qdma_log_error("%s: Invalid slot received\n",
+			   __func__);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = get_reg_entry(reg_start_addr, &reg_index);
+	if (rv < 0) {
+		qdma_log_error("%s: register missing in list, err:%d\n",
+					   __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return rv;
+	}
+	reg_info = &eqdma_config_regs[reg_index];
+
+	/* read from the group start up to the table end or the list cap */
+	for (i = 0, reg_count = 0;
+			((i < num_regs - 1 - reg_index) &&
+			(reg_count < QDMA_MAX_REGISTER_DUMP)); i++) {
+		int mask = get_capability_mask(dev_cap.mm_en, dev_cap.st_en,
+				dev_cap.mm_cmpt_en, dev_cap.mailbox_en);
+
+		if (((mask & reg_info[i].mode) == 0) ||
+			reg_info[i].read_type == QDMA_REG_READ_PF_ONLY)
+			continue;
+
+		/* If Debug Mode not enabled and the current register
+		 * is debug register, skip reading it.
+		 */
+		if (dev_cap.debug_mode == 0 &&
+				reg_info[i].is_debug_reg == 1)
+			continue;
+
+		/* one list entry per array element of the register */
+		for (j = 0; j < reg_info[i].repeat &&
+				(reg_count < QDMA_MAX_REGISTER_DUMP);
+				j++) {
+			reg_list[reg_count].reg_addr =
+					(reg_info[i].addr + (j * 4));
+			reg_list[reg_count].reg_val =
+				qdma_reg_read(dev_hndl,
+					reg_list[reg_count].reg_addr);
+			reg_count++;
+		}
+	}
+
+	*total_regs = reg_count;
+	return rv;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_write_global_ring_sizes() - function to set the global ring size array
+ *
+ * @dev_hndl:   device handle
+ * @index: Index from where the values needs to written
+ * @count: number of entries to be written
+ * @glbl_rng_sz: pointer to the array having the values to write
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_write_global_ring_sizes(void *dev_hndl, uint8_t index,
+				uint8_t count, const uint32_t *glbl_rng_sz)
+{
+	/* reject NULL handle/array and zero-length writes up front */
+	if (!dev_hndl || !glbl_rng_sz || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_rng_sz=%p, err:%d\n",
+				__func__, dev_hndl, glbl_rng_sz,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* the CSR array holds QDMA_NUM_RING_SIZES entries in total */
+	if ((index + count) > QDMA_NUM_RING_SIZES) {
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+				__func__, index, count,
+				QDMA_NUM_RING_SIZES,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_write_csr_values(dev_hndl, EQDMA_GLBL_RNG_SZ_1_ADDR,
+			index, count, glbl_rng_sz);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_read_global_ring_sizes() - function to get the global rng_sz array
+ *
+ * @dev_hndl:   device handle
+ * @index:	 Index from where the values needs to read
+ * @count:	 number of entries to be read
+ * @glbl_rng_sz: pointer to array to hold the values read
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_read_global_ring_sizes(void *dev_hndl, uint8_t index,
+				uint8_t count, uint32_t *glbl_rng_sz)
+{
+	if (!dev_hndl || !glbl_rng_sz || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_rng_sz=%p, err:%d\n",
+					   __func__, dev_hndl, glbl_rng_sz,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if ((index + count) > QDMA_NUM_RING_SIZES) {
+		/* log the ring-size limit being enforced (the old message
+		 * printed QDMA_NUM_C2H_BUFFER_SIZES, a different table's
+		 * limit than the one checked above)
+		 */
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+					   __func__, index, count,
+					   QDMA_NUM_RING_SIZES,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_read_csr_values(dev_hndl, EQDMA_GLBL_RNG_SZ_1_ADDR, index, count,
+			glbl_rng_sz);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_write_global_timer_count() - function to set the timer values
+ *
+ * @dev_hndl:   device handle
+ * @glbl_tmr_cnt: pointer to the array having the values to write
+ * @index:	 Index from where the values needs to written
+ * @count:	 number of entries to be written
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_write_global_timer_count(void *dev_hndl, uint8_t index,
+				uint8_t count, const uint32_t *glbl_tmr_cnt)
+{
+	struct qdma_dev_attributes dev_attr;
+
+	/* Reject NULL handle/array and zero-length writes up front */
+	if (!dev_hndl || !glbl_tmr_cnt || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_tmr_cnt=%p, err:%d\n",
+				__func__, dev_hndl, glbl_tmr_cnt,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Requested window must stay inside the timer table */
+	if ((index + count) > QDMA_NUM_C2H_TIMERS) {
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+				__func__, index, count,
+				QDMA_NUM_C2H_TIMERS,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_attr);
+
+	/* Timer CSRs require either ST or MM completion support */
+	if (!dev_attr.st_en && !dev_attr.mm_cmpt_en) {
+		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
+				__func__,
+				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	qdma_write_csr_values(dev_hndl, EQDMA_C2H_TIMER_CNT_ADDR,
+			index, count, glbl_tmr_cnt);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_read_global_timer_count() - function to get the timer values
+ *
+ * @dev_hndl:   device handle
+ * @index:	 Index from where the values needs to read
+ * @count:	 number of entries to be read
+ * @glbl_tmr_cnt: pointer to array to hold the values read
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_read_global_timer_count(void *dev_hndl, uint8_t index,
+				uint8_t count, uint32_t *glbl_tmr_cnt)
+{
+	struct qdma_dev_attributes dev_attr;
+
+	/* Validate handle, destination array and entry count */
+	if (!dev_hndl || !glbl_tmr_cnt || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_tmr_cnt=%p, err:%d\n",
+				__func__, dev_hndl, glbl_tmr_cnt,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Requested window must stay inside the timer table */
+	if ((index + count) > QDMA_NUM_C2H_TIMERS) {
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+				__func__, index, count,
+				QDMA_NUM_C2H_TIMERS,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_attr);
+
+	/* Timer CSRs require either ST or MM completion support */
+	if (!dev_attr.st_en && !dev_attr.mm_cmpt_en) {
+		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
+				__func__,
+				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	qdma_read_csr_values(dev_hndl,
+			EQDMA_C2H_TIMER_CNT_ADDR, index,
+			count, glbl_tmr_cnt);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_write_global_counter_threshold() - function to set the counter
+ *						threshold values
+ *
+ * @dev_hndl:   device handle
+ * @index:	 Index from where the values needs to written
+ * @count:	 number of entries to be written
+ * @glbl_cnt_th: pointer to the array having the values to write
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_write_global_counter_threshold(void *dev_hndl, uint8_t index,
+		uint8_t count, const uint32_t *glbl_cnt_th)
+{
+	struct qdma_dev_attributes dev_cap;
+
+	if (!dev_hndl || !glbl_cnt_th || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_cnt_th=%p, err:%d\n",
+					   __func__, dev_hndl, glbl_cnt_th,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if ((index + count) > QDMA_NUM_C2H_COUNTERS) {
+		/* Log the counter limit actually checked, not the
+		 * C2H buffer-size limit (copy/paste fix).
+		 */
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+					   __func__, index, count,
+					   QDMA_NUM_C2H_COUNTERS,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	if (dev_cap.st_en || dev_cap.mm_cmpt_en) {
+		qdma_write_csr_values(dev_hndl, EQDMA_C2H_CNT_TH_ADDR, index,
+				count, glbl_cnt_th);
+	} else {
+		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
+				__func__,
+				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_read_global_counter_threshold() - function to get the counter threshold
+ * values
+ *
+ * @dev_hndl:   device handle
+ * @index:	 Index from where the values needs to read
+ * @count:	 number of entries to be read
+ * @glbl_cnt_th: pointer to array to hold the values read
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_read_global_counter_threshold(void *dev_hndl, uint8_t index,
+		uint8_t count, uint32_t *glbl_cnt_th)
+{
+	struct qdma_dev_attributes dev_attr;
+
+	/* Validate handle, destination array and entry count */
+	if (!dev_hndl || !glbl_cnt_th || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_cnt_th=%p, err:%d\n",
+				__func__, dev_hndl, glbl_cnt_th,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Requested window must stay inside the counter table */
+	if ((index + count) > QDMA_NUM_C2H_COUNTERS) {
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+				__func__, index, count,
+				QDMA_NUM_C2H_COUNTERS,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_attr);
+
+	/* Counter CSRs require either ST or MM completion support */
+	if (!dev_attr.st_en && !dev_attr.mm_cmpt_en) {
+		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
+			   __func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	qdma_read_csr_values(dev_hndl, EQDMA_C2H_CNT_TH_ADDR, index,
+			count, glbl_cnt_th);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_write_global_buffer_sizes() - function to set the buffer sizes
+ *
+ * @dev_hndl:   device handle
+ * @index:	 Index from where the values needs to written
+ * @count:	 number of entries to be written
+ * @glbl_buf_sz: pointer to the array having the values to write
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_write_global_buffer_sizes(void *dev_hndl, uint8_t index,
+		uint8_t count, const uint32_t *glbl_buf_sz)
+{
+	struct qdma_dev_attributes dev_attr;
+
+	/* Reject NULL handle/array and zero-length writes up front */
+	if (!dev_hndl || !glbl_buf_sz || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_buf_sz=%p, err:%d\n",
+				__func__, dev_hndl, glbl_buf_sz,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Requested window must stay inside the buffer-size table */
+	if ((index + count) > QDMA_NUM_C2H_BUFFER_SIZES) {
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+				__func__, index, count,
+				QDMA_NUM_C2H_BUFFER_SIZES,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_attr);
+
+	/* Buffer-size CSRs apply to streaming (ST) mode only */
+	if (!dev_attr.st_en) {
+		qdma_log_error("%s: ST not supported, err:%d\n",
+				__func__,
+				-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	qdma_write_csr_values(dev_hndl, EQDMA_C2H_BUF_SZ_ADDR, index,
+			count, glbl_buf_sz);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_read_global_buffer_sizes() - function to get the buffer sizes
+ *
+ * @dev_hndl:   device handle
+ * @index:	 Index from where the values needs to read
+ * @count:	 number of entries to be read
+ * @glbl_buf_sz: pointer to array to hold the values read
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_read_global_buffer_sizes(void *dev_hndl, uint8_t index,
+				uint8_t count, uint32_t *glbl_buf_sz)
+{
+	struct qdma_dev_attributes dev_attr;
+
+	/* Validate handle, destination array and entry count */
+	if (!dev_hndl || !glbl_buf_sz || !count) {
+		qdma_log_error("%s: dev_hndl=%p glbl_buf_sz=%p, err:%d\n",
+				__func__, dev_hndl, glbl_buf_sz,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Requested window must stay inside the buffer-size table */
+	if ((index + count) > QDMA_NUM_C2H_BUFFER_SIZES) {
+		qdma_log_error("%s: index=%u count=%u > %d, err:%d\n",
+				__func__, index, count,
+				QDMA_NUM_C2H_BUFFER_SIZES,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_attr);
+
+	/* Buffer-size CSRs apply to streaming (ST) mode only */
+	if (!dev_attr.st_en) {
+		qdma_log_error("%s: ST is not supported, err:%d\n",
+					__func__,
+					-QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	qdma_read_csr_values(dev_hndl, EQDMA_C2H_BUF_SZ_ADDR, index,
+			count, glbl_buf_sz);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_global_csr_conf() - function to configure global csr
+ *
+ * @dev_hndl:	device handle
+ * @index:	Index from where the values needs to read
+ * @count:	number of entries to be read
+ * @csr_val:	uint32_t pointer to csr value
+ * @csr_type:	Type of the CSR (qdma_global_csr_type enum) to configure
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *		QDMA_HW_ACCESS_CLEAR - Not supported
+ *		QDMA_HW_ACCESS_INVALIDATE - Not supported
+ *
+ * (index + count) shall not be more than 16
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
+				uint32_t *csr_val,
+				enum qdma_global_csr_type csr_type,
+				enum qdma_hw_access_type access_type)
+{
+	int rv = QDMA_SUCCESS;
+
+	/* Dispatch on the CSR type first, then on the requested access.
+	 * Only READ and WRITE are supported for global CSRs.
+	 */
+	switch (csr_type) {
+	case QDMA_CSR_RING_SZ:
+		if (access_type == QDMA_HW_ACCESS_READ) {
+			rv = eqdma_read_global_ring_sizes(dev_hndl, index,
+					count, csr_val);
+		} else if (access_type == QDMA_HW_ACCESS_WRITE) {
+			rv = eqdma_write_global_ring_sizes(dev_hndl, index,
+					count, csr_val);
+		} else {
+			qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+					__func__, access_type,
+					-QDMA_ERR_INV_PARAM);
+			rv = -QDMA_ERR_INV_PARAM;
+		}
+		break;
+	case QDMA_CSR_TIMER_CNT:
+		if (access_type == QDMA_HW_ACCESS_READ) {
+			rv = eqdma_read_global_timer_count(dev_hndl, index,
+					count, csr_val);
+		} else if (access_type == QDMA_HW_ACCESS_WRITE) {
+			rv = eqdma_write_global_timer_count(dev_hndl, index,
+					count, csr_val);
+		} else {
+			qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+					__func__, access_type,
+					-QDMA_ERR_INV_PARAM);
+			rv = -QDMA_ERR_INV_PARAM;
+		}
+		break;
+	case QDMA_CSR_CNT_TH:
+		if (access_type == QDMA_HW_ACCESS_READ) {
+			rv = eqdma_read_global_counter_threshold(dev_hndl,
+					index, count, csr_val);
+		} else if (access_type == QDMA_HW_ACCESS_WRITE) {
+			rv = eqdma_write_global_counter_threshold(dev_hndl,
+					index, count, csr_val);
+		} else {
+			qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+					__func__, access_type,
+					-QDMA_ERR_INV_PARAM);
+			rv = -QDMA_ERR_INV_PARAM;
+		}
+		break;
+	case QDMA_CSR_BUF_SZ:
+		if (access_type == QDMA_HW_ACCESS_READ) {
+			rv = eqdma_read_global_buffer_sizes(dev_hndl, index,
+					count, csr_val);
+		} else if (access_type == QDMA_HW_ACCESS_WRITE) {
+			rv = eqdma_write_global_buffer_sizes(dev_hndl, index,
+					count, csr_val);
+		} else {
+			qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+					__func__, access_type,
+					-QDMA_ERR_INV_PARAM);
+			rv = -QDMA_ERR_INV_PARAM;
+		}
+		break;
+	default:
+		qdma_log_error("%s: csr_type(%d) invalid, err:%d\n",
+				__func__, csr_type,
+				-QDMA_ERR_INV_PARAM);
+		rv = -QDMA_ERR_INV_PARAM;
+		break;
+	}
+
+	return rv;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_global_writeback_interval_write() -  function to set the writeback
+ * interval
+ *
+ * @dev_hndl	device handle
+ * @wb_int:	Writeback Interval
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_global_writeback_interval_write(void *dev_hndl,
+		enum qdma_wrb_interval wb_int)
+{
+	uint32_t reg_val;
+	struct qdma_dev_attributes dev_cap;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (wb_int >=  QDMA_NUM_WRB_INTERVALS) {
+		qdma_log_error("%s: wb_int=%d is invalid, err:%d\n",
+					   __func__, wb_int,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	if (dev_cap.st_en || dev_cap.mm_cmpt_en) {
+		reg_val = qdma_reg_read(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR);
+		/* Clear the current interval field before inserting the
+		 * new value; a plain OR would leave stale bits set when
+		 * a smaller interval is written over a larger one.
+		 */
+		reg_val &= ~GLBL_DSC_CFG_WB_ACC_INT_MASK;
+		reg_val |= FIELD_SET(GLBL_DSC_CFG_WB_ACC_INT_MASK, wb_int);
+		qdma_reg_write(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR, reg_val);
+	} else {
+		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
+			   __func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_global_writeback_interval_read() -  function to get the writeback
+ * interval
+ *
+ * @dev_hndl:	device handle
+ * @wb_int:	pointer to the data to hold Writeback Interval
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int eqdma_global_writeback_interval_read(void *dev_hndl,
+		enum qdma_wrb_interval *wb_int)
+{
+	struct qdma_dev_attributes dev_attr;
+	uint32_t cfg;
+
+	/* Validate the handle and the output pointer separately so each
+	 * failure is reported with its own message.
+	 */
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n", __func__,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!wb_int) {
+		qdma_log_error("%s: wb_int is NULL, err:%d\n", __func__,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_attr);
+
+	/* Feature requires either ST or MM completion support */
+	if (!dev_attr.st_en && !dev_attr.mm_cmpt_en) {
+		qdma_log_error("%s: ST or MM cmpt not supported, err:%d\n",
+			   __func__, -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED);
+		return -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED;
+	}
+
+	/* Extract the interval field from the descriptor config register */
+	cfg = qdma_reg_read(dev_hndl, EQDMA_GLBL_DSC_CFG_ADDR);
+	*wb_int = (enum qdma_wrb_interval)FIELD_GET
+			(GLBL_DSC_CFG_WB_ACC_INT_MASK, cfg);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_global_writeback_interval_conf() - function to configure
+ *					the writeback interval
+ *
+ * @dev_hndl:   device handle
+ * @wb_int:	pointer to the data to hold Writeback Interval
+ * @access_type HW access type (qdma_hw_access_type enum) value
+ *		QDMA_HW_ACCESS_CLEAR - Not supported
+ *		QDMA_HW_ACCESS_INVALIDATE - Not supported
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_global_writeback_interval_conf(void *dev_hndl,
+				enum qdma_wrb_interval *wb_int,
+				enum qdma_hw_access_type access_type)
+{
+	/* Only plain read and write are meaningful for this attribute */
+	if (access_type == QDMA_HW_ACCESS_READ)
+		return eqdma_global_writeback_interval_read(dev_hndl, wb_int);
+
+	if (access_type == QDMA_HW_ACCESS_WRITE)
+		return eqdma_global_writeback_interval_write(dev_hndl,
+				*wb_int);
+
+	/* QDMA_HW_ACCESS_CLEAR and QDMA_HW_ACCESS_INVALIDATE (and any
+	 * other value) are not supported.
+	 */
+	qdma_log_error("%s: access_type(%d) invalid, err:%d\n",
+			__func__, access_type, -QDMA_ERR_INV_PARAM);
+	return -QDMA_ERR_INV_PARAM;
+}
+
+
+/*****************************************************************************/
+/**
+ * eqdma_mm_channel_conf() - Function to enable/disable the MM channel
+ *
+ * @dev_hndl:	device handle
+ * @channel:	MM channel number
+ * @is_c2h:	Queue direction. Set 1 for C2H and 0 for H2C
+ * @enable:	Enable or disable MM channel
+ *
+ * Presently, we have only 1 MM channel
+ *
+ * Return:   0   - success and < 0 - failure
+ *****************************************************************************/
+int eqdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
+				uint8_t enable)
+{
+	/* Pick the control register for the requested direction */
+	uint32_t reg_addr = (is_c2h) ?  EQDMA_C2H_MM_CTL_ADDR :
+			EQDMA_H2C_MM_CTL_ADDR;
+	struct qdma_dev_attributes dev_cap;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	/* NOTE(review): when MM is not enabled the register write is
+	 * silently skipped and success is still returned -- unlike the
+	 * CSR helpers above, no FEATURE_NOT_SUPPORTED error is raised.
+	 * Confirm this is intentional.
+	 */
+	if (dev_cap.mm_en)
+		qdma_reg_write(dev_hndl,
+				reg_addr + (channel * QDMA_MM_CONTROL_STEP),
+				enable);
+
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * eqdma_dump_reg_info() - dump a range of config registers along with
+ * their bit-field breakdown
+ *
+ * @dev_hndl:	device handle
+ * @reg_addr:	address of the first register to dump
+ * @num_regs:	number of consecutive table entries to dump
+ * @buf:	buffer to receive the formatted dump; when NULL the dump
+ *		is emitted through qdma_log_info() instead
+ * @buflen:	length of @buf in bytes
+ *
+ * Return: number of bytes written to @buf (0 when @buf is NULL) and
+ * < 0 on failure
+ *****************************************************************************/
+int eqdma_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
+		uint32_t num_regs, char *buf, uint32_t buflen)
+{
+	uint32_t total_num_regs = eqdma_config_num_regs_get();
+	struct xreg_info *config_regs  = eqdma_config_regs_get();
+	struct qdma_dev_attributes dev_cap;
+	const char *bitfield_name;
+	uint32_t i = 0, num_regs_idx = 0, k = 0, j = 0,
+			bitfield = 0, lsb = 0, msb = 31;
+	int rv = 0;
+	uint32_t reg_val;
+	uint32_t data_len = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	eqdma_get_device_attributes(dev_hndl, &dev_cap);
+
+	/* Locate reg_addr in the config register table */
+	for (i = 0; i < total_num_regs; i++) {
+		if (reg_addr == config_regs[i].addr) {
+			j = i;
+			break;
+		}
+	}
+
+	if (i == total_num_regs) {
+		qdma_log_error("%s: Register not found err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		if (buf)
+			QDMA_SNPRINTF_S(buf, buflen,
+					DEBGFS_LINE_SZ,
+					"Register not found 0x%x\n", reg_addr);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* Clamp the dump window to the end of the table */
+	num_regs_idx = (j + num_regs < total_num_regs) ?
+					(j + num_regs) : total_num_regs;
+
+	for (; j < num_regs_idx ; j++) {
+		reg_val = qdma_reg_read(dev_hndl,
+				config_regs[j].addr);
+
+		if (buf) {
+			rv = QDMA_SNPRINTF_S(buf, buflen,
+						DEBGFS_LINE_SZ,
+						"\n%-40s 0x%-7x %-#10x %-10d\n",
+						config_regs[j].name,
+						config_regs[j].addr,
+						reg_val, reg_val);
+			if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+				qdma_log_error
+					("%s: Insufficient buffer, err:%d\n",
+					__func__, -QDMA_ERR_NO_MEM);
+				return -QDMA_ERR_NO_MEM;
+			}
+			buf += rv;
+			data_len += rv;
+			buflen -= rv;
+		} else {
+			qdma_log_info("%-40s 0x%-7x %-#10x %-10d\n",
+						  config_regs[j].name,
+						  config_regs[j].addr,
+						  reg_val, reg_val);
+		}
+
+		/* If Debug Mode is not enabled and the current register
+		 * is a debug register, skip its bit-field dump. The
+		 * condition does not depend on the bit-field index, so
+		 * it is checked once per register (hoisted out of the
+		 * inner loop; behavior is unchanged).
+		 */
+		if (dev_cap.debug_mode == 0 &&
+				config_regs[j].is_debug_reg == 1)
+			continue;
+
+		for (k = 0;
+			 k < config_regs[j].num_bitfields; k++) {
+			bitfield =
+				config_regs[j].bitfields[k].field_mask;
+			/* Bug fix: take the field name from the register
+			 * being dumped (index j), not from the first
+			 * matched register (index i); otherwise every
+			 * register after the first is dumped with the
+			 * wrong field names.
+			 */
+			bitfield_name =
+				config_regs[j].bitfields[k].field_name;
+			lsb = 0;
+			msb = 31;
+
+			/* Find the span of the field; assumes the mask
+			 * is non-zero (an all-zero mask would loop past
+			 * the word) -- TODO confirm table invariants.
+			 */
+			while (!(BIT(lsb) & bitfield))
+				lsb++;
+
+			while (!(BIT(msb) & bitfield))
+				msb--;
+
+			if (msb != lsb) {
+				if (buf) {
+					rv = QDMA_SNPRINTF_S(buf, buflen,
+							DEBGFS_LINE_SZ,
+							"%-40s [%2u,%2u]   %#-10x\n",
+							bitfield_name,
+							msb, lsb,
+							(reg_val & bitfield) >>
+								lsb);
+					if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+						qdma_log_error
+							("%s: Insufficient buffer, err:%d\n",
+							__func__,
+							-QDMA_ERR_NO_MEM);
+						return -QDMA_ERR_NO_MEM;
+					}
+					buf += rv;
+					data_len += rv;
+					buflen -= rv;
+				} else {
+					qdma_log_info
+						("%-40s [%2u,%2u]   %#-10x\n",
+						bitfield_name,
+						msb, lsb,
+						(reg_val & bitfield) >> lsb);
+				}
+
+			} else {
+				if (buf) {
+					rv = QDMA_SNPRINTF_S(buf, buflen,
+							DEBGFS_LINE_SZ,
+							"%-40s [%5u]   %#-10x\n",
+							bitfield_name,
+							lsb,
+							(reg_val & bitfield) >>
+								lsb);
+					if (rv < 0 || rv > DEBGFS_LINE_SZ) {
+						qdma_log_error
+							("%s: Insufficient buffer, err:%d\n",
+							__func__,
+							-QDMA_ERR_NO_MEM);
+						return -QDMA_ERR_NO_MEM;
+					}
+					buf += rv;
+					data_len += rv;
+					buflen -= rv;
+				} else {
+					qdma_log_info
+						("%-40s [%5u]   %#-10x\n",
+						bitfield_name,
+						lsb,
+						(reg_val & bitfield) >> lsb);
+				}
+			}
+		}
+	}
+
+	return data_len;
+}
diff --git a/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.h b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.h
new file mode 100644
index 0000000000..dc5d3de312
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_access.h
@@ -0,0 +1,294 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __EQDMA_SOFT_ACCESS_H_
+#define __EQDMA_SOFT_ACCESS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "qdma_platform.h"
+
+/**
+ * enum eqdma_error_idx - EQDMA hardware error indexes, grouped by the
+ * status register block that reports them; each group ends with an
+ * *_ALL catch-all entry
+ */
+enum eqdma_error_idx {
+	/* Descriptor errors */
+	EQDMA_DSC_ERR_POISON,
+	EQDMA_DSC_ERR_UR_CA,
+	EQDMA_DSC_ERR_BCNT,
+	EQDMA_DSC_ERR_PARAM,
+	EQDMA_DSC_ERR_ADDR,
+	EQDMA_DSC_ERR_TAG,
+	EQDMA_DSC_ERR_FLR,
+	EQDMA_DSC_ERR_TIMEOUT,
+	EQDMA_DSC_ERR_DAT_POISON,
+	EQDMA_DSC_ERR_FLR_CANCEL,
+	EQDMA_DSC_ERR_DMA,
+	EQDMA_DSC_ERR_DSC,
+	EQDMA_DSC_ERR_RQ_CANCEL,
+	EQDMA_DSC_ERR_DBE,
+	EQDMA_DSC_ERR_SBE,
+	EQDMA_DSC_ERR_ALL,
+
+	/* TRQ Errors */
+	EQDMA_TRQ_ERR_CSR_UNMAPPED,
+	EQDMA_TRQ_ERR_VF_ACCESS,
+	EQDMA_TRQ_ERR_TCP_CSR_TIMEOUT,
+	EQDMA_TRQ_ERR_QSPC_UNMAPPED,
+	EQDMA_TRQ_ERR_QID_RANGE,
+	EQDMA_TRQ_ERR_TCP_QSPC_TIMEOUT,
+	EQDMA_TRQ_ERR_ALL,
+
+	/* C2H Errors */
+	EQDMA_ST_C2H_ERR_MTY_MISMATCH,
+	EQDMA_ST_C2H_ERR_LEN_MISMATCH,
+	EQDMA_ST_C2H_ERR_SH_CMPT_DSC,
+	EQDMA_ST_C2H_ERR_QID_MISMATCH,
+	EQDMA_ST_C2H_ERR_DESC_RSP_ERR,
+	EQDMA_ST_C2H_ERR_ENG_WPL_DATA_PAR_ERR,
+	EQDMA_ST_C2H_ERR_MSI_INT_FAIL,
+	EQDMA_ST_C2H_ERR_ERR_DESC_CNT,
+	EQDMA_ST_C2H_ERR_PORTID_CTXT_MISMATCH,
+	EQDMA_ST_C2H_ERR_CMPT_INV_Q_ERR,
+	EQDMA_ST_C2H_ERR_CMPT_QFULL_ERR,
+	EQDMA_ST_C2H_ERR_CMPT_CIDX_ERR,
+	EQDMA_ST_C2H_ERR_CMPT_PRTY_ERR,
+	EQDMA_ST_C2H_ERR_AVL_RING_DSC,
+	EQDMA_ST_C2H_ERR_HDR_ECC_UNC,
+	EQDMA_ST_C2H_ERR_HDR_ECC_COR,
+	EQDMA_ST_C2H_ERR_ALL,
+
+	/* Fatal Errors */
+	EQDMA_ST_FATAL_ERR_MTY_MISMATCH,
+	EQDMA_ST_FATAL_ERR_LEN_MISMATCH,
+	EQDMA_ST_FATAL_ERR_QID_MISMATCH,
+	EQDMA_ST_FATAL_ERR_TIMER_FIFO_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_PFCH_II_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_CMPT_CTXT_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_PFCH_CTXT_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_DESC_REQ_FIFO_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_INT_CTXT_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_CMPT_COAL_DATA_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_CMPT_FIFO_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_QID_FIFO_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_PAYLOAD_FIFO_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_WPL_DATA_PAR,
+	EQDMA_ST_FATAL_ERR_AVL_RING_FIFO_RAM_RDBE,
+	EQDMA_ST_FATAL_ERR_HDR_ECC_UNC,
+	EQDMA_ST_FATAL_ERR_ALL,
+
+	/* H2C Errors */
+	EQDMA_ST_H2C_ERR_ZERO_LEN_DESC,
+	EQDMA_ST_H2C_ERR_SDI_MRKR_REQ_MOP,
+	EQDMA_ST_H2C_ERR_NO_DMA_DSC,
+	EQDMA_ST_H2C_ERR_SBE,
+	EQDMA_ST_H2C_ERR_DBE,
+	EQDMA_ST_H2C_ERR_PAR,
+	EQDMA_ST_H2C_ERR_ALL,
+
+	/* Single bit errors -- SBE_1 status register group
+	 * (NOTE(review): grouping inferred from the EQDMA_SBE_1_ prefix;
+	 * confirm against the SBE_1 register in eqdma_soft_reg.h)
+	 */
+	EQDMA_SBE_1_ERR_RC_RRQ_EVEN_RAM,
+	EQDMA_SBE_1_ERR_TAG_ODD_RAM,
+	EQDMA_SBE_1_ERR_TAG_EVEN_RAM,
+	EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_0,
+	EQDMA_SBE_1_ERR_PFCH_CTXT_CAM_RAM_1,
+	EQDMA_SBE_1_ERR_ALL,
+
+	/* Single bit errors -- primary SBE status register group */
+	EQDMA_SBE_ERR_MI_H2C0_DAT,
+	EQDMA_SBE_ERR_MI_H2C1_DAT,
+	EQDMA_SBE_ERR_MI_H2C2_DAT,
+	EQDMA_SBE_ERR_MI_H2C3_DAT,
+	EQDMA_SBE_ERR_MI_C2H0_DAT,
+	EQDMA_SBE_ERR_MI_C2H1_DAT,
+	EQDMA_SBE_ERR_MI_C2H2_DAT,
+	EQDMA_SBE_ERR_MI_C2H3_DAT,
+	EQDMA_SBE_ERR_H2C_RD_BRG_DAT,
+	EQDMA_SBE_ERR_H2C_WR_BRG_DAT,
+	EQDMA_SBE_ERR_C2H_RD_BRG_DAT,
+	EQDMA_SBE_ERR_C2H_WR_BRG_DAT,
+	EQDMA_SBE_ERR_FUNC_MAP,
+	EQDMA_SBE_ERR_DSC_HW_CTXT,
+	EQDMA_SBE_ERR_DSC_CRD_RCV,
+	EQDMA_SBE_ERR_DSC_SW_CTXT,
+	EQDMA_SBE_ERR_DSC_CPLI,
+	EQDMA_SBE_ERR_DSC_CPLD,
+	EQDMA_SBE_ERR_MI_TL_SLV_FIFO_RAM,
+	EQDMA_SBE_ERR_TIMER_FIFO_RAM,
+	EQDMA_SBE_ERR_QID_FIFO_RAM,
+	EQDMA_SBE_ERR_WRB_COAL_DATA_RAM,
+	EQDMA_SBE_ERR_INT_CTXT_RAM,
+	EQDMA_SBE_ERR_DESC_REQ_FIFO_RAM,
+	EQDMA_SBE_ERR_PFCH_CTXT_RAM,
+	EQDMA_SBE_ERR_WRB_CTXT_RAM,
+	EQDMA_SBE_ERR_PFCH_LL_RAM,
+	EQDMA_SBE_ERR_PEND_FIFO_RAM,
+	EQDMA_SBE_ERR_RC_RRQ_ODD_RAM,
+	EQDMA_SBE_ERR_ALL,
+
+	/* Double bit errors -- DBE_1 status register group
+	 * (NOTE(review): grouping inferred from the EQDMA_DBE_1_ prefix)
+	 */
+	EQDMA_DBE_1_ERR_RC_RRQ_EVEN_RAM,
+	EQDMA_DBE_1_ERR_TAG_ODD_RAM,
+	EQDMA_DBE_1_ERR_TAG_EVEN_RAM,
+	EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_0,
+	EQDMA_DBE_1_ERR_PFCH_CTXT_CAM_RAM_1,
+	EQDMA_DBE_1_ERR_ALL,
+
+	/* Double bit errors -- primary DBE status register group */
+	EQDMA_DBE_ERR_MI_H2C0_DAT,
+	EQDMA_DBE_ERR_MI_H2C1_DAT,
+	EQDMA_DBE_ERR_MI_H2C2_DAT,
+	EQDMA_DBE_ERR_MI_H2C3_DAT,
+	EQDMA_DBE_ERR_MI_C2H0_DAT,
+	EQDMA_DBE_ERR_MI_C2H1_DAT,
+	EQDMA_DBE_ERR_MI_C2H2_DAT,
+	EQDMA_DBE_ERR_MI_C2H3_DAT,
+	EQDMA_DBE_ERR_H2C_RD_BRG_DAT,
+	EQDMA_DBE_ERR_H2C_WR_BRG_DAT,
+	EQDMA_DBE_ERR_C2H_RD_BRG_DAT,
+	EQDMA_DBE_ERR_C2H_WR_BRG_DAT,
+	EQDMA_DBE_ERR_FUNC_MAP,
+	EQDMA_DBE_ERR_DSC_HW_CTXT,
+	EQDMA_DBE_ERR_DSC_CRD_RCV,
+	EQDMA_DBE_ERR_DSC_SW_CTXT,
+	EQDMA_DBE_ERR_DSC_CPLI,
+	EQDMA_DBE_ERR_DSC_CPLD,
+	EQDMA_DBE_ERR_MI_TL_SLV_FIFO_RAM,
+	EQDMA_DBE_ERR_TIMER_FIFO_RAM,
+	EQDMA_DBE_ERR_QID_FIFO_RAM,
+	EQDMA_DBE_ERR_WRB_COAL_DATA_RAM,
+	EQDMA_DBE_ERR_INT_CTXT_RAM,
+	EQDMA_DBE_ERR_DESC_REQ_FIFO_RAM,
+	EQDMA_DBE_ERR_PFCH_CTXT_RAM,
+	EQDMA_DBE_ERR_WRB_CTXT_RAM,
+	EQDMA_DBE_ERR_PFCH_LL_RAM,
+	EQDMA_DBE_ERR_PEND_FIFO_RAM,
+	EQDMA_DBE_ERR_RC_RRQ_ODD_RAM,
+	EQDMA_DBE_ERR_ALL,
+
+	/* Total number of error indexes */
+	EQDMA_ERRS_ALL
+};
+
+/* Per-error descriptor used by the error-processing tables.
+ * NOTE(review): field roles below are inferred from the field names;
+ * confirm against the tables in eqdma_soft_access.c.
+ */
+struct eqdma_hw_err_info {
+	/* Error index (one of enum eqdma_error_idx) */
+	enum eqdma_error_idx idx;
+	/* Human-readable error name for logging */
+	const char *err_name;
+	/* Address of the error mask (enable) register */
+	uint32_t mask_reg_addr;
+	/* Address of the error status register */
+	uint32_t stat_reg_addr;
+	/* Bit mask of this error in the leaf status register */
+	uint32_t leaf_err_mask;
+	/* Bit mask of this error group in the global status register */
+	uint32_t global_err_mask;
+	/* Handler invoked when this error is detected */
+	void (*eqdma_hw_err_process)(void *dev_hndl);
+};
+
+#define EQDMA_OFFSET_VF_VERSION           0x5014
+#define EQDMA_OFFSET_VF_USER_BAR		  0x5018
+
+#define EQDMA_OFFSET_MBOX_BASE_PF         0x22400
+#define EQDMA_OFFSET_MBOX_BASE_VF         0x5000
+
+#define EQDMA_COMPL_CTXT_BADDR_HIGH_H_MASK             GENMASK_ULL(63, 38)
+#define EQDMA_COMPL_CTXT_BADDR_HIGH_L_MASK             GENMASK_ULL(37, 6)
+#define EQDMA_COMPL_CTXT_BADDR_LOW_MASK                GENMASK_ULL(5, 2)
+
+int eqdma_init_ctxt_memory(void *dev_hndl);
+
+int eqdma_get_version(void *dev_hndl, uint8_t is_vf,
+		struct qdma_hw_version_info *version_info);
+
+int eqdma_sw_ctx_conf(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
+			struct qdma_descq_sw_ctxt *ctxt,
+			enum qdma_hw_access_type access_type);
+
+int eqdma_hw_ctx_conf(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
+				struct qdma_descq_hw_ctxt *ctxt,
+				enum qdma_hw_access_type access_type);
+
+int eqdma_credit_ctx_conf(void *dev_hndl, uint8_t c2h,
+		uint16_t hw_qid, struct qdma_descq_credit_ctxt *ctxt,
+		enum qdma_hw_access_type access_type);
+
+int eqdma_pfetch_ctx_conf(void *dev_hndl, uint16_t hw_qid,
+			struct qdma_descq_prefetch_ctxt *ctxt,
+			enum qdma_hw_access_type access_type);
+
+int eqdma_cmpt_ctx_conf(void *dev_hndl, uint16_t hw_qid,
+			struct qdma_descq_cmpt_ctxt *ctxt,
+			enum qdma_hw_access_type access_type);
+
+int eqdma_indirect_intr_ctx_conf(void *dev_hndl, uint16_t ring_index,
+			struct qdma_indirect_intr_ctxt *ctxt,
+			enum qdma_hw_access_type access_type);
+
+int eqdma_dump_config_regs(void *dev_hndl, uint8_t is_vf,
+		char *buf, uint32_t buflen);
+
+int eqdma_dump_intr_context(void *dev_hndl,
+		struct qdma_indirect_intr_ctxt *intr_ctx,
+		int ring_index,
+		char *buf, uint32_t buflen);
+
+int eqdma_dump_queue_context(void *dev_hndl,
+		uint8_t st,
+		enum qdma_dev_q_type q_type,
+		struct qdma_descq_context *ctxt_data,
+		char *buf, uint32_t buflen);
+
+uint32_t eqdma_reg_dump_buf_len(void);
+
+int eqdma_context_buf_len(uint8_t st,
+		enum qdma_dev_q_type q_type, uint32_t *buflen);
+
+int eqdma_hw_error_process(void *dev_hndl);
+const char *eqdma_hw_get_error_name(uint32_t err_idx);
+int eqdma_hw_error_enable(void *dev_hndl, uint32_t err_idx);
+
+int eqdma_read_dump_queue_context(void *dev_hndl,
+		uint16_t qid_hw,
+		uint8_t st,
+		enum qdma_dev_q_type q_type,
+		char *buf, uint32_t buflen);
+
+int eqdma_get_device_attributes(void *dev_hndl,
+		struct qdma_dev_attributes *dev_info);
+
+int eqdma_get_user_bar(void *dev_hndl, uint8_t is_vf,
+		uint8_t func_id, uint8_t *user_bar);
+
+int eqdma_dump_config_reg_list(void *dev_hndl,
+		uint32_t total_regs,
+		struct qdma_reg_data *reg_list,
+		char *buf, uint32_t buflen);
+
+int eqdma_read_reg_list(void *dev_hndl, uint8_t is_vf,
+		uint16_t reg_rd_group,
+		uint16_t *total_regs,
+		struct qdma_reg_data *reg_list);
+
+int eqdma_set_default_global_csr(void *dev_hndl);
+
+int eqdma_global_csr_conf(void *dev_hndl, uint8_t index, uint8_t count,
+				uint32_t *csr_val,
+				enum qdma_global_csr_type csr_type,
+				enum qdma_hw_access_type access_type);
+
+int eqdma_global_writeback_interval_conf(void *dev_hndl,
+				enum qdma_wrb_interval *wb_int,
+				enum qdma_hw_access_type access_type);
+
+int eqdma_mm_channel_conf(void *dev_hndl, uint8_t channel, uint8_t is_c2h,
+				uint8_t enable);
+
+int eqdma_dump_reg_info(void *dev_hndl, uint32_t reg_addr,
+			uint32_t num_regs, char *buf, uint32_t buflen);
+
+uint32_t eqdma_get_config_num_regs(void);
+
+struct xreg_info *eqdma_get_config_regs(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __EQDMA_SOFT_ACCESS_H_ */
diff --git a/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg.h b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg.h
new file mode 100644
index 0000000000..446079a8fc
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg.h
@@ -0,0 +1,1211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __EQDMA_SOFT_REG_H
+#define __EQDMA_SOFT_REG_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "qdma_platform.h"
+
+/*
+ * Portable bit-manipulation helpers.  Conflicting definitions that may
+ * leak in from system or platform headers (e.g. CHAR_BIT from
+ * <limits.h>) are deliberately replaced so the register field masks in
+ * this file always expand the same way on every platform.
+ */
+#ifdef CHAR_BIT
+#undef CHAR_BIT
+#endif
+#define CHAR_BIT 8
+
+#ifdef BIT
+#undef BIT
+#endif
+/* Single-bit mask; unsigned so that BIT(31) avoids signed-shift UB. */
+#define BIT(n)                  (1u << (n))
+
+#ifdef BITS_PER_BYTE
+#undef BITS_PER_BYTE
+#endif
+#define BITS_PER_BYTE           CHAR_BIT
+
+#ifdef BITS_PER_LONG
+#undef BITS_PER_LONG
+#endif
+/* NOTE: despite the name this is the bit width of uint32_t (32), not of
+ * the C 'long' type; GENMASK() below is a 32-bit mask builder.
+ */
+#define BITS_PER_LONG           (sizeof(uint32_t) * BITS_PER_BYTE)
+
+#ifdef BITS_PER_LONG_LONG
+#undef BITS_PER_LONG_LONG
+#endif
+#define BITS_PER_LONG_LONG      (sizeof(uint64_t) * BITS_PER_BYTE)
+
+#ifdef GENMASK
+#undef GENMASK
+#endif
+/* 32-bit mask with bits h..l (inclusive) set, e.g. GENMASK(6, 4) is
+ * 0x70.  The explicit 'U' suffix keeps both shifts in unsigned
+ * arithmetic.
+ */
+#define GENMASK(h, l) \
+	((0xFFFFFFFFU << (l)) & (0xFFFFFFFFU >> (BITS_PER_LONG - 1 - (h))))
+
+#ifdef GENMASK_ULL
+#undef GENMASK_ULL
+#endif
+/* 64-bit variant of GENMASK().  The explicit 'ULL' suffix pins the
+ * constant to a 64-bit unsigned type on every platform instead of
+ * relying on the compiler's implicit choice, and silences the
+ * "integer constant is so large that it is unsigned" warning emitted
+ * by older compilers for the unsuffixed form.
+ */
+#define GENMASK_ULL(h, l) \
+	((0xFFFFFFFFFFFFFFFFULL << (l)) & \
+			(0xFFFFFFFFFFFFFFFFULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+/* Line width used when formatting register-dump text (presumably 80
+ * visible characters plus the NUL terminator).
+ * NOTE(review): name looks like a typo for DEBUGFS_LINE_SZ; kept as-is
+ * since other files in the series reference it.
+ */
+#define DEBGFS_LINE_SZ			(81)
+
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#endif
+/* Element count of a true array; delegates to DPDK's RTE_DIM(). */
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+/* Accessors for the static config register table (count + entries).
+ * NOTE(review): eqdma_soft_access.h declares eqdma_get_config_num_regs()
+ * and eqdma_get_config_regs() with the same apparent purpose — confirm
+ * whether both pairs are needed and consolidate the naming.
+ */
+uint32_t eqdma_config_num_regs_get(void);
+struct xreg_info *eqdma_config_regs_get(void);
+/*
+ * EQDMA register map: each EQDMA_*_ADDR macro is a register offset,
+ * followed by its per-field bit masks built with GENMASK()/BIT().
+ */
+/* Config block (CFG_BLK): identification and PCIe/AXI configuration. */
+#define EQDMA_CFG_BLK_IDENTIFIER_ADDR                      0x00
+#define CFG_BLK_IDENTIFIER_MASK                           GENMASK(31, 20)
+#define CFG_BLK_IDENTIFIER_1_MASK                         GENMASK(19, 16)
+#define CFG_BLK_IDENTIFIER_RSVD_1_MASK                     GENMASK(15, 8)
+#define CFG_BLK_IDENTIFIER_VERSION_MASK                    GENMASK(7, 0)
+#define EQDMA_CFG_BLK_PCIE_MAX_PLD_SIZE_ADDR               0x08
+#define CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_1_MASK              GENMASK(31, 7)
+#define CFG_BLK_PCIE_MAX_PLD_SIZE_PROG_MASK                GENMASK(6, 4)
+#define CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_2_MASK              BIT(3)
+#define CFG_BLK_PCIE_MAX_PLD_SIZE_ISSUED_MASK              GENMASK(2, 0)
+#define EQDMA_CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ADDR          0x0C
+#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_1_MASK         GENMASK(31, 7)
+#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_PROG_MASK           GENMASK(6, 4)
+#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_2_MASK         BIT(3)
+#define CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ISSUED_MASK         GENMASK(2, 0)
+#define EQDMA_CFG_BLK_SYSTEM_ID_ADDR                       0x10
+#define CFG_BLK_SYSTEM_ID_RSVD_1_MASK                      GENMASK(31, 17)
+#define CFG_BLK_SYSTEM_ID_INST_TYPE_MASK                   BIT(16)
+#define CFG_BLK_SYSTEM_ID_MASK                            GENMASK(15, 0)
+/* NOTE(review): 0x014 breaks the two-digit style (0x10/0x18) used by
+ * the neighbouring offsets — value is the same, style only.
+ */
+#define EQDMA_CFG_BLK_MSIX_ENABLE_ADDR                     0x014
+#define CFG_BLK_MSIX_ENABLE_MASK                          GENMASK(31, 0)
+#define EQDMA_CFG_PCIE_DATA_WIDTH_ADDR                     0x18
+#define CFG_PCIE_DATA_WIDTH_RSVD_1_MASK                    GENMASK(31, 3)
+#define CFG_PCIE_DATA_WIDTH_DATAPATH_MASK                  GENMASK(2, 0)
+#define EQDMA_CFG_PCIE_CTL_ADDR                            0x1C
+#define CFG_PCIE_CTL_RSVD_1_MASK                           GENMASK(31, 18)
+#define CFG_PCIE_CTL_MGMT_AXIL_CTRL_MASK                   GENMASK(17, 16)
+#define CFG_PCIE_CTL_RSVD_2_MASK                           GENMASK(15, 2)
+#define CFG_PCIE_CTL_RRQ_DISABLE_MASK                      BIT(1)
+#define CFG_PCIE_CTL_RELAXED_ORDERING_MASK                 BIT(0)
+#define EQDMA_CFG_BLK_MSI_ENABLE_ADDR                      0x20
+#define CFG_BLK_MSI_ENABLE_MASK                           GENMASK(31, 0)
+#define EQDMA_CFG_AXI_USER_MAX_PLD_SIZE_ADDR               0x40
+#define CFG_AXI_USER_MAX_PLD_SIZE_RSVD_1_MASK              GENMASK(31, 7)
+#define CFG_AXI_USER_MAX_PLD_SIZE_ISSUED_MASK              GENMASK(6, 4)
+#define CFG_AXI_USER_MAX_PLD_SIZE_RSVD_2_MASK              BIT(3)
+#define CFG_AXI_USER_MAX_PLD_SIZE_PROG_MASK                GENMASK(2, 0)
+#define EQDMA_CFG_AXI_USER_MAX_READ_REQ_SIZE_ADDR          0x44
+#define CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_1_MASK         GENMASK(31, 7)
+#define CFG_AXI_USER_MAX_READ_REQ_SIZE_USISSUED_MASK       GENMASK(6, 4)
+#define CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_2_MASK         BIT(3)
+#define CFG_AXI_USER_MAX_READ_REQ_SIZE_USPROG_MASK         GENMASK(2, 0)
+#define EQDMA_CFG_BLK_MISC_CTL_ADDR                        0x4C
+#define CFG_BLK_MISC_CTL_RSVD_1_MASK                       GENMASK(31, 24)
+#define CFG_BLK_MISC_CTL_10B_TAG_EN_MASK                   BIT(23)
+#define CFG_BLK_MISC_CTL_RSVD_2_MASK                       BIT(22)
+#define CFG_BLK_MISC_CTL_AXI_WBK_MASK                      BIT(21)
+#define CFG_BLK_MISC_CTL_AXI_DSC_MASK                      BIT(20)
+#define CFG_BLK_MISC_CTL_NUM_TAG_MASK                      GENMASK(19, 8)
+#define CFG_BLK_MISC_CTL_RSVD_3_MASK                       GENMASK(7, 5)
+#define CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK       GENMASK(4, 0)
+#define EQDMA_CFG_PL_CRED_CTL_ADDR                         0x68
+#define CFG_PL_CRED_CTL_RSVD_1_MASK                        GENMASK(31, 5)
+#define CFG_PL_CRED_CTL_SLAVE_CRD_RLS_MASK                 BIT(4)
+#define CFG_PL_CRED_CTL_RSVD_2_MASK                        GENMASK(3, 1)
+#define CFG_PL_CRED_CTL_MASTER_CRD_RST_MASK                BIT(0)
+#define EQDMA_CFG_BLK_SCRATCH_ADDR                         0x80
+#define CFG_BLK_SCRATCH_MASK                              GENMASK(31, 0)
+#define EQDMA_CFG_GIC_ADDR                                 0xA0
+#define CFG_GIC_RSVD_1_MASK                                GENMASK(31, 1)
+#define CFG_GIC_GIC_IRQ_MASK                               BIT(0)
+/* ECC RAM error registers: single-bit (SBE) and double-bit (DBE) error
+ * mask (_MSK) and status (_STS) register pairs.
+ */
+#define EQDMA_RAM_SBE_MSK_1_A_ADDR                         0xE0
+#define RAM_SBE_MSK_1_A_MASK                          GENMASK(31, 0)
+#define EQDMA_RAM_SBE_STS_1_A_ADDR                         0xE4
+#define RAM_SBE_STS_1_A_RSVD_MASK                          GENMASK(31, 5)
+#define RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK           BIT(4)
+#define RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK           BIT(3)
+#define RAM_SBE_STS_1_A_TAG_EVEN_RAM_MASK                  BIT(2)
+#define RAM_SBE_STS_1_A_TAG_ODD_RAM_MASK                   BIT(1)
+#define RAM_SBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK               BIT(0)
+#define EQDMA_RAM_DBE_MSK_1_A_ADDR                         0xE8
+#define RAM_DBE_MSK_1_A_MASK                          GENMASK(31, 0)
+#define EQDMA_RAM_DBE_STS_1_A_ADDR                         0xEC
+#define RAM_DBE_STS_1_A_RSVD_MASK                          GENMASK(31, 5)
+#define RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK           BIT(4)
+#define RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK           BIT(3)
+#define RAM_DBE_STS_1_A_TAG_EVEN_RAM_MASK                  BIT(2)
+#define RAM_DBE_STS_1_A_TAG_ODD_RAM_MASK                   BIT(1)
+#define RAM_DBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK               BIT(0)
+#define EQDMA_RAM_SBE_MSK_A_ADDR                           0xF0
+#define RAM_SBE_MSK_A_MASK                            GENMASK(31, 0)
+#define EQDMA_RAM_SBE_STS_A_ADDR                           0xF4
+#define RAM_SBE_STS_A_RC_RRQ_ODD_RAM_MASK                  BIT(31)
+#define RAM_SBE_STS_A_PEND_FIFO_RAM_MASK                   BIT(30)
+#define RAM_SBE_STS_A_PFCH_LL_RAM_MASK                     BIT(29)
+#define RAM_SBE_STS_A_WRB_CTXT_RAM_MASK                    BIT(28)
+#define RAM_SBE_STS_A_PFCH_CTXT_RAM_MASK                   BIT(27)
+#define RAM_SBE_STS_A_DESC_REQ_FIFO_RAM_MASK               BIT(26)
+#define RAM_SBE_STS_A_INT_CTXT_RAM_MASK                    BIT(25)
+#define RAM_SBE_STS_A_WRB_COAL_DATA_RAM_MASK               BIT(24)
+#define RAM_SBE_STS_A_QID_FIFO_RAM_MASK                    BIT(23)
+#define RAM_SBE_STS_A_TIMER_FIFO_RAM_MASK                  GENMASK(22, 19)
+#define RAM_SBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK              BIT(18)
+#define RAM_SBE_STS_A_DSC_CPLD_MASK                        BIT(17)
+#define RAM_SBE_STS_A_DSC_CPLI_MASK                        BIT(16)
+#define RAM_SBE_STS_A_DSC_SW_CTXT_MASK                     BIT(15)
+#define RAM_SBE_STS_A_DSC_CRD_RCV_MASK                     BIT(14)
+#define RAM_SBE_STS_A_DSC_HW_CTXT_MASK                     BIT(13)
+#define RAM_SBE_STS_A_FUNC_MAP_MASK                        BIT(12)
+#define RAM_SBE_STS_A_C2H_WR_BRG_DAT_MASK                  BIT(11)
+#define RAM_SBE_STS_A_C2H_RD_BRG_DAT_MASK                  BIT(10)
+#define RAM_SBE_STS_A_H2C_WR_BRG_DAT_MASK                  BIT(9)
+#define RAM_SBE_STS_A_H2C_RD_BRG_DAT_MASK                  BIT(8)
+#define RAM_SBE_STS_A_MI_C2H3_DAT_MASK                     BIT(7)
+#define RAM_SBE_STS_A_MI_C2H2_DAT_MASK                     BIT(6)
+#define RAM_SBE_STS_A_MI_C2H1_DAT_MASK                     BIT(5)
+#define RAM_SBE_STS_A_MI_C2H0_DAT_MASK                     BIT(4)
+#define RAM_SBE_STS_A_MI_H2C3_DAT_MASK                     BIT(3)
+#define RAM_SBE_STS_A_MI_H2C2_DAT_MASK                     BIT(2)
+#define RAM_SBE_STS_A_MI_H2C1_DAT_MASK                     BIT(1)
+#define RAM_SBE_STS_A_MI_H2C0_DAT_MASK                     BIT(0)
+#define EQDMA_RAM_DBE_MSK_A_ADDR                           0xF8
+#define RAM_DBE_MSK_A_MASK                            GENMASK(31, 0)
+/* DBE status fields mirror the SBE layout above bit-for-bit. */
+#define EQDMA_RAM_DBE_STS_A_ADDR                           0xFC
+#define RAM_DBE_STS_A_RC_RRQ_ODD_RAM_MASK                  BIT(31)
+#define RAM_DBE_STS_A_PEND_FIFO_RAM_MASK                   BIT(30)
+#define RAM_DBE_STS_A_PFCH_LL_RAM_MASK                     BIT(29)
+#define RAM_DBE_STS_A_WRB_CTXT_RAM_MASK                    BIT(28)
+#define RAM_DBE_STS_A_PFCH_CTXT_RAM_MASK                   BIT(27)
+#define RAM_DBE_STS_A_DESC_REQ_FIFO_RAM_MASK               BIT(26)
+#define RAM_DBE_STS_A_INT_CTXT_RAM_MASK                    BIT(25)
+#define RAM_DBE_STS_A_WRB_COAL_DATA_RAM_MASK               BIT(24)
+#define RAM_DBE_STS_A_QID_FIFO_RAM_MASK                    BIT(23)
+#define RAM_DBE_STS_A_TIMER_FIFO_RAM_MASK                  GENMASK(22, 19)
+#define RAM_DBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK              BIT(18)
+#define RAM_DBE_STS_A_DSC_CPLD_MASK                        BIT(17)
+#define RAM_DBE_STS_A_DSC_CPLI_MASK                        BIT(16)
+#define RAM_DBE_STS_A_DSC_SW_CTXT_MASK                     BIT(15)
+#define RAM_DBE_STS_A_DSC_CRD_RCV_MASK                     BIT(14)
+#define RAM_DBE_STS_A_DSC_HW_CTXT_MASK                     BIT(13)
+#define RAM_DBE_STS_A_FUNC_MAP_MASK                        BIT(12)
+#define RAM_DBE_STS_A_C2H_WR_BRG_DAT_MASK                  BIT(11)
+#define RAM_DBE_STS_A_C2H_RD_BRG_DAT_MASK                  BIT(10)
+#define RAM_DBE_STS_A_H2C_WR_BRG_DAT_MASK                  BIT(9)
+#define RAM_DBE_STS_A_H2C_RD_BRG_DAT_MASK                  BIT(8)
+#define RAM_DBE_STS_A_MI_C2H3_DAT_MASK                     BIT(7)
+#define RAM_DBE_STS_A_MI_C2H2_DAT_MASK                     BIT(6)
+#define RAM_DBE_STS_A_MI_C2H1_DAT_MASK                     BIT(5)
+#define RAM_DBE_STS_A_MI_C2H0_DAT_MASK                     BIT(4)
+#define RAM_DBE_STS_A_MI_H2C3_DAT_MASK                     BIT(3)
+#define RAM_DBE_STS_A_MI_H2C2_DAT_MASK                     BIT(2)
+#define RAM_DBE_STS_A_MI_H2C1_DAT_MASK                     BIT(1)
+#define RAM_DBE_STS_A_MI_H2C0_DAT_MASK                     BIT(0)
+/* GLBL2: identification, channel capability and hardware debug
+ * registers.
+ */
+#define EQDMA_GLBL2_IDENTIFIER_ADDR                        0x100
+#define GLBL2_IDENTIFIER_MASK                             GENMASK(31, 8)
+#define GLBL2_IDENTIFIER_VERSION_MASK                      GENMASK(7, 0)
+#define EQDMA_GLBL2_CHANNEL_INST_ADDR                      0x114
+#define GLBL2_CHANNEL_INST_RSVD_1_MASK                     GENMASK(31, 18)
+#define GLBL2_CHANNEL_INST_C2H_ST_MASK                     BIT(17)
+#define GLBL2_CHANNEL_INST_H2C_ST_MASK                     BIT(16)
+#define GLBL2_CHANNEL_INST_RSVD_2_MASK                     GENMASK(15, 12)
+#define GLBL2_CHANNEL_INST_C2H_ENG_MASK                    GENMASK(11, 8)
+#define GLBL2_CHANNEL_INST_RSVD_3_MASK                     GENMASK(7, 4)
+#define GLBL2_CHANNEL_INST_H2C_ENG_MASK                    GENMASK(3, 0)
+#define EQDMA_GLBL2_CHANNEL_MDMA_ADDR                      0x118
+#define GLBL2_CHANNEL_MDMA_RSVD_1_MASK                     GENMASK(31, 18)
+#define GLBL2_CHANNEL_MDMA_C2H_ST_MASK                     BIT(17)
+#define GLBL2_CHANNEL_MDMA_H2C_ST_MASK                     BIT(16)
+#define GLBL2_CHANNEL_MDMA_RSVD_2_MASK                     GENMASK(15, 12)
+#define GLBL2_CHANNEL_MDMA_C2H_ENG_MASK                    GENMASK(11, 8)
+#define GLBL2_CHANNEL_MDMA_RSVD_3_MASK                     GENMASK(7, 4)
+#define GLBL2_CHANNEL_MDMA_H2C_ENG_MASK                    GENMASK(3, 0)
+#define EQDMA_GLBL2_CHANNEL_STRM_ADDR                      0x11C
+#define GLBL2_CHANNEL_STRM_RSVD_1_MASK                     GENMASK(31, 18)
+#define GLBL2_CHANNEL_STRM_C2H_ST_MASK                     BIT(17)
+#define GLBL2_CHANNEL_STRM_H2C_ST_MASK                     BIT(16)
+#define GLBL2_CHANNEL_STRM_RSVD_2_MASK                     GENMASK(15, 12)
+#define GLBL2_CHANNEL_STRM_C2H_ENG_MASK                    GENMASK(11, 8)
+#define GLBL2_CHANNEL_STRM_RSVD_3_MASK                     GENMASK(7, 4)
+#define GLBL2_CHANNEL_STRM_H2C_ENG_MASK                    GENMASK(3, 0)
+#define EQDMA_GLBL2_CHANNEL_CAP_ADDR                       0x120
+#define GLBL2_CHANNEL_CAP_RSVD_1_MASK                      GENMASK(31, 12)
+#define GLBL2_CHANNEL_CAP_MULTIQ_MAX_MASK                  GENMASK(11, 0)
+#define EQDMA_GLBL2_CHANNEL_PASID_CAP_ADDR                 0x128
+#define GLBL2_CHANNEL_PASID_CAP_RSVD_1_MASK                GENMASK(31, 2)
+#define GLBL2_CHANNEL_PASID_CAP_BRIDGEEN_MASK              BIT(1)
+#define GLBL2_CHANNEL_PASID_CAP_DMAEN_MASK                 BIT(0)
+#define EQDMA_GLBL2_SYSTEM_ID_ADDR                         0x130
+#define GLBL2_SYSTEM_ID_RSVD_1_MASK                        GENMASK(31, 16)
+#define GLBL2_SYSTEM_ID_MASK                              GENMASK(15, 0)
+#define EQDMA_GLBL2_MISC_CAP_ADDR                          0x134
+#define GLBL2_MISC_CAP_MASK                               GENMASK(31, 0)
+/* GLBL2 debug/status registers: PCIe request queue, AXI-MM write/read
+ * channels and fabric interfaces.
+ */
+#define EQDMA_GLBL2_DBG_PCIE_RQ0_ADDR                      0x1B8
+#define GLBL2_PCIE_RQ0_NPH_AVL_MASK                    GENMASK(31, 20)
+#define GLBL2_PCIE_RQ0_RCB_AVL_MASK                    GENMASK(19, 9)
+#define GLBL2_PCIE_RQ0_SLV_RD_CREDS_MASK               GENMASK(8, 2)
+#define GLBL2_PCIE_RQ0_TAG_EP_MASK                     GENMASK(1, 0)
+#define EQDMA_GLBL2_DBG_PCIE_RQ1_ADDR                      0x1BC
+#define GLBL2_PCIE_RQ1_RSVD_1_MASK                     GENMASK(31, 21)
+#define GLBL2_PCIE_RQ1_TAG_FL_MASK                     GENMASK(20, 19)
+#define GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_FL_MASK        BIT(18)
+#define GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_EP_MASK        BIT(17)
+#define GLBL2_PCIE_RQ1_RQ_FIFO_EP_MASK                 BIT(16)
+#define GLBL2_PCIE_RQ1_RQ_FIFO_FL_MASK                 BIT(15)
+#define GLBL2_PCIE_RQ1_TLPSM_MASK                      GENMASK(14, 12)
+#define GLBL2_PCIE_RQ1_TLPSM512_MASK                   GENMASK(11, 9)
+#define GLBL2_PCIE_RQ1_RREQ_RCB_OK_MASK                BIT(8)
+#define GLBL2_PCIE_RQ1_RREQ0_SLV_MASK                  BIT(7)
+#define GLBL2_PCIE_RQ1_RREQ0_VLD_MASK                  BIT(6)
+#define GLBL2_PCIE_RQ1_RREQ0_RDY_MASK                  BIT(5)
+#define GLBL2_PCIE_RQ1_RREQ1_SLV_MASK                  BIT(4)
+#define GLBL2_PCIE_RQ1_RREQ1_VLD_MASK                  BIT(3)
+#define GLBL2_PCIE_RQ1_RREQ1_RDY_MASK                  BIT(2)
+#define GLBL2_PCIE_RQ1_WTLP_REQ_MASK                   BIT(1)
+#define GLBL2_PCIE_RQ1_WTLP_STRADDLE_MASK              BIT(0)
+#define EQDMA_GLBL2_DBG_AXIMM_WR0_ADDR                     0x1C0
+#define GLBL2_AXIMM_WR0_RSVD_1_MASK                    GENMASK(31, 27)
+#define GLBL2_AXIMM_WR0_WR_REQ_MASK                    BIT(26)
+#define GLBL2_AXIMM_WR0_WR_CHN_MASK                    GENMASK(25, 23)
+#define GLBL2_AXIMM_WR0_WTLP_DATA_FIFO_EP_MASK         BIT(22)
+#define GLBL2_AXIMM_WR0_WPL_FIFO_EP_MASK               BIT(21)
+#define GLBL2_AXIMM_WR0_BRSP_CLAIM_CHN_MASK            GENMASK(20, 18)
+#define GLBL2_AXIMM_WR0_WRREQ_CNT_MASK                 GENMASK(17, 12)
+#define GLBL2_AXIMM_WR0_BID_MASK                       GENMASK(11, 9)
+#define GLBL2_AXIMM_WR0_BVALID_MASK                    BIT(8)
+#define GLBL2_AXIMM_WR0_BREADY_MASK                    BIT(7)
+#define GLBL2_AXIMM_WR0_WVALID_MASK                    BIT(6)
+#define GLBL2_AXIMM_WR0_WREADY_MASK                    BIT(5)
+#define GLBL2_AXIMM_WR0_AWID_MASK                      GENMASK(4, 2)
+#define GLBL2_AXIMM_WR0_AWVALID_MASK                   BIT(1)
+#define GLBL2_AXIMM_WR0_AWREADY_MASK                   BIT(0)
+#define EQDMA_GLBL2_DBG_AXIMM_WR1_ADDR                     0x1C4
+#define GLBL2_AXIMM_WR1_RSVD_1_MASK                    GENMASK(31, 30)
+#define GLBL2_AXIMM_WR1_BRSP_CNT4_MASK                 GENMASK(29, 24)
+#define GLBL2_AXIMM_WR1_BRSP_CNT3_MASK                 GENMASK(23, 18)
+#define GLBL2_AXIMM_WR1_BRSP_CNT2_MASK                 GENMASK(17, 12)
+#define GLBL2_AXIMM_WR1_BRSP_CNT1_MASK                 GENMASK(11, 6)
+#define GLBL2_AXIMM_WR1_BRSP_CNT0_MASK                 GENMASK(5, 0)
+#define EQDMA_GLBL2_DBG_AXIMM_RD0_ADDR                     0x1C8
+#define GLBL2_AXIMM_RD0_RSVD_1_MASK                    GENMASK(31, 23)
+#define GLBL2_AXIMM_RD0_PND_CNT_MASK                   GENMASK(22, 17)
+#define GLBL2_AXIMM_RD0_RD_REQ_MASK                    BIT(16)
+#define GLBL2_AXIMM_RD0_RD_CHNL_MASK                   GENMASK(15, 13)
+#define GLBL2_AXIMM_RD0_RRSP_CLAIM_CHNL_MASK           GENMASK(12, 10)
+#define GLBL2_AXIMM_RD0_RID_MASK                       GENMASK(9, 7)
+#define GLBL2_AXIMM_RD0_RVALID_MASK                    BIT(6)
+#define GLBL2_AXIMM_RD0_RREADY_MASK                    BIT(5)
+#define GLBL2_AXIMM_RD0_ARID_MASK                      GENMASK(4, 2)
+#define GLBL2_AXIMM_RD0_ARVALID_MASK                   BIT(1)
+#define GLBL2_AXIMM_RD0_ARREADY_MASK                   BIT(0)
+#define EQDMA_GLBL2_DBG_AXIMM_RD1_ADDR                     0x1CC
+#define GLBL2_AXIMM_RD1_RSVD_1_MASK                    GENMASK(31, 30)
+#define GLBL2_AXIMM_RD1_RRSP_CNT4_MASK                 GENMASK(29, 24)
+#define GLBL2_AXIMM_RD1_RRSP_CNT3_MASK                 GENMASK(23, 18)
+#define GLBL2_AXIMM_RD1_RRSP_CNT2_MASK                 GENMASK(17, 12)
+#define GLBL2_AXIMM_RD1_RRSP_CNT1_MASK                 GENMASK(11, 6)
+#define GLBL2_AXIMM_RD1_RRSP_CNT0_MASK                 GENMASK(5, 0)
+#define EQDMA_GLBL2_DBG_FAB0_ADDR                          0x1D0
+#define GLBL2_FAB0_H2C_INB_CONV_IN_VLD_MASK            BIT(31)
+#define GLBL2_FAB0_H2C_INB_CONV_IN_RDY_MASK            BIT(30)
+#define GLBL2_FAB0_H2C_SEG_IN_VLD_MASK                 BIT(29)
+#define GLBL2_FAB0_H2C_SEG_IN_RDY_MASK                 BIT(28)
+#define GLBL2_FAB0_H2C_SEG_OUT_VLD_MASK                GENMASK(27, 24)
+#define GLBL2_FAB0_H2C_SEG_OUT_RDY_MASK                BIT(23)
+#define GLBL2_FAB0_H2C_MST_CRDT_STAT_MASK              GENMASK(22, 16)
+#define GLBL2_FAB0_C2H_SLV_AFIFO_FULL_MASK             BIT(15)
+#define GLBL2_FAB0_C2H_SLV_AFIFO_EMPTY_MASK            BIT(14)
+#define GLBL2_FAB0_C2H_DESEG_SEG_VLD_MASK              GENMASK(13, 10)
+#define GLBL2_FAB0_C2H_DESEG_SEG_RDY_MASK              BIT(9)
+#define GLBL2_FAB0_C2H_DESEG_OUT_VLD_MASK              BIT(8)
+#define GLBL2_FAB0_C2H_DESEG_OUT_RDY_MASK              BIT(7)
+#define GLBL2_FAB0_C2H_INB_DECONV_OUT_VLD_MASK         BIT(6)
+#define GLBL2_FAB0_C2H_INB_DECONV_OUT_RDY_MASK         BIT(5)
+#define GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_FULL_MASK        BIT(4)
+#define GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_EMPTY_MASK       BIT(3)
+#define GLBL2_FAB0_IRQ_IN_AFIFO_FULL_MASK              BIT(2)
+#define GLBL2_FAB0_IRQ_IN_AFIFO_EMPTY_MASK             BIT(1)
+#define GLBL2_FAB0_IMM_CRD_AFIFO_EMPTY_MASK            BIT(0)
+#define EQDMA_GLBL2_DBG_FAB1_ADDR                          0x1D4
+#define GLBL2_FAB1_BYP_OUT_CRDT_STAT_MASK              GENMASK(31, 25)
+#define GLBL2_FAB1_TM_DSC_STS_CRDT_STAT_MASK           GENMASK(24, 18)
+#define GLBL2_FAB1_C2H_CMN_AFIFO_FULL_MASK             BIT(17)
+#define GLBL2_FAB1_C2H_CMN_AFIFO_EMPTY_MASK            BIT(16)
+#define GLBL2_FAB1_RSVD_1_MASK                         GENMASK(15, 13)
+#define GLBL2_FAB1_C2H_BYP_IN_AFIFO_FULL_MASK          BIT(12)
+#define GLBL2_FAB1_RSVD_2_MASK                         GENMASK(11, 9)
+#define GLBL2_FAB1_C2H_BYP_IN_AFIFO_EMPTY_MASK         BIT(8)
+#define GLBL2_FAB1_RSVD_3_MASK                         GENMASK(7, 5)
+#define GLBL2_FAB1_H2C_BYP_IN_AFIFO_FULL_MASK          BIT(4)
+#define GLBL2_FAB1_RSVD_4_MASK                         GENMASK(3, 1)
+#define GLBL2_FAB1_H2C_BYP_IN_AFIFO_EMPTY_MASK         BIT(0)
+#define EQDMA_GLBL2_DBG_MATCH_SEL_ADDR                     0x1F4
+#define GLBL2_MATCH_SEL_RSV_MASK                       GENMASK(31, 18)
+#define GLBL2_MATCH_SEL_CSR_SEL_MASK                   GENMASK(17, 13)
+#define GLBL2_MATCH_SEL_CSR_EN_MASK                    BIT(12)
+#define GLBL2_MATCH_SEL_ROTATE1_MASK                   GENMASK(11, 10)
+#define GLBL2_MATCH_SEL_ROTATE0_MASK                   GENMASK(9, 8)
+#define GLBL2_MATCH_SEL_SEL_MASK                       GENMASK(7, 0)
+#define EQDMA_GLBL2_DBG_MATCH_MSK_ADDR                     0x1F8
+#define GLBL2_MATCH_MSK_MASK                      GENMASK(31, 0)
+#define EQDMA_GLBL2_DBG_MATCH_PAT_ADDR                     0x1FC
+#define GLBL2_MATCH_PAT_PATTERN_MASK                   GENMASK(31, 0)
+/* Global descriptor ring-size table: 16 programmable ring-size
+ * registers (indexes 1..0x10), identical layout at 0x204..0x240.
+ */
+#define EQDMA_GLBL_RNG_SZ_1_ADDR                           0x204
+#define GLBL_RNG_SZ_1_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_1_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_2_ADDR                           0x208
+#define GLBL_RNG_SZ_2_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_2_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_3_ADDR                           0x20C
+#define GLBL_RNG_SZ_3_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_3_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_4_ADDR                           0x210
+#define GLBL_RNG_SZ_4_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_4_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_5_ADDR                           0x214
+#define GLBL_RNG_SZ_5_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_5_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_6_ADDR                           0x218
+#define GLBL_RNG_SZ_6_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_6_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_7_ADDR                           0x21C
+#define GLBL_RNG_SZ_7_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_7_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_8_ADDR                           0x220
+#define GLBL_RNG_SZ_8_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_8_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_9_ADDR                           0x224
+#define GLBL_RNG_SZ_9_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_9_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_A_ADDR                           0x228
+#define GLBL_RNG_SZ_A_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_A_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_B_ADDR                           0x22C
+#define GLBL_RNG_SZ_B_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_B_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_C_ADDR                           0x230
+#define GLBL_RNG_SZ_C_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_C_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_D_ADDR                           0x234
+#define GLBL_RNG_SZ_D_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_D_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_E_ADDR                           0x238
+#define GLBL_RNG_SZ_E_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_E_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_F_ADDR                           0x23C
+#define GLBL_RNG_SZ_F_RSVD_1_MASK                          GENMASK(31, 16)
+#define GLBL_RNG_SZ_F_RING_SIZE_MASK                       GENMASK(15, 0)
+#define EQDMA_GLBL_RNG_SZ_10_ADDR                          0x240
+#define GLBL_RNG_SZ_10_RSVD_1_MASK                         GENMASK(31, 16)
+#define GLBL_RNG_SZ_10_RING_SIZE_MASK                      GENMASK(15, 0)
+/* Global error status/mask and descriptor-engine error logging. */
+#define EQDMA_GLBL_ERR_STAT_ADDR                           0x248
+#define GLBL_ERR_STAT_RSVD_1_MASK                          GENMASK(31, 18)
+#define GLBL_ERR_STAT_ERR_FAB_MASK                         BIT(17)
+#define GLBL_ERR_STAT_ERR_H2C_ST_MASK                      BIT(16)
+#define GLBL_ERR_STAT_ERR_BDG_MASK                         BIT(15)
+#define GLBL_ERR_STAT_IND_CTXT_CMD_ERR_MASK                GENMASK(14, 9)
+#define GLBL_ERR_STAT_ERR_C2H_ST_MASK                      BIT(8)
+#define GLBL_ERR_STAT_ERR_C2H_MM_1_MASK                    BIT(7)
+#define GLBL_ERR_STAT_ERR_C2H_MM_0_MASK                    BIT(6)
+#define GLBL_ERR_STAT_ERR_H2C_MM_1_MASK                    BIT(5)
+#define GLBL_ERR_STAT_ERR_H2C_MM_0_MASK                    BIT(4)
+#define GLBL_ERR_STAT_ERR_TRQ_MASK                         BIT(3)
+#define GLBL_ERR_STAT_ERR_DSC_MASK                         BIT(2)
+#define GLBL_ERR_STAT_ERR_RAM_DBE_MASK                     BIT(1)
+#define GLBL_ERR_STAT_ERR_RAM_SBE_MASK                     BIT(0)
+#define EQDMA_GLBL_ERR_MASK_ADDR                           0x24C
+/* NOTE(review): generic name GLBL_ERR_MASK (no _MSK_MASK suffix) breaks
+ * the naming pattern of the other *_MSK_MASK field macros below.
+ */
+#define GLBL_ERR_MASK                            GENMASK(31, 0)
+#define EQDMA_GLBL_DSC_CFG_ADDR                            0x250
+#define GLBL_DSC_CFG_RSVD_1_MASK                           GENMASK(31, 10)
+#define GLBL_DSC_CFG_UNC_OVR_COR_MASK                      BIT(9)
+#define GLBL_DSC_CFG_CTXT_FER_DIS_MASK                     BIT(8)
+#define GLBL_DSC_CFG_RSVD_2_MASK                           GENMASK(7, 6)
+#define GLBL_DSC_CFG_MAXFETCH_MASK                         GENMASK(5, 3)
+#define GLBL_DSC_CFG_WB_ACC_INT_MASK                       GENMASK(2, 0)
+#define EQDMA_GLBL_DSC_ERR_STS_ADDR                        0x254
+#define GLBL_DSC_ERR_STS_RSVD_1_MASK                       GENMASK(31, 26)
+#define GLBL_DSC_ERR_STS_PORT_ID_MASK                      BIT(25)
+#define GLBL_DSC_ERR_STS_SBE_MASK                          BIT(24)
+#define GLBL_DSC_ERR_STS_DBE_MASK                          BIT(23)
+#define GLBL_DSC_ERR_STS_RQ_CANCEL_MASK                    BIT(22)
+#define GLBL_DSC_ERR_STS_DSC_MASK                          BIT(21)
+#define GLBL_DSC_ERR_STS_DMA_MASK                          BIT(20)
+#define GLBL_DSC_ERR_STS_FLR_CANCEL_MASK                   BIT(19)
+#define GLBL_DSC_ERR_STS_RSVD_2_MASK                       GENMASK(18, 17)
+#define GLBL_DSC_ERR_STS_DAT_POISON_MASK                   BIT(16)
+#define GLBL_DSC_ERR_STS_TIMEOUT_MASK                      BIT(9)
+#define GLBL_DSC_ERR_STS_FLR_MASK                          BIT(8)
+#define GLBL_DSC_ERR_STS_TAG_MASK                          BIT(6)
+#define GLBL_DSC_ERR_STS_ADDR_MASK                         BIT(5)
+#define GLBL_DSC_ERR_STS_PARAM_MASK                        BIT(4)
+#define GLBL_DSC_ERR_STS_BCNT_MASK                         BIT(3)
+#define GLBL_DSC_ERR_STS_UR_CA_MASK                        BIT(2)
+#define GLBL_DSC_ERR_STS_POISON_MASK                       BIT(1)
+#define EQDMA_GLBL_DSC_ERR_MSK_ADDR                        0x258
+#define GLBL_DSC_ERR_MSK_MASK                         GENMASK(31, 0)
+#define EQDMA_GLBL_DSC_ERR_LOG0_ADDR                       0x25C
+#define GLBL_DSC_ERR_LOG0_VALID_MASK                       BIT(31)
+#define GLBL_DSC_ERR_LOG0_SEL_MASK                         BIT(30)
+#define GLBL_DSC_ERR_LOG0_RSVD_1_MASK                      GENMASK(29, 13)
+#define GLBL_DSC_ERR_LOG0_QID_MASK                         GENMASK(12, 0)
+#define EQDMA_GLBL_DSC_ERR_LOG1_ADDR                       0x260
+#define GLBL_DSC_ERR_LOG1_RSVD_1_MASK                      GENMASK(31, 28)
+#define GLBL_DSC_ERR_LOG1_CIDX_MASK                        GENMASK(27, 12)
+#define GLBL_DSC_ERR_LOG1_RSVD_2_MASK                      GENMASK(11, 9)
+#define GLBL_DSC_ERR_LOG1_SUB_TYPE_MASK                    GENMASK(8, 5)
+#define GLBL_DSC_ERR_LOG1_ERR_TYPE_MASK                    GENMASK(4, 0)
+#define EQDMA_GLBL_TRQ_ERR_STS_ADDR                        0x264
+#define GLBL_TRQ_ERR_STS_RSVD_1_MASK                       GENMASK(31, 8)
+#define GLBL_TRQ_ERR_STS_TCP_QSPC_TIMEOUT_MASK             BIT(7)
+#define GLBL_TRQ_ERR_STS_RSVD_2_MASK                       BIT(6)
+#define GLBL_TRQ_ERR_STS_QID_RANGE_MASK                    BIT(5)
+#define GLBL_TRQ_ERR_STS_QSPC_UNMAPPED_MASK                BIT(4)
+#define GLBL_TRQ_ERR_STS_TCP_CSR_TIMEOUT_MASK              BIT(3)
+#define GLBL_TRQ_ERR_STS_RSVD_3_MASK                       BIT(2)
+#define GLBL_TRQ_ERR_STS_VF_ACCESS_ERR_MASK                BIT(1)
+#define GLBL_TRQ_ERR_STS_CSR_UNMAPPED_MASK                 BIT(0)
+#define EQDMA_GLBL_TRQ_ERR_MSK_ADDR                        0x268
+#define GLBL_TRQ_ERR_MSK_MASK                         GENMASK(31, 0)
+#define EQDMA_GLBL_TRQ_ERR_LOG_ADDR                        0x26C
+#define GLBL_TRQ_ERR_LOG_SRC_MASK                          BIT(31)
+#define GLBL_TRQ_ERR_LOG_TARGET_MASK                       GENMASK(30, 27)
+#define GLBL_TRQ_ERR_LOG_FUNC_MASK                         GENMASK(26, 17)
+#define GLBL_TRQ_ERR_LOG_ADDRESS_MASK                      GENMASK(16, 0)
+/* Descriptor engine debug data/control, interrupt configuration and
+ * host profile registers.
+ */
+#define EQDMA_GLBL_DSC_DBG_DAT0_ADDR                       0x270
+#define GLBL_DSC_DAT0_RSVD_1_MASK                      GENMASK(31, 30)
+#define GLBL_DSC_DAT0_CTXT_ARB_DIR_MASK                BIT(29)
+#define GLBL_DSC_DAT0_CTXT_ARB_QID_MASK                GENMASK(28, 17)
+#define GLBL_DSC_DAT0_CTXT_ARB_REQ_MASK                GENMASK(16, 12)
+#define GLBL_DSC_DAT0_IRQ_FIFO_FL_MASK                 BIT(11)
+#define GLBL_DSC_DAT0_TMSTALL_MASK                     BIT(10)
+#define GLBL_DSC_DAT0_RRQ_STALL_MASK                   GENMASK(9, 8)
+#define GLBL_DSC_DAT0_RCP_FIFO_SPC_STALL_MASK          GENMASK(7, 6)
+#define GLBL_DSC_DAT0_RRQ_FIFO_SPC_STALL_MASK          GENMASK(5, 4)
+#define GLBL_DSC_DAT0_FAB_MRKR_RSP_STALL_MASK          GENMASK(3, 2)
+#define GLBL_DSC_DAT0_DSC_OUT_STALL_MASK               GENMASK(1, 0)
+#define EQDMA_GLBL_DSC_DBG_DAT1_ADDR                       0x274
+#define GLBL_DSC_DAT1_RSVD_1_MASK                      GENMASK(31, 28)
+#define GLBL_DSC_DAT1_EVT_SPC_C2H_MASK                 GENMASK(27, 22)
+#define GLBL_DSC_DAT1_EVT_SP_H2C_MASK                  GENMASK(21, 16)
+#define GLBL_DSC_DAT1_DSC_SPC_C2H_MASK                 GENMASK(15, 8)
+#define GLBL_DSC_DAT1_DSC_SPC_H2C_MASK                 GENMASK(7, 0)
+#define EQDMA_GLBL_DSC_DBG_CTL_ADDR                        0x278
+#define GLBL_DSC_CTL_RSVD_1_MASK                       GENMASK(31, 3)
+#define GLBL_DSC_CTL_SELECT_MASK                       GENMASK(2, 0)
+#define EQDMA_GLBL_DSC_ERR_LOG2_ADDR                       0x27c
+#define GLBL_DSC_ERR_LOG2_OLD_PIDX_MASK                    GENMASK(31, 16)
+#define GLBL_DSC_ERR_LOG2_NEW_PIDX_MASK                    GENMASK(15, 0)
+#define EQDMA_GLBL_GLBL_INTERRUPT_CFG_ADDR                 0x2c4
+#define GLBL_GLBL_INTERRUPT_CFG_RSVD_1_MASK                GENMASK(31, 2)
+#define GLBL_GLBL_INTERRUPT_CFG_LGCY_INTR_PENDING_MASK     BIT(1)
+#define GLBL_GLBL_INTERRUPT_CFG_EN_LGCY_INTR_MASK          BIT(0)
+#define EQDMA_GLBL_VCH_HOST_PROFILE_ADDR                   0x2c8
+#define GLBL_VCH_HOST_PROFILE_RSVD_1_MASK                  GENMASK(31, 28)
+#define GLBL_VCH_HOST_PROFILE_2C_MM_MASK                   GENMASK(27, 24)
+#define GLBL_VCH_HOST_PROFILE_2C_ST_MASK                   GENMASK(23, 20)
+#define GLBL_VCH_HOST_PROFILE_VCH_DSC_MASK                 GENMASK(19, 16)
+#define GLBL_VCH_HOST_PROFILE_VCH_INT_MSG_MASK             GENMASK(15, 12)
+#define GLBL_VCH_HOST_PROFILE_VCH_INT_AGGR_MASK            GENMASK(11, 8)
+#define GLBL_VCH_HOST_PROFILE_VCH_CMPT_MASK                GENMASK(7, 4)
+#define GLBL_VCH_HOST_PROFILE_VCH_C2H_PLD_MASK             GENMASK(3, 0)
+#define EQDMA_GLBL_BRIDGE_HOST_PROFILE_ADDR                0x308
+#define GLBL_BRIDGE_HOST_PROFILE_RSVD_1_MASK               GENMASK(31, 4)
+#define GLBL_BRIDGE_HOST_PROFILE_BDGID_MASK                GENMASK(3, 0)
+#define EQDMA_AXIMM_IRQ_DEST_ADDR_ADDR                     0x30c
+#define AXIMM_IRQ_DEST_ADDR_ADDR_MASK                      GENMASK(31, 0)
+#define EQDMA_FAB_ERR_LOG_ADDR                             0x314
+#define FAB_ERR_LOG_RSVD_1_MASK                            GENMASK(31, 7)
+#define FAB_ERR_LOG_SRC_MASK                               GENMASK(6, 0)
+#define EQDMA_GLBL_REQ_ERR_STS_ADDR                        0x318
+#define GLBL_REQ_ERR_STS_RSVD_1_MASK                       GENMASK(31, 11)
+#define GLBL_REQ_ERR_STS_RC_DISCONTINUE_MASK               BIT(10)
+#define GLBL_REQ_ERR_STS_RC_PRTY_MASK                      BIT(9)
+#define GLBL_REQ_ERR_STS_RC_FLR_MASK                       BIT(8)
+#define GLBL_REQ_ERR_STS_RC_TIMEOUT_MASK                   BIT(7)
+#define GLBL_REQ_ERR_STS_RC_INV_BCNT_MASK                  BIT(6)
+#define GLBL_REQ_ERR_STS_RC_INV_TAG_MASK                   BIT(5)
+#define GLBL_REQ_ERR_STS_RC_START_ADDR_MISMCH_MASK         BIT(4)
+#define GLBL_REQ_ERR_STS_RC_RID_TC_ATTR_MISMCH_MASK        BIT(3)
+#define GLBL_REQ_ERR_STS_RC_NO_DATA_MASK                   BIT(2)
+#define GLBL_REQ_ERR_STS_RC_UR_CA_CRS_MASK                 BIT(1)
+#define GLBL_REQ_ERR_STS_RC_POISONED_MASK                  BIT(0)
+#define EQDMA_GLBL_REQ_ERR_MSK_ADDR                        0x31C
+#define GLBL_REQ_ERR_MSK_MASK                         GENMASK(31, 0)
+#define EQDMA_IND_CTXT_DATA_ADDR                           0x804
+#define IND_CTXT_DATA_DATA_MASK                            GENMASK(31, 0)
+#define EQDMA_IND_CTXT_MASK_ADDR                           0x824
+#define IND_CTXT_MASK                            GENMASK(31, 0)
+#define EQDMA_IND_CTXT_CMD_ADDR                            0x844
+#define IND_CTXT_CMD_RSVD_1_MASK                           GENMASK(31, 20)
+#define IND_CTXT_CMD_QID_MASK                              GENMASK(19, 7)
+#define IND_CTXT_CMD_OP_MASK                               GENMASK(6, 5)
+#define IND_CTXT_CMD_SEL_MASK                              GENMASK(4, 1)
+#define IND_CTXT_CMD_BUSY_MASK                             BIT(0)
+/* C2H timer-count and counter-threshold table entries (16-bit values);
+ * "THESHOLD" below is a typo for THRESHOLD kept for compatibility with
+ * the upstream register-name generator.
+ */
+#define EQDMA_C2H_TIMER_CNT_ADDR                           0xA00
+#define C2H_TIMER_CNT_RSVD_1_MASK                          GENMASK(31, 16)
+#define C2H_TIMER_CNT_MASK                                GENMASK(15, 0)
+#define EQDMA_C2H_CNT_TH_ADDR                              0xA40
+#define C2H_CNT_TH_RSVD_1_MASK                             GENMASK(31, 16)
+#define C2H_CNT_TH_THESHOLD_CNT_MASK                       GENMASK(15, 0)
+/* C2H streaming statistics: 32-bit free-running acceptance/completion
+ * counters for the AXI-stream, writeback and descriptor-response paths.
+ */
+#define EQDMA_C2H_STAT_S_AXIS_C2H_ACCEPTED_ADDR            0xA88
+#define C2H_STAT_S_AXIS_C2H_ACCEPTED_MASK                 GENMASK(31, 0)
+#define EQDMA_C2H_STAT_S_AXIS_WRB_ACCEPTED_ADDR            0xA8C
+#define C2H_STAT_S_AXIS_WRB_ACCEPTED_MASK                 GENMASK(31, 0)
+#define EQDMA_C2H_STAT_DESC_RSP_PKT_ACCEPTED_ADDR          0xA90
+#define C2H_STAT_DESC_RSP_PKT_ACCEPTED_D_MASK              GENMASK(31, 0)
+#define EQDMA_C2H_STAT_AXIS_PKG_CMP_ADDR                   0xA94
+#define C2H_STAT_AXIS_PKG_CMP_MASK                        GENMASK(31, 0)
+#define EQDMA_C2H_STAT_DESC_RSP_ACCEPTED_ADDR              0xA98
+#define C2H_STAT_DESC_RSP_ACCEPTED_D_MASK                  GENMASK(31, 0)
+#define EQDMA_C2H_STAT_DESC_RSP_CMP_ADDR                   0xA9C
+#define C2H_STAT_DESC_RSP_CMP_D_MASK                       GENMASK(31, 0)
+#define EQDMA_C2H_STAT_WRQ_OUT_ADDR                        0xAA0
+#define C2H_STAT_WRQ_OUT_MASK                             GENMASK(31, 0)
+#define EQDMA_C2H_STAT_WPL_REN_ACCEPTED_ADDR               0xAA4
+#define C2H_STAT_WPL_REN_ACCEPTED_MASK                    GENMASK(31, 0)
+#define EQDMA_C2H_STAT_TOTAL_WRQ_LEN_ADDR                  0xAA8
+#define C2H_STAT_TOTAL_WRQ_LEN_MASK                       GENMASK(31, 0)
+#define EQDMA_C2H_STAT_TOTAL_WPL_LEN_ADDR                  0xAAC
+#define C2H_STAT_TOTAL_WPL_LEN_MASK                       GENMASK(31, 0)
+/* C2H buffer size table entry ("_IZE_" is a generator artifact of
+ * "SIZE" — kept as-is for name compatibility).
+ */
+#define EQDMA_C2H_BUF_SZ_ADDR                              0xAB0
+#define C2H_BUF_SZ_IZE_MASK                                GENMASK(31, 0)
+/* C2H error status (sticky per-error bits) and its enable mask. */
+#define EQDMA_C2H_ERR_STAT_ADDR                            0xAF0
+#define C2H_ERR_STAT_RSVD_1_MASK                           GENMASK(31, 21)
+#define C2H_ERR_STAT_WRB_PORT_ID_ERR_MASK                  BIT(20)
+#define C2H_ERR_STAT_HDR_PAR_ERR_MASK                      BIT(19)
+#define C2H_ERR_STAT_HDR_ECC_COR_ERR_MASK                  BIT(18)
+#define C2H_ERR_STAT_HDR_ECC_UNC_ERR_MASK                  BIT(17)
+#define C2H_ERR_STAT_AVL_RING_DSC_ERR_MASK                 BIT(16)
+#define C2H_ERR_STAT_WRB_PRTY_ERR_MASK                     BIT(15)
+#define C2H_ERR_STAT_WRB_CIDX_ERR_MASK                     BIT(14)
+#define C2H_ERR_STAT_WRB_QFULL_ERR_MASK                    BIT(13)
+#define C2H_ERR_STAT_WRB_INV_Q_ERR_MASK                    BIT(12)
+#define C2H_ERR_STAT_RSVD_2_MASK                           BIT(11)
+#define C2H_ERR_STAT_PORT_ID_CTXT_MISMATCH_MASK            BIT(10)
+#define C2H_ERR_STAT_ERR_DESC_CNT_MASK                     BIT(9)
+#define C2H_ERR_STAT_RSVD_3_MASK                           BIT(8)
+#define C2H_ERR_STAT_MSI_INT_FAIL_MASK                     BIT(7)
+#define C2H_ERR_STAT_ENG_WPL_DATA_PAR_ERR_MASK             BIT(6)
+#define C2H_ERR_STAT_RSVD_4_MASK                           BIT(5)
+#define C2H_ERR_STAT_DESC_RSP_ERR_MASK                     BIT(4)
+#define C2H_ERR_STAT_QID_MISMATCH_MASK                     BIT(3)
+#define C2H_ERR_STAT_SH_CMPT_DSC_ERR_MASK                  BIT(2)
+#define C2H_ERR_STAT_LEN_MISMATCH_MASK                     BIT(1)
+#define C2H_ERR_STAT_MTY_MISMATCH_MASK                     BIT(0)
+#define EQDMA_C2H_ERR_MASK_ADDR                            0xAF4
+#define C2H_ERR_EN_MASK                          GENMASK(31, 0)
+/* C2H fatal error status (mostly RAM double-bit-error flags, "RDBE"),
+ * its mask, and the fatal-error response enable register.
+ */
+#define EQDMA_C2H_FATAL_ERR_STAT_ADDR                      0xAF8
+#define C2H_FATAL_ERR_STAT_RSVD_1_MASK                     GENMASK(31, 21)
+#define C2H_FATAL_ERR_STAT_HDR_ECC_UNC_ERR_MASK            BIT(20)
+#define C2H_FATAL_ERR_STAT_AVL_RING_FIFO_RAM_RDBE_MASK     BIT(19)
+#define C2H_FATAL_ERR_STAT_WPL_DATA_PAR_ERR_MASK           BIT(18)
+#define C2H_FATAL_ERR_STAT_PLD_FIFO_RAM_RDBE_MASK          BIT(17)
+#define C2H_FATAL_ERR_STAT_QID_FIFO_RAM_RDBE_MASK          BIT(16)
+#define C2H_FATAL_ERR_STAT_CMPT_FIFO_RAM_RDBE_MASK         BIT(15)
+#define C2H_FATAL_ERR_STAT_WRB_COAL_DATA_RAM_RDBE_MASK     BIT(14)
+#define C2H_FATAL_ERR_STAT_RESERVED2_MASK                  BIT(13)
+#define C2H_FATAL_ERR_STAT_INT_CTXT_RAM_RDBE_MASK          BIT(12)
+#define C2H_FATAL_ERR_STAT_DESC_REQ_FIFO_RAM_RDBE_MASK     BIT(11)
+#define C2H_FATAL_ERR_STAT_PFCH_CTXT_RAM_RDBE_MASK         BIT(10)
+#define C2H_FATAL_ERR_STAT_WRB_CTXT_RAM_RDBE_MASK          BIT(9)
+#define C2H_FATAL_ERR_STAT_PFCH_LL_RAM_RDBE_MASK           BIT(8)
+#define C2H_FATAL_ERR_STAT_TIMER_FIFO_RAM_RDBE_MASK        GENMASK(7, 4)
+#define C2H_FATAL_ERR_STAT_QID_MISMATCH_MASK               BIT(3)
+#define C2H_FATAL_ERR_STAT_RESERVED1_MASK                  BIT(2)
+#define C2H_FATAL_ERR_STAT_LEN_MISMATCH_MASK               BIT(1)
+#define C2H_FATAL_ERR_STAT_MTY_MISMATCH_MASK               BIT(0)
+#define EQDMA_C2H_FATAL_ERR_MASK_ADDR                      0xAFC
+#define C2H_FATAL_ERR_C2HEN_MASK                 GENMASK(31, 0)
+#define EQDMA_C2H_FATAL_ERR_ENABLE_ADDR                    0xB00
+#define C2H_FATAL_ERR_ENABLE_RSVD_1_MASK                   GENMASK(31, 2)
+#define C2H_FATAL_ERR_ENABLE_WPL_PAR_INV_MASK             BIT(1)
+#define C2H_FATAL_ERR_ENABLE_WRQ_DIS_MASK                 BIT(0)
+/* Global error interrupt: routing (function/vector/host-id) and arming
+ * of the aggregated error interrupt.
+ */
+#define EQDMA_GLBL_ERR_INT_ADDR                            0xB04
+#define GLBL_ERR_INT_RSVD_1_MASK                           GENMASK(31, 30)
+#define GLBL_ERR_INT_HOST_ID_MASK                          GENMASK(29, 26)
+#define GLBL_ERR_INT_DIS_INTR_ON_VF_MASK                   BIT(25)
+#define GLBL_ERR_INT_ARM_MASK                             BIT(24)
+#define GLBL_ERR_INT_EN_COAL_MASK                          BIT(23)
+#define GLBL_ERR_INT_VEC_MASK                              GENMASK(22, 12)
+#define GLBL_ERR_INT_FUNC_MASK                             GENMASK(11, 0)
+/* C2H prefetch configuration. NOTE(review): CFG_1/CFG_2 sit at lower
+ * offsets (0xA80/0xA84) than CFG (0xB08) — ordering follows the
+ * generator, not the address map.
+ */
+#define EQDMA_C2H_PFCH_CFG_ADDR                            0xB08
+#define C2H_PFCH_CFG_EVTFL_TH_MASK                         GENMASK(31, 16)
+#define C2H_PFCH_CFG_FL_TH_MASK                            GENMASK(15, 0)
+#define EQDMA_C2H_PFCH_CFG_1_ADDR                          0xA80
+#define C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK                    GENMASK(31, 16)
+#define C2H_PFCH_CFG_1_QCNT_MASK                           GENMASK(15, 0)
+#define EQDMA_C2H_PFCH_CFG_2_ADDR                          0xA84
+#define C2H_PFCH_CFG_2_FENCE_MASK                          BIT(31)
+#define C2H_PFCH_CFG_2_RSVD_MASK                           GENMASK(30, 29)
+#define C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK               BIT(28)
+#define C2H_PFCH_CFG_2_LL_SZ_TH_MASK                       GENMASK(27, 12)
+#define C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK                   GENMASK(11, 6)
+#define C2H_PFCH_CFG_2_NUM_MASK                            GENMASK(5, 0)
+/* Interrupt timer tick and descriptor-response drop/error counters. */
+#define EQDMA_C2H_INT_TIMER_TICK_ADDR                      0xB0C
+#define C2H_INT_TIMER_TICK_MASK                           GENMASK(31, 0)
+#define EQDMA_C2H_STAT_DESC_RSP_DROP_ACCEPTED_ADDR         0xB10
+#define C2H_STAT_DESC_RSP_DROP_ACCEPTED_D_MASK             GENMASK(31, 0)
+#define EQDMA_C2H_STAT_DESC_RSP_ERR_ACCEPTED_ADDR          0xB14
+#define C2H_STAT_DESC_RSP_ERR_ACCEPTED_D_MASK              GENMASK(31, 0)
+#define EQDMA_C2H_STAT_DESC_REQ_ADDR                       0xB18
+#define C2H_STAT_DESC_REQ_MASK                            GENMASK(31, 0)
+/* C2H DMA engine debug snapshots 0-3: internal handshake/FIFO state
+ * (tvalid/tready, FIFO ready/valid flags, state-machine bits).
+ */
+#define EQDMA_C2H_STAT_DBG_DMA_ENG_0_ADDR                  0xB1C
+#define C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TVALID_MASK      BIT(31)
+#define C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TREADY_MASK      BIT(30)
+#define C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TVALID_MASK      GENMASK(29, 27)
+#define C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TREADY_MASK      GENMASK(26, 24)
+#define C2H_STAT_DMA_ENG_0_PLD_FIFO_IN_RDY_MASK        BIT(23)
+#define C2H_STAT_DMA_ENG_0_QID_FIFO_IN_RDY_MASK        BIT(22)
+#define C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_VLD_MASK       BIT(21)
+#define C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_QID_MASK       GENMASK(20, 9)
+#define C2H_STAT_DMA_ENG_0_WRB_FIFO_IN_RDY_MASK        BIT(8)
+#define C2H_STAT_DMA_ENG_0_WRB_FIFO_OUT_CNT_MASK       GENMASK(7, 5)
+#define C2H_STAT_DMA_ENG_0_WRB_SM_CS_MASK              BIT(4)
+#define C2H_STAT_DMA_ENG_0_MAIN_SM_CS_MASK             GENMASK(3, 0)
+#define EQDMA_C2H_STAT_DBG_DMA_ENG_1_ADDR                  0xB20
+#define C2H_STAT_DMA_ENG_1_RSVD_1_MASK                 GENMASK(31, 29)
+#define C2H_STAT_DMA_ENG_1_QID_FIFO_OUT_CNT_MASK       GENMASK(28, 18)
+#define C2H_STAT_DMA_ENG_1_PLD_FIFO_OUT_CNT_MASK       GENMASK(17, 7)
+#define C2H_STAT_DMA_ENG_1_PLD_ST_FIFO_CNT_MASK        GENMASK(6, 0)
+#define EQDMA_C2H_STAT_DBG_DMA_ENG_2_ADDR                  0xB24
+#define C2H_STAT_DMA_ENG_2_RSVD_1_MASK                 GENMASK(31, 29)
+#define C2H_STAT_DMA_ENG_2_QID_FIFO_OUT_CNT_MASK       GENMASK(28, 18)
+#define C2H_STAT_DMA_ENG_2_PLD_FIFO_OUT_CNT_MASK       GENMASK(17, 7)
+#define C2H_STAT_DMA_ENG_2_PLD_ST_FIFO_CNT_MASK        GENMASK(6, 0)
+#define EQDMA_C2H_STAT_DBG_DMA_ENG_3_ADDR                  0xB28
+#define C2H_STAT_DMA_ENG_3_RSVD_1_MASK                 GENMASK(31, 24)
+#define C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_CNT_MASK       GENMASK(23, 19)
+#define C2H_STAT_DMA_ENG_3_QID_FIFO_OUT_VLD_MASK       BIT(18)
+#define C2H_STAT_DMA_ENG_3_PLD_FIFO_OUT_VLD_MASK       BIT(17)
+#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_VLD_MASK    BIT(16)
+#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_EOP_MASK BIT(15)
+#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK BIT(14)
+#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_DROP_MASK BIT(13)
+#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_ERR_MASK BIT(12)
+#define C2H_STAT_DMA_ENG_3_DESC_CNT_FIFO_IN_RDY_MASK   BIT(11)
+#define C2H_STAT_DMA_ENG_3_DESC_RSP_FIFO_IN_RDY_MASK   BIT(10)
+#define C2H_STAT_DMA_ENG_3_PLD_PKT_ID_LARGER_0_MASK    BIT(9)
+#define C2H_STAT_DMA_ENG_3_WRQ_VLD_MASK                BIT(8)
+#define C2H_STAT_DMA_ENG_3_WRQ_RDY_MASK                BIT(7)
+#define C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_RDY_MASK       BIT(6)
+#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_DROP_MASK BIT(5)
+#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_ERR_MASK BIT(4)
+#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_MARKER_MASK BIT(3)
+#define C2H_STAT_DMA_ENG_3_WRQ_PACKET_PRE_EOR_MASK     BIT(2)
+#define C2H_STAT_DMA_ENG_3_WCP_FIFO_IN_RDY_MASK        BIT(1)
+#define C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_IN_RDY_MASK     BIT(0)
+/* Prefetch error context capture: QID/command of the failing access. */
+#define EQDMA_C2H_DBG_PFCH_ERR_CTXT_ADDR                   0xB2C
+#define C2H_PFCH_ERR_CTXT_RSVD_1_MASK                  GENMASK(31, 14)
+#define C2H_PFCH_ERR_CTXT_ERR_STAT_MASK                BIT(13)
+#define C2H_PFCH_ERR_CTXT_CMD_WR_MASK                  BIT(12)
+#define C2H_PFCH_ERR_CTXT_QID_MASK                     GENMASK(11, 1)
+#define C2H_PFCH_ERR_CTXT_DONE_MASK                    BIT(0)
+/* First error QID: queue and error type of the first recorded error. */
+#define EQDMA_C2H_FIRST_ERR_QID_ADDR                       0xB30
+#define C2H_FIRST_ERR_QID_RSVD_1_MASK                      GENMASK(31, 21)
+#define C2H_FIRST_ERR_QID_ERR_TYPE_MASK                    GENMASK(20, 16)
+#define C2H_FIRST_ERR_QID_RSVD_MASK                        GENMASK(15, 13)
+#define C2H_FIRST_ERR_QID_QID_MASK                         GENMASK(12, 0)
+/* Writeback (WRB) and descriptor statistics counters (16-bit). */
+#define EQDMA_STAT_NUM_WRB_IN_ADDR                         0xB34
+#define STAT_NUM_WRB_IN_RSVD_1_MASK                        GENMASK(31, 16)
+#define STAT_NUM_WRB_IN_WRB_CNT_MASK                       GENMASK(15, 0)
+#define EQDMA_STAT_NUM_WRB_OUT_ADDR                        0xB38
+#define STAT_NUM_WRB_OUT_RSVD_1_MASK                       GENMASK(31, 16)
+#define STAT_NUM_WRB_OUT_WRB_CNT_MASK                      GENMASK(15, 0)
+#define EQDMA_STAT_NUM_WRB_DRP_ADDR                        0xB3C
+#define STAT_NUM_WRB_DRP_RSVD_1_MASK                       GENMASK(31, 16)
+#define STAT_NUM_WRB_DRP_WRB_CNT_MASK                      GENMASK(15, 0)
+#define EQDMA_STAT_NUM_STAT_DESC_OUT_ADDR                  0xB40
+#define STAT_NUM_STAT_DESC_OUT_RSVD_1_MASK                 GENMASK(31, 16)
+#define STAT_NUM_STAT_DESC_OUT_CNT_MASK                    GENMASK(15, 0)
+#define EQDMA_STAT_NUM_DSC_CRDT_SENT_ADDR                  0xB44
+#define STAT_NUM_DSC_CRDT_SENT_RSVD_1_MASK                 GENMASK(31, 16)
+#define STAT_NUM_DSC_CRDT_SENT_CNT_MASK                    GENMASK(15, 0)
+#define EQDMA_STAT_NUM_FCH_DSC_RCVD_ADDR                   0xB48
+#define STAT_NUM_FCH_DSC_RCVD_RSVD_1_MASK                  GENMASK(31, 16)
+#define STAT_NUM_FCH_DSC_RCVD_DSC_CNT_MASK                 GENMASK(15, 0)
+#define EQDMA_STAT_NUM_BYP_DSC_RCVD_ADDR                   0xB4C
+#define STAT_NUM_BYP_DSC_RCVD_RSVD_1_MASK                  GENMASK(31, 11)
+#define STAT_NUM_BYP_DSC_RCVD_DSC_CNT_MASK                 GENMASK(10, 0)
+/* Writeback coalescing configuration: buffer size, tick, flush ctrl. */
+#define EQDMA_C2H_WRB_COAL_CFG_ADDR                        0xB50
+#define C2H_WRB_COAL_CFG_MAX_BUF_SZ_MASK                   GENMASK(31, 26)
+#define C2H_WRB_COAL_CFG_TICK_VAL_MASK                     GENMASK(25, 14)
+#define C2H_WRB_COAL_CFG_TICK_CNT_MASK                     GENMASK(13, 2)
+#define C2H_WRB_COAL_CFG_SET_GLB_FLUSH_MASK                BIT(1)
+#define C2H_WRB_COAL_CFG_DONE_GLB_FLUSH_MASK               BIT(0)
+/* Interrupt request/ack/fail counters per source (18-bit counts). */
+#define EQDMA_C2H_INTR_H2C_REQ_ADDR                        0xB54
+#define C2H_INTR_H2C_REQ_RSVD_1_MASK                       GENMASK(31, 18)
+#define C2H_INTR_H2C_REQ_CNT_MASK                          GENMASK(17, 0)
+#define EQDMA_C2H_INTR_C2H_MM_REQ_ADDR                     0xB58
+#define C2H_INTR_C2H_MM_REQ_RSVD_1_MASK                    GENMASK(31, 18)
+#define C2H_INTR_C2H_MM_REQ_CNT_MASK                       GENMASK(17, 0)
+#define EQDMA_C2H_INTR_ERR_INT_REQ_ADDR                    0xB5C
+#define C2H_INTR_ERR_INT_REQ_RSVD_1_MASK                   GENMASK(31, 18)
+#define C2H_INTR_ERR_INT_REQ_CNT_MASK                      GENMASK(17, 0)
+#define EQDMA_C2H_INTR_C2H_ST_REQ_ADDR                     0xB60
+#define C2H_INTR_C2H_ST_REQ_RSVD_1_MASK                    GENMASK(31, 18)
+#define C2H_INTR_C2H_ST_REQ_CNT_MASK                       GENMASK(17, 0)
+#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_ADDR        0xB64
+#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_RSVD_1_MASK       GENMASK(31, 18)
+#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_CNT_MASK          GENMASK(17, 0)
+#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_ADDR       0xB68
+#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_RSVD_1_MASK      GENMASK(31, 18)
+#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_CNT_MASK         GENMASK(17, 0)
+#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_ADDR    0xB6C
+#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_RSVD_1_MASK   GENMASK(31, 18)
+#define C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_CNT_MASK      GENMASK(17, 0)
+#define EQDMA_C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_ADDR      0xB70
+#define C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_RSVD_1_MASK     GENMASK(31, 18)
+#define C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_CNT_MASK        GENMASK(17, 0)
+#define EQDMA_C2H_INTR_C2H_ST_MSIX_ACK_ADDR                0xB74
+#define C2H_INTR_C2H_ST_MSIX_ACK_RSVD_1_MASK               GENMASK(31, 18)
+#define C2H_INTR_C2H_ST_MSIX_ACK_CNT_MASK                  GENMASK(17, 0)
+#define EQDMA_C2H_INTR_C2H_ST_MSIX_FAIL_ADDR               0xB78
+#define C2H_INTR_C2H_ST_MSIX_FAIL_RSVD_1_MASK              GENMASK(31, 18)
+#define C2H_INTR_C2H_ST_MSIX_FAIL_CNT_MASK                 GENMASK(17, 0)
+#define EQDMA_C2H_INTR_C2H_ST_NO_MSIX_ADDR                 0xB7C
+#define C2H_INTR_C2H_ST_NO_MSIX_RSVD_1_MASK                GENMASK(31, 18)
+#define C2H_INTR_C2H_ST_NO_MSIX_CNT_MASK                   GENMASK(17, 0)
+#define EQDMA_C2H_INTR_C2H_ST_CTXT_INVAL_ADDR              0xB80
+#define C2H_INTR_C2H_ST_CTXT_INVAL_RSVD_1_MASK             GENMASK(31, 18)
+#define C2H_INTR_C2H_ST_CTXT_INVAL_CNT_MASK                GENMASK(17, 0)
+#define EQDMA_C2H_STAT_WR_CMP_ADDR                         0xB84
+#define C2H_STAT_WR_CMP_RSVD_1_MASK                        GENMASK(31, 18)
+#define C2H_STAT_WR_CMP_CNT_MASK                           GENMASK(17, 0)
+/* DMA engine debug snapshots 4-5 (ENG_4 layout mirrors ENG_3). */
+#define EQDMA_C2H_STAT_DBG_DMA_ENG_4_ADDR                  0xB88
+#define C2H_STAT_DMA_ENG_4_RSVD_1_MASK                 GENMASK(31, 24)
+#define C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_CNT_MASK       GENMASK(23, 19)
+#define C2H_STAT_DMA_ENG_4_QID_FIFO_OUT_VLD_MASK       BIT(18)
+#define C2H_STAT_DMA_ENG_4_PLD_FIFO_OUT_VLD_MASK       BIT(17)
+#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_VLD_MASK    BIT(16)
+#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_EOP_MASK BIT(15)
+#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK BIT(14)
+#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_DROP_MASK BIT(13)
+#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_ERR_MASK BIT(12)
+#define C2H_STAT_DMA_ENG_4_DESC_CNT_FIFO_IN_RDY_MASK   BIT(11)
+#define C2H_STAT_DMA_ENG_4_DESC_RSP_FIFO_IN_RDY_MASK   BIT(10)
+#define C2H_STAT_DMA_ENG_4_PLD_PKT_ID_LARGER_0_MASK    BIT(9)
+#define C2H_STAT_DMA_ENG_4_WRQ_VLD_MASK                BIT(8)
+#define C2H_STAT_DMA_ENG_4_WRQ_RDY_MASK                BIT(7)
+#define C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_RDY_MASK       BIT(6)
+#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_DROP_MASK BIT(5)
+#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_ERR_MASK BIT(4)
+#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_MARKER_MASK BIT(3)
+#define C2H_STAT_DMA_ENG_4_WRQ_PACKET_PRE_EOR_MASK     BIT(2)
+#define C2H_STAT_DMA_ENG_4_WCP_FIFO_IN_RDY_MASK        BIT(1)
+#define C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_IN_RDY_MASK     BIT(0)
+#define EQDMA_C2H_STAT_DBG_DMA_ENG_5_ADDR                  0xB8C
+#define C2H_STAT_DMA_ENG_5_RSVD_1_MASK                 GENMASK(31, 30)
+#define C2H_STAT_DMA_ENG_5_WRB_SM_VIRT_CH_MASK         BIT(29)
+#define C2H_STAT_DMA_ENG_5_WRB_FIFO_IN_REQ_MASK        GENMASK(28, 24)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_CNT_MASK       GENMASK(23, 22)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_LEN_MASK  GENMASK(21, 6)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VIRT_CH_MASK BIT(5)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VAR_DESC_MASK BIT(4)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_DROP_REQ_MASK BIT(3)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_NUM_BUF_OV_MASK BIT(2)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_MARKER_MASK BIT(1)
+#define C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_HAS_CMPT_MASK BIT(0)
+/* Prefetch debug access: QID/target selector and the data window. */
+#define EQDMA_C2H_DBG_PFCH_QID_ADDR                        0xB90
+#define C2H_PFCH_QID_RSVD_1_MASK                       GENMASK(31, 16)
+#define C2H_PFCH_QID_ERR_CTXT_MASK                     BIT(15)
+#define C2H_PFCH_QID_TARGET_MASK                       GENMASK(14, 12)
+#define C2H_PFCH_QID_QID_OR_TAG_MASK                   GENMASK(11, 0)
+#define EQDMA_C2H_DBG_PFCH_ADDR                            0xB94
+#define C2H_PFCH_DATA_MASK                             GENMASK(31, 0)
+/* Interrupt engine debug: state-machine values. */
+#define EQDMA_C2H_INT_DBG_ADDR                             0xB98
+#define C2H_INT_RSVD_1_MASK                            GENMASK(31, 8)
+#define C2H_INT_INT_COAL_SM_MASK                       GENMASK(7, 4)
+#define C2H_INT_INT_SM_MASK                            GENMASK(3, 0)
+/* Acceptance and credit counters (18-bit counts). */
+#define EQDMA_C2H_STAT_IMM_ACCEPTED_ADDR                   0xB9C
+#define C2H_STAT_IMM_ACCEPTED_RSVD_1_MASK                  GENMASK(31, 18)
+#define C2H_STAT_IMM_ACCEPTED_CNT_MASK                     GENMASK(17, 0)
+#define EQDMA_C2H_STAT_MARKER_ACCEPTED_ADDR                0xBA0
+#define C2H_STAT_MARKER_ACCEPTED_RSVD_1_MASK               GENMASK(31, 18)
+#define C2H_STAT_MARKER_ACCEPTED_CNT_MASK                  GENMASK(17, 0)
+#define EQDMA_C2H_STAT_DISABLE_CMP_ACCEPTED_ADDR           0xBA4
+#define C2H_STAT_DISABLE_CMP_ACCEPTED_RSVD_1_MASK          GENMASK(31, 18)
+#define C2H_STAT_DISABLE_CMP_ACCEPTED_CNT_MASK             GENMASK(17, 0)
+#define EQDMA_C2H_PLD_FIFO_CRDT_CNT_ADDR                   0xBA8
+#define C2H_PLD_FIFO_CRDT_CNT_RSVD_1_MASK                  GENMASK(31, 18)
+#define C2H_PLD_FIFO_CRDT_CNT_CNT_MASK                     GENMASK(17, 0)
+#define EQDMA_C2H_INTR_DYN_REQ_ADDR                        0xBAC
+#define C2H_INTR_DYN_REQ_RSVD_1_MASK                       GENMASK(31, 18)
+#define C2H_INTR_DYN_REQ_CNT_MASK                          GENMASK(17, 0)
+#define EQDMA_C2H_INTR_DYN_MISC_ADDR                       0xBB0
+#define C2H_INTR_DYN_MISC_RSVD_1_MASK                      GENMASK(31, 18)
+#define C2H_INTR_DYN_MISC_CNT_MASK                         GENMASK(17, 0)
+/* Drop counters broken out by drop cause. */
+#define EQDMA_C2H_DROP_LEN_MISMATCH_ADDR                   0xBB4
+#define C2H_DROP_LEN_MISMATCH_RSVD_1_MASK                  GENMASK(31, 18)
+#define C2H_DROP_LEN_MISMATCH_CNT_MASK                     GENMASK(17, 0)
+#define EQDMA_C2H_DROP_DESC_RSP_LEN_ADDR                   0xBB8
+#define C2H_DROP_DESC_RSP_LEN_RSVD_1_MASK                  GENMASK(31, 18)
+#define C2H_DROP_DESC_RSP_LEN_CNT_MASK                     GENMASK(17, 0)
+#define EQDMA_C2H_DROP_QID_FIFO_LEN_ADDR                   0xBBC
+#define C2H_DROP_QID_FIFO_LEN_RSVD_1_MASK                  GENMASK(31, 18)
+#define C2H_DROP_QID_FIFO_LEN_CNT_MASK                     GENMASK(17, 0)
+#define EQDMA_C2H_DROP_PLD_CNT_ADDR                        0xBC0
+#define C2H_DROP_PLD_CNT_RSVD_1_MASK                       GENMASK(31, 18)
+#define C2H_DROP_PLD_CNT_CNT_MASK                          GENMASK(17, 0)
+/* Completion format registers 0-6: per-format bit locations of the
+ * descriptor-error flag and the color bit inside a CMPT entry.
+ */
+#define EQDMA_C2H_CMPT_FORMAT_0_ADDR                       0xBC4
+#define C2H_CMPT_FORMAT_0_DESC_ERR_LOC_MASK                GENMASK(31, 16)
+#define C2H_CMPT_FORMAT_0_COLOR_LOC_MASK                   GENMASK(15, 0)
+#define EQDMA_C2H_CMPT_FORMAT_1_ADDR                       0xBC8
+#define C2H_CMPT_FORMAT_1_DESC_ERR_LOC_MASK                GENMASK(31, 16)
+#define C2H_CMPT_FORMAT_1_COLOR_LOC_MASK                   GENMASK(15, 0)
+#define EQDMA_C2H_CMPT_FORMAT_2_ADDR                       0xBCC
+#define C2H_CMPT_FORMAT_2_DESC_ERR_LOC_MASK                GENMASK(31, 16)
+#define C2H_CMPT_FORMAT_2_COLOR_LOC_MASK                   GENMASK(15, 0)
+#define EQDMA_C2H_CMPT_FORMAT_3_ADDR                       0xBD0
+#define C2H_CMPT_FORMAT_3_DESC_ERR_LOC_MASK                GENMASK(31, 16)
+#define C2H_CMPT_FORMAT_3_COLOR_LOC_MASK                   GENMASK(15, 0)
+#define EQDMA_C2H_CMPT_FORMAT_4_ADDR                       0xBD4
+#define C2H_CMPT_FORMAT_4_DESC_ERR_LOC_MASK                GENMASK(31, 16)
+#define C2H_CMPT_FORMAT_4_COLOR_LOC_MASK                   GENMASK(15, 0)
+#define EQDMA_C2H_CMPT_FORMAT_5_ADDR                       0xBD8
+#define C2H_CMPT_FORMAT_5_DESC_ERR_LOC_MASK                GENMASK(31, 16)
+#define C2H_CMPT_FORMAT_5_COLOR_LOC_MASK                   GENMASK(15, 0)
+#define EQDMA_C2H_CMPT_FORMAT_6_ADDR                       0xBDC
+#define C2H_CMPT_FORMAT_6_DESC_ERR_LOC_MASK                GENMASK(31, 16)
+#define C2H_CMPT_FORMAT_6_COLOR_LOC_MASK                   GENMASK(15, 0)
+/* Prefetch cache and writeback coalesce buffer depths. */
+#define EQDMA_C2H_PFCH_CACHE_DEPTH_ADDR                    0xBE0
+#define C2H_PFCH_CACHE_DEPTH_MAX_STBUF_MASK                GENMASK(23, 16)
+#define C2H_PFCH_CACHE_DEPTH_MASK                         GENMASK(7, 0)
+#define EQDMA_C2H_WRB_COAL_BUF_DEPTH_ADDR                  0xBE4
+#define C2H_WRB_COAL_BUF_DEPTH_RSVD_1_MASK                 GENMASK(31, 8)
+#define C2H_WRB_COAL_BUF_DEPTH_BUFFER_MASK                 GENMASK(7, 0)
+/* Prefetch credit register (all fields reserved on this device). */
+#define EQDMA_C2H_PFCH_CRDT_ADDR                           0xBE8
+#define C2H_PFCH_CRDT_RSVD_1_MASK                          GENMASK(31, 1)
+#define C2H_PFCH_CRDT_RSVD_2_MASK                          BIT(0)
+/* Completion/payload acceptance and packet-id tracking registers. */
+#define EQDMA_C2H_STAT_HAS_CMPT_ACCEPTED_ADDR              0xBEC
+#define C2H_STAT_HAS_CMPT_ACCEPTED_RSVD_1_MASK             GENMASK(31, 18)
+#define C2H_STAT_HAS_CMPT_ACCEPTED_CNT_MASK                GENMASK(17, 0)
+#define EQDMA_C2H_STAT_HAS_PLD_ACCEPTED_ADDR               0xBF0
+#define C2H_STAT_HAS_PLD_ACCEPTED_RSVD_1_MASK              GENMASK(31, 18)
+#define C2H_STAT_HAS_PLD_ACCEPTED_CNT_MASK                 GENMASK(17, 0)
+#define EQDMA_C2H_PLD_PKT_ID_ADDR                          0xBF4
+#define C2H_PLD_PKT_ID_CMPT_WAIT_MASK                      GENMASK(31, 16)
+#define C2H_PLD_PKT_ID_DATA_MASK                           GENMASK(15, 0)
+#define EQDMA_C2H_PLD_PKT_ID_1_ADDR                        0xBF8
+#define C2H_PLD_PKT_ID_1_CMPT_WAIT_MASK                    GENMASK(31, 16)
+#define C2H_PLD_PKT_ID_1_DATA_MASK                         GENMASK(15, 0)
+#define EQDMA_C2H_DROP_PLD_CNT_1_ADDR                      0xBFC
+#define C2H_DROP_PLD_CNT_1_RSVD_1_MASK                     GENMASK(31, 18)
+#define C2H_DROP_PLD_CNT_1_CNT_MASK                        GENMASK(17, 0)
+/* H2C (host-to-card) error status and its enable mask. */
+#define EQDMA_H2C_ERR_STAT_ADDR                            0xE00
+#define H2C_ERR_STAT_RSVD_1_MASK                           GENMASK(31, 6)
+#define H2C_ERR_STAT_PAR_ERR_MASK                          BIT(5)
+#define H2C_ERR_STAT_SBE_MASK                              BIT(4)
+#define H2C_ERR_STAT_DBE_MASK                              BIT(3)
+#define H2C_ERR_STAT_NO_DMA_DS_MASK                        BIT(2)
+#define H2C_ERR_STAT_SDI_MRKR_REQ_MOP_ERR_MASK             BIT(1)
+#define H2C_ERR_STAT_ZERO_LEN_DS_MASK                      BIT(0)
+#define EQDMA_H2C_ERR_MASK_ADDR                            0xE04
+#define H2C_ERR_EN_MASK                          GENMASK(31, 0)
+/* First H2C error QID: queue and type of the first recorded error. */
+#define EQDMA_H2C_FIRST_ERR_QID_ADDR                       0xE08
+#define H2C_FIRST_ERR_QID_RSVD_1_MASK                      GENMASK(31, 20)
+#define H2C_FIRST_ERR_QID_ERR_TYPE_MASK                    GENMASK(19, 16)
+#define H2C_FIRST_ERR_QID_RSVD_2_MASK                      GENMASK(15, 13)
+#define H2C_FIRST_ERR_QID_QID_MASK                         GENMASK(12, 0)
+/* H2C debug registers 0-4: descriptor/request counters and internal
+ * FIFO / state-machine snapshots.
+ */
+#define EQDMA_H2C_DBG_REG0_ADDR                            0xE0C
+#define H2C_REG0_NUM_DSC_RCVD_MASK                     GENMASK(31, 16)
+#define H2C_REG0_NUM_WRB_SENT_MASK                     GENMASK(15, 0)
+#define EQDMA_H2C_DBG_REG1_ADDR                            0xE10
+#define H2C_REG1_NUM_REQ_SENT_MASK                     GENMASK(31, 16)
+#define H2C_REG1_NUM_CMP_SENT_MASK                     GENMASK(15, 0)
+#define EQDMA_H2C_DBG_REG2_ADDR                            0xE14
+#define H2C_REG2_RSVD_1_MASK                           GENMASK(31, 16)
+#define H2C_REG2_NUM_ERR_DSC_RCVD_MASK                 GENMASK(15, 0)
+#define EQDMA_H2C_DBG_REG3_ADDR                            0xE18
+#define H2C_REG3_RSVD_1_MASK                           BIT(31)
+#define H2C_REG3_DSCO_FIFO_EMPTY_MASK                  BIT(30)
+#define H2C_REG3_DSCO_FIFO_FULL_MASK                   BIT(29)
+#define H2C_REG3_CUR_RC_STATE_MASK                     GENMASK(28, 26)
+#define H2C_REG3_RDREQ_LINES_MASK                      GENMASK(25, 16)
+#define H2C_REG3_RDATA_LINES_AVAIL_MASK                GENMASK(15, 6)
+#define H2C_REG3_PEND_FIFO_EMPTY_MASK                  BIT(5)
+#define H2C_REG3_PEND_FIFO_FULL_MASK                   BIT(4)
+#define H2C_REG3_CUR_RQ_STATE_MASK                     GENMASK(3, 2)
+#define H2C_REG3_DSCI_FIFO_FULL_MASK                   BIT(1)
+#define H2C_REG3_DSCI_FIFO_EMPTY_MASK                  BIT(0)
+#define EQDMA_H2C_DBG_REG4_ADDR                            0xE1C
+#define H2C_REG4_RDREQ_ADDR_MASK                       GENMASK(31, 0)
+/* H2C fatal error enable. */
+#define EQDMA_H2C_FATAL_ERR_EN_ADDR                        0xE20
+#define H2C_FATAL_ERR_EN_RSVD_1_MASK                       GENMASK(31, 1)
+#define H2C_FATAL_ERR_EN_H2C_MASK                          BIT(0)
+/* H2C request throttling toward PCIe and AXI-MM: enable bits plus
+ * request/data thresholds.
+ */
+#define EQDMA_H2C_REQ_THROT_PCIE_ADDR                      0xE24
+#define H2C_REQ_THROT_PCIE_EN_REQ_MASK                     BIT(31)
+#define H2C_REQ_THROT_PCIE_MASK                           GENMASK(30, 19)
+#define H2C_REQ_THROT_PCIE_EN_DATA_MASK                    BIT(18)
+#define H2C_REQ_THROT_PCIE_DATA_THRESH_MASK                GENMASK(17, 0)
+/* H2C align-engine debug: packet counter (upper 16 bits undeclared —
+ * presumably reserved; confirm against the hardware spec).
+ */
+#define EQDMA_H2C_ALN_DBG_REG0_ADDR                        0xE28
+#define H2C_ALN_REG0_NUM_PKT_SENT_MASK                 GENMASK(15, 0)
+#define EQDMA_H2C_REQ_THROT_AXIMM_ADDR                     0xE2C
+#define H2C_REQ_THROT_AXIMM_EN_REQ_MASK                    BIT(31)
+#define H2C_REQ_THROT_AXIMM_MASK                          GENMASK(30, 19)
+#define H2C_REQ_THROT_AXIMM_EN_DATA_MASK                   BIT(18)
+#define H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK               GENMASK(17, 0)
+/* C2H memory-mapped (MM) channel: control (run/error-code enable),
+ * status, and completed-descriptor count.
+ */
+#define EQDMA_C2H_MM_CTL_ADDR                              0x1004
+#define C2H_MM_CTL_RESERVED1_MASK                          GENMASK(31, 9)
+#define C2H_MM_CTL_ERRC_EN_MASK                            BIT(8)
+#define C2H_MM_CTL_RESERVED0_MASK                          GENMASK(7, 1)
+#define C2H_MM_CTL_RUN_MASK                                BIT(0)
+#define EQDMA_C2H_MM_STATUS_ADDR                           0x1040
+#define C2H_MM_STATUS_RSVD_1_MASK                          GENMASK(31, 1)
+#define C2H_MM_STATUS_RUN_MASK                             BIT(0)
+#define EQDMA_C2H_MM_CMPL_DESC_CNT_ADDR                    0x1048
+#define C2H_MM_CMPL_DESC_CNT_C2H_CO_MASK                   GENMASK(31, 0)
+/* MM error-code enable mask and the captured error code/info. */
+#define EQDMA_C2H_MM_ERR_CODE_ENABLE_MASK_ADDR             0x1054
+#define C2H_MM_ERR_CODE_ENABLE_RESERVED1_MASK         BIT(31)
+#define C2H_MM_ERR_CODE_ENABLE_WR_UC_RAM_MASK         BIT(30)
+#define C2H_MM_ERR_CODE_ENABLE_WR_UR_MASK             BIT(29)
+#define C2H_MM_ERR_CODE_ENABLE_WR_FLR_MASK            BIT(28)
+#define C2H_MM_ERR_CODE_ENABLE_RESERVED0_MASK         GENMASK(27, 2)
+#define C2H_MM_ERR_CODE_ENABLE_RD_SLV_ERR_MASK        BIT(1)
+#define C2H_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK        BIT(0)
+#define EQDMA_C2H_MM_ERR_CODE_ADDR                         0x1058
+#define C2H_MM_ERR_CODE_RESERVED1_MASK                     GENMASK(31, 28)
+#define C2H_MM_ERR_CODE_CIDX_MASK                          GENMASK(27, 12)
+#define C2H_MM_ERR_CODE_RESERVED0_MASK                     GENMASK(11, 10)
+#define C2H_MM_ERR_CODE_SUB_TYPE_MASK                      GENMASK(9, 5)
+#define C2H_MM_ERR_CODE_MASK                              GENMASK(4, 0)
+#define EQDMA_C2H_MM_ERR_INFO_ADDR                         0x105C
+#define C2H_MM_ERR_INFO_VALID_MASK                         BIT(31)
+#define C2H_MM_ERR_INFO_SEL_MASK                           BIT(30)
+#define C2H_MM_ERR_INFO_RSVD_1_MASK                        GENMASK(29, 24)
+#define C2H_MM_ERR_INFO_QID_MASK                           GENMASK(23, 0)
+/* MM performance monitor: control (start/clear), cycle counters
+ * (42-bit value split across CNT0/CNT1) and data counters (same split).
+ */
+#define EQDMA_C2H_MM_PERF_MON_CTL_ADDR                     0x10C0
+#define C2H_MM_PERF_MON_CTL_RSVD_1_MASK                    GENMASK(31, 4)
+#define C2H_MM_PERF_MON_CTL_IMM_START_MASK                 BIT(3)
+#define C2H_MM_PERF_MON_CTL_RUN_START_MASK                 BIT(2)
+#define C2H_MM_PERF_MON_CTL_IMM_CLEAR_MASK                 BIT(1)
+#define C2H_MM_PERF_MON_CTL_RUN_CLEAR_MASK                 BIT(0)
+#define EQDMA_C2H_MM_PERF_MON_CYCLE_CNT0_ADDR              0x10C4
+#define C2H_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK            GENMASK(31, 0)
+#define EQDMA_C2H_MM_PERF_MON_CYCLE_CNT1_ADDR              0x10C8
+#define C2H_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK             GENMASK(31, 10)
+#define C2H_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK            GENMASK(9, 0)
+#define EQDMA_C2H_MM_PERF_MON_DATA_CNT0_ADDR               0x10CC
+#define C2H_MM_PERF_MON_DATA_CNT0_DCNT_MASK                GENMASK(31, 0)
+#define EQDMA_C2H_MM_PERF_MON_DATA_CNT1_ADDR               0x10D0
+#define C2H_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK              GENMASK(31, 10)
+#define C2H_MM_PERF_MON_DATA_CNT1_DCNT_MASK                GENMASK(9, 0)
+/* MM debug register (field masks follow after this chunk). */
+#define EQDMA_C2H_MM_DBG_ADDR                              0x10E8
+#define C2H_MM_RSVD_1_MASK                             GENMASK(31, 24)
+#define C2H_MM_RRQ_ENTRIES_MASK                        GENMASK(23, 17)
+#define C2H_MM_DAT_FIFO_SPC_MASK                       GENMASK(16, 7)
+#define C2H_MM_RD_STALL_MASK                           BIT(6)
+#define C2H_MM_RRQ_FIFO_FI_MASK                        BIT(5)
+#define C2H_MM_WR_STALL_MASK                           BIT(4)
+#define C2H_MM_WRQ_FIFO_FI_MASK                        BIT(3)
+#define C2H_MM_WBK_STALL_MASK                          BIT(2)
+#define C2H_MM_DSC_FIFO_EP_MASK                        BIT(1)
+#define C2H_MM_DSC_FIFO_FL_MASK                        BIT(0)
+#define EQDMA_H2C_MM_CTL_ADDR                              0x1204
+#define H2C_MM_CTL_RESERVED1_MASK                          GENMASK(31, 9)
+#define H2C_MM_CTL_ERRC_EN_MASK                            BIT(8)
+#define H2C_MM_CTL_RESERVED0_MASK                          GENMASK(7, 1)
+#define H2C_MM_CTL_RUN_MASK                                BIT(0)
+#define EQDMA_H2C_MM_STATUS_ADDR                           0x1240
+#define H2C_MM_STATUS_RSVD_1_MASK                          GENMASK(31, 1)
+#define H2C_MM_STATUS_RUN_MASK                             BIT(0)
+#define EQDMA_H2C_MM_CMPL_DESC_CNT_ADDR                    0x1248
+#define H2C_MM_CMPL_DESC_CNT_H2C_CO_MASK                   GENMASK(31, 0)
+#define EQDMA_H2C_MM_ERR_CODE_ENABLE_MASK_ADDR             0x1254
+#define H2C_MM_ERR_CODE_ENABLE_RESERVED5_MASK         GENMASK(31, 30)
+#define H2C_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK        BIT(29)
+#define H2C_MM_ERR_CODE_ENABLE_WR_DEC_ERR_MASK        BIT(28)
+#define H2C_MM_ERR_CODE_ENABLE_RESERVED4_MASK         GENMASK(27, 23)
+#define H2C_MM_ERR_CODE_ENABLE_RD_RQ_DIS_ERR_MASK     BIT(22)
+#define H2C_MM_ERR_CODE_ENABLE_RESERVED3_MASK         GENMASK(21, 17)
+#define H2C_MM_ERR_CODE_ENABLE_RD_DAT_POISON_ERR_MASK BIT(16)
+#define H2C_MM_ERR_CODE_ENABLE_RESERVED2_MASK         GENMASK(15, 9)
+#define H2C_MM_ERR_CODE_ENABLE_RD_FLR_ERR_MASK        BIT(8)
+#define H2C_MM_ERR_CODE_ENABLE_RESERVED1_MASK         GENMASK(7, 6)
+#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_ADR_ERR_MASK    BIT(5)
+#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_PARA_MASK       BIT(4)
+#define H2C_MM_ERR_CODE_ENABLE_RD_HDR_BYTE_ERR_MASK   BIT(3)
+#define H2C_MM_ERR_CODE_ENABLE_RD_UR_CA_MASK          BIT(2)
+#define H2C_MM_ERR_CODE_ENABLE_RD_HRD_POISON_ERR_MASK BIT(1)
+#define H2C_MM_ERR_CODE_ENABLE_RESERVED0_MASK         BIT(0)
+#define EQDMA_H2C_MM_ERR_CODE_ADDR                         0x1258
+#define H2C_MM_ERR_CODE_RSVD_1_MASK                        GENMASK(31, 28)
+#define H2C_MM_ERR_CODE_CIDX_MASK                          GENMASK(27, 12)
+#define H2C_MM_ERR_CODE_RESERVED0_MASK                     GENMASK(11, 10)
+#define H2C_MM_ERR_CODE_SUB_TYPE_MASK                      GENMASK(9, 5)
+#define H2C_MM_ERR_CODE_MASK                              GENMASK(4, 0)
+#define EQDMA_H2C_MM_ERR_INFO_ADDR                         0x125C
+#define H2C_MM_ERR_INFO_VALID_MASK                         BIT(31)
+#define H2C_MM_ERR_INFO_SEL_MASK                           BIT(30)
+#define H2C_MM_ERR_INFO_RSVD_1_MASK                        GENMASK(29, 24)
+#define H2C_MM_ERR_INFO_QID_MASK                           GENMASK(23, 0)
+#define EQDMA_H2C_MM_PERF_MON_CTL_ADDR                     0x12C0
+#define H2C_MM_PERF_MON_CTL_RSVD_1_MASK                    GENMASK(31, 4)
+#define H2C_MM_PERF_MON_CTL_IMM_START_MASK                 BIT(3)
+#define H2C_MM_PERF_MON_CTL_RUN_START_MASK                 BIT(2)
+#define H2C_MM_PERF_MON_CTL_IMM_CLEAR_MASK                 BIT(1)
+#define H2C_MM_PERF_MON_CTL_RUN_CLEAR_MASK                 BIT(0)
+#define EQDMA_H2C_MM_PERF_MON_CYCLE_CNT0_ADDR              0x12C4
+#define H2C_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK            GENMASK(31, 0)
+#define EQDMA_H2C_MM_PERF_MON_CYCLE_CNT1_ADDR              0x12C8
+#define H2C_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK             GENMASK(31, 10)
+#define H2C_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK            GENMASK(9, 0)
+#define EQDMA_H2C_MM_PERF_MON_DATA_CNT0_ADDR               0x12CC
+#define H2C_MM_PERF_MON_DATA_CNT0_DCNT_MASK                GENMASK(31, 0)
+#define EQDMA_H2C_MM_PERF_MON_DATA_CNT1_ADDR               0x12D0
+#define H2C_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK              GENMASK(31, 10)
+#define H2C_MM_PERF_MON_DATA_CNT1_DCNT_MASK                GENMASK(9, 0)
+#define EQDMA_H2C_MM_DBG_ADDR                              0x12E8
+#define H2C_MM_RSVD_1_MASK                             GENMASK(31, 24)
+#define H2C_MM_RRQ_ENTRIES_MASK                        GENMASK(23, 17)
+#define H2C_MM_DAT_FIFO_SPC_MASK                       GENMASK(16, 7)
+#define H2C_MM_RD_STALL_MASK                           BIT(6)
+#define H2C_MM_RRQ_FIFO_FI_MASK                        BIT(5)
+#define H2C_MM_WR_STALL_MASK                           BIT(4)
+#define H2C_MM_WRQ_FIFO_FI_MASK                        BIT(3)
+#define H2C_MM_WBK_STALL_MASK                          BIT(2)
+#define H2C_MM_DSC_FIFO_EP_MASK                        BIT(1)
+#define H2C_MM_DSC_FIFO_FL_MASK                        BIT(0)
+#define EQDMA_C2H_CRDT_COAL_CFG_1_ADDR                     0x1400
+#define C2H_CRDT_COAL_CFG_1_RSVD_1_MASK                    GENMASK(31, 18)
+#define C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK               GENMASK(17, 10)
+#define C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK                  GENMASK(9, 0)
+#define EQDMA_C2H_CRDT_COAL_CFG_2_ADDR                     0x1404
+#define C2H_CRDT_COAL_CFG_2_RSVD_1_MASK                    GENMASK(31, 24)
+#define C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK                   GENMASK(23, 16)
+#define C2H_CRDT_COAL_CFG_2_RESERVED1_MASK                 GENMASK(15, 11)
+#define C2H_CRDT_COAL_CFG_2_NT_TH_MASK                     GENMASK(10, 0)
+#define EQDMA_C2H_PFCH_BYP_QID_ADDR                        0x1408
+#define C2H_PFCH_BYP_QID_RSVD_1_MASK                       GENMASK(31, 12)
+#define C2H_PFCH_BYP_QID_MASK                             GENMASK(11, 0)
+#define EQDMA_C2H_PFCH_BYP_TAG_ADDR                        0x140C
+#define C2H_PFCH_BYP_TAG_RSVD_1_MASK                       GENMASK(31, 20)
+#define C2H_PFCH_BYP_TAG_BYP_QID_MASK                      GENMASK(19, 8)
+#define C2H_PFCH_BYP_TAG_RSVD_2_MASK                       BIT(7)
+#define C2H_PFCH_BYP_TAG_MASK                             GENMASK(6, 0)
+#define EQDMA_C2H_WATER_MARK_ADDR                          0x1500
+#define C2H_WATER_MARK_HIGH_WM_MASK                        GENMASK(31, 16)
+#define C2H_WATER_MARK_LOW_WM_MASK                         GENMASK(15, 0)
+#define SW_IND_CTXT_DATA_W7_VIRTIO_DSC_BASE_H_MASK        GENMASK(10, 0)
+#define SW_IND_CTXT_DATA_W6_VIRTIO_DSC_BASE_M_MASK        GENMASK(31, 0)
+#define SW_IND_CTXT_DATA_W5_VIRTIO_DSC_BASE_L_MASK        GENMASK(31, 11)
+#define SW_IND_CTXT_DATA_W5_PASID_EN_MASK                 BIT(10)
+#define SW_IND_CTXT_DATA_W5_PASID_H_MASK                  GENMASK(9, 0)
+#define SW_IND_CTXT_DATA_W4_PASID_L_MASK                  GENMASK(31, 20)
+#define SW_IND_CTXT_DATA_W4_HOST_ID_MASK                  GENMASK(19, 16)
+#define SW_IND_CTXT_DATA_W4_IRQ_BYP_MASK                  BIT(15)
+#define SW_IND_CTXT_DATA_W4_PACK_BYP_OUT_MASK             BIT(14)
+#define SW_IND_CTXT_DATA_W4_VIRTIO_EN_MASK                BIT(13)
+#define SW_IND_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK           BIT(12)
+#define SW_IND_CTXT_DATA_W4_INT_AGGR_MASK                 BIT(11)
+#define SW_IND_CTXT_DATA_W4_VEC_MASK                      GENMASK(10, 0)
+#define SW_IND_CTXT_DATA_W3_DSC_BASE_H_MASK               GENMASK(31, 0)
+#define SW_IND_CTXT_DATA_W2_DSC_BASE_L_MASK               GENMASK(31, 0)
+#define SW_IND_CTXT_DATA_W1_IS_MM_MASK                    BIT(31)
+#define SW_IND_CTXT_DATA_W1_MRKR_DIS_MASK                 BIT(30)
+#define SW_IND_CTXT_DATA_W1_IRQ_REQ_MASK                  BIT(29)
+#define SW_IND_CTXT_DATA_W1_ERR_WB_SENT_MASK              BIT(28)
+#define SW_IND_CTXT_DATA_W1_ERR_MASK                      GENMASK(27, 26)
+#define SW_IND_CTXT_DATA_W1_IRQ_NO_LAST_MASK              BIT(25)
+#define SW_IND_CTXT_DATA_W1_PORT_ID_MASK                  GENMASK(24, 22)
+#define SW_IND_CTXT_DATA_W1_IRQ_EN_MASK                   BIT(21)
+#define SW_IND_CTXT_DATA_W1_WBK_EN_MASK                   BIT(20)
+#define SW_IND_CTXT_DATA_W1_MM_CHN_MASK                   BIT(19)
+#define SW_IND_CTXT_DATA_W1_BYPASS_MASK                   BIT(18)
+#define SW_IND_CTXT_DATA_W1_DSC_SZ_MASK                   GENMASK(17, 16)
+#define SW_IND_CTXT_DATA_W1_RNG_SZ_MASK                   GENMASK(15, 12)
+#define SW_IND_CTXT_DATA_W1_RSVD_1_MASK                   GENMASK(11, 9)
+#define SW_IND_CTXT_DATA_W1_FETCH_MAX_MASK                GENMASK(8, 5)
+#define SW_IND_CTXT_DATA_W1_AT_MASK                       BIT(4)
+#define SW_IND_CTXT_DATA_W1_WBI_INTVL_EN_MASK             BIT(3)
+#define SW_IND_CTXT_DATA_W1_WBI_CHK_MASK                  BIT(2)
+#define SW_IND_CTXT_DATA_W1_FCRD_EN_MASK                  BIT(1)
+#define SW_IND_CTXT_DATA_W1_QEN_MASK                      BIT(0)
+#define SW_IND_CTXT_DATA_W0_RSV_MASK                      GENMASK(31, 29)
+#define SW_IND_CTXT_DATA_W0_FNC_MASK                      GENMASK(28, 17)
+#define SW_IND_CTXT_DATA_W0_IRQ_ARM_MASK                  BIT(16)
+#define SW_IND_CTXT_DATA_W0_PIDX_MASK                     GENMASK(15, 0)
+#define HW_IND_CTXT_DATA_W1_RSVD_1_MASK                   BIT(15)
+#define HW_IND_CTXT_DATA_W1_FETCH_PND_MASK                GENMASK(14, 11)
+#define HW_IND_CTXT_DATA_W1_EVT_PND_MASK                  BIT(10)
+#define HW_IND_CTXT_DATA_W1_IDL_STP_B_MASK                BIT(9)
+#define HW_IND_CTXT_DATA_W1_DSC_PND_MASK                  BIT(8)
+#define HW_IND_CTXT_DATA_W1_RSVD_2_MASK                   GENMASK(7, 0)
+#define HW_IND_CTXT_DATA_W0_CRD_USE_MASK                  GENMASK(31, 16)
+#define HW_IND_CTXT_DATA_W0_CIDX_MASK                     GENMASK(15, 0)
+#define CRED_CTXT_DATA_W0_RSVD_1_MASK                     GENMASK(31, 16)
+#define CRED_CTXT_DATA_W0_CREDT_MASK                      GENMASK(15, 0)
+#define PREFETCH_CTXT_DATA_W1_VALID_MASK                  BIT(13)
+#define PREFETCH_CTXT_DATA_W1_SW_CRDT_H_MASK              GENMASK(12, 0)
+#define PREFETCH_CTXT_DATA_W0_SW_CRDT_L_MASK              GENMASK(31, 29)
+#define PREFETCH_CTXT_DATA_W0_PFCH_MASK                   BIT(28)
+#define PREFETCH_CTXT_DATA_W0_PFCH_EN_MASK                BIT(27)
+#define PREFETCH_CTXT_DATA_W0_ERR_MASK                    BIT(26)
+#define PREFETCH_CTXT_DATA_W0_RSVD_MASK                   GENMASK(25, 22)
+#define PREFETCH_CTXT_DATA_W0_PFCH_NEED_MASK              GENMASK(21, 16)
+#define PREFETCH_CTXT_DATA_W0_NUM_PFCH_MASK               GENMASK(15, 10)
+#define PREFETCH_CTXT_DATA_W0_VIRTIO_MASK                 BIT(9)
+#define PREFETCH_CTXT_DATA_W0_VAR_DESC_MASK               BIT(8)
+#define PREFETCH_CTXT_DATA_W0_PORT_ID_MASK                GENMASK(7, 5)
+#define PREFETCH_CTXT_DATA_W0_BUF_SZ_IDX_MASK             GENMASK(4, 1)
+#define PREFETCH_CTXT_DATA_W0_BYPASS_MASK                 BIT(0)
+#define CMPL_CTXT_DATA_W6_RSVD_1_H_MASK                   GENMASK(7, 0)
+#define CMPL_CTXT_DATA_W5_RSVD_1_L_MASK                   GENMASK(31, 23)
+#define CMPL_CTXT_DATA_W5_PORT_ID_MASK                    GENMASK(22, 20)
+#define CMPL_CTXT_DATA_W5_SH_CMPT_MASK                    BIT(19)
+#define CMPL_CTXT_DATA_W5_VIO_EOP_MASK                    BIT(18)
+#define CMPL_CTXT_DATA_W5_BADDR4_LOW_MASK                 GENMASK(17, 14)
+#define CMPL_CTXT_DATA_W5_PASID_EN_MASK                   BIT(13)
+#define CMPL_CTXT_DATA_W5_PASID_H_MASK                    GENMASK(12, 0)
+#define CMPL_CTXT_DATA_W4_PASID_L_MASK                    GENMASK(31, 23)
+#define CMPL_CTXT_DATA_W4_HOST_ID_MASK                    GENMASK(22, 19)
+#define CMPL_CTXT_DATA_W4_DIR_C2H_MASK                    BIT(18)
+#define CMPL_CTXT_DATA_W4_VIO_MASK                        BIT(17)
+#define CMPL_CTXT_DATA_W4_DIS_INTR_ON_VF_MASK             BIT(16)
+#define CMPL_CTXT_DATA_W4_INT_AGGR_MASK                   BIT(15)
+#define CMPL_CTXT_DATA_W4_VEC_MASK                        GENMASK(14, 4)
+#define CMPL_CTXT_DATA_W4_AT_MASK                         BIT(3)
+#define CMPL_CTXT_DATA_W4_OVF_CHK_DIS_MASK                BIT(2)
+#define CMPL_CTXT_DATA_W4_FULL_UPD_MASK                   BIT(1)
+#define CMPL_CTXT_DATA_W4_TIMER_RUNNING_MASK              BIT(0)
+#define CMPL_CTXT_DATA_W3_USER_TRIG_PEND_MASK             BIT(31)
+#define CMPL_CTXT_DATA_W3_ERR_MASK                        GENMASK(30, 29)
+#define CMPL_CTXT_DATA_W3_VALID_MASK                      BIT(28)
+#define CMPL_CTXT_DATA_W3_CIDX_MASK                       GENMASK(27, 12)
+#define CMPL_CTXT_DATA_W3_PIDX_H_MASK                     GENMASK(11, 0)
+#define CMPL_CTXT_DATA_W2_PIDX_L_MASK                     GENMASK(31, 28)
+#define CMPL_CTXT_DATA_W2_DESC_SIZE_MASK                  GENMASK(27, 26)
+#define CMPL_CTXT_DATA_W2_BADDR4_HIGH_H_MASK              GENMASK(25, 0)
+#define CMPL_CTXT_DATA_W1_BADDR4_HIGH_L_MASK              GENMASK(31, 0)
+#define CMPL_CTXT_DATA_W0_QSIZE_IX_MASK                   GENMASK(31, 28)
+#define CMPL_CTXT_DATA_W0_COLOR_MASK                      BIT(27)
+#define CMPL_CTXT_DATA_W0_INT_ST_MASK                     GENMASK(26, 25)
+#define CMPL_CTXT_DATA_W0_TIMER_IX_MASK                   GENMASK(24, 21)
+#define CMPL_CTXT_DATA_W0_CNTER_IX_MASK                   GENMASK(20, 17)
+#define CMPL_CTXT_DATA_W0_FNC_ID_MASK                     GENMASK(16, 5)
+#define CMPL_CTXT_DATA_W0_TRIG_MODE_MASK                  GENMASK(4, 2)
+#define CMPL_CTXT_DATA_W0_EN_INT_MASK                     BIT(1)
+#define CMPL_CTXT_DATA_W0_EN_STAT_DESC_MASK               BIT(0)
+#define INTR_CTXT_DATA_W3_FUNC_MASK                       GENMASK(29, 18)
+#define INTR_CTXT_DATA_W3_RSVD_MASK                       GENMASK(17, 14)
+#define INTR_CTXT_DATA_W3_PASID_EN_MASK                   BIT(13)
+#define INTR_CTXT_DATA_W3_PASID_H_MASK                    GENMASK(12, 0)
+#define INTR_CTXT_DATA_W2_PASID_L_MASK                    GENMASK(31, 23)
+#define INTR_CTXT_DATA_W2_HOST_ID_MASK                    GENMASK(22, 19)
+#define INTR_CTXT_DATA_W2_AT_MASK                         BIT(18)
+#define INTR_CTXT_DATA_W2_PIDX_MASK                       GENMASK(17, 6)
+#define INTR_CTXT_DATA_W2_PAGE_SIZE_MASK                  GENMASK(5, 3)
+#define INTR_CTXT_DATA_W2_BADDR_4K_H_MASK                 GENMASK(2, 0)
+#define INTR_CTXT_DATA_W1_BADDR_4K_M_MASK                 GENMASK(31, 0)
+#define INTR_CTXT_DATA_W0_BADDR_4K_L_MASK                 GENMASK(31, 15)
+#define INTR_CTXT_DATA_W0_COLOR_MASK                      BIT(14)
+#define INTR_CTXT_DATA_W0_INT_ST_MASK                     BIT(13)
+#define INTR_CTXT_DATA_W0_RSVD1_MASK                      BIT(12)
+#define INTR_CTXT_DATA_W0_VEC_MASK                        GENMASK(11, 1)
+#define INTR_CTXT_DATA_W0_VALID_MASK                      BIT(0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c
new file mode 100644
index 0000000000..e8b2762f54
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c
@@ -0,0 +1,3908 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#include "eqdma_soft_reg.h"
+#include "qdma_reg_dump.h"
+
+#ifdef ENABLE_WPP_TRACING
+#include "eqdma_soft_reg_dump.tmh"
+#endif
+
+static struct regfield_info
+	cfg_blk_identifier_field_info[] = {	/* CFG_BLK_IDENTIFIER register: field-name/bit-mask pairs for the register-dump helpers */
+	{"CFG_BLK_IDENTIFIER",
+		CFG_BLK_IDENTIFIER_MASK},
+	{"CFG_BLK_IDENTIFIER_1",
+		CFG_BLK_IDENTIFIER_1_MASK},
+	{"CFG_BLK_IDENTIFIER_RSVD_1",
+		CFG_BLK_IDENTIFIER_RSVD_1_MASK},
+	{"CFG_BLK_IDENTIFIER_VERSION",
+		CFG_BLK_IDENTIFIER_VERSION_MASK},
+};
+
+static struct regfield_info
+	cfg_blk_pcie_max_pld_size_field_info[] = {	/* CFG_BLK_PCIE_MAX_PLD_SIZE register fields (name/mask) */
+	{"CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_1",
+		CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_1_MASK},
+	{"CFG_BLK_PCIE_MAX_PLD_SIZE_PROG",
+		CFG_BLK_PCIE_MAX_PLD_SIZE_PROG_MASK},
+	{"CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_2",
+		CFG_BLK_PCIE_MAX_PLD_SIZE_RSVD_2_MASK},
+	{"CFG_BLK_PCIE_MAX_PLD_SIZE_ISSUED",
+		CFG_BLK_PCIE_MAX_PLD_SIZE_ISSUED_MASK},
+};
+
+
+static struct regfield_info
+	cfg_blk_pcie_max_read_req_size_field_info[] = {	/* CFG_BLK_PCIE_MAX_READ_REQ_SIZE register fields (name/mask) */
+	{"CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_1",
+		CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_1_MASK},
+	{"CFG_BLK_PCIE_MAX_READ_REQ_SIZE_PROG",
+		CFG_BLK_PCIE_MAX_READ_REQ_SIZE_PROG_MASK},
+	{"CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_2",
+		CFG_BLK_PCIE_MAX_READ_REQ_SIZE_RSVD_2_MASK},
+	{"CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ISSUED",
+		CFG_BLK_PCIE_MAX_READ_REQ_SIZE_ISSUED_MASK},
+};
+
+static struct regfield_info
+	cfg_blk_system_id_field_info[] = {	/* CFG_BLK_SYSTEM_ID register fields (name/mask) */
+	{"CFG_BLK_SYSTEM_ID_RSVD_1",
+		CFG_BLK_SYSTEM_ID_RSVD_1_MASK},
+	{"CFG_BLK_SYSTEM_ID_INST_TYPE",
+		CFG_BLK_SYSTEM_ID_INST_TYPE_MASK},
+	{"CFG_BLK_SYSTEM_ID",
+		CFG_BLK_SYSTEM_ID_MASK},
+};
+
+static struct regfield_info
+	cfg_blk_msix_enable_field_info[] = {	/* CFG_BLK_MSIX_ENABLE register (single field) */
+	{"CFG_BLK_MSIX_ENABLE",
+		CFG_BLK_MSIX_ENABLE_MASK},
+};
+
+static struct regfield_info
+	cfg_pcie_data_width_field_info[] = {	/* CFG_PCIE_DATA_WIDTH register fields (name/mask) */
+	{"CFG_PCIE_DATA_WIDTH_RSVD_1",
+		CFG_PCIE_DATA_WIDTH_RSVD_1_MASK},
+	{"CFG_PCIE_DATA_WIDTH_DATAPATH",
+		CFG_PCIE_DATA_WIDTH_DATAPATH_MASK},
+};
+
+static struct regfield_info
+	cfg_pcie_ctl_field_info[] = {	/* CFG_PCIE_CTL register fields (name/mask) */
+	{"CFG_PCIE_CTL_RSVD_1",
+		CFG_PCIE_CTL_RSVD_1_MASK},
+	{"CFG_PCIE_CTL_MGMT_AXIL_CTRL",
+		CFG_PCIE_CTL_MGMT_AXIL_CTRL_MASK},
+	{"CFG_PCIE_CTL_RSVD_2",
+		CFG_PCIE_CTL_RSVD_2_MASK},
+	{"CFG_PCIE_CTL_RRQ_DISABLE",
+		CFG_PCIE_CTL_RRQ_DISABLE_MASK},
+	{"CFG_PCIE_CTL_RELAXED_ORDERING",
+		CFG_PCIE_CTL_RELAXED_ORDERING_MASK},
+};
+
+static struct regfield_info
+	cfg_blk_msi_enable_field_info[] = {	/* CFG_BLK_MSI_ENABLE register (single field) */
+	{"CFG_BLK_MSI_ENABLE",
+		CFG_BLK_MSI_ENABLE_MASK},
+};
+
+static struct regfield_info
+	cfg_axi_user_max_pld_size_field_info[] = {	/* CFG_AXI_USER_MAX_PLD_SIZE register fields (name/mask) */
+	{"CFG_AXI_USER_MAX_PLD_SIZE_RSVD_1",
+		CFG_AXI_USER_MAX_PLD_SIZE_RSVD_1_MASK},
+	{"CFG_AXI_USER_MAX_PLD_SIZE_ISSUED",
+		CFG_AXI_USER_MAX_PLD_SIZE_ISSUED_MASK},
+	{"CFG_AXI_USER_MAX_PLD_SIZE_RSVD_2",
+		CFG_AXI_USER_MAX_PLD_SIZE_RSVD_2_MASK},
+	{"CFG_AXI_USER_MAX_PLD_SIZE_PROG",
+		CFG_AXI_USER_MAX_PLD_SIZE_PROG_MASK},
+};
+
+static struct regfield_info
+	cfg_axi_user_max_read_req_size_field_info[] = {	/* CFG_AXI_USER_MAX_READ_REQ_SIZE register fields (name/mask) */
+	{"CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_1",
+		CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_1_MASK},
+	{"CFG_AXI_USER_MAX_READ_REQ_SIZE_USISSUED",
+		CFG_AXI_USER_MAX_READ_REQ_SIZE_USISSUED_MASK},
+	{"CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_2",
+		CFG_AXI_USER_MAX_READ_REQ_SIZE_RSVD_2_MASK},
+	{"CFG_AXI_USER_MAX_READ_REQ_SIZE_USPROG",
+		CFG_AXI_USER_MAX_READ_REQ_SIZE_USPROG_MASK},
+};
+
+static struct regfield_info
+	cfg_blk_misc_ctl_field_info[] = {	/* CFG_BLK_MISC_CTL register fields (name/mask) */
+	{"CFG_BLK_MISC_CTL_RSVD_1",
+		CFG_BLK_MISC_CTL_RSVD_1_MASK},
+	{"CFG_BLK_MISC_CTL_10B_TAG_EN",
+		CFG_BLK_MISC_CTL_10B_TAG_EN_MASK},
+	{"CFG_BLK_MISC_CTL_RSVD_2",
+		CFG_BLK_MISC_CTL_RSVD_2_MASK},
+	{"CFG_BLK_MISC_CTL_AXI_WBK",
+		CFG_BLK_MISC_CTL_AXI_WBK_MASK},
+	{"CFG_BLK_MISC_CTL_AXI_DSC",
+		CFG_BLK_MISC_CTL_AXI_DSC_MASK},
+	{"CFG_BLK_MISC_CTL_NUM_TAG",
+		CFG_BLK_MISC_CTL_NUM_TAG_MASK},
+	{"CFG_BLK_MISC_CTL_RSVD_3",
+		CFG_BLK_MISC_CTL_RSVD_3_MASK},
+	{"CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER",
+		CFG_BLK_MISC_CTL_RQ_METERING_MULTIPLIER_MASK},
+};
+
+static struct regfield_info
+	cfg_pl_cred_ctl_field_info[] = {	/* CFG_PL_CRED_CTL register fields (name/mask) */
+	{"CFG_PL_CRED_CTL_RSVD_1",
+		CFG_PL_CRED_CTL_RSVD_1_MASK},
+	{"CFG_PL_CRED_CTL_SLAVE_CRD_RLS",
+		CFG_PL_CRED_CTL_SLAVE_CRD_RLS_MASK},
+	{"CFG_PL_CRED_CTL_RSVD_2",
+		CFG_PL_CRED_CTL_RSVD_2_MASK},
+	{"CFG_PL_CRED_CTL_MASTER_CRD_RST",
+		CFG_PL_CRED_CTL_MASTER_CRD_RST_MASK},
+};
+
+static struct regfield_info
+	cfg_blk_scratch_field_info[] = {	/* CFG_BLK_SCRATCH register (single field) */
+	{"CFG_BLK_SCRATCH",
+		CFG_BLK_SCRATCH_MASK},
+};
+
+static struct regfield_info
+	cfg_gic_field_info[] = {	/* CFG_GIC register fields (name/mask) */
+	{"CFG_GIC_RSVD_1",
+		CFG_GIC_RSVD_1_MASK},
+	{"CFG_GIC_GIC_IRQ",
+		CFG_GIC_GIC_IRQ_MASK},
+};
+
+static struct regfield_info
+	ram_sbe_msk_1_a_field_info[] = {	/* RAM_SBE_MSK_1_A register (single-bit-error mask, single field) */
+	{"RAM_SBE_MSK_1_A",
+		RAM_SBE_MSK_1_A_MASK},
+};
+
+static struct regfield_info
+	ram_sbe_sts_1_a_field_info[] = {	/* RAM_SBE_STS_1_A register fields (single-bit-error status) */
+	{"RAM_SBE_STS_1_A_RSVD",
+		RAM_SBE_STS_1_A_RSVD_MASK},
+	{"RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_1",
+		RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK},
+	{"RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_0",
+		RAM_SBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK},
+	{"RAM_SBE_STS_1_A_TAG_EVEN_RAM",
+		RAM_SBE_STS_1_A_TAG_EVEN_RAM_MASK},
+	{"RAM_SBE_STS_1_A_TAG_ODD_RAM",
+		RAM_SBE_STS_1_A_TAG_ODD_RAM_MASK},
+	{"RAM_SBE_STS_1_A_RC_RRQ_EVEN_RAM",
+		RAM_SBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK},
+};
+
+static struct regfield_info
+	ram_dbe_msk_1_a_field_info[] = {	/* RAM_DBE_MSK_1_A register (double-bit-error mask, single field) */
+	{"RAM_DBE_MSK_1_A",
+		RAM_DBE_MSK_1_A_MASK},
+};
+
+
+static struct regfield_info
+	ram_dbe_sts_1_a_field_info[] = {	/* RAM_DBE_STS_1_A register fields (double-bit-error status) */
+	{"RAM_DBE_STS_1_A_RSVD",
+		RAM_DBE_STS_1_A_RSVD_MASK},
+	{"RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_1",
+		RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_1_MASK},
+	{"RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_0",
+		RAM_DBE_STS_1_A_PFCH_CTXT_CAM_RAM_0_MASK},
+	{"RAM_DBE_STS_1_A_TAG_EVEN_RAM",
+		RAM_DBE_STS_1_A_TAG_EVEN_RAM_MASK},
+	{"RAM_DBE_STS_1_A_TAG_ODD_RAM",
+		RAM_DBE_STS_1_A_TAG_ODD_RAM_MASK},
+	{"RAM_DBE_STS_1_A_RC_RRQ_EVEN_RAM",
+		RAM_DBE_STS_1_A_RC_RRQ_EVEN_RAM_MASK},
+};
+
+
+static struct regfield_info
+	ram_sbe_msk_a_field_info[] = {	/* RAM_SBE_MSK_A register (single-bit-error mask, single field) */
+	{"RAM_SBE_MSK_A",
+		RAM_SBE_MSK_A_MASK},
+};
+
+
+static struct regfield_info
+	ram_sbe_sts_a_field_info[] = {	/* RAM_SBE_STS_A register fields (per-RAM single-bit-error status) */
+	{"RAM_SBE_STS_A_RC_RRQ_ODD_RAM",
+		RAM_SBE_STS_A_RC_RRQ_ODD_RAM_MASK},
+	{"RAM_SBE_STS_A_PEND_FIFO_RAM",
+		RAM_SBE_STS_A_PEND_FIFO_RAM_MASK},
+	{"RAM_SBE_STS_A_PFCH_LL_RAM",
+		RAM_SBE_STS_A_PFCH_LL_RAM_MASK},
+	{"RAM_SBE_STS_A_WRB_CTXT_RAM",
+		RAM_SBE_STS_A_WRB_CTXT_RAM_MASK},
+	{"RAM_SBE_STS_A_PFCH_CTXT_RAM",
+		RAM_SBE_STS_A_PFCH_CTXT_RAM_MASK},
+	{"RAM_SBE_STS_A_DESC_REQ_FIFO_RAM",
+		RAM_SBE_STS_A_DESC_REQ_FIFO_RAM_MASK},
+	{"RAM_SBE_STS_A_INT_CTXT_RAM",
+		RAM_SBE_STS_A_INT_CTXT_RAM_MASK},
+	{"RAM_SBE_STS_A_WRB_COAL_DATA_RAM",
+		RAM_SBE_STS_A_WRB_COAL_DATA_RAM_MASK},
+	{"RAM_SBE_STS_A_QID_FIFO_RAM",
+		RAM_SBE_STS_A_QID_FIFO_RAM_MASK},
+	{"RAM_SBE_STS_A_TIMER_FIFO_RAM",
+		RAM_SBE_STS_A_TIMER_FIFO_RAM_MASK},
+	{"RAM_SBE_STS_A_MI_TL_SLV_FIFO_RAM",
+		RAM_SBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK},
+	{"RAM_SBE_STS_A_DSC_CPLD",
+		RAM_SBE_STS_A_DSC_CPLD_MASK},
+	{"RAM_SBE_STS_A_DSC_CPLI",
+		RAM_SBE_STS_A_DSC_CPLI_MASK},
+	{"RAM_SBE_STS_A_DSC_SW_CTXT",
+		RAM_SBE_STS_A_DSC_SW_CTXT_MASK},
+	{"RAM_SBE_STS_A_DSC_CRD_RCV",
+		RAM_SBE_STS_A_DSC_CRD_RCV_MASK},
+	{"RAM_SBE_STS_A_DSC_HW_CTXT",
+		RAM_SBE_STS_A_DSC_HW_CTXT_MASK},
+	{"RAM_SBE_STS_A_FUNC_MAP",
+		RAM_SBE_STS_A_FUNC_MAP_MASK},
+	{"RAM_SBE_STS_A_C2H_WR_BRG_DAT",
+		RAM_SBE_STS_A_C2H_WR_BRG_DAT_MASK},
+	{"RAM_SBE_STS_A_C2H_RD_BRG_DAT",
+		RAM_SBE_STS_A_C2H_RD_BRG_DAT_MASK},
+	{"RAM_SBE_STS_A_H2C_WR_BRG_DAT",
+		RAM_SBE_STS_A_H2C_WR_BRG_DAT_MASK},
+	{"RAM_SBE_STS_A_H2C_RD_BRG_DAT",
+		RAM_SBE_STS_A_H2C_RD_BRG_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_C2H3_DAT",
+		RAM_SBE_STS_A_MI_C2H3_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_C2H2_DAT",
+		RAM_SBE_STS_A_MI_C2H2_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_C2H1_DAT",
+		RAM_SBE_STS_A_MI_C2H1_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_C2H0_DAT",
+		RAM_SBE_STS_A_MI_C2H0_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_H2C3_DAT",
+		RAM_SBE_STS_A_MI_H2C3_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_H2C2_DAT",
+		RAM_SBE_STS_A_MI_H2C2_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_H2C1_DAT",
+		RAM_SBE_STS_A_MI_H2C1_DAT_MASK},
+	{"RAM_SBE_STS_A_MI_H2C0_DAT",
+		RAM_SBE_STS_A_MI_H2C0_DAT_MASK},
+};
+
+
+static struct regfield_info
+	ram_dbe_msk_a_field_info[] = {	/* RAM_DBE_MSK_A register (double-bit-error mask, single field) */
+	{"RAM_DBE_MSK_A",
+		RAM_DBE_MSK_A_MASK},
+};
+
+
+static struct regfield_info
+	ram_dbe_sts_a_field_info[] = {	/* RAM_DBE_STS_A register fields (per-RAM double-bit-error status) */
+	{"RAM_DBE_STS_A_RC_RRQ_ODD_RAM",
+		RAM_DBE_STS_A_RC_RRQ_ODD_RAM_MASK},
+	{"RAM_DBE_STS_A_PEND_FIFO_RAM",
+		RAM_DBE_STS_A_PEND_FIFO_RAM_MASK},
+	{"RAM_DBE_STS_A_PFCH_LL_RAM",
+		RAM_DBE_STS_A_PFCH_LL_RAM_MASK},
+	{"RAM_DBE_STS_A_WRB_CTXT_RAM",
+		RAM_DBE_STS_A_WRB_CTXT_RAM_MASK},
+	{"RAM_DBE_STS_A_PFCH_CTXT_RAM",
+		RAM_DBE_STS_A_PFCH_CTXT_RAM_MASK},
+	{"RAM_DBE_STS_A_DESC_REQ_FIFO_RAM",
+		RAM_DBE_STS_A_DESC_REQ_FIFO_RAM_MASK},
+	{"RAM_DBE_STS_A_INT_CTXT_RAM",
+		RAM_DBE_STS_A_INT_CTXT_RAM_MASK},
+	{"RAM_DBE_STS_A_WRB_COAL_DATA_RAM",
+		RAM_DBE_STS_A_WRB_COAL_DATA_RAM_MASK},
+	{"RAM_DBE_STS_A_QID_FIFO_RAM",
+		RAM_DBE_STS_A_QID_FIFO_RAM_MASK},
+	{"RAM_DBE_STS_A_TIMER_FIFO_RAM",
+		RAM_DBE_STS_A_TIMER_FIFO_RAM_MASK},
+	{"RAM_DBE_STS_A_MI_TL_SLV_FIFO_RAM",
+		RAM_DBE_STS_A_MI_TL_SLV_FIFO_RAM_MASK},
+	{"RAM_DBE_STS_A_DSC_CPLD",
+		RAM_DBE_STS_A_DSC_CPLD_MASK},
+	{"RAM_DBE_STS_A_DSC_CPLI",
+		RAM_DBE_STS_A_DSC_CPLI_MASK},
+	{"RAM_DBE_STS_A_DSC_SW_CTXT",
+		RAM_DBE_STS_A_DSC_SW_CTXT_MASK},
+	{"RAM_DBE_STS_A_DSC_CRD_RCV",
+		RAM_DBE_STS_A_DSC_CRD_RCV_MASK},
+	{"RAM_DBE_STS_A_DSC_HW_CTXT",
+		RAM_DBE_STS_A_DSC_HW_CTXT_MASK},
+	{"RAM_DBE_STS_A_FUNC_MAP",
+		RAM_DBE_STS_A_FUNC_MAP_MASK},
+	{"RAM_DBE_STS_A_C2H_WR_BRG_DAT",
+		RAM_DBE_STS_A_C2H_WR_BRG_DAT_MASK},
+	{"RAM_DBE_STS_A_C2H_RD_BRG_DAT",
+		RAM_DBE_STS_A_C2H_RD_BRG_DAT_MASK},
+	{"RAM_DBE_STS_A_H2C_WR_BRG_DAT",
+		RAM_DBE_STS_A_H2C_WR_BRG_DAT_MASK},
+	{"RAM_DBE_STS_A_H2C_RD_BRG_DAT",
+		RAM_DBE_STS_A_H2C_RD_BRG_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_C2H3_DAT",
+		RAM_DBE_STS_A_MI_C2H3_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_C2H2_DAT",
+		RAM_DBE_STS_A_MI_C2H2_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_C2H1_DAT",
+		RAM_DBE_STS_A_MI_C2H1_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_C2H0_DAT",
+		RAM_DBE_STS_A_MI_C2H0_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_H2C3_DAT",
+		RAM_DBE_STS_A_MI_H2C3_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_H2C2_DAT",
+		RAM_DBE_STS_A_MI_H2C2_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_H2C1_DAT",
+		RAM_DBE_STS_A_MI_H2C1_DAT_MASK},
+	{"RAM_DBE_STS_A_MI_H2C0_DAT",
+		RAM_DBE_STS_A_MI_H2C0_DAT_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_identifier_field_info[] = {	/* GLBL2_IDENTIFIER register fields (name/mask) */
+	{"GLBL2_IDENTIFIER",
+		GLBL2_IDENTIFIER_MASK},
+	{"GLBL2_IDENTIFIER_VERSION",
+		GLBL2_IDENTIFIER_VERSION_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_channel_inst_field_info[] = {	/* GLBL2_CHANNEL_INST register fields (name/mask) */
+	{"GLBL2_CHANNEL_INST_RSVD_1",
+		GLBL2_CHANNEL_INST_RSVD_1_MASK},
+	{"GLBL2_CHANNEL_INST_C2H_ST",
+		GLBL2_CHANNEL_INST_C2H_ST_MASK},
+	{"GLBL2_CHANNEL_INST_H2C_ST",
+		GLBL2_CHANNEL_INST_H2C_ST_MASK},
+	{"GLBL2_CHANNEL_INST_RSVD_2",
+		GLBL2_CHANNEL_INST_RSVD_2_MASK},
+	{"GLBL2_CHANNEL_INST_C2H_ENG",
+		GLBL2_CHANNEL_INST_C2H_ENG_MASK},
+	{"GLBL2_CHANNEL_INST_RSVD_3",
+		GLBL2_CHANNEL_INST_RSVD_3_MASK},
+	{"GLBL2_CHANNEL_INST_H2C_ENG",
+		GLBL2_CHANNEL_INST_H2C_ENG_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_channel_mdma_field_info[] = {	/* GLBL2_CHANNEL_MDMA register fields (name/mask) */
+	{"GLBL2_CHANNEL_MDMA_RSVD_1",
+		GLBL2_CHANNEL_MDMA_RSVD_1_MASK},
+	{"GLBL2_CHANNEL_MDMA_C2H_ST",
+		GLBL2_CHANNEL_MDMA_C2H_ST_MASK},
+	{"GLBL2_CHANNEL_MDMA_H2C_ST",
+		GLBL2_CHANNEL_MDMA_H2C_ST_MASK},
+	{"GLBL2_CHANNEL_MDMA_RSVD_2",
+		GLBL2_CHANNEL_MDMA_RSVD_2_MASK},
+	{"GLBL2_CHANNEL_MDMA_C2H_ENG",
+		GLBL2_CHANNEL_MDMA_C2H_ENG_MASK},
+	{"GLBL2_CHANNEL_MDMA_RSVD_3",
+		GLBL2_CHANNEL_MDMA_RSVD_3_MASK},
+	{"GLBL2_CHANNEL_MDMA_H2C_ENG",
+		GLBL2_CHANNEL_MDMA_H2C_ENG_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_channel_strm_field_info[] = {	/* GLBL2_CHANNEL_STRM register fields (name/mask) */
+	{"GLBL2_CHANNEL_STRM_RSVD_1",
+		GLBL2_CHANNEL_STRM_RSVD_1_MASK},
+	{"GLBL2_CHANNEL_STRM_C2H_ST",
+		GLBL2_CHANNEL_STRM_C2H_ST_MASK},
+	{"GLBL2_CHANNEL_STRM_H2C_ST",
+		GLBL2_CHANNEL_STRM_H2C_ST_MASK},
+	{"GLBL2_CHANNEL_STRM_RSVD_2",
+		GLBL2_CHANNEL_STRM_RSVD_2_MASK},
+	{"GLBL2_CHANNEL_STRM_C2H_ENG",
+		GLBL2_CHANNEL_STRM_C2H_ENG_MASK},
+	{"GLBL2_CHANNEL_STRM_RSVD_3",
+		GLBL2_CHANNEL_STRM_RSVD_3_MASK},
+	{"GLBL2_CHANNEL_STRM_H2C_ENG",
+		GLBL2_CHANNEL_STRM_H2C_ENG_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_channel_cap_field_info[] = {	/* GLBL2_CHANNEL_CAP register fields (name/mask) */
+	{"GLBL2_CHANNEL_CAP_RSVD_1",
+		GLBL2_CHANNEL_CAP_RSVD_1_MASK},
+	{"GLBL2_CHANNEL_CAP_MULTIQ_MAX",
+		GLBL2_CHANNEL_CAP_MULTIQ_MAX_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_channel_pasid_cap_field_info[] = {	/* GLBL2_CHANNEL_PASID_CAP register fields (name/mask) */
+	{"GLBL2_CHANNEL_PASID_CAP_RSVD_1",
+		GLBL2_CHANNEL_PASID_CAP_RSVD_1_MASK},
+	{"GLBL2_CHANNEL_PASID_CAP_BRIDGEEN",
+		GLBL2_CHANNEL_PASID_CAP_BRIDGEEN_MASK},
+	{"GLBL2_CHANNEL_PASID_CAP_DMAEN",
+		GLBL2_CHANNEL_PASID_CAP_DMAEN_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_system_id_field_info[] = {	/* GLBL2_SYSTEM_ID register fields (name/mask) */
+	{"GLBL2_SYSTEM_ID_RSVD_1",
+		GLBL2_SYSTEM_ID_RSVD_1_MASK},
+	{"GLBL2_SYSTEM_ID",
+		GLBL2_SYSTEM_ID_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_misc_cap_field_info[] = {	/* GLBL2_MISC_CAP register (single field) */
+	{"GLBL2_MISC_CAP",
+		GLBL2_MISC_CAP_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_pcie_rq0_field_info[] = {	/* GLBL2_DBG_PCIE_RQ0 debug register fields (name/mask) */
+	{"GLBL2_PCIE_RQ0_NPH_AVL",
+		GLBL2_PCIE_RQ0_NPH_AVL_MASK},
+	{"GLBL2_PCIE_RQ0_RCB_AVL",
+		GLBL2_PCIE_RQ0_RCB_AVL_MASK},
+	{"GLBL2_PCIE_RQ0_SLV_RD_CREDS",
+		GLBL2_PCIE_RQ0_SLV_RD_CREDS_MASK},
+	{"GLBL2_PCIE_RQ0_TAG_EP",
+		GLBL2_PCIE_RQ0_TAG_EP_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_pcie_rq1_field_info[] = {	/* GLBL2_DBG_PCIE_RQ1 debug register fields (name/mask) */
+	{"GLBL2_PCIE_RQ1_RSVD_1",
+		GLBL2_PCIE_RQ1_RSVD_1_MASK},
+	{"GLBL2_PCIE_RQ1_TAG_FL",
+		GLBL2_PCIE_RQ1_TAG_FL_MASK},
+	{"GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_FL",
+		GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_FL_MASK},
+	{"GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_EP",
+		GLBL2_PCIE_RQ1_WTLP_HEADER_FIFO_EP_MASK},
+	{"GLBL2_PCIE_RQ1_RQ_FIFO_EP",
+		GLBL2_PCIE_RQ1_RQ_FIFO_EP_MASK},
+	{"GLBL2_PCIE_RQ1_RQ_FIFO_FL",
+		GLBL2_PCIE_RQ1_RQ_FIFO_FL_MASK},
+	{"GLBL2_PCIE_RQ1_TLPSM",
+		GLBL2_PCIE_RQ1_TLPSM_MASK},
+	{"GLBL2_PCIE_RQ1_TLPSM512",
+		GLBL2_PCIE_RQ1_TLPSM512_MASK},
+	{"GLBL2_PCIE_RQ1_RREQ_RCB_OK",
+		GLBL2_PCIE_RQ1_RREQ_RCB_OK_MASK},
+	{"GLBL2_PCIE_RQ1_RREQ0_SLV",
+		GLBL2_PCIE_RQ1_RREQ0_SLV_MASK},
+	{"GLBL2_PCIE_RQ1_RREQ0_VLD",
+		GLBL2_PCIE_RQ1_RREQ0_VLD_MASK},
+	{"GLBL2_PCIE_RQ1_RREQ0_RDY",
+		GLBL2_PCIE_RQ1_RREQ0_RDY_MASK},
+	{"GLBL2_PCIE_RQ1_RREQ1_SLV",
+		GLBL2_PCIE_RQ1_RREQ1_SLV_MASK},
+	{"GLBL2_PCIE_RQ1_RREQ1_VLD",
+		GLBL2_PCIE_RQ1_RREQ1_VLD_MASK},
+	{"GLBL2_PCIE_RQ1_RREQ1_RDY",
+		GLBL2_PCIE_RQ1_RREQ1_RDY_MASK},
+	{"GLBL2_PCIE_RQ1_WTLP_REQ",
+		GLBL2_PCIE_RQ1_WTLP_REQ_MASK},
+	{"GLBL2_PCIE_RQ1_WTLP_STRADDLE",
+		GLBL2_PCIE_RQ1_WTLP_STRADDLE_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_aximm_wr0_field_info[] = {	/* GLBL2_DBG_AXIMM_WR0 debug register fields (name/mask) */
+	{"GLBL2_AXIMM_WR0_RSVD_1",
+		GLBL2_AXIMM_WR0_RSVD_1_MASK},
+	{"GLBL2_AXIMM_WR0_WR_REQ",
+		GLBL2_AXIMM_WR0_WR_REQ_MASK},
+	{"GLBL2_AXIMM_WR0_WR_CHN",
+		GLBL2_AXIMM_WR0_WR_CHN_MASK},
+	{"GLBL2_AXIMM_WR0_WTLP_DATA_FIFO_EP",
+		GLBL2_AXIMM_WR0_WTLP_DATA_FIFO_EP_MASK},
+	{"GLBL2_AXIMM_WR0_WPL_FIFO_EP",
+		GLBL2_AXIMM_WR0_WPL_FIFO_EP_MASK},
+	{"GLBL2_AXIMM_WR0_BRSP_CLAIM_CHN",
+		GLBL2_AXIMM_WR0_BRSP_CLAIM_CHN_MASK},
+	{"GLBL2_AXIMM_WR0_WRREQ_CNT",
+		GLBL2_AXIMM_WR0_WRREQ_CNT_MASK},
+	{"GLBL2_AXIMM_WR0_BID",
+		GLBL2_AXIMM_WR0_BID_MASK},
+	{"GLBL2_AXIMM_WR0_BVALID",
+		GLBL2_AXIMM_WR0_BVALID_MASK},
+	{"GLBL2_AXIMM_WR0_BREADY",
+		GLBL2_AXIMM_WR0_BREADY_MASK},
+	{"GLBL2_AXIMM_WR0_WVALID",
+		GLBL2_AXIMM_WR0_WVALID_MASK},
+	{"GLBL2_AXIMM_WR0_WREADY",
+		GLBL2_AXIMM_WR0_WREADY_MASK},
+	{"GLBL2_AXIMM_WR0_AWID",
+		GLBL2_AXIMM_WR0_AWID_MASK},
+	{"GLBL2_AXIMM_WR0_AWVALID",
+		GLBL2_AXIMM_WR0_AWVALID_MASK},
+	{"GLBL2_AXIMM_WR0_AWREADY",
+		GLBL2_AXIMM_WR0_AWREADY_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_aximm_wr1_field_info[] = {
+	{"GLBL2_AXIMM_WR1_RSVD_1",
+		GLBL2_AXIMM_WR1_RSVD_1_MASK},
+	{"GLBL2_AXIMM_WR1_BRSP_CNT4",
+		GLBL2_AXIMM_WR1_BRSP_CNT4_MASK},
+	{"GLBL2_AXIMM_WR1_BRSP_CNT3",
+		GLBL2_AXIMM_WR1_BRSP_CNT3_MASK},
+	{"GLBL2_AXIMM_WR1_BRSP_CNT2",
+		GLBL2_AXIMM_WR1_BRSP_CNT2_MASK},
+	{"GLBL2_AXIMM_WR1_BRSP_CNT1",
+		GLBL2_AXIMM_WR1_BRSP_CNT1_MASK},
+	{"GLBL2_AXIMM_WR1_BRSP_CNT0",
+		GLBL2_AXIMM_WR1_BRSP_CNT0_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_aximm_rd0_field_info[] = {
+	{"GLBL2_AXIMM_RD0_RSVD_1",
+		GLBL2_AXIMM_RD0_RSVD_1_MASK},
+	{"GLBL2_AXIMM_RD0_PND_CNT",
+		GLBL2_AXIMM_RD0_PND_CNT_MASK},
+	{"GLBL2_AXIMM_RD0_RD_REQ",
+		GLBL2_AXIMM_RD0_RD_REQ_MASK},
+	{"GLBL2_AXIMM_RD0_RD_CHNL",
+		GLBL2_AXIMM_RD0_RD_CHNL_MASK},
+	{"GLBL2_AXIMM_RD0_RRSP_CLAIM_CHNL",
+		GLBL2_AXIMM_RD0_RRSP_CLAIM_CHNL_MASK},
+	{"GLBL2_AXIMM_RD0_RID",
+		GLBL2_AXIMM_RD0_RID_MASK},
+	{"GLBL2_AXIMM_RD0_RVALID",
+		GLBL2_AXIMM_RD0_RVALID_MASK},
+	{"GLBL2_AXIMM_RD0_RREADY",
+		GLBL2_AXIMM_RD0_RREADY_MASK},
+	{"GLBL2_AXIMM_RD0_ARID",
+		GLBL2_AXIMM_RD0_ARID_MASK},
+	{"GLBL2_AXIMM_RD0_ARVALID",
+		GLBL2_AXIMM_RD0_ARVALID_MASK},
+	{"GLBL2_AXIMM_RD0_ARREADY",
+		GLBL2_AXIMM_RD0_ARREADY_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_aximm_rd1_field_info[] = {
+	{"GLBL2_AXIMM_RD1_RSVD_1",
+		GLBL2_AXIMM_RD1_RSVD_1_MASK},
+	{"GLBL2_AXIMM_RD1_RRSP_CNT4",
+		GLBL2_AXIMM_RD1_RRSP_CNT4_MASK},
+	{"GLBL2_AXIMM_RD1_RRSP_CNT3",
+		GLBL2_AXIMM_RD1_RRSP_CNT3_MASK},
+	{"GLBL2_AXIMM_RD1_RRSP_CNT2",
+		GLBL2_AXIMM_RD1_RRSP_CNT2_MASK},
+	{"GLBL2_AXIMM_RD1_RRSP_CNT1",
+		GLBL2_AXIMM_RD1_RRSP_CNT1_MASK},
+	{"GLBL2_AXIMM_RD1_RRSP_CNT0",
+		GLBL2_AXIMM_RD1_RRSP_CNT0_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_fab0_field_info[] = { /* fabric debug reg 0 fields */
+	{"GLBL2_FAB0_H2C_INB_CONV_IN_VLD",
+		GLBL2_FAB0_H2C_INB_CONV_IN_VLD_MASK},
+	{"GLBL2_FAB0_H2C_INB_CONV_IN_RDY",
+		GLBL2_FAB0_H2C_INB_CONV_IN_RDY_MASK},
+	{"GLBL2_FAB0_H2C_SEG_IN_VLD",
+		GLBL2_FAB0_H2C_SEG_IN_VLD_MASK},
+	{"GLBL2_FAB0_H2C_SEG_IN_RDY",
+		GLBL2_FAB0_H2C_SEG_IN_RDY_MASK},
+	{"GLBL2_FAB0_H2C_SEG_OUT_VLD",
+		GLBL2_FAB0_H2C_SEG_OUT_VLD_MASK},
+	{"GLBL2_FAB0_H2C_SEG_OUT_RDY",
+		GLBL2_FAB0_H2C_SEG_OUT_RDY_MASK},
+	{"GLBL2_FAB0_H2C_MST_CRDT_STAT",
+		GLBL2_FAB0_H2C_MST_CRDT_STAT_MASK},
+	{"GLBL2_FAB0_C2H_SLV_AFIFO_FULL",
+		GLBL2_FAB0_C2H_SLV_AFIFO_FULL_MASK},
+	{"GLBL2_FAB0_C2H_SLV_AFIFO_EMPTY",
+		GLBL2_FAB0_C2H_SLV_AFIFO_EMPTY_MASK},
+	{"GLBL2_FAB0_C2H_DESEG_SEG_VLD",
+		GLBL2_FAB0_C2H_DESEG_SEG_VLD_MASK},
+	{"GLBL2_FAB0_C2H_DESEG_SEG_RDY",
+		GLBL2_FAB0_C2H_DESEG_SEG_RDY_MASK},
+	{"GLBL2_FAB0_C2H_DESEG_OUT_VLD",
+		GLBL2_FAB0_C2H_DESEG_OUT_VLD_MASK},
+	{"GLBL2_FAB0_C2H_DESEG_OUT_RDY",
+		GLBL2_FAB0_C2H_DESEG_OUT_RDY_MASK},
+	{"GLBL2_FAB0_C2H_INB_DECONV_OUT_VLD",
+		GLBL2_FAB0_C2H_INB_DECONV_OUT_VLD_MASK},
+	{"GLBL2_FAB0_C2H_INB_DECONV_OUT_RDY",
+		GLBL2_FAB0_C2H_INB_DECONV_OUT_RDY_MASK},
+	{"GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_FULL",
+		GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_FULL_MASK},
+	{"GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_EMPTY",
+		GLBL2_FAB0_C2H_DSC_CRDT_AFIFO_EMPTY_MASK},
+	{"GLBL2_FAB0_IRQ_IN_AFIFO_FULL",
+		GLBL2_FAB0_IRQ_IN_AFIFO_FULL_MASK},
+	{"GLBL2_FAB0_IRQ_IN_AFIFO_EMPTY",
+		GLBL2_FAB0_IRQ_IN_AFIFO_EMPTY_MASK},
+	{"GLBL2_FAB0_IMM_CRD_AFIFO_EMPTY",
+		GLBL2_FAB0_IMM_CRD_AFIFO_EMPTY_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_fab1_field_info[] = { /* fabric debug reg 1 fields */
+	{"GLBL2_FAB1_BYP_OUT_CRDT_STAT",
+		GLBL2_FAB1_BYP_OUT_CRDT_STAT_MASK},
+	{"GLBL2_FAB1_TM_DSC_STS_CRDT_STAT",
+		GLBL2_FAB1_TM_DSC_STS_CRDT_STAT_MASK},
+	{"GLBL2_FAB1_C2H_CMN_AFIFO_FULL",
+		GLBL2_FAB1_C2H_CMN_AFIFO_FULL_MASK},
+	{"GLBL2_FAB1_C2H_CMN_AFIFO_EMPTY",
+		GLBL2_FAB1_C2H_CMN_AFIFO_EMPTY_MASK},
+	{"GLBL2_FAB1_RSVD_1",
+		GLBL2_FAB1_RSVD_1_MASK},
+	{"GLBL2_FAB1_C2H_BYP_IN_AFIFO_FULL",
+		GLBL2_FAB1_C2H_BYP_IN_AFIFO_FULL_MASK},
+	{"GLBL2_FAB1_RSVD_2",
+		GLBL2_FAB1_RSVD_2_MASK},
+	{"GLBL2_FAB1_C2H_BYP_IN_AFIFO_EMPTY",
+		GLBL2_FAB1_C2H_BYP_IN_AFIFO_EMPTY_MASK},
+	{"GLBL2_FAB1_RSVD_3",
+		GLBL2_FAB1_RSVD_3_MASK},
+	{"GLBL2_FAB1_H2C_BYP_IN_AFIFO_FULL",
+		GLBL2_FAB1_H2C_BYP_IN_AFIFO_FULL_MASK},
+	{"GLBL2_FAB1_RSVD_4",
+		GLBL2_FAB1_RSVD_4_MASK},
+	{"GLBL2_FAB1_H2C_BYP_IN_AFIFO_EMPTY",
+		GLBL2_FAB1_H2C_BYP_IN_AFIFO_EMPTY_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_match_sel_field_info[] = { /* debug match select reg fields */
+	{"GLBL2_MATCH_SEL_RSV",
+		GLBL2_MATCH_SEL_RSV_MASK},
+	{"GLBL2_MATCH_SEL_CSR_SEL",
+		GLBL2_MATCH_SEL_CSR_SEL_MASK},
+	{"GLBL2_MATCH_SEL_CSR_EN",
+		GLBL2_MATCH_SEL_CSR_EN_MASK},
+	{"GLBL2_MATCH_SEL_ROTATE1",
+		GLBL2_MATCH_SEL_ROTATE1_MASK},
+	{"GLBL2_MATCH_SEL_ROTATE0",
+		GLBL2_MATCH_SEL_ROTATE0_MASK},
+	{"GLBL2_MATCH_SEL_SEL",
+		GLBL2_MATCH_SEL_SEL_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_match_msk_field_info[] = { /* debug match mask reg fields */
+	{"GLBL2_MATCH_MSK",
+		GLBL2_MATCH_MSK_MASK},
+};
+
+
+static struct regfield_info
+	glbl2_dbg_match_pat_field_info[] = { /* debug match pattern reg fields */
+	{"GLBL2_MATCH_PAT_PATTERN",
+		GLBL2_MATCH_PAT_PATTERN_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_1_field_info[] = { /* GLBL_RNG_SZ_1..10: one table per ring-size CSR, same layout */
+	{"GLBL_RNG_SZ_1_RSVD_1",
+		GLBL_RNG_SZ_1_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_1_RING_SIZE",
+		GLBL_RNG_SZ_1_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_2_field_info[] = {
+	{"GLBL_RNG_SZ_2_RSVD_1",
+		GLBL_RNG_SZ_2_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_2_RING_SIZE",
+		GLBL_RNG_SZ_2_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_3_field_info[] = {
+	{"GLBL_RNG_SZ_3_RSVD_1",
+		GLBL_RNG_SZ_3_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_3_RING_SIZE",
+		GLBL_RNG_SZ_3_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_4_field_info[] = {
+	{"GLBL_RNG_SZ_4_RSVD_1",
+		GLBL_RNG_SZ_4_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_4_RING_SIZE",
+		GLBL_RNG_SZ_4_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_5_field_info[] = {
+	{"GLBL_RNG_SZ_5_RSVD_1",
+		GLBL_RNG_SZ_5_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_5_RING_SIZE",
+		GLBL_RNG_SZ_5_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_6_field_info[] = {
+	{"GLBL_RNG_SZ_6_RSVD_1",
+		GLBL_RNG_SZ_6_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_6_RING_SIZE",
+		GLBL_RNG_SZ_6_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_7_field_info[] = {
+	{"GLBL_RNG_SZ_7_RSVD_1",
+		GLBL_RNG_SZ_7_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_7_RING_SIZE",
+		GLBL_RNG_SZ_7_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_8_field_info[] = {
+	{"GLBL_RNG_SZ_8_RSVD_1",
+		GLBL_RNG_SZ_8_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_8_RING_SIZE",
+		GLBL_RNG_SZ_8_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_9_field_info[] = {
+	{"GLBL_RNG_SZ_9_RSVD_1",
+		GLBL_RNG_SZ_9_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_9_RING_SIZE",
+		GLBL_RNG_SZ_9_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_a_field_info[] = {
+	{"GLBL_RNG_SZ_A_RSVD_1",
+		GLBL_RNG_SZ_A_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_A_RING_SIZE",
+		GLBL_RNG_SZ_A_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_b_field_info[] = {
+	{"GLBL_RNG_SZ_B_RSVD_1",
+		GLBL_RNG_SZ_B_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_B_RING_SIZE",
+		GLBL_RNG_SZ_B_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_c_field_info[] = {
+	{"GLBL_RNG_SZ_C_RSVD_1",
+		GLBL_RNG_SZ_C_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_C_RING_SIZE",
+		GLBL_RNG_SZ_C_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_d_field_info[] = {
+	{"GLBL_RNG_SZ_D_RSVD_1",
+		GLBL_RNG_SZ_D_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_D_RING_SIZE",
+		GLBL_RNG_SZ_D_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_e_field_info[] = {
+	{"GLBL_RNG_SZ_E_RSVD_1",
+		GLBL_RNG_SZ_E_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_E_RING_SIZE",
+		GLBL_RNG_SZ_E_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_f_field_info[] = {
+	{"GLBL_RNG_SZ_F_RSVD_1",
+		GLBL_RNG_SZ_F_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_F_RING_SIZE",
+		GLBL_RNG_SZ_F_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_rng_sz_10_field_info[] = { /* last of the 16 ring-size CSR tables */
+	{"GLBL_RNG_SZ_10_RSVD_1",
+		GLBL_RNG_SZ_10_RSVD_1_MASK},
+	{"GLBL_RNG_SZ_10_RING_SIZE",
+		GLBL_RNG_SZ_10_RING_SIZE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_err_stat_field_info[] = { /* global error status reg fields */
+	{"GLBL_ERR_STAT_RSVD_1",
+		GLBL_ERR_STAT_RSVD_1_MASK},
+	{"GLBL_ERR_STAT_ERR_FAB",
+		GLBL_ERR_STAT_ERR_FAB_MASK},
+	{"GLBL_ERR_STAT_ERR_H2C_ST",
+		GLBL_ERR_STAT_ERR_H2C_ST_MASK},
+	{"GLBL_ERR_STAT_ERR_BDG",
+		GLBL_ERR_STAT_ERR_BDG_MASK},
+	{"GLBL_ERR_STAT_IND_CTXT_CMD_ERR",
+		GLBL_ERR_STAT_IND_CTXT_CMD_ERR_MASK},
+	{"GLBL_ERR_STAT_ERR_C2H_ST",
+		GLBL_ERR_STAT_ERR_C2H_ST_MASK},
+	{"GLBL_ERR_STAT_ERR_C2H_MM_1",
+		GLBL_ERR_STAT_ERR_C2H_MM_1_MASK},
+	{"GLBL_ERR_STAT_ERR_C2H_MM_0",
+		GLBL_ERR_STAT_ERR_C2H_MM_0_MASK},
+	{"GLBL_ERR_STAT_ERR_H2C_MM_1",
+		GLBL_ERR_STAT_ERR_H2C_MM_1_MASK},
+	{"GLBL_ERR_STAT_ERR_H2C_MM_0",
+		GLBL_ERR_STAT_ERR_H2C_MM_0_MASK},
+	{"GLBL_ERR_STAT_ERR_TRQ",
+		GLBL_ERR_STAT_ERR_TRQ_MASK},
+	{"GLBL_ERR_STAT_ERR_DSC",
+		GLBL_ERR_STAT_ERR_DSC_MASK},
+	{"GLBL_ERR_STAT_ERR_RAM_DBE",
+		GLBL_ERR_STAT_ERR_RAM_DBE_MASK},
+	{"GLBL_ERR_STAT_ERR_RAM_SBE",
+		GLBL_ERR_STAT_ERR_RAM_SBE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_err_mask_field_info[] = { /* global error mask reg (single field) */
+	{"GLBL_ERR",
+		GLBL_ERR_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_cfg_field_info[] = { /* global descriptor-engine config reg fields */
+	{"GLBL_DSC_CFG_RSVD_1",
+		GLBL_DSC_CFG_RSVD_1_MASK},
+	{"GLBL_DSC_CFG_UNC_OVR_COR",
+		GLBL_DSC_CFG_UNC_OVR_COR_MASK},
+	{"GLBL_DSC_CFG_CTXT_FER_DIS",
+		GLBL_DSC_CFG_CTXT_FER_DIS_MASK},
+	{"GLBL_DSC_CFG_RSVD_2",
+		GLBL_DSC_CFG_RSVD_2_MASK},
+	{"GLBL_DSC_CFG_MAXFETCH",
+		GLBL_DSC_CFG_MAXFETCH_MASK},
+	{"GLBL_DSC_CFG_WB_ACC_INT",
+		GLBL_DSC_CFG_WB_ACC_INT_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_err_sts_field_info[] = { /* descriptor-engine error status reg fields */
+	{"GLBL_DSC_ERR_STS_RSVD_1",
+		GLBL_DSC_ERR_STS_RSVD_1_MASK},
+	{"GLBL_DSC_ERR_STS_PORT_ID",
+		GLBL_DSC_ERR_STS_PORT_ID_MASK},
+	{"GLBL_DSC_ERR_STS_SBE",
+		GLBL_DSC_ERR_STS_SBE_MASK},
+	{"GLBL_DSC_ERR_STS_DBE",
+		GLBL_DSC_ERR_STS_DBE_MASK},
+	{"GLBL_DSC_ERR_STS_RQ_CANCEL",
+		GLBL_DSC_ERR_STS_RQ_CANCEL_MASK},
+	{"GLBL_DSC_ERR_STS_DSC",
+		GLBL_DSC_ERR_STS_DSC_MASK},
+	{"GLBL_DSC_ERR_STS_DMA",
+		GLBL_DSC_ERR_STS_DMA_MASK},
+	{"GLBL_DSC_ERR_STS_FLR_CANCEL",
+		GLBL_DSC_ERR_STS_FLR_CANCEL_MASK},
+	{"GLBL_DSC_ERR_STS_RSVD_2",
+		GLBL_DSC_ERR_STS_RSVD_2_MASK},
+	{"GLBL_DSC_ERR_STS_DAT_POISON",
+		GLBL_DSC_ERR_STS_DAT_POISON_MASK},
+	{"GLBL_DSC_ERR_STS_TIMEOUT",
+		GLBL_DSC_ERR_STS_TIMEOUT_MASK},
+	{"GLBL_DSC_ERR_STS_FLR",
+		GLBL_DSC_ERR_STS_FLR_MASK},
+	{"GLBL_DSC_ERR_STS_TAG",
+		GLBL_DSC_ERR_STS_TAG_MASK},
+	{"GLBL_DSC_ERR_STS_ADDR",
+		GLBL_DSC_ERR_STS_ADDR_MASK},
+	{"GLBL_DSC_ERR_STS_PARAM",
+		GLBL_DSC_ERR_STS_PARAM_MASK},
+	{"GLBL_DSC_ERR_STS_BCNT",
+		GLBL_DSC_ERR_STS_BCNT_MASK},
+	{"GLBL_DSC_ERR_STS_UR_CA",
+		GLBL_DSC_ERR_STS_UR_CA_MASK},
+	{"GLBL_DSC_ERR_STS_POISON",
+		GLBL_DSC_ERR_STS_POISON_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_err_msk_field_info[] = { /* descriptor-engine error mask reg */
+	{"GLBL_DSC_ERR_MSK",
+		GLBL_DSC_ERR_MSK_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_err_log0_field_info[] = { /* descriptor error log reg 0 fields */
+	{"GLBL_DSC_ERR_LOG0_VALID",
+		GLBL_DSC_ERR_LOG0_VALID_MASK},
+	{"GLBL_DSC_ERR_LOG0_SEL",
+		GLBL_DSC_ERR_LOG0_SEL_MASK},
+	{"GLBL_DSC_ERR_LOG0_RSVD_1",
+		GLBL_DSC_ERR_LOG0_RSVD_1_MASK},
+	{"GLBL_DSC_ERR_LOG0_QID",
+		GLBL_DSC_ERR_LOG0_QID_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_err_log1_field_info[] = { /* descriptor error log reg 1 fields */
+	{"GLBL_DSC_ERR_LOG1_RSVD_1",
+		GLBL_DSC_ERR_LOG1_RSVD_1_MASK},
+	{"GLBL_DSC_ERR_LOG1_CIDX",
+		GLBL_DSC_ERR_LOG1_CIDX_MASK},
+	{"GLBL_DSC_ERR_LOG1_RSVD_2",
+		GLBL_DSC_ERR_LOG1_RSVD_2_MASK},
+	{"GLBL_DSC_ERR_LOG1_SUB_TYPE",
+		GLBL_DSC_ERR_LOG1_SUB_TYPE_MASK},
+	{"GLBL_DSC_ERR_LOG1_ERR_TYPE",
+		GLBL_DSC_ERR_LOG1_ERR_TYPE_MASK},
+};
+
+
+static struct regfield_info
+	glbl_trq_err_sts_field_info[] = { /* target/register-access error status reg fields */
+	{"GLBL_TRQ_ERR_STS_RSVD_1",
+		GLBL_TRQ_ERR_STS_RSVD_1_MASK},
+	{"GLBL_TRQ_ERR_STS_TCP_QSPC_TIMEOUT",
+		GLBL_TRQ_ERR_STS_TCP_QSPC_TIMEOUT_MASK},
+	{"GLBL_TRQ_ERR_STS_RSVD_2",
+		GLBL_TRQ_ERR_STS_RSVD_2_MASK},
+	{"GLBL_TRQ_ERR_STS_QID_RANGE",
+		GLBL_TRQ_ERR_STS_QID_RANGE_MASK},
+	{"GLBL_TRQ_ERR_STS_QSPC_UNMAPPED",
+		GLBL_TRQ_ERR_STS_QSPC_UNMAPPED_MASK},
+	{"GLBL_TRQ_ERR_STS_TCP_CSR_TIMEOUT",
+		GLBL_TRQ_ERR_STS_TCP_CSR_TIMEOUT_MASK},
+	{"GLBL_TRQ_ERR_STS_RSVD_3",
+		GLBL_TRQ_ERR_STS_RSVD_3_MASK},
+	{"GLBL_TRQ_ERR_STS_VF_ACCESS_ERR",
+		GLBL_TRQ_ERR_STS_VF_ACCESS_ERR_MASK},
+	{"GLBL_TRQ_ERR_STS_CSR_UNMAPPED",
+		GLBL_TRQ_ERR_STS_CSR_UNMAPPED_MASK},
+};
+
+
+static struct regfield_info
+	glbl_trq_err_msk_field_info[] = { /* TRQ error mask reg */
+	{"GLBL_TRQ_ERR_MSK",
+		GLBL_TRQ_ERR_MSK_MASK},
+};
+
+
+static struct regfield_info
+	glbl_trq_err_log_field_info[] = { /* TRQ error log reg fields */
+	{"GLBL_TRQ_ERR_LOG_SRC",
+		GLBL_TRQ_ERR_LOG_SRC_MASK},
+	{"GLBL_TRQ_ERR_LOG_TARGET",
+		GLBL_TRQ_ERR_LOG_TARGET_MASK},
+	{"GLBL_TRQ_ERR_LOG_FUNC",
+		GLBL_TRQ_ERR_LOG_FUNC_MASK},
+	{"GLBL_TRQ_ERR_LOG_ADDRESS",
+		GLBL_TRQ_ERR_LOG_ADDRESS_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_dbg_dat0_field_info[] = { /* descriptor-engine debug data reg 0 fields */
+	{"GLBL_DSC_DAT0_RSVD_1",
+		GLBL_DSC_DAT0_RSVD_1_MASK},
+	{"GLBL_DSC_DAT0_CTXT_ARB_DIR",
+		GLBL_DSC_DAT0_CTXT_ARB_DIR_MASK},
+	{"GLBL_DSC_DAT0_CTXT_ARB_QID",
+		GLBL_DSC_DAT0_CTXT_ARB_QID_MASK},
+	{"GLBL_DSC_DAT0_CTXT_ARB_REQ",
+		GLBL_DSC_DAT0_CTXT_ARB_REQ_MASK},
+	{"GLBL_DSC_DAT0_IRQ_FIFO_FL",
+		GLBL_DSC_DAT0_IRQ_FIFO_FL_MASK},
+	{"GLBL_DSC_DAT0_TMSTALL",
+		GLBL_DSC_DAT0_TMSTALL_MASK},
+	{"GLBL_DSC_DAT0_RRQ_STALL",
+		GLBL_DSC_DAT0_RRQ_STALL_MASK},
+	{"GLBL_DSC_DAT0_RCP_FIFO_SPC_STALL",
+		GLBL_DSC_DAT0_RCP_FIFO_SPC_STALL_MASK},
+	{"GLBL_DSC_DAT0_RRQ_FIFO_SPC_STALL",
+		GLBL_DSC_DAT0_RRQ_FIFO_SPC_STALL_MASK},
+	{"GLBL_DSC_DAT0_FAB_MRKR_RSP_STALL",
+		GLBL_DSC_DAT0_FAB_MRKR_RSP_STALL_MASK},
+	{"GLBL_DSC_DAT0_DSC_OUT_STALL",
+		GLBL_DSC_DAT0_DSC_OUT_STALL_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_dbg_dat1_field_info[] = { /* descriptor-engine debug data reg 1 fields */
+	{"GLBL_DSC_DAT1_RSVD_1",
+		GLBL_DSC_DAT1_RSVD_1_MASK},
+	{"GLBL_DSC_DAT1_EVT_SPC_C2H",
+		GLBL_DSC_DAT1_EVT_SPC_C2H_MASK},
+	{"GLBL_DSC_DAT1_EVT_SP_H2C",
+		GLBL_DSC_DAT1_EVT_SP_H2C_MASK},
+	{"GLBL_DSC_DAT1_DSC_SPC_C2H",
+		GLBL_DSC_DAT1_DSC_SPC_C2H_MASK},
+	{"GLBL_DSC_DAT1_DSC_SPC_H2C",
+		GLBL_DSC_DAT1_DSC_SPC_H2C_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_dbg_ctl_field_info[] = { /* descriptor-engine debug control reg fields */
+	{"GLBL_DSC_CTL_RSVD_1",
+		GLBL_DSC_CTL_RSVD_1_MASK},
+	{"GLBL_DSC_CTL_SELECT",
+		GLBL_DSC_CTL_SELECT_MASK},
+};
+
+
+static struct regfield_info
+	glbl_dsc_err_log2_field_info[] = { /* descriptor error log reg 2: old/new PIDX */
+	{"GLBL_DSC_ERR_LOG2_OLD_PIDX",
+		GLBL_DSC_ERR_LOG2_OLD_PIDX_MASK},
+	{"GLBL_DSC_ERR_LOG2_NEW_PIDX",
+		GLBL_DSC_ERR_LOG2_NEW_PIDX_MASK},
+};
+
+
+static struct regfield_info
+	glbl_glbl_interrupt_cfg_field_info[] = { /* global interrupt config reg fields */
+	{"GLBL_GLBL_INTERRUPT_CFG_RSVD_1",
+		GLBL_GLBL_INTERRUPT_CFG_RSVD_1_MASK},
+	{"GLBL_GLBL_INTERRUPT_CFG_LGCY_INTR_PENDING",
+		GLBL_GLBL_INTERRUPT_CFG_LGCY_INTR_PENDING_MASK},
+	{"GLBL_GLBL_INTERRUPT_CFG_EN_LGCY_INTR",
+		GLBL_GLBL_INTERRUPT_CFG_EN_LGCY_INTR_MASK},
+};
+
+
+static struct regfield_info
+	glbl_vch_host_profile_field_info[] = { /* virtual-channel host profile reg fields */
+	{"GLBL_VCH_HOST_PROFILE_RSVD_1",
+		GLBL_VCH_HOST_PROFILE_RSVD_1_MASK},
+	{"GLBL_VCH_HOST_PROFILE_2C_MM",
+		GLBL_VCH_HOST_PROFILE_2C_MM_MASK},
+	{"GLBL_VCH_HOST_PROFILE_2C_ST",
+		GLBL_VCH_HOST_PROFILE_2C_ST_MASK},
+	{"GLBL_VCH_HOST_PROFILE_VCH_DSC",
+		GLBL_VCH_HOST_PROFILE_VCH_DSC_MASK},
+	{"GLBL_VCH_HOST_PROFILE_VCH_INT_MSG",
+		GLBL_VCH_HOST_PROFILE_VCH_INT_MSG_MASK},
+	{"GLBL_VCH_HOST_PROFILE_VCH_INT_AGGR",
+		GLBL_VCH_HOST_PROFILE_VCH_INT_AGGR_MASK},
+	{"GLBL_VCH_HOST_PROFILE_VCH_CMPT",
+		GLBL_VCH_HOST_PROFILE_VCH_CMPT_MASK},
+	{"GLBL_VCH_HOST_PROFILE_VCH_C2H_PLD",
+		GLBL_VCH_HOST_PROFILE_VCH_C2H_PLD_MASK},
+};
+
+
+static struct regfield_info
+	glbl_bridge_host_profile_field_info[] = { /* bridge host profile reg fields */
+	{"GLBL_BRIDGE_HOST_PROFILE_RSVD_1",
+		GLBL_BRIDGE_HOST_PROFILE_RSVD_1_MASK},
+	{"GLBL_BRIDGE_HOST_PROFILE_BDGID",
+		GLBL_BRIDGE_HOST_PROFILE_BDGID_MASK},
+};
+
+
+static struct regfield_info
+	aximm_irq_dest_addr_field_info[] = { /* AXI-MM IRQ destination address reg */
+	{"AXIMM_IRQ_DEST_ADDR_ADDR",
+		AXIMM_IRQ_DEST_ADDR_ADDR_MASK},
+};
+
+
+static struct regfield_info
+	fab_err_log_field_info[] = { /* fabric error log reg fields */
+	{"FAB_ERR_LOG_RSVD_1",
+		FAB_ERR_LOG_RSVD_1_MASK},
+	{"FAB_ERR_LOG_SRC",
+		FAB_ERR_LOG_SRC_MASK},
+};
+
+
+static struct regfield_info
+	glbl_req_err_sts_field_info[] = { /* request (RC completion) error status reg fields */
+	{"GLBL_REQ_ERR_STS_RSVD_1",
+		GLBL_REQ_ERR_STS_RSVD_1_MASK},
+	{"GLBL_REQ_ERR_STS_RC_DISCONTINUE",
+		GLBL_REQ_ERR_STS_RC_DISCONTINUE_MASK},
+	{"GLBL_REQ_ERR_STS_RC_PRTY",
+		GLBL_REQ_ERR_STS_RC_PRTY_MASK},
+	{"GLBL_REQ_ERR_STS_RC_FLR",
+		GLBL_REQ_ERR_STS_RC_FLR_MASK},
+	{"GLBL_REQ_ERR_STS_RC_TIMEOUT",
+		GLBL_REQ_ERR_STS_RC_TIMEOUT_MASK},
+	{"GLBL_REQ_ERR_STS_RC_INV_BCNT",
+		GLBL_REQ_ERR_STS_RC_INV_BCNT_MASK},
+	{"GLBL_REQ_ERR_STS_RC_INV_TAG",
+		GLBL_REQ_ERR_STS_RC_INV_TAG_MASK},
+	{"GLBL_REQ_ERR_STS_RC_START_ADDR_MISMCH",
+		GLBL_REQ_ERR_STS_RC_START_ADDR_MISMCH_MASK},
+	{"GLBL_REQ_ERR_STS_RC_RID_TC_ATTR_MISMCH",
+		GLBL_REQ_ERR_STS_RC_RID_TC_ATTR_MISMCH_MASK},
+	{"GLBL_REQ_ERR_STS_RC_NO_DATA",
+		GLBL_REQ_ERR_STS_RC_NO_DATA_MASK},
+	{"GLBL_REQ_ERR_STS_RC_UR_CA_CRS",
+		GLBL_REQ_ERR_STS_RC_UR_CA_CRS_MASK},
+	{"GLBL_REQ_ERR_STS_RC_POISONED",
+		GLBL_REQ_ERR_STS_RC_POISONED_MASK},
+};
+
+
+static struct regfield_info
+	glbl_req_err_msk_field_info[] = { /* request error mask reg */
+	{"GLBL_REQ_ERR_MSK",
+		GLBL_REQ_ERR_MSK_MASK},
+};
+
+
+static struct regfield_info
+	ind_ctxt_data_field_info[] = { /* indirect context data reg */
+	{"IND_CTXT_DATA_DATA",
+		IND_CTXT_DATA_DATA_MASK},
+};
+
+
+static struct regfield_info
+	ind_ctxt_mask_field_info[] = { /* indirect context mask reg */
+	{"IND_CTXT",
+		IND_CTXT_MASK},
+};
+
+
+static struct regfield_info
+	ind_ctxt_cmd_field_info[] = { /* indirect context command reg fields */
+	{"IND_CTXT_CMD_RSVD_1",
+		IND_CTXT_CMD_RSVD_1_MASK},
+	{"IND_CTXT_CMD_QID",
+		IND_CTXT_CMD_QID_MASK},
+	{"IND_CTXT_CMD_OP",
+		IND_CTXT_CMD_OP_MASK},
+	{"IND_CTXT_CMD_SEL",
+		IND_CTXT_CMD_SEL_MASK},
+	{"IND_CTXT_CMD_BUSY",
+		IND_CTXT_CMD_BUSY_MASK},
+};
+
+
+static struct regfield_info
+	c2h_timer_cnt_field_info[] = { /* C2H timer count reg fields */
+	{"C2H_TIMER_CNT_RSVD_1",
+		C2H_TIMER_CNT_RSVD_1_MASK},
+	{"C2H_TIMER_CNT",
+		C2H_TIMER_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cnt_th_field_info[] = { /* C2H counter threshold reg; "THESHOLD" [sic] mirrors header macro */
+	{"C2H_CNT_TH_RSVD_1",
+		C2H_CNT_TH_RSVD_1_MASK},
+	{"C2H_CNT_TH_THESHOLD_CNT",
+		C2H_CNT_TH_THESHOLD_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_s_axis_c2h_accepted_field_info[] = { /* single-field C2H statistics counters below */
+	{"C2H_STAT_S_AXIS_C2H_ACCEPTED",
+		C2H_STAT_S_AXIS_C2H_ACCEPTED_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_s_axis_wrb_accepted_field_info[] = {
+	{"C2H_STAT_S_AXIS_WRB_ACCEPTED",
+		C2H_STAT_S_AXIS_WRB_ACCEPTED_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_desc_rsp_pkt_accepted_field_info[] = {
+	{"C2H_STAT_DESC_RSP_PKT_ACCEPTED_D",
+		C2H_STAT_DESC_RSP_PKT_ACCEPTED_D_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_axis_pkg_cmp_field_info[] = {
+	{"C2H_STAT_AXIS_PKG_CMP",
+		C2H_STAT_AXIS_PKG_CMP_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_desc_rsp_accepted_field_info[] = {
+	{"C2H_STAT_DESC_RSP_ACCEPTED_D",
+		C2H_STAT_DESC_RSP_ACCEPTED_D_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_desc_rsp_cmp_field_info[] = {
+	{"C2H_STAT_DESC_RSP_CMP_D",
+		C2H_STAT_DESC_RSP_CMP_D_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_wrq_out_field_info[] = {
+	{"C2H_STAT_WRQ_OUT",
+		C2H_STAT_WRQ_OUT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_wpl_ren_accepted_field_info[] = {
+	{"C2H_STAT_WPL_REN_ACCEPTED",
+		C2H_STAT_WPL_REN_ACCEPTED_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_total_wrq_len_field_info[] = {
+	{"C2H_STAT_TOTAL_WRQ_LEN",
+		C2H_STAT_TOTAL_WRQ_LEN_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_total_wpl_len_field_info[] = {
+	{"C2H_STAT_TOTAL_WPL_LEN",
+		C2H_STAT_TOTAL_WPL_LEN_MASK},
+};
+
+
+static struct regfield_info
+	c2h_buf_sz_field_info[] = { /* C2H buffer size reg; "SZ_IZE" [sic] mirrors header macro */
+	{"C2H_BUF_SZ_IZE",
+		C2H_BUF_SZ_IZE_MASK},
+};
+
+
+static struct regfield_info
+	c2h_err_stat_field_info[] = { /* C2H error status reg fields */
+	{"C2H_ERR_STAT_RSVD_1",
+		C2H_ERR_STAT_RSVD_1_MASK},
+	{"C2H_ERR_STAT_WRB_PORT_ID_ERR",
+		C2H_ERR_STAT_WRB_PORT_ID_ERR_MASK},
+	{"C2H_ERR_STAT_HDR_PAR_ERR",
+		C2H_ERR_STAT_HDR_PAR_ERR_MASK},
+	{"C2H_ERR_STAT_HDR_ECC_COR_ERR",
+		C2H_ERR_STAT_HDR_ECC_COR_ERR_MASK},
+	{"C2H_ERR_STAT_HDR_ECC_UNC_ERR",
+		C2H_ERR_STAT_HDR_ECC_UNC_ERR_MASK},
+	{"C2H_ERR_STAT_AVL_RING_DSC_ERR",
+		C2H_ERR_STAT_AVL_RING_DSC_ERR_MASK},
+	{"C2H_ERR_STAT_WRB_PRTY_ERR",
+		C2H_ERR_STAT_WRB_PRTY_ERR_MASK},
+	{"C2H_ERR_STAT_WRB_CIDX_ERR",
+		C2H_ERR_STAT_WRB_CIDX_ERR_MASK},
+	{"C2H_ERR_STAT_WRB_QFULL_ERR",
+		C2H_ERR_STAT_WRB_QFULL_ERR_MASK},
+	{"C2H_ERR_STAT_WRB_INV_Q_ERR",
+		C2H_ERR_STAT_WRB_INV_Q_ERR_MASK},
+	{"C2H_ERR_STAT_RSVD_2",
+		C2H_ERR_STAT_RSVD_2_MASK},
+	{"C2H_ERR_STAT_PORT_ID_CTXT_MISMATCH",
+		C2H_ERR_STAT_PORT_ID_CTXT_MISMATCH_MASK},
+	{"C2H_ERR_STAT_ERR_DESC_CNT",
+		C2H_ERR_STAT_ERR_DESC_CNT_MASK},
+	{"C2H_ERR_STAT_RSVD_3",
+		C2H_ERR_STAT_RSVD_3_MASK},
+	{"C2H_ERR_STAT_MSI_INT_FAIL",
+		C2H_ERR_STAT_MSI_INT_FAIL_MASK},
+	{"C2H_ERR_STAT_ENG_WPL_DATA_PAR_ERR",
+		C2H_ERR_STAT_ENG_WPL_DATA_PAR_ERR_MASK},
+	{"C2H_ERR_STAT_RSVD_4",
+		C2H_ERR_STAT_RSVD_4_MASK},
+	{"C2H_ERR_STAT_DESC_RSP_ERR",
+		C2H_ERR_STAT_DESC_RSP_ERR_MASK},
+	{"C2H_ERR_STAT_QID_MISMATCH",
+		C2H_ERR_STAT_QID_MISMATCH_MASK},
+	{"C2H_ERR_STAT_SH_CMPT_DSC_ERR",
+		C2H_ERR_STAT_SH_CMPT_DSC_ERR_MASK},
+	{"C2H_ERR_STAT_LEN_MISMATCH",
+		C2H_ERR_STAT_LEN_MISMATCH_MASK},
+	{"C2H_ERR_STAT_MTY_MISMATCH",
+		C2H_ERR_STAT_MTY_MISMATCH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_err_mask_field_info[] = { /* C2H error enable/mask reg */
+	{"C2H_ERR_EN",
+		C2H_ERR_EN_MASK},
+};
+
+
+static struct regfield_info
+	c2h_fatal_err_stat_field_info[] = { /* C2H fatal error status reg fields */
+	{"C2H_FATAL_ERR_STAT_RSVD_1",
+		C2H_FATAL_ERR_STAT_RSVD_1_MASK},
+	{"C2H_FATAL_ERR_STAT_HDR_ECC_UNC_ERR",
+		C2H_FATAL_ERR_STAT_HDR_ECC_UNC_ERR_MASK},
+	{"C2H_FATAL_ERR_STAT_AVL_RING_FIFO_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_AVL_RING_FIFO_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_WPL_DATA_PAR_ERR",
+		C2H_FATAL_ERR_STAT_WPL_DATA_PAR_ERR_MASK},
+	{"C2H_FATAL_ERR_STAT_PLD_FIFO_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_PLD_FIFO_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_QID_FIFO_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_QID_FIFO_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_CMPT_FIFO_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_CMPT_FIFO_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_WRB_COAL_DATA_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_WRB_COAL_DATA_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_RESERVED2",
+		C2H_FATAL_ERR_STAT_RESERVED2_MASK},
+	{"C2H_FATAL_ERR_STAT_INT_CTXT_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_INT_CTXT_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_DESC_REQ_FIFO_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_DESC_REQ_FIFO_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_PFCH_CTXT_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_PFCH_CTXT_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_WRB_CTXT_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_WRB_CTXT_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_PFCH_LL_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_PFCH_LL_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_TIMER_FIFO_RAM_RDBE",
+		C2H_FATAL_ERR_STAT_TIMER_FIFO_RAM_RDBE_MASK},
+	{"C2H_FATAL_ERR_STAT_QID_MISMATCH",
+		C2H_FATAL_ERR_STAT_QID_MISMATCH_MASK},
+	{"C2H_FATAL_ERR_STAT_RESERVED1",
+		C2H_FATAL_ERR_STAT_RESERVED1_MASK},
+	{"C2H_FATAL_ERR_STAT_LEN_MISMATCH",
+		C2H_FATAL_ERR_STAT_LEN_MISMATCH_MASK},
+	{"C2H_FATAL_ERR_STAT_MTY_MISMATCH",
+		C2H_FATAL_ERR_STAT_MTY_MISMATCH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_fatal_err_mask_field_info[] = { /* C2H fatal error mask reg */
+	{"C2H_FATAL_ERR_C2HEN",
+		C2H_FATAL_ERR_C2HEN_MASK},
+};
+
+
+static struct regfield_info
+	c2h_fatal_err_enable_field_info[] = { /* C2H fatal error enable reg fields */
+	{"C2H_FATAL_ERR_ENABLE_RSVD_1",
+		C2H_FATAL_ERR_ENABLE_RSVD_1_MASK},
+	{"C2H_FATAL_ERR_ENABLE_WPL_PAR_INV",
+		C2H_FATAL_ERR_ENABLE_WPL_PAR_INV_MASK},
+	{"C2H_FATAL_ERR_ENABLE_WRQ_DIS",
+		C2H_FATAL_ERR_ENABLE_WRQ_DIS_MASK},
+};
+
+
+static struct regfield_info
+	glbl_err_int_field_info[] = { /* global error interrupt reg fields */
+	{"GLBL_ERR_INT_RSVD_1",
+		GLBL_ERR_INT_RSVD_1_MASK},
+	{"GLBL_ERR_INT_HOST_ID",
+		GLBL_ERR_INT_HOST_ID_MASK},
+	{"GLBL_ERR_INT_DIS_INTR_ON_VF",
+		GLBL_ERR_INT_DIS_INTR_ON_VF_MASK},
+	{"GLBL_ERR_INT_ARM",
+		GLBL_ERR_INT_ARM_MASK},
+	{"GLBL_ERR_INT_EN_COAL",
+		GLBL_ERR_INT_EN_COAL_MASK},
+	{"GLBL_ERR_INT_VEC",
+		GLBL_ERR_INT_VEC_MASK},
+	{"GLBL_ERR_INT_FUNC",
+		GLBL_ERR_INT_FUNC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pfch_cfg_field_info[] = { /* C2H prefetch config reg fields */
+	{"C2H_PFCH_CFG_EVTFL_TH",
+		C2H_PFCH_CFG_EVTFL_TH_MASK},
+	{"C2H_PFCH_CFG_FL_TH",
+		C2H_PFCH_CFG_FL_TH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pfch_cfg_1_field_info[] = { /* C2H prefetch config reg 1 fields */
+	{"C2H_PFCH_CFG_1_EVT_QCNT_TH",
+		C2H_PFCH_CFG_1_EVT_QCNT_TH_MASK},
+	{"C2H_PFCH_CFG_1_QCNT",
+		C2H_PFCH_CFG_1_QCNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pfch_cfg_2_field_info[] = { /* C2H prefetch config reg 2 fields */
+	{"C2H_PFCH_CFG_2_FENCE",
+		C2H_PFCH_CFG_2_FENCE_MASK},
+	{"C2H_PFCH_CFG_2_RSVD",
+		C2H_PFCH_CFG_2_RSVD_MASK},
+	{"C2H_PFCH_CFG_2_VAR_DESC_NO_DROP",
+		C2H_PFCH_CFG_2_VAR_DESC_NO_DROP_MASK},
+	{"C2H_PFCH_CFG_2_LL_SZ_TH",
+		C2H_PFCH_CFG_2_LL_SZ_TH_MASK},
+	{"C2H_PFCH_CFG_2_VAR_DESC_NUM",
+		C2H_PFCH_CFG_2_VAR_DESC_NUM_MASK},
+	{"C2H_PFCH_CFG_2_NUM",
+		C2H_PFCH_CFG_2_NUM_MASK},
+};
+
+
+static struct regfield_info
+	c2h_int_timer_tick_field_info[] = { /* C2H interrupt timer tick reg */
+	{"C2H_INT_TIMER_TICK",
+		C2H_INT_TIMER_TICK_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_desc_rsp_drop_accepted_field_info[] = { /* C2H dropped desc-response counter */
+	{"C2H_STAT_DESC_RSP_DROP_ACCEPTED_D",
+		C2H_STAT_DESC_RSP_DROP_ACCEPTED_D_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_desc_rsp_err_accepted_field_info[] = { /* C2H errored desc-response counter */
+	{"C2H_STAT_DESC_RSP_ERR_ACCEPTED_D",
+		C2H_STAT_DESC_RSP_ERR_ACCEPTED_D_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_desc_req_field_info[] = { /* C2H descriptor request counter */
+	{"C2H_STAT_DESC_REQ",
+		C2H_STAT_DESC_REQ_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_dbg_dma_eng_0_field_info[] = { /* C2H DMA engine debug reg 0 fields */
+	{"C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TVALID",
+		C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TVALID_MASK},
+	{"C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TREADY",
+		C2H_STAT_DMA_ENG_0_S_AXIS_C2H_TREADY_MASK},
+	{"C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TVALID",
+		C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TVALID_MASK},
+	{"C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TREADY",
+		C2H_STAT_DMA_ENG_0_S_AXIS_WRB_TREADY_MASK},
+	{"C2H_STAT_DMA_ENG_0_PLD_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_0_PLD_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_0_QID_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_0_QID_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_VLD",
+		C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_QID",
+		C2H_STAT_DMA_ENG_0_ARB_FIFO_OUT_QID_MASK},
+	{"C2H_STAT_DMA_ENG_0_WRB_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_0_WRB_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_0_WRB_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_0_WRB_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_0_WRB_SM_CS",
+		C2H_STAT_DMA_ENG_0_WRB_SM_CS_MASK},
+	{"C2H_STAT_DMA_ENG_0_MAIN_SM_CS",
+		C2H_STAT_DMA_ENG_0_MAIN_SM_CS_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_dbg_dma_eng_1_field_info[] = { /* C2H DMA engine debug reg 1 fields */
+	{"C2H_STAT_DMA_ENG_1_RSVD_1",
+		C2H_STAT_DMA_ENG_1_RSVD_1_MASK},
+	{"C2H_STAT_DMA_ENG_1_QID_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_1_QID_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_1_PLD_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_1_PLD_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_1_PLD_ST_FIFO_CNT",
+		C2H_STAT_DMA_ENG_1_PLD_ST_FIFO_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_dbg_dma_eng_2_field_info[] = { /* C2H DMA engine debug reg 2 fields */
+	{"C2H_STAT_DMA_ENG_2_RSVD_1",
+		C2H_STAT_DMA_ENG_2_RSVD_1_MASK},
+	{"C2H_STAT_DMA_ENG_2_QID_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_2_QID_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_2_PLD_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_2_PLD_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_2_PLD_ST_FIFO_CNT",
+		C2H_STAT_DMA_ENG_2_PLD_ST_FIFO_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_dbg_dma_eng_3_field_info[] = { /* C2H DMA engine debug reg 3 fields */
+	{"C2H_STAT_DMA_ENG_3_RSVD_1",
+		C2H_STAT_DMA_ENG_3_RSVD_1_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_3_QID_FIFO_OUT_VLD",
+		C2H_STAT_DMA_ENG_3_QID_FIFO_OUT_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_FIFO_OUT_VLD",
+		C2H_STAT_DMA_ENG_3_PLD_FIFO_OUT_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_VLD",
+		C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_EOP",
+		C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_EOP_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE",
+		C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_DROP",
+		C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_DROP_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_ERR",
+		C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_OUT_DATA_ERR_MASK},
+	{"C2H_STAT_DMA_ENG_3_DESC_CNT_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_3_DESC_CNT_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_3_DESC_RSP_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_3_DESC_RSP_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_PKT_ID_LARGER_0",
+		C2H_STAT_DMA_ENG_3_PLD_PKT_ID_LARGER_0_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_VLD",
+		C2H_STAT_DMA_ENG_3_WRQ_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_RDY",
+		C2H_STAT_DMA_ENG_3_WRQ_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_RDY",
+		C2H_STAT_DMA_ENG_3_WRQ_FIFO_OUT_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_DROP",
+		C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_DROP_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_ERR",
+		C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_ERR_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_MARKER",
+		C2H_STAT_DMA_ENG_3_WRQ_PACKET_OUT_DATA_MARKER_MASK},
+	{"C2H_STAT_DMA_ENG_3_WRQ_PACKET_PRE_EOR",
+		C2H_STAT_DMA_ENG_3_WRQ_PACKET_PRE_EOR_MASK},
+	{"C2H_STAT_DMA_ENG_3_WCP_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_3_WCP_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_3_PLD_ST_FIFO_IN_RDY_MASK},
+};
+
+
+static struct regfield_info
+	c2h_dbg_pfch_err_ctxt_field_info[] = { /* C2H prefetch error context reg fields */
+	{"C2H_PFCH_ERR_CTXT_RSVD_1",
+		C2H_PFCH_ERR_CTXT_RSVD_1_MASK},
+	{"C2H_PFCH_ERR_CTXT_ERR_STAT",
+		C2H_PFCH_ERR_CTXT_ERR_STAT_MASK},
+	{"C2H_PFCH_ERR_CTXT_CMD_WR",
+		C2H_PFCH_ERR_CTXT_CMD_WR_MASK},
+	{"C2H_PFCH_ERR_CTXT_QID",
+		C2H_PFCH_ERR_CTXT_QID_MASK},
+	{"C2H_PFCH_ERR_CTXT_DONE",
+		C2H_PFCH_ERR_CTXT_DONE_MASK},
+};
+
+
+static struct regfield_info
+	c2h_first_err_qid_field_info[] = { /* C2H first-error QID reg fields */
+	{"C2H_FIRST_ERR_QID_RSVD_1",
+		C2H_FIRST_ERR_QID_RSVD_1_MASK},
+	{"C2H_FIRST_ERR_QID_ERR_TYPE",
+		C2H_FIRST_ERR_QID_ERR_TYPE_MASK},
+	{"C2H_FIRST_ERR_QID_RSVD",
+		C2H_FIRST_ERR_QID_RSVD_MASK},
+	{"C2H_FIRST_ERR_QID_QID",
+		C2H_FIRST_ERR_QID_QID_MASK},
+};
+
+
+static struct regfield_info
+	stat_num_wrb_in_field_info[] = { /* STAT_NUM_* counter regs: RSVD + count field each */
+	{"STAT_NUM_WRB_IN_RSVD_1",
+		STAT_NUM_WRB_IN_RSVD_1_MASK},
+	{"STAT_NUM_WRB_IN_WRB_CNT",
+		STAT_NUM_WRB_IN_WRB_CNT_MASK},
+};
+
+
+static struct regfield_info
+	stat_num_wrb_out_field_info[] = {
+	{"STAT_NUM_WRB_OUT_RSVD_1",
+		STAT_NUM_WRB_OUT_RSVD_1_MASK},
+	{"STAT_NUM_WRB_OUT_WRB_CNT",
+		STAT_NUM_WRB_OUT_WRB_CNT_MASK},
+};
+
+
+static struct regfield_info
+	stat_num_wrb_drp_field_info[] = {
+	{"STAT_NUM_WRB_DRP_RSVD_1",
+		STAT_NUM_WRB_DRP_RSVD_1_MASK},
+	{"STAT_NUM_WRB_DRP_WRB_CNT",
+		STAT_NUM_WRB_DRP_WRB_CNT_MASK},
+};
+
+
+static struct regfield_info
+	stat_num_stat_desc_out_field_info[] = {
+	{"STAT_NUM_STAT_DESC_OUT_RSVD_1",
+		STAT_NUM_STAT_DESC_OUT_RSVD_1_MASK},
+	{"STAT_NUM_STAT_DESC_OUT_CNT",
+		STAT_NUM_STAT_DESC_OUT_CNT_MASK},
+};
+
+
+static struct regfield_info
+	stat_num_dsc_crdt_sent_field_info[] = {
+	{"STAT_NUM_DSC_CRDT_SENT_RSVD_1",
+		STAT_NUM_DSC_CRDT_SENT_RSVD_1_MASK},
+	{"STAT_NUM_DSC_CRDT_SENT_CNT",
+		STAT_NUM_DSC_CRDT_SENT_CNT_MASK},
+};
+
+
+static struct regfield_info
+	stat_num_fch_dsc_rcvd_field_info[] = {
+	{"STAT_NUM_FCH_DSC_RCVD_RSVD_1",
+		STAT_NUM_FCH_DSC_RCVD_RSVD_1_MASK},
+	{"STAT_NUM_FCH_DSC_RCVD_DSC_CNT",
+		STAT_NUM_FCH_DSC_RCVD_DSC_CNT_MASK},
+};
+
+
+static struct regfield_info
+	stat_num_byp_dsc_rcvd_field_info[] = {
+	{"STAT_NUM_BYP_DSC_RCVD_RSVD_1",
+		STAT_NUM_BYP_DSC_RCVD_RSVD_1_MASK},
+	{"STAT_NUM_BYP_DSC_RCVD_DSC_CNT",
+		STAT_NUM_BYP_DSC_RCVD_DSC_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_wrb_coal_cfg_field_info[] = { /* C2H writeback coalesce config reg fields */
+	{"C2H_WRB_COAL_CFG_MAX_BUF_SZ",
+		C2H_WRB_COAL_CFG_MAX_BUF_SZ_MASK},
+	{"C2H_WRB_COAL_CFG_TICK_VAL",
+		C2H_WRB_COAL_CFG_TICK_VAL_MASK},
+	{"C2H_WRB_COAL_CFG_TICK_CNT",
+		C2H_WRB_COAL_CFG_TICK_CNT_MASK},
+	{"C2H_WRB_COAL_CFG_SET_GLB_FLUSH",
+		C2H_WRB_COAL_CFG_SET_GLB_FLUSH_MASK},
+	{"C2H_WRB_COAL_CFG_DONE_GLB_FLUSH",
+		C2H_WRB_COAL_CFG_DONE_GLB_FLUSH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_h2c_req_field_info[] = {
+	{"C2H_INTR_H2C_REQ_RSVD_1",
+		C2H_INTR_H2C_REQ_RSVD_1_MASK},
+	{"C2H_INTR_H2C_REQ_CNT",
+		C2H_INTR_H2C_REQ_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_c2h_mm_req_field_info[] = {
+	{"C2H_INTR_C2H_MM_REQ_RSVD_1",
+		C2H_INTR_C2H_MM_REQ_RSVD_1_MASK},
+	{"C2H_INTR_C2H_MM_REQ_CNT",
+		C2H_INTR_C2H_MM_REQ_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_err_int_req_field_info[] = {
+	{"C2H_INTR_ERR_INT_REQ_RSVD_1",
+		C2H_INTR_ERR_INT_REQ_RSVD_1_MASK},
+	{"C2H_INTR_ERR_INT_REQ_CNT",
+		C2H_INTR_ERR_INT_REQ_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_c2h_st_req_field_info[] = {
+	{"C2H_INTR_C2H_ST_REQ_RSVD_1",
+		C2H_INTR_C2H_ST_REQ_RSVD_1_MASK},
+	{"C2H_INTR_C2H_ST_REQ_CNT",
+		C2H_INTR_C2H_ST_REQ_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_h2c_err_c2h_mm_msix_ack_field_info[] = {
+	{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_RSVD_1",
+		C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_RSVD_1_MASK},
+	{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_CNT",
+		C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_h2c_err_c2h_mm_msix_fail_field_info[] = {
+	{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_RSVD_1",
+		C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_RSVD_1_MASK},
+	{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_CNT",
+		C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_h2c_err_c2h_mm_msix_no_msix_field_info[] = {
+	{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_RSVD_1",
+		C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_RSVD_1_MASK},
+	{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_CNT",
+		C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_h2c_err_c2h_mm_ctxt_inval_field_info[] = {
+	{"C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_RSVD_1",
+		C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_RSVD_1_MASK},
+	{"C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_CNT",
+		C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_c2h_st_msix_ack_field_info[] = {
+	{"C2H_INTR_C2H_ST_MSIX_ACK_RSVD_1",
+		C2H_INTR_C2H_ST_MSIX_ACK_RSVD_1_MASK},
+	{"C2H_INTR_C2H_ST_MSIX_ACK_CNT",
+		C2H_INTR_C2H_ST_MSIX_ACK_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_c2h_st_msix_fail_field_info[] = {
+	{"C2H_INTR_C2H_ST_MSIX_FAIL_RSVD_1",
+		C2H_INTR_C2H_ST_MSIX_FAIL_RSVD_1_MASK},
+	{"C2H_INTR_C2H_ST_MSIX_FAIL_CNT",
+		C2H_INTR_C2H_ST_MSIX_FAIL_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_c2h_st_no_msix_field_info[] = {
+	{"C2H_INTR_C2H_ST_NO_MSIX_RSVD_1",
+		C2H_INTR_C2H_ST_NO_MSIX_RSVD_1_MASK},
+	{"C2H_INTR_C2H_ST_NO_MSIX_CNT",
+		C2H_INTR_C2H_ST_NO_MSIX_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_c2h_st_ctxt_inval_field_info[] = {
+	{"C2H_INTR_C2H_ST_CTXT_INVAL_RSVD_1",
+		C2H_INTR_C2H_ST_CTXT_INVAL_RSVD_1_MASK},
+	{"C2H_INTR_C2H_ST_CTXT_INVAL_CNT",
+		C2H_INTR_C2H_ST_CTXT_INVAL_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_wr_cmp_field_info[] = {
+	{"C2H_STAT_WR_CMP_RSVD_1",
+		C2H_STAT_WR_CMP_RSVD_1_MASK},
+	{"C2H_STAT_WR_CMP_CNT",
+		C2H_STAT_WR_CMP_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_dbg_dma_eng_4_field_info[] = {
+	{"C2H_STAT_DMA_ENG_4_RSVD_1",
+		C2H_STAT_DMA_ENG_4_RSVD_1_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_4_QID_FIFO_OUT_VLD",
+		C2H_STAT_DMA_ENG_4_QID_FIFO_OUT_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_FIFO_OUT_VLD",
+		C2H_STAT_DMA_ENG_4_PLD_FIFO_OUT_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_VLD",
+		C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_EOP",
+		C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_EOP_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE",
+		C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_AVL_IDX_ENABLE_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_DROP",
+		C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_DROP_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_ERR",
+		C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_OUT_DATA_ERR_MASK},
+	{"C2H_STAT_DMA_ENG_4_DESC_CNT_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_4_DESC_CNT_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_4_DESC_RSP_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_4_DESC_RSP_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_PKT_ID_LARGER_0",
+		C2H_STAT_DMA_ENG_4_PLD_PKT_ID_LARGER_0_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_VLD",
+		C2H_STAT_DMA_ENG_4_WRQ_VLD_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_RDY",
+		C2H_STAT_DMA_ENG_4_WRQ_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_RDY",
+		C2H_STAT_DMA_ENG_4_WRQ_FIFO_OUT_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_DROP",
+		C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_DROP_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_ERR",
+		C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_ERR_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_MARKER",
+		C2H_STAT_DMA_ENG_4_WRQ_PACKET_OUT_DATA_MARKER_MASK},
+	{"C2H_STAT_DMA_ENG_4_WRQ_PACKET_PRE_EOR",
+		C2H_STAT_DMA_ENG_4_WRQ_PACKET_PRE_EOR_MASK},
+	{"C2H_STAT_DMA_ENG_4_WCP_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_4_WCP_FIFO_IN_RDY_MASK},
+	{"C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_IN_RDY",
+		C2H_STAT_DMA_ENG_4_PLD_ST_FIFO_IN_RDY_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_dbg_dma_eng_5_field_info[] = {
+	{"C2H_STAT_DMA_ENG_5_RSVD_1",
+		C2H_STAT_DMA_ENG_5_RSVD_1_MASK},
+	{"C2H_STAT_DMA_ENG_5_WRB_SM_VIRT_CH",
+		C2H_STAT_DMA_ENG_5_WRB_SM_VIRT_CH_MASK},
+	{"C2H_STAT_DMA_ENG_5_WRB_FIFO_IN_REQ",
+		C2H_STAT_DMA_ENG_5_WRB_FIFO_IN_REQ_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_CNT",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_CNT_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_LEN",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_LEN_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VIRT_CH",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VIRT_CH_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VAR_DESC",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_VAR_DESC_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_DROP_REQ",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_DROP_REQ_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_NUM_BUF_OV",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_NUM_BUF_OV_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_MARKER",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_MARKER_MASK},
+	{"C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_HAS_CMPT",
+		C2H_STAT_DMA_ENG_5_ARB_FIFO_OUT_DATA_HAS_CMPT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_dbg_pfch_qid_field_info[] = {
+	{"C2H_PFCH_QID_RSVD_1",
+		C2H_PFCH_QID_RSVD_1_MASK},
+	{"C2H_PFCH_QID_ERR_CTXT",
+		C2H_PFCH_QID_ERR_CTXT_MASK},
+	{"C2H_PFCH_QID_TARGET",
+		C2H_PFCH_QID_TARGET_MASK},
+	{"C2H_PFCH_QID_QID_OR_TAG",
+		C2H_PFCH_QID_QID_OR_TAG_MASK},
+};
+
+
+static struct regfield_info
+	c2h_dbg_pfch_field_info[] = {
+	{"C2H_PFCH_DATA",
+		C2H_PFCH_DATA_MASK},
+};
+
+
+static struct regfield_info
+	c2h_int_dbg_field_info[] = {
+	{"C2H_INT_RSVD_1",
+		C2H_INT_RSVD_1_MASK},
+	{"C2H_INT_INT_COAL_SM",
+		C2H_INT_INT_COAL_SM_MASK},
+	{"C2H_INT_INT_SM",
+		C2H_INT_INT_SM_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_imm_accepted_field_info[] = {
+	{"C2H_STAT_IMM_ACCEPTED_RSVD_1",
+		C2H_STAT_IMM_ACCEPTED_RSVD_1_MASK},
+	{"C2H_STAT_IMM_ACCEPTED_CNT",
+		C2H_STAT_IMM_ACCEPTED_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_marker_accepted_field_info[] = {
+	{"C2H_STAT_MARKER_ACCEPTED_RSVD_1",
+		C2H_STAT_MARKER_ACCEPTED_RSVD_1_MASK},
+	{"C2H_STAT_MARKER_ACCEPTED_CNT",
+		C2H_STAT_MARKER_ACCEPTED_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_disable_cmp_accepted_field_info[] = {
+	{"C2H_STAT_DISABLE_CMP_ACCEPTED_RSVD_1",
+		C2H_STAT_DISABLE_CMP_ACCEPTED_RSVD_1_MASK},
+	{"C2H_STAT_DISABLE_CMP_ACCEPTED_CNT",
+		C2H_STAT_DISABLE_CMP_ACCEPTED_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pld_fifo_crdt_cnt_field_info[] = {
+	{"C2H_PLD_FIFO_CRDT_CNT_RSVD_1",
+		C2H_PLD_FIFO_CRDT_CNT_RSVD_1_MASK},
+	{"C2H_PLD_FIFO_CRDT_CNT_CNT",
+		C2H_PLD_FIFO_CRDT_CNT_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_dyn_req_field_info[] = {
+	{"C2H_INTR_DYN_REQ_RSVD_1",
+		C2H_INTR_DYN_REQ_RSVD_1_MASK},
+	{"C2H_INTR_DYN_REQ_CNT",
+		C2H_INTR_DYN_REQ_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_intr_dyn_misc_field_info[] = {
+	{"C2H_INTR_DYN_MISC_RSVD_1",
+		C2H_INTR_DYN_MISC_RSVD_1_MASK},
+	{"C2H_INTR_DYN_MISC_CNT",
+		C2H_INTR_DYN_MISC_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_drop_len_mismatch_field_info[] = {
+	{"C2H_DROP_LEN_MISMATCH_RSVD_1",
+		C2H_DROP_LEN_MISMATCH_RSVD_1_MASK},
+	{"C2H_DROP_LEN_MISMATCH_CNT",
+		C2H_DROP_LEN_MISMATCH_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_drop_desc_rsp_len_field_info[] = {
+	{"C2H_DROP_DESC_RSP_LEN_RSVD_1",
+		C2H_DROP_DESC_RSP_LEN_RSVD_1_MASK},
+	{"C2H_DROP_DESC_RSP_LEN_CNT",
+		C2H_DROP_DESC_RSP_LEN_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_drop_qid_fifo_len_field_info[] = {
+	{"C2H_DROP_QID_FIFO_LEN_RSVD_1",
+		C2H_DROP_QID_FIFO_LEN_RSVD_1_MASK},
+	{"C2H_DROP_QID_FIFO_LEN_CNT",
+		C2H_DROP_QID_FIFO_LEN_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_drop_pld_cnt_field_info[] = {
+	{"C2H_DROP_PLD_CNT_RSVD_1",
+		C2H_DROP_PLD_CNT_RSVD_1_MASK},
+	{"C2H_DROP_PLD_CNT_CNT",
+		C2H_DROP_PLD_CNT_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cmpt_format_0_field_info[] = {
+	{"C2H_CMPT_FORMAT_0_DESC_ERR_LOC",
+		C2H_CMPT_FORMAT_0_DESC_ERR_LOC_MASK},
+	{"C2H_CMPT_FORMAT_0_COLOR_LOC",
+		C2H_CMPT_FORMAT_0_COLOR_LOC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cmpt_format_1_field_info[] = {
+	{"C2H_CMPT_FORMAT_1_DESC_ERR_LOC",
+		C2H_CMPT_FORMAT_1_DESC_ERR_LOC_MASK},
+	{"C2H_CMPT_FORMAT_1_COLOR_LOC",
+		C2H_CMPT_FORMAT_1_COLOR_LOC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cmpt_format_2_field_info[] = {
+	{"C2H_CMPT_FORMAT_2_DESC_ERR_LOC",
+		C2H_CMPT_FORMAT_2_DESC_ERR_LOC_MASK},
+	{"C2H_CMPT_FORMAT_2_COLOR_LOC",
+		C2H_CMPT_FORMAT_2_COLOR_LOC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cmpt_format_3_field_info[] = {
+	{"C2H_CMPT_FORMAT_3_DESC_ERR_LOC",
+		C2H_CMPT_FORMAT_3_DESC_ERR_LOC_MASK},
+	{"C2H_CMPT_FORMAT_3_COLOR_LOC",
+		C2H_CMPT_FORMAT_3_COLOR_LOC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cmpt_format_4_field_info[] = {
+	{"C2H_CMPT_FORMAT_4_DESC_ERR_LOC",
+		C2H_CMPT_FORMAT_4_DESC_ERR_LOC_MASK},
+	{"C2H_CMPT_FORMAT_4_COLOR_LOC",
+		C2H_CMPT_FORMAT_4_COLOR_LOC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cmpt_format_5_field_info[] = {
+	{"C2H_CMPT_FORMAT_5_DESC_ERR_LOC",
+		C2H_CMPT_FORMAT_5_DESC_ERR_LOC_MASK},
+	{"C2H_CMPT_FORMAT_5_COLOR_LOC",
+		C2H_CMPT_FORMAT_5_COLOR_LOC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_cmpt_format_6_field_info[] = {
+	{"C2H_CMPT_FORMAT_6_DESC_ERR_LOC",
+		C2H_CMPT_FORMAT_6_DESC_ERR_LOC_MASK},
+	{"C2H_CMPT_FORMAT_6_COLOR_LOC",
+		C2H_CMPT_FORMAT_6_COLOR_LOC_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pfch_cache_depth_field_info[] = {
+	{"C2H_PFCH_CACHE_DEPTH_MAX_STBUF",
+		C2H_PFCH_CACHE_DEPTH_MAX_STBUF_MASK},
+	{"C2H_PFCH_CACHE_DEPTH",
+		C2H_PFCH_CACHE_DEPTH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_wrb_coal_buf_depth_field_info[] = {
+	{"C2H_WRB_COAL_BUF_DEPTH_RSVD_1",
+		C2H_WRB_COAL_BUF_DEPTH_RSVD_1_MASK},
+	{"C2H_WRB_COAL_BUF_DEPTH_BUFFER",
+		C2H_WRB_COAL_BUF_DEPTH_BUFFER_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pfch_crdt_field_info[] = {
+	{"C2H_PFCH_CRDT_RSVD_1",
+		C2H_PFCH_CRDT_RSVD_1_MASK},
+	{"C2H_PFCH_CRDT_RSVD_2",
+		C2H_PFCH_CRDT_RSVD_2_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_has_cmpt_accepted_field_info[] = {
+	{"C2H_STAT_HAS_CMPT_ACCEPTED_RSVD_1",
+		C2H_STAT_HAS_CMPT_ACCEPTED_RSVD_1_MASK},
+	{"C2H_STAT_HAS_CMPT_ACCEPTED_CNT",
+		C2H_STAT_HAS_CMPT_ACCEPTED_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_stat_has_pld_accepted_field_info[] = {
+	{"C2H_STAT_HAS_PLD_ACCEPTED_RSVD_1",
+		C2H_STAT_HAS_PLD_ACCEPTED_RSVD_1_MASK},
+	{"C2H_STAT_HAS_PLD_ACCEPTED_CNT",
+		C2H_STAT_HAS_PLD_ACCEPTED_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pld_pkt_id_field_info[] = {
+	{"C2H_PLD_PKT_ID_CMPT_WAIT",
+		C2H_PLD_PKT_ID_CMPT_WAIT_MASK},
+	{"C2H_PLD_PKT_ID_DATA",
+		C2H_PLD_PKT_ID_DATA_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pld_pkt_id_1_field_info[] = {
+	{"C2H_PLD_PKT_ID_1_CMPT_WAIT",
+		C2H_PLD_PKT_ID_1_CMPT_WAIT_MASK},
+	{"C2H_PLD_PKT_ID_1_DATA",
+		C2H_PLD_PKT_ID_1_DATA_MASK},
+};
+
+
+static struct regfield_info
+	c2h_drop_pld_cnt_1_field_info[] = {
+	{"C2H_DROP_PLD_CNT_1_RSVD_1",
+		C2H_DROP_PLD_CNT_1_RSVD_1_MASK},
+	{"C2H_DROP_PLD_CNT_1_CNT",
+		C2H_DROP_PLD_CNT_1_CNT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_err_stat_field_info[] = {
+	{"H2C_ERR_STAT_RSVD_1",
+		H2C_ERR_STAT_RSVD_1_MASK},
+	{"H2C_ERR_STAT_PAR_ERR",
+		H2C_ERR_STAT_PAR_ERR_MASK},
+	{"H2C_ERR_STAT_SBE",
+		H2C_ERR_STAT_SBE_MASK},
+	{"H2C_ERR_STAT_DBE",
+		H2C_ERR_STAT_DBE_MASK},
+	{"H2C_ERR_STAT_NO_DMA_DS",
+		H2C_ERR_STAT_NO_DMA_DS_MASK},
+	{"H2C_ERR_STAT_SDI_MRKR_REQ_MOP_ERR",
+		H2C_ERR_STAT_SDI_MRKR_REQ_MOP_ERR_MASK},
+	{"H2C_ERR_STAT_ZERO_LEN_DS",
+		H2C_ERR_STAT_ZERO_LEN_DS_MASK},
+};
+
+
+static struct regfield_info
+	h2c_err_mask_field_info[] = {
+	{"H2C_ERR_EN",
+		H2C_ERR_EN_MASK},
+};
+
+
+static struct regfield_info
+	h2c_first_err_qid_field_info[] = {
+	{"H2C_FIRST_ERR_QID_RSVD_1",
+		H2C_FIRST_ERR_QID_RSVD_1_MASK},
+	{"H2C_FIRST_ERR_QID_ERR_TYPE",
+		H2C_FIRST_ERR_QID_ERR_TYPE_MASK},
+	{"H2C_FIRST_ERR_QID_RSVD_2",
+		H2C_FIRST_ERR_QID_RSVD_2_MASK},
+	{"H2C_FIRST_ERR_QID_QID",
+		H2C_FIRST_ERR_QID_QID_MASK},
+};
+
+
+static struct regfield_info
+	h2c_dbg_reg0_field_info[] = {
+	{"H2C_REG0_NUM_DSC_RCVD",
+		H2C_REG0_NUM_DSC_RCVD_MASK},
+	{"H2C_REG0_NUM_WRB_SENT",
+		H2C_REG0_NUM_WRB_SENT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_dbg_reg1_field_info[] = {
+	{"H2C_REG1_NUM_REQ_SENT",
+		H2C_REG1_NUM_REQ_SENT_MASK},
+	{"H2C_REG1_NUM_CMP_SENT",
+		H2C_REG1_NUM_CMP_SENT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_dbg_reg2_field_info[] = {
+	{"H2C_REG2_RSVD_1",
+		H2C_REG2_RSVD_1_MASK},
+	{"H2C_REG2_NUM_ERR_DSC_RCVD",
+		H2C_REG2_NUM_ERR_DSC_RCVD_MASK},
+};
+
+
+static struct regfield_info
+	h2c_dbg_reg3_field_info[] = {
+	{"H2C_REG3_RSVD_1",
+		H2C_REG3_RSVD_1_MASK},
+	{"H2C_REG3_DSCO_FIFO_EMPTY",
+		H2C_REG3_DSCO_FIFO_EMPTY_MASK},
+	{"H2C_REG3_DSCO_FIFO_FULL",
+		H2C_REG3_DSCO_FIFO_FULL_MASK},
+	{"H2C_REG3_CUR_RC_STATE",
+		H2C_REG3_CUR_RC_STATE_MASK},
+	{"H2C_REG3_RDREQ_LINES",
+		H2C_REG3_RDREQ_LINES_MASK},
+	{"H2C_REG3_RDATA_LINES_AVAIL",
+		H2C_REG3_RDATA_LINES_AVAIL_MASK},
+	{"H2C_REG3_PEND_FIFO_EMPTY",
+		H2C_REG3_PEND_FIFO_EMPTY_MASK},
+	{"H2C_REG3_PEND_FIFO_FULL",
+		H2C_REG3_PEND_FIFO_FULL_MASK},
+	{"H2C_REG3_CUR_RQ_STATE",
+		H2C_REG3_CUR_RQ_STATE_MASK},
+	{"H2C_REG3_DSCI_FIFO_FULL",
+		H2C_REG3_DSCI_FIFO_FULL_MASK},
+	{"H2C_REG3_DSCI_FIFO_EMPTY",
+		H2C_REG3_DSCI_FIFO_EMPTY_MASK},
+};
+
+
+static struct regfield_info
+	h2c_dbg_reg4_field_info[] = {
+	{"H2C_REG4_RDREQ_ADDR",
+		H2C_REG4_RDREQ_ADDR_MASK},
+};
+
+
+static struct regfield_info
+	h2c_fatal_err_en_field_info[] = {
+	{"H2C_FATAL_ERR_EN_RSVD_1",
+		H2C_FATAL_ERR_EN_RSVD_1_MASK},
+	{"H2C_FATAL_ERR_EN_H2C",
+		H2C_FATAL_ERR_EN_H2C_MASK},
+};
+
+
+static struct regfield_info
+	h2c_req_throt_pcie_field_info[] = {
+	{"H2C_REQ_THROT_PCIE_EN_REQ",
+		H2C_REQ_THROT_PCIE_EN_REQ_MASK},
+	{"H2C_REQ_THROT_PCIE",
+		H2C_REQ_THROT_PCIE_MASK},
+	{"H2C_REQ_THROT_PCIE_EN_DATA",
+		H2C_REQ_THROT_PCIE_EN_DATA_MASK},
+	{"H2C_REQ_THROT_PCIE_DATA_THRESH",
+		H2C_REQ_THROT_PCIE_DATA_THRESH_MASK},
+};
+
+
+static struct regfield_info
+	h2c_aln_dbg_reg0_field_info[] = {
+	{"H2C_ALN_REG0_NUM_PKT_SENT",
+		H2C_ALN_REG0_NUM_PKT_SENT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_req_throt_aximm_field_info[] = {
+	{"H2C_REQ_THROT_AXIMM_EN_REQ",
+		H2C_REQ_THROT_AXIMM_EN_REQ_MASK},
+	{"H2C_REQ_THROT_AXIMM",
+		H2C_REQ_THROT_AXIMM_MASK},
+	{"H2C_REQ_THROT_AXIMM_EN_DATA",
+		H2C_REQ_THROT_AXIMM_EN_DATA_MASK},
+	{"H2C_REQ_THROT_AXIMM_DATA_THRESH",
+		H2C_REQ_THROT_AXIMM_DATA_THRESH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_ctl_field_info[] = {
+	{"C2H_MM_CTL_RESERVED1",
+		C2H_MM_CTL_RESERVED1_MASK},
+	{"C2H_MM_CTL_ERRC_EN",
+		C2H_MM_CTL_ERRC_EN_MASK},
+	{"C2H_MM_CTL_RESERVED0",
+		C2H_MM_CTL_RESERVED0_MASK},
+	{"C2H_MM_CTL_RUN",
+		C2H_MM_CTL_RUN_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_status_field_info[] = {
+	{"C2H_MM_STATUS_RSVD_1",
+		C2H_MM_STATUS_RSVD_1_MASK},
+	{"C2H_MM_STATUS_RUN",
+		C2H_MM_STATUS_RUN_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_cmpl_desc_cnt_field_info[] = {
+	{"C2H_MM_CMPL_DESC_CNT_C2H_CO",
+		C2H_MM_CMPL_DESC_CNT_C2H_CO_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_err_code_enable_mask_field_info[] = {
+	{"C2H_MM_ERR_CODE_ENABLE_RESERVED1",
+		C2H_MM_ERR_CODE_ENABLE_RESERVED1_MASK},
+	{"C2H_MM_ERR_CODE_ENABLE_WR_UC_RAM",
+		C2H_MM_ERR_CODE_ENABLE_WR_UC_RAM_MASK},
+	{"C2H_MM_ERR_CODE_ENABLE_WR_UR",
+		C2H_MM_ERR_CODE_ENABLE_WR_UR_MASK},
+	{"C2H_MM_ERR_CODE_ENABLE_WR_FLR",
+		C2H_MM_ERR_CODE_ENABLE_WR_FLR_MASK},
+	{"C2H_MM_ERR_CODE_ENABLE_RESERVED0",
+		C2H_MM_ERR_CODE_ENABLE_RESERVED0_MASK},
+	{"C2H_MM_ERR_CODE_ENABLE_RD_SLV_ERR",
+		C2H_MM_ERR_CODE_ENABLE_RD_SLV_ERR_MASK},
+	{"C2H_MM_ERR_CODE_ENABLE_WR_SLV_ERR",
+		C2H_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_err_code_field_info[] = {
+	{"C2H_MM_ERR_CODE_RESERVED1",
+		C2H_MM_ERR_CODE_RESERVED1_MASK},
+	{"C2H_MM_ERR_CODE_CIDX",
+		C2H_MM_ERR_CODE_CIDX_MASK},
+	{"C2H_MM_ERR_CODE_RESERVED0",
+		C2H_MM_ERR_CODE_RESERVED0_MASK},
+	{"C2H_MM_ERR_CODE_SUB_TYPE",
+		C2H_MM_ERR_CODE_SUB_TYPE_MASK},
+	{"C2H_MM_ERR_CODE",
+		C2H_MM_ERR_CODE_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_err_info_field_info[] = {
+	{"C2H_MM_ERR_INFO_VALID",
+		C2H_MM_ERR_INFO_VALID_MASK},
+	{"C2H_MM_ERR_INFO_SEL",
+		C2H_MM_ERR_INFO_SEL_MASK},
+	{"C2H_MM_ERR_INFO_RSVD_1",
+		C2H_MM_ERR_INFO_RSVD_1_MASK},
+	{"C2H_MM_ERR_INFO_QID",
+		C2H_MM_ERR_INFO_QID_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_perf_mon_ctl_field_info[] = {
+	{"C2H_MM_PERF_MON_CTL_RSVD_1",
+		C2H_MM_PERF_MON_CTL_RSVD_1_MASK},
+	{"C2H_MM_PERF_MON_CTL_IMM_START",
+		C2H_MM_PERF_MON_CTL_IMM_START_MASK},
+	{"C2H_MM_PERF_MON_CTL_RUN_START",
+		C2H_MM_PERF_MON_CTL_RUN_START_MASK},
+	{"C2H_MM_PERF_MON_CTL_IMM_CLEAR",
+		C2H_MM_PERF_MON_CTL_IMM_CLEAR_MASK},
+	{"C2H_MM_PERF_MON_CTL_RUN_CLEAR",
+		C2H_MM_PERF_MON_CTL_RUN_CLEAR_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_perf_mon_cycle_cnt0_field_info[] = {
+	{"C2H_MM_PERF_MON_CYCLE_CNT0_CYC_CNT",
+		C2H_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_perf_mon_cycle_cnt1_field_info[] = {
+	{"C2H_MM_PERF_MON_CYCLE_CNT1_RSVD_1",
+		C2H_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK},
+	{"C2H_MM_PERF_MON_CYCLE_CNT1_CYC_CNT",
+		C2H_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_perf_mon_data_cnt0_field_info[] = {
+	{"C2H_MM_PERF_MON_DATA_CNT0_DCNT",
+		C2H_MM_PERF_MON_DATA_CNT0_DCNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_perf_mon_data_cnt1_field_info[] = {
+	{"C2H_MM_PERF_MON_DATA_CNT1_RSVD_1",
+		C2H_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK},
+	{"C2H_MM_PERF_MON_DATA_CNT1_DCNT",
+		C2H_MM_PERF_MON_DATA_CNT1_DCNT_MASK},
+};
+
+
+static struct regfield_info
+	c2h_mm_dbg_field_info[] = {
+	{"C2H_MM_RSVD_1",
+		C2H_MM_RSVD_1_MASK},
+	{"C2H_MM_RRQ_ENTRIES",
+		C2H_MM_RRQ_ENTRIES_MASK},
+	{"C2H_MM_DAT_FIFO_SPC",
+		C2H_MM_DAT_FIFO_SPC_MASK},
+	{"C2H_MM_RD_STALL",
+		C2H_MM_RD_STALL_MASK},
+	{"C2H_MM_RRQ_FIFO_FI",
+		C2H_MM_RRQ_FIFO_FI_MASK},
+	{"C2H_MM_WR_STALL",
+		C2H_MM_WR_STALL_MASK},
+	{"C2H_MM_WRQ_FIFO_FI",
+		C2H_MM_WRQ_FIFO_FI_MASK},
+	{"C2H_MM_WBK_STALL",
+		C2H_MM_WBK_STALL_MASK},
+	{"C2H_MM_DSC_FIFO_EP",
+		C2H_MM_DSC_FIFO_EP_MASK},
+	{"C2H_MM_DSC_FIFO_FL",
+		C2H_MM_DSC_FIFO_FL_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_ctl_field_info[] = {
+	{"H2C_MM_CTL_RESERVED1",
+		H2C_MM_CTL_RESERVED1_MASK},
+	{"H2C_MM_CTL_ERRC_EN",
+		H2C_MM_CTL_ERRC_EN_MASK},
+	{"H2C_MM_CTL_RESERVED0",
+		H2C_MM_CTL_RESERVED0_MASK},
+	{"H2C_MM_CTL_RUN",
+		H2C_MM_CTL_RUN_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_status_field_info[] = {
+	{"H2C_MM_STATUS_RSVD_1",
+		H2C_MM_STATUS_RSVD_1_MASK},
+	{"H2C_MM_STATUS_RUN",
+		H2C_MM_STATUS_RUN_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_cmpl_desc_cnt_field_info[] = {
+	{"H2C_MM_CMPL_DESC_CNT_H2C_CO",
+		H2C_MM_CMPL_DESC_CNT_H2C_CO_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_err_code_enable_mask_field_info[] = {
+	{"H2C_MM_ERR_CODE_ENABLE_RESERVED5",
+		H2C_MM_ERR_CODE_ENABLE_RESERVED5_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_WR_SLV_ERR",
+		H2C_MM_ERR_CODE_ENABLE_WR_SLV_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_WR_DEC_ERR",
+		H2C_MM_ERR_CODE_ENABLE_WR_DEC_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RESERVED4",
+		H2C_MM_ERR_CODE_ENABLE_RESERVED4_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_RQ_DIS_ERR",
+		H2C_MM_ERR_CODE_ENABLE_RD_RQ_DIS_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RESERVED3",
+		H2C_MM_ERR_CODE_ENABLE_RESERVED3_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_DAT_POISON_ERR",
+		H2C_MM_ERR_CODE_ENABLE_RD_DAT_POISON_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RESERVED2",
+		H2C_MM_ERR_CODE_ENABLE_RESERVED2_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_FLR_ERR",
+		H2C_MM_ERR_CODE_ENABLE_RD_FLR_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RESERVED1",
+		H2C_MM_ERR_CODE_ENABLE_RESERVED1_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_HDR_ADR_ERR",
+		H2C_MM_ERR_CODE_ENABLE_RD_HDR_ADR_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_HDR_PARA",
+		H2C_MM_ERR_CODE_ENABLE_RD_HDR_PARA_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_HDR_BYTE_ERR",
+		H2C_MM_ERR_CODE_ENABLE_RD_HDR_BYTE_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_UR_CA",
+		H2C_MM_ERR_CODE_ENABLE_RD_UR_CA_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RD_HRD_POISON_ERR",
+		H2C_MM_ERR_CODE_ENABLE_RD_HRD_POISON_ERR_MASK},
+	{"H2C_MM_ERR_CODE_ENABLE_RESERVED0",
+		H2C_MM_ERR_CODE_ENABLE_RESERVED0_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_err_code_field_info[] = {
+	{"H2C_MM_ERR_CODE_RSVD_1",
+		H2C_MM_ERR_CODE_RSVD_1_MASK},
+	{"H2C_MM_ERR_CODE_CIDX",
+		H2C_MM_ERR_CODE_CIDX_MASK},
+	{"H2C_MM_ERR_CODE_RESERVED0",
+		H2C_MM_ERR_CODE_RESERVED0_MASK},
+	{"H2C_MM_ERR_CODE_SUB_TYPE",
+		H2C_MM_ERR_CODE_SUB_TYPE_MASK},
+	{"H2C_MM_ERR_CODE",
+		H2C_MM_ERR_CODE_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_err_info_field_info[] = {
+	{"H2C_MM_ERR_INFO_VALID",
+		H2C_MM_ERR_INFO_VALID_MASK},
+	{"H2C_MM_ERR_INFO_SEL",
+		H2C_MM_ERR_INFO_SEL_MASK},
+	{"H2C_MM_ERR_INFO_RSVD_1",
+		H2C_MM_ERR_INFO_RSVD_1_MASK},
+	{"H2C_MM_ERR_INFO_QID",
+		H2C_MM_ERR_INFO_QID_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_perf_mon_ctl_field_info[] = {
+	{"H2C_MM_PERF_MON_CTL_RSVD_1",
+		H2C_MM_PERF_MON_CTL_RSVD_1_MASK},
+	{"H2C_MM_PERF_MON_CTL_IMM_START",
+		H2C_MM_PERF_MON_CTL_IMM_START_MASK},
+	{"H2C_MM_PERF_MON_CTL_RUN_START",
+		H2C_MM_PERF_MON_CTL_RUN_START_MASK},
+	{"H2C_MM_PERF_MON_CTL_IMM_CLEAR",
+		H2C_MM_PERF_MON_CTL_IMM_CLEAR_MASK},
+	{"H2C_MM_PERF_MON_CTL_RUN_CLEAR",
+		H2C_MM_PERF_MON_CTL_RUN_CLEAR_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_perf_mon_cycle_cnt0_field_info[] = {
+	{"H2C_MM_PERF_MON_CYCLE_CNT0_CYC_CNT",
+		H2C_MM_PERF_MON_CYCLE_CNT0_CYC_CNT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_perf_mon_cycle_cnt1_field_info[] = {
+	{"H2C_MM_PERF_MON_CYCLE_CNT1_RSVD_1",
+		H2C_MM_PERF_MON_CYCLE_CNT1_RSVD_1_MASK},
+	{"H2C_MM_PERF_MON_CYCLE_CNT1_CYC_CNT",
+		H2C_MM_PERF_MON_CYCLE_CNT1_CYC_CNT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_perf_mon_data_cnt0_field_info[] = {
+	{"H2C_MM_PERF_MON_DATA_CNT0_DCNT",
+		H2C_MM_PERF_MON_DATA_CNT0_DCNT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_perf_mon_data_cnt1_field_info[] = {
+	{"H2C_MM_PERF_MON_DATA_CNT1_RSVD_1",
+		H2C_MM_PERF_MON_DATA_CNT1_RSVD_1_MASK},
+	{"H2C_MM_PERF_MON_DATA_CNT1_DCNT",
+		H2C_MM_PERF_MON_DATA_CNT1_DCNT_MASK},
+};
+
+
+static struct regfield_info
+	h2c_mm_dbg_field_info[] = {
+	{"H2C_MM_RSVD_1",
+		H2C_MM_RSVD_1_MASK},
+	{"H2C_MM_RRQ_ENTRIES",
+		H2C_MM_RRQ_ENTRIES_MASK},
+	{"H2C_MM_DAT_FIFO_SPC",
+		H2C_MM_DAT_FIFO_SPC_MASK},
+	{"H2C_MM_RD_STALL",
+		H2C_MM_RD_STALL_MASK},
+	{"H2C_MM_RRQ_FIFO_FI",
+		H2C_MM_RRQ_FIFO_FI_MASK},
+	{"H2C_MM_WR_STALL",
+		H2C_MM_WR_STALL_MASK},
+	{"H2C_MM_WRQ_FIFO_FI",
+		H2C_MM_WRQ_FIFO_FI_MASK},
+	{"H2C_MM_WBK_STALL",
+		H2C_MM_WBK_STALL_MASK},
+	{"H2C_MM_DSC_FIFO_EP",
+		H2C_MM_DSC_FIFO_EP_MASK},
+	{"H2C_MM_DSC_FIFO_FL",
+		H2C_MM_DSC_FIFO_FL_MASK},
+};
+
+
+static struct regfield_info
+	c2h_crdt_coal_cfg_1_field_info[] = {
+	{"C2H_CRDT_COAL_CFG_1_RSVD_1",
+		C2H_CRDT_COAL_CFG_1_RSVD_1_MASK},
+	{"C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH",
+		C2H_CRDT_COAL_CFG_1_PLD_FIFO_TH_MASK},
+	{"C2H_CRDT_COAL_CFG_1_TIMER_TH",
+		C2H_CRDT_COAL_CFG_1_TIMER_TH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_crdt_coal_cfg_2_field_info[] = {
+	{"C2H_CRDT_COAL_CFG_2_RSVD_1",
+		C2H_CRDT_COAL_CFG_2_RSVD_1_MASK},
+	{"C2H_CRDT_COAL_CFG_2_FIFO_TH",
+		C2H_CRDT_COAL_CFG_2_FIFO_TH_MASK},
+	{"C2H_CRDT_COAL_CFG_2_RESERVED1",
+		C2H_CRDT_COAL_CFG_2_RESERVED1_MASK},
+	{"C2H_CRDT_COAL_CFG_2_NT_TH",
+		C2H_CRDT_COAL_CFG_2_NT_TH_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pfch_byp_qid_field_info[] = {
+	{"C2H_PFCH_BYP_QID_RSVD_1",
+		C2H_PFCH_BYP_QID_RSVD_1_MASK},
+	{"C2H_PFCH_BYP_QID",
+		C2H_PFCH_BYP_QID_MASK},
+};
+
+
+static struct regfield_info
+	c2h_pfch_byp_tag_field_info[] = {
+	{"C2H_PFCH_BYP_TAG_RSVD_1",
+		C2H_PFCH_BYP_TAG_RSVD_1_MASK},
+	{"C2H_PFCH_BYP_TAG_BYP_QID",
+		C2H_PFCH_BYP_TAG_BYP_QID_MASK},
+	{"C2H_PFCH_BYP_TAG_RSVD_2",
+		C2H_PFCH_BYP_TAG_RSVD_2_MASK},
+	{"C2H_PFCH_BYP_TAG",
+		C2H_PFCH_BYP_TAG_MASK},
+};
+
+
+static struct regfield_info
+	c2h_water_mark_field_info[] = {
+	{"C2H_WATER_MARK_HIGH_WM",
+		C2H_WATER_MARK_HIGH_WM_MASK},
+	{"C2H_WATER_MARK_LOW_WM",
+		C2H_WATER_MARK_LOW_WM_MASK},
+};
+
+static struct xreg_info eqdma_config_regs[] = {
+{"CFG_BLK_IDENTIFIER", 0x00,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_identifier_field_info),
+	cfg_blk_identifier_field_info
+},
+{"CFG_BLK_PCIE_MAX_PLD_SIZE", 0x08,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_pcie_max_pld_size_field_info),
+	cfg_blk_pcie_max_pld_size_field_info
+},
+{"CFG_BLK_PCIE_MAX_READ_REQ_SIZE", 0x0c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_pcie_max_read_req_size_field_info),
+	cfg_blk_pcie_max_read_req_size_field_info
+},
+{"CFG_BLK_SYSTEM_ID", 0x10,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_system_id_field_info),
+	cfg_blk_system_id_field_info
+},
+{"CFG_BLK_MSIX_ENABLE", 0x014,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_msix_enable_field_info),
+	cfg_blk_msix_enable_field_info
+},
+{"CFG_PCIE_DATA_WIDTH", 0x18,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_pcie_data_width_field_info),
+	cfg_pcie_data_width_field_info
+},
+{"CFG_PCIE_CTL", 0x1c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_pcie_ctl_field_info),
+	cfg_pcie_ctl_field_info
+},
+{"CFG_BLK_MSI_ENABLE", 0x20,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_msi_enable_field_info),
+	cfg_blk_msi_enable_field_info
+},
+{"CFG_AXI_USER_MAX_PLD_SIZE", 0x40,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_axi_user_max_pld_size_field_info),
+	cfg_axi_user_max_pld_size_field_info
+},
+{"CFG_AXI_USER_MAX_READ_REQ_SIZE", 0x44,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_axi_user_max_read_req_size_field_info),
+	cfg_axi_user_max_read_req_size_field_info
+},
+{"CFG_BLK_MISC_CTL", 0x4c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_misc_ctl_field_info),
+	cfg_blk_misc_ctl_field_info
+},
+{"CFG_PL_CRED_CTL", 0x68,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_pl_cred_ctl_field_info),
+	cfg_pl_cred_ctl_field_info
+},
+{"CFG_BLK_SCRATCH", 0x80,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_blk_scratch_field_info),
+	cfg_blk_scratch_field_info
+},
+{"CFG_GIC", 0xa0,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(cfg_gic_field_info),
+	cfg_gic_field_info
+},
+{"RAM_SBE_MSK_1_A", 0xe0,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_sbe_msk_1_a_field_info),
+	ram_sbe_msk_1_a_field_info
+},
+{"RAM_SBE_STS_1_A", 0xe4,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_sbe_sts_1_a_field_info),
+	ram_sbe_sts_1_a_field_info
+},
+{"RAM_DBE_MSK_1_A", 0xe8,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_dbe_msk_1_a_field_info),
+	ram_dbe_msk_1_a_field_info
+},
+{"RAM_DBE_STS_1_A", 0xec,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_dbe_sts_1_a_field_info),
+	ram_dbe_sts_1_a_field_info
+},
+{"RAM_SBE_MSK_A", 0xf0,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_sbe_msk_a_field_info),
+	ram_sbe_msk_a_field_info
+},
+{"RAM_SBE_STS_A", 0xf4,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_sbe_sts_a_field_info),
+	ram_sbe_sts_a_field_info
+},
+{"RAM_DBE_MSK_A", 0xf8,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_dbe_msk_a_field_info),
+	ram_dbe_msk_a_field_info
+},
+{"RAM_DBE_STS_A", 0xfc,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ram_dbe_sts_a_field_info),
+	ram_dbe_sts_a_field_info
+},
+{"GLBL2_IDENTIFIER", 0x100,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_identifier_field_info),
+	glbl2_identifier_field_info
+},
+{"GLBL2_CHANNEL_INST", 0x114,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_channel_inst_field_info),
+	glbl2_channel_inst_field_info
+},
+{"GLBL2_CHANNEL_MDMA", 0x118,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_channel_mdma_field_info),
+	glbl2_channel_mdma_field_info
+},
+{"GLBL2_CHANNEL_STRM", 0x11c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_channel_strm_field_info),
+	glbl2_channel_strm_field_info
+},
+{"GLBL2_CHANNEL_CAP", 0x120,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_channel_cap_field_info),
+	glbl2_channel_cap_field_info
+},
+{"GLBL2_CHANNEL_PASID_CAP", 0x128,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_channel_pasid_cap_field_info),
+	glbl2_channel_pasid_cap_field_info
+},
+{"GLBL2_SYSTEM_ID", 0x130,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_system_id_field_info),
+	glbl2_system_id_field_info
+},
+{"GLBL2_MISC_CAP", 0x134,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_misc_cap_field_info),
+	glbl2_misc_cap_field_info
+},
+{"GLBL2_DBG_PCIE_RQ0", 0x1b8,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_pcie_rq0_field_info),
+	glbl2_dbg_pcie_rq0_field_info
+},
+{"GLBL2_DBG_PCIE_RQ1", 0x1bc,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_pcie_rq1_field_info),
+	glbl2_dbg_pcie_rq1_field_info
+},
+{"GLBL2_DBG_AXIMM_WR0", 0x1c0,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_aximm_wr0_field_info),
+	glbl2_dbg_aximm_wr0_field_info
+},
+{"GLBL2_DBG_AXIMM_WR1", 0x1c4,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_aximm_wr1_field_info),
+	glbl2_dbg_aximm_wr1_field_info
+},
+{"GLBL2_DBG_AXIMM_RD0", 0x1c8,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_aximm_rd0_field_info),
+	glbl2_dbg_aximm_rd0_field_info
+},
+{"GLBL2_DBG_AXIMM_RD1", 0x1cc,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_aximm_rd1_field_info),
+	glbl2_dbg_aximm_rd1_field_info
+},
+{"GLBL2_DBG_FAB0", 0x1d0,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_fab0_field_info),
+	glbl2_dbg_fab0_field_info
+},
+{"GLBL2_DBG_FAB1", 0x1d4,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_fab1_field_info),
+	glbl2_dbg_fab1_field_info
+},
+{"GLBL2_DBG_MATCH_SEL", 0x1f4,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_match_sel_field_info),
+	glbl2_dbg_match_sel_field_info
+},
+{"GLBL2_DBG_MATCH_MSK", 0x1f8,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_match_msk_field_info),
+	glbl2_dbg_match_msk_field_info
+},
+{"GLBL2_DBG_MATCH_PAT", 0x1fc,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl2_dbg_match_pat_field_info),
+	glbl2_dbg_match_pat_field_info
+},
+{"GLBL_RNG_SZ_1", 0x204,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_1_field_info),
+	glbl_rng_sz_1_field_info
+},
+{"GLBL_RNG_SZ_2", 0x208,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_2_field_info),
+	glbl_rng_sz_2_field_info
+},
+{"GLBL_RNG_SZ_3", 0x20c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_3_field_info),
+	glbl_rng_sz_3_field_info
+},
+{"GLBL_RNG_SZ_4", 0x210,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_4_field_info),
+	glbl_rng_sz_4_field_info
+},
+{"GLBL_RNG_SZ_5", 0x214,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_5_field_info),
+	glbl_rng_sz_5_field_info
+},
+{"GLBL_RNG_SZ_6", 0x218,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_6_field_info),
+	glbl_rng_sz_6_field_info
+},
+{"GLBL_RNG_SZ_7", 0x21c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_7_field_info),
+	glbl_rng_sz_7_field_info
+},
+{"GLBL_RNG_SZ_8", 0x220,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_8_field_info),
+	glbl_rng_sz_8_field_info
+},
+{"GLBL_RNG_SZ_9", 0x224,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_9_field_info),
+	glbl_rng_sz_9_field_info
+},
+{"GLBL_RNG_SZ_A", 0x228,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_a_field_info),
+	glbl_rng_sz_a_field_info
+},
+{"GLBL_RNG_SZ_B", 0x22c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_b_field_info),
+	glbl_rng_sz_b_field_info
+},
+{"GLBL_RNG_SZ_C", 0x230,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_c_field_info),
+	glbl_rng_sz_c_field_info
+},
+{"GLBL_RNG_SZ_D", 0x234,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_d_field_info),
+	glbl_rng_sz_d_field_info
+},
+{"GLBL_RNG_SZ_E", 0x238,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_e_field_info),
+	glbl_rng_sz_e_field_info
+},
+{"GLBL_RNG_SZ_F", 0x23c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_f_field_info),
+	glbl_rng_sz_f_field_info
+},
+{"GLBL_RNG_SZ_10", 0x240,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_rng_sz_10_field_info),
+	glbl_rng_sz_10_field_info
+},
+{"GLBL_ERR_STAT", 0x248,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_err_stat_field_info),
+	glbl_err_stat_field_info
+},
+{"GLBL_ERR_MASK", 0x24c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_err_mask_field_info),
+	glbl_err_mask_field_info
+},
+{"GLBL_DSC_CFG", 0x250,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_cfg_field_info),
+	glbl_dsc_cfg_field_info
+},
+{"GLBL_DSC_ERR_STS", 0x254,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_err_sts_field_info),
+	glbl_dsc_err_sts_field_info
+},
+{"GLBL_DSC_ERR_MSK", 0x258,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_err_msk_field_info),
+	glbl_dsc_err_msk_field_info
+},
+{"GLBL_DSC_ERR_LOG0", 0x25c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_err_log0_field_info),
+	glbl_dsc_err_log0_field_info
+},
+{"GLBL_DSC_ERR_LOG1", 0x260,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_err_log1_field_info),
+	glbl_dsc_err_log1_field_info
+},
+{"GLBL_TRQ_ERR_STS", 0x264,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_trq_err_sts_field_info),
+	glbl_trq_err_sts_field_info
+},
+{"GLBL_TRQ_ERR_MSK", 0x268,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_trq_err_msk_field_info),
+	glbl_trq_err_msk_field_info
+},
+{"GLBL_TRQ_ERR_LOG", 0x26c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_trq_err_log_field_info),
+	glbl_trq_err_log_field_info
+},
+{"GLBL_DSC_DBG_DAT0", 0x270,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_dbg_dat0_field_info),
+	glbl_dsc_dbg_dat0_field_info
+},
+{"GLBL_DSC_DBG_DAT1", 0x274,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_dbg_dat1_field_info),
+	glbl_dsc_dbg_dat1_field_info
+},
+{"GLBL_DSC_DBG_CTL", 0x278,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_dbg_ctl_field_info),
+	glbl_dsc_dbg_ctl_field_info
+},
+{"GLBL_DSC_ERR_LOG2", 0x27c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_dsc_err_log2_field_info),
+	glbl_dsc_err_log2_field_info
+},
+{"GLBL_GLBL_INTERRUPT_CFG", 0x2c4,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_glbl_interrupt_cfg_field_info),
+	glbl_glbl_interrupt_cfg_field_info
+},
+{"GLBL_VCH_HOST_PROFILE", 0x2c8,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_vch_host_profile_field_info),
+	glbl_vch_host_profile_field_info
+},
+{"GLBL_BRIDGE_HOST_PROFILE", 0x308,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_bridge_host_profile_field_info),
+	glbl_bridge_host_profile_field_info
+},
+{"AXIMM_IRQ_DEST_ADDR", 0x30c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(aximm_irq_dest_addr_field_info),
+	aximm_irq_dest_addr_field_info
+},
+{"FAB_ERR_LOG", 0x314,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(fab_err_log_field_info),
+	fab_err_log_field_info
+},
+{"GLBL_REQ_ERR_STS", 0x318,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl_req_err_sts_field_info),
+	glbl_req_err_sts_field_info
+},
+{"GLBL_REQ_ERR_MSK", 0x31c,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(glbl_req_err_msk_field_info),
+	glbl_req_err_msk_field_info
+},
+{"IND_CTXT_DATA", 0x804,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ind_ctxt_data_field_info),
+	ind_ctxt_data_field_info
+},
+{"IND_CTXT_MASK", 0x824,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ind_ctxt_mask_field_info),
+	ind_ctxt_mask_field_info
+},
+{"IND_CTXT_CMD", 0x844,
+	1, 0, 0, 0,
+	0, QDMA_MM_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(ind_ctxt_cmd_field_info),
+	ind_ctxt_cmd_field_info
+},
+{"C2H_TIMER_CNT", 0xa00,
+	1, 0, 0, 0,
+	0, QDMA_COMPLETION_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_timer_cnt_field_info),
+	c2h_timer_cnt_field_info
+},
+{"C2H_CNT_TH", 0xa40,
+	1, 0, 0, 0,
+	0, QDMA_COMPLETION_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cnt_th_field_info),
+	c2h_cnt_th_field_info
+},
+{"C2H_STAT_S_AXIS_C2H_ACCEPTED", 0xa88,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_stat_s_axis_c2h_accepted_field_info),
+	c2h_stat_s_axis_c2h_accepted_field_info
+},
+{"C2H_STAT_S_AXIS_WRB_ACCEPTED", 0xa8c,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_stat_s_axis_wrb_accepted_field_info),
+	c2h_stat_s_axis_wrb_accepted_field_info
+},
+{"C2H_STAT_DESC_RSP_PKT_ACCEPTED", 0xa90,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_stat_desc_rsp_pkt_accepted_field_info),
+	c2h_stat_desc_rsp_pkt_accepted_field_info
+},
+{"C2H_STAT_AXIS_PKG_CMP", 0xa94,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_stat_axis_pkg_cmp_field_info),
+	c2h_stat_axis_pkg_cmp_field_info
+},
+{"C2H_STAT_DESC_RSP_ACCEPTED", 0xa98,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_desc_rsp_accepted_field_info),
+	c2h_stat_desc_rsp_accepted_field_info
+},
+{"C2H_STAT_DESC_RSP_CMP", 0xa9c,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_desc_rsp_cmp_field_info),
+	c2h_stat_desc_rsp_cmp_field_info
+},
+{"C2H_STAT_WRQ_OUT", 0xaa0,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_wrq_out_field_info),
+	c2h_stat_wrq_out_field_info
+},
+{"C2H_STAT_WPL_REN_ACCEPTED", 0xaa4,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_wpl_ren_accepted_field_info),
+	c2h_stat_wpl_ren_accepted_field_info
+},
+{"C2H_STAT_TOTAL_WRQ_LEN", 0xaa8,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_total_wrq_len_field_info),
+	c2h_stat_total_wrq_len_field_info
+},
+{"C2H_STAT_TOTAL_WPL_LEN", 0xaac,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_total_wpl_len_field_info),
+	c2h_stat_total_wpl_len_field_info
+},
+{"C2H_BUF_SZ", 0xab0,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_buf_sz_field_info),
+	c2h_buf_sz_field_info
+},
+{"C2H_ERR_STAT", 0xaf0,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_err_stat_field_info),
+	c2h_err_stat_field_info
+},
+{"C2H_ERR_MASK", 0xaf4,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_err_mask_field_info),
+	c2h_err_mask_field_info
+},
+{"C2H_FATAL_ERR_STAT", 0xaf8,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_fatal_err_stat_field_info),
+	c2h_fatal_err_stat_field_info
+},
+{"C2H_FATAL_ERR_MASK", 0xafc,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_fatal_err_mask_field_info),
+	c2h_fatal_err_mask_field_info
+},
+{"C2H_FATAL_ERR_ENABLE", 0xb00,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_fatal_err_enable_field_info),
+	c2h_fatal_err_enable_field_info
+},
+{"GLBL_ERR_INT", 0xb04,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(glbl_err_int_field_info),
+	glbl_err_int_field_info
+},
+{"C2H_PFCH_CFG", 0xb08,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pfch_cfg_field_info),
+	c2h_pfch_cfg_field_info
+},
+{"C2H_PFCH_CFG_1", 0xa80,
+	1, 0, 0, 0,
+	0, QDMA_COMPLETION_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pfch_cfg_1_field_info),
+	c2h_pfch_cfg_1_field_info
+},
+{"C2H_PFCH_CFG_2", 0xa84,
+	1, 0, 0, 0,
+	0, QDMA_COMPLETION_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pfch_cfg_2_field_info),
+	c2h_pfch_cfg_2_field_info
+},
+{"C2H_INT_TIMER_TICK", 0xb0c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_int_timer_tick_field_info),
+	c2h_int_timer_tick_field_info
+},
+{"C2H_STAT_DESC_RSP_DROP_ACCEPTED", 0xb10,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_stat_desc_rsp_drop_accepted_field_info),
+	c2h_stat_desc_rsp_drop_accepted_field_info
+},
+{"C2H_STAT_DESC_RSP_ERR_ACCEPTED", 0xb14,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_stat_desc_rsp_err_accepted_field_info),
+	c2h_stat_desc_rsp_err_accepted_field_info
+},
+{"C2H_STAT_DESC_REQ", 0xb18,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_desc_req_field_info),
+	c2h_stat_desc_req_field_info
+},
+{"C2H_STAT_DBG_DMA_ENG_0", 0xb1c,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_dbg_dma_eng_0_field_info),
+	c2h_stat_dbg_dma_eng_0_field_info
+},
+{"C2H_STAT_DBG_DMA_ENG_1", 0xb20,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_dbg_dma_eng_1_field_info),
+	c2h_stat_dbg_dma_eng_1_field_info
+},
+{"C2H_STAT_DBG_DMA_ENG_2", 0xb24,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_dbg_dma_eng_2_field_info),
+	c2h_stat_dbg_dma_eng_2_field_info
+},
+{"C2H_STAT_DBG_DMA_ENG_3", 0xb28,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_dbg_dma_eng_3_field_info),
+	c2h_stat_dbg_dma_eng_3_field_info
+},
+{"C2H_DBG_PFCH_ERR_CTXT", 0xb2c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_dbg_pfch_err_ctxt_field_info),
+	c2h_dbg_pfch_err_ctxt_field_info
+},
+{"C2H_FIRST_ERR_QID", 0xb30,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_first_err_qid_field_info),
+	c2h_first_err_qid_field_info
+},
+{"STAT_NUM_WRB_IN", 0xb34,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(stat_num_wrb_in_field_info),
+	stat_num_wrb_in_field_info
+},
+{"STAT_NUM_WRB_OUT", 0xb38,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(stat_num_wrb_out_field_info),
+	stat_num_wrb_out_field_info
+},
+{"STAT_NUM_WRB_DRP", 0xb3c,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(stat_num_wrb_drp_field_info),
+	stat_num_wrb_drp_field_info
+},
+{"STAT_NUM_STAT_DESC_OUT", 0xb40,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(stat_num_stat_desc_out_field_info),
+	stat_num_stat_desc_out_field_info
+},
+{"STAT_NUM_DSC_CRDT_SENT", 0xb44,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(stat_num_dsc_crdt_sent_field_info),
+	stat_num_dsc_crdt_sent_field_info
+},
+{"STAT_NUM_FCH_DSC_RCVD", 0xb48,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(stat_num_fch_dsc_rcvd_field_info),
+	stat_num_fch_dsc_rcvd_field_info
+},
+{"STAT_NUM_BYP_DSC_RCVD", 0xb4c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(stat_num_byp_dsc_rcvd_field_info),
+	stat_num_byp_dsc_rcvd_field_info
+},
+{"C2H_WRB_COAL_CFG", 0xb50,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_wrb_coal_cfg_field_info),
+	c2h_wrb_coal_cfg_field_info
+},
+{"C2H_INTR_H2C_REQ", 0xb54,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_intr_h2c_req_field_info),
+	c2h_intr_h2c_req_field_info
+},
+{"C2H_INTR_C2H_MM_REQ", 0xb58,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_intr_c2h_mm_req_field_info),
+	c2h_intr_c2h_mm_req_field_info
+},
+{"C2H_INTR_ERR_INT_REQ", 0xb5c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_intr_err_int_req_field_info),
+	c2h_intr_err_int_req_field_info
+},
+{"C2H_INTR_C2H_ST_REQ", 0xb60,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_intr_c2h_st_req_field_info),
+	c2h_intr_c2h_st_req_field_info
+},
+{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_ACK", 0xb64,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_h2c_err_c2h_mm_msix_ack_field_info),
+	c2h_intr_h2c_err_c2h_mm_msix_ack_field_info
+},
+{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_FAIL", 0xb68,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_h2c_err_c2h_mm_msix_fail_field_info),
+	c2h_intr_h2c_err_c2h_mm_msix_fail_field_info
+},
+{"C2H_INTR_H2C_ERR_C2H_MM_MSIX_NO_MSIX", 0xb6c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_h2c_err_c2h_mm_msix_no_msix_field_info),
+	c2h_intr_h2c_err_c2h_mm_msix_no_msix_field_info
+},
+{"C2H_INTR_H2C_ERR_C2H_MM_CTXT_INVAL", 0xb70,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_h2c_err_c2h_mm_ctxt_inval_field_info),
+	c2h_intr_h2c_err_c2h_mm_ctxt_inval_field_info
+},
+{"C2H_INTR_C2H_ST_MSIX_ACK", 0xb74,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_intr_c2h_st_msix_ack_field_info),
+	c2h_intr_c2h_st_msix_ack_field_info
+},
+{"C2H_INTR_C2H_ST_MSIX_FAIL", 0xb78,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(c2h_intr_c2h_st_msix_fail_field_info),
+	c2h_intr_c2h_st_msix_fail_field_info
+},
+{"C2H_INTR_C2H_ST_NO_MSIX", 0xb7c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_c2h_st_no_msix_field_info),
+	c2h_intr_c2h_st_no_msix_field_info
+},
+{"C2H_INTR_C2H_ST_CTXT_INVAL", 0xb80,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_c2h_st_ctxt_inval_field_info),
+	c2h_intr_c2h_st_ctxt_inval_field_info
+},
+{"C2H_STAT_WR_CMP", 0xb84,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_wr_cmp_field_info),
+	c2h_stat_wr_cmp_field_info
+},
+{"C2H_STAT_DBG_DMA_ENG_4", 0xb88,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_dbg_dma_eng_4_field_info),
+	c2h_stat_dbg_dma_eng_4_field_info
+},
+{"C2H_STAT_DBG_DMA_ENG_5", 0xb8c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_dbg_dma_eng_5_field_info),
+	c2h_stat_dbg_dma_eng_5_field_info
+},
+{"C2H_DBG_PFCH_QID", 0xb90,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_dbg_pfch_qid_field_info),
+	c2h_dbg_pfch_qid_field_info
+},
+{"C2H_DBG_PFCH", 0xb94,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_dbg_pfch_field_info),
+	c2h_dbg_pfch_field_info
+},
+{"C2H_INT_DBG", 0xb98,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_int_dbg_field_info),
+	c2h_int_dbg_field_info
+},
+{"C2H_STAT_IMM_ACCEPTED", 0xb9c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_imm_accepted_field_info),
+	c2h_stat_imm_accepted_field_info
+},
+{"C2H_STAT_MARKER_ACCEPTED", 0xba0,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_marker_accepted_field_info),
+	c2h_stat_marker_accepted_field_info
+},
+{"C2H_STAT_DISABLE_CMP_ACCEPTED", 0xba4,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_disable_cmp_accepted_field_info),
+	c2h_stat_disable_cmp_accepted_field_info
+},
+{"C2H_PLD_FIFO_CRDT_CNT", 0xba8,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pld_fifo_crdt_cnt_field_info),
+	c2h_pld_fifo_crdt_cnt_field_info
+},
+{"C2H_INTR_DYN_REQ", 0xbac,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_dyn_req_field_info),
+	c2h_intr_dyn_req_field_info
+},
+{"C2H_INTR_DYN_MISC", 0xbb0,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_intr_dyn_misc_field_info),
+	c2h_intr_dyn_misc_field_info
+},
+{"C2H_DROP_LEN_MISMATCH", 0xbb4,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_drop_len_mismatch_field_info),
+	c2h_drop_len_mismatch_field_info
+},
+{"C2H_DROP_DESC_RSP_LEN", 0xbb8,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_drop_desc_rsp_len_field_info),
+	c2h_drop_desc_rsp_len_field_info
+},
+{"C2H_DROP_QID_FIFO_LEN", 0xbbc,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_drop_qid_fifo_len_field_info),
+	c2h_drop_qid_fifo_len_field_info
+},
+{"C2H_DROP_PLD_CNT", 0xbc0,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_drop_pld_cnt_field_info),
+	c2h_drop_pld_cnt_field_info
+},
+{"C2H_CMPT_FORMAT_0", 0xbc4,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cmpt_format_0_field_info),
+	c2h_cmpt_format_0_field_info
+},
+{"C2H_CMPT_FORMAT_1", 0xbc8,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cmpt_format_1_field_info),
+	c2h_cmpt_format_1_field_info
+},
+{"C2H_CMPT_FORMAT_2", 0xbcc,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cmpt_format_2_field_info),
+	c2h_cmpt_format_2_field_info
+},
+{"C2H_CMPT_FORMAT_3", 0xbd0,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cmpt_format_3_field_info),
+	c2h_cmpt_format_3_field_info
+},
+{"C2H_CMPT_FORMAT_4", 0xbd4,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cmpt_format_4_field_info),
+	c2h_cmpt_format_4_field_info
+},
+{"C2H_CMPT_FORMAT_5", 0xbd8,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cmpt_format_5_field_info),
+	c2h_cmpt_format_5_field_info
+},
+{"C2H_CMPT_FORMAT_6", 0xbdc,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_cmpt_format_6_field_info),
+	c2h_cmpt_format_6_field_info
+},
+{"C2H_PFCH_CACHE_DEPTH", 0xbe0,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pfch_cache_depth_field_info),
+	c2h_pfch_cache_depth_field_info
+},
+{"C2H_WRB_COAL_BUF_DEPTH", 0xbe4,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_wrb_coal_buf_depth_field_info),
+	c2h_wrb_coal_buf_depth_field_info
+},
+{"C2H_PFCH_CRDT", 0xbe8,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pfch_crdt_field_info),
+	c2h_pfch_crdt_field_info
+},
+{"C2H_STAT_HAS_CMPT_ACCEPTED", 0xbec,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_has_cmpt_accepted_field_info),
+	c2h_stat_has_cmpt_accepted_field_info
+},
+{"C2H_STAT_HAS_PLD_ACCEPTED", 0xbf0,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_stat_has_pld_accepted_field_info),
+	c2h_stat_has_pld_accepted_field_info
+},
+{"C2H_PLD_PKT_ID", 0xbf4,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pld_pkt_id_field_info),
+	c2h_pld_pkt_id_field_info
+},
+{"C2H_PLD_PKT_ID_1", 0xbf8,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pld_pkt_id_1_field_info),
+	c2h_pld_pkt_id_1_field_info
+},
+{"C2H_DROP_PLD_CNT_1", 0xbfc,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_drop_pld_cnt_1_field_info),
+	c2h_drop_pld_cnt_1_field_info
+},
+{"H2C_ERR_STAT", 0xe00,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(h2c_err_stat_field_info),
+	h2c_err_stat_field_info
+},
+{"H2C_ERR_MASK", 0xe04,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(h2c_err_mask_field_info),
+	h2c_err_mask_field_info
+},
+{"H2C_FIRST_ERR_QID", 0xe08,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(h2c_first_err_qid_field_info),
+	h2c_first_err_qid_field_info
+},
+{"H2C_DBG_REG0", 0xe0c,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_dbg_reg0_field_info),
+	h2c_dbg_reg0_field_info
+},
+{"H2C_DBG_REG1", 0xe10,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_dbg_reg1_field_info),
+	h2c_dbg_reg1_field_info
+},
+{"H2C_DBG_REG2", 0xe14,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_dbg_reg2_field_info),
+	h2c_dbg_reg2_field_info
+},
+{"H2C_DBG_REG3", 0xe18,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_dbg_reg3_field_info),
+	h2c_dbg_reg3_field_info
+},
+{"H2C_DBG_REG4", 0xe1c,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_dbg_reg4_field_info),
+	h2c_dbg_reg4_field_info
+},
+{"H2C_FATAL_ERR_EN", 0xe20,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_VF,
+	ARRAY_SIZE(h2c_fatal_err_en_field_info),
+	h2c_fatal_err_en_field_info
+},
+{"H2C_REQ_THROT_PCIE", 0xe24,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_req_throt_pcie_field_info),
+	h2c_req_throt_pcie_field_info
+},
+{"H2C_ALN_DBG_REG0", 0xe28,
+	1, 0, 0, 0,
+	1, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_aln_dbg_reg0_field_info),
+	h2c_aln_dbg_reg0_field_info
+},
+{"H2C_REQ_THROT_AXIMM", 0xe2c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_req_throt_aximm_field_info),
+	h2c_req_throt_aximm_field_info
+},
+{"C2H_MM_CTL", 0x1004,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_ctl_field_info),
+	c2h_mm_ctl_field_info
+},
+{"C2H_MM_STATUS", 0x1040,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_status_field_info),
+	c2h_mm_status_field_info
+},
+{"C2H_MM_CMPL_DESC_CNT", 0x1048,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_cmpl_desc_cnt_field_info),
+	c2h_mm_cmpl_desc_cnt_field_info
+},
+{"C2H_MM_ERR_CODE_ENABLE_MASK", 0x1054,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_err_code_enable_mask_field_info),
+	c2h_mm_err_code_enable_mask_field_info
+},
+{"C2H_MM_ERR_CODE", 0x1058,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_err_code_field_info),
+	c2h_mm_err_code_field_info
+},
+{"C2H_MM_ERR_INFO", 0x105c,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_err_info_field_info),
+	c2h_mm_err_info_field_info
+},
+{"C2H_MM_PERF_MON_CTL", 0x10c0,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_perf_mon_ctl_field_info),
+	c2h_mm_perf_mon_ctl_field_info
+},
+{"C2H_MM_PERF_MON_CYCLE_CNT0", 0x10c4,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_perf_mon_cycle_cnt0_field_info),
+	c2h_mm_perf_mon_cycle_cnt0_field_info
+},
+{"C2H_MM_PERF_MON_CYCLE_CNT1", 0x10c8,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_perf_mon_cycle_cnt1_field_info),
+	c2h_mm_perf_mon_cycle_cnt1_field_info
+},
+{"C2H_MM_PERF_MON_DATA_CNT0", 0x10cc,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_perf_mon_data_cnt0_field_info),
+	c2h_mm_perf_mon_data_cnt0_field_info
+},
+{"C2H_MM_PERF_MON_DATA_CNT1", 0x10d0,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_perf_mon_data_cnt1_field_info),
+	c2h_mm_perf_mon_data_cnt1_field_info
+},
+{"C2H_MM_DBG", 0x10e8,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_mm_dbg_field_info),
+	c2h_mm_dbg_field_info
+},
+{"H2C_MM_CTL", 0x1204,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_ctl_field_info),
+	h2c_mm_ctl_field_info
+},
+{"H2C_MM_STATUS", 0x1240,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_status_field_info),
+	h2c_mm_status_field_info
+},
+{"H2C_MM_CMPL_DESC_CNT", 0x1248,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_cmpl_desc_cnt_field_info),
+	h2c_mm_cmpl_desc_cnt_field_info
+},
+{"H2C_MM_ERR_CODE_ENABLE_MASK", 0x1254,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_err_code_enable_mask_field_info),
+	h2c_mm_err_code_enable_mask_field_info
+},
+{"H2C_MM_ERR_CODE", 0x1258,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_err_code_field_info),
+	h2c_mm_err_code_field_info
+},
+{"H2C_MM_ERR_INFO", 0x125c,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_err_info_field_info),
+	h2c_mm_err_info_field_info
+},
+{"H2C_MM_PERF_MON_CTL", 0x12c0,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_perf_mon_ctl_field_info),
+	h2c_mm_perf_mon_ctl_field_info
+},
+{"H2C_MM_PERF_MON_CYCLE_CNT0", 0x12c4,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_perf_mon_cycle_cnt0_field_info),
+	h2c_mm_perf_mon_cycle_cnt0_field_info
+},
+{"H2C_MM_PERF_MON_CYCLE_CNT1", 0x12c8,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_perf_mon_cycle_cnt1_field_info),
+	h2c_mm_perf_mon_cycle_cnt1_field_info
+},
+{"H2C_MM_PERF_MON_DATA_CNT0", 0x12cc,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_perf_mon_data_cnt0_field_info),
+	h2c_mm_perf_mon_data_cnt0_field_info
+},
+{"H2C_MM_PERF_MON_DATA_CNT1", 0x12d0,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_perf_mon_data_cnt1_field_info),
+	h2c_mm_perf_mon_data_cnt1_field_info
+},
+{"H2C_MM_DBG", 0x12e8,
+	1, 0, 0, 0,
+	0, QDMA_MM_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(h2c_mm_dbg_field_info),
+	h2c_mm_dbg_field_info
+},
+{"C2H_CRDT_COAL_CFG_1", 0x1400,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_crdt_coal_cfg_1_field_info),
+	c2h_crdt_coal_cfg_1_field_info
+},
+{"C2H_CRDT_COAL_CFG_2", 0x1404,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_crdt_coal_cfg_2_field_info),
+	c2h_crdt_coal_cfg_2_field_info
+},
+{"C2H_PFCH_BYP_QID", 0x1408,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pfch_byp_qid_field_info),
+	c2h_pfch_byp_qid_field_info
+},
+{"C2H_PFCH_BYP_TAG", 0x140c,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_pfch_byp_tag_field_info),
+	c2h_pfch_byp_tag_field_info
+},
+{"C2H_WATER_MARK", 0x1500,
+	1, 0, 0, 0,
+	0, QDMA_ST_MODE, QDMA_REG_READ_PF_ONLY,
+	ARRAY_SIZE(c2h_water_mark_field_info),
+	c2h_water_mark_field_info
+},
+
+};
+
+/*
+ * eqdma_config_num_regs_get() - get the number of entries in the
+ * eqdma_config_regs[] table.
+ *
+ * Use the ARRAY_SIZE() helper, already used for every field_info table
+ * in this file, instead of open-coding the sizeof division.
+ *
+ * Return: element count of eqdma_config_regs[]
+ */
+uint32_t eqdma_config_num_regs_get(void)
+{
+	return ARRAY_SIZE(eqdma_config_regs);
+}
+
+/*
+ * eqdma_config_regs_get() - accessor for the EQDMA config register table.
+ *
+ * Return: pointer to the first entry of the file-scope
+ * eqdma_config_regs[] array.
+ */
+struct xreg_info *eqdma_config_regs_get(void)
+{
+	struct xreg_info *regs = eqdma_config_regs;
+
+	return regs;
+}
diff --git a/drivers/net/qdma/qdma_access/qdma_access_common.c b/drivers/net/qdma/qdma_access/qdma_access_common.c
new file mode 100644
index 0000000000..a86ef14651
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_access_common.c
@@ -0,0 +1,1271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#include "qdma_access_common.h"
+#include "qdma_platform.h"
+#include "qdma_soft_reg.h"
+#include "qdma_soft_access.h"
+#include "qdma_s80_hard_access.h"
+#include "eqdma_soft_access.h"
+#include "qdma_reg_dump.h"
+
+#ifdef ENABLE_WPP_TRACING
+#include "qdma_access_common.tmh"
+#endif
+
+/* qdma version info */
+#define RTL_BASE_VERSION                        2
+#define RTL_PATCH_VERSION                       3
+
+/**
+ * enum qdma_ip - IP flavour detected from the config BAR, used to pick
+ * between the QDMA/Versal and EQDMA version-query entry points
+ */
+enum qdma_ip {
+	QDMA_OR_VERSAL_IP,	/* QDMA soft IP or Versal hard IP */
+	EQDMA_IP		/* EQDMA soft IP */
+};
+
+
+/*
+ * hw_monitor_reg() - poll a register repeatedly until
+ *	(the register value & mask) == val or the timeout expires
+ *
+ * @dev_hndl:	device handle passed through to qdma_reg_read()
+ * @reg:	register offset to poll
+ * @mask:	bit mask applied to the value read
+ * @val:	expected value of (read value & mask)
+ * @interval_us: poll interval; 0 selects QDMA_REG_POLL_DFLT_INTERVAL_US
+ * @timeout_us:	total timeout; 0 selects QDMA_REG_POLL_DFLT_TIMEOUT_US
+ *
+ * return -QDMA_ERR_HWACC_BUSY_TIMEOUT if register value didn't match,
+ * 0 otherwise
+ */
+int hw_monitor_reg(void *dev_hndl, uint32_t reg, uint32_t mask,
+		uint32_t val, uint32_t interval_us, uint32_t timeout_us)
+{
+	int count;
+	uint32_t v;
+
+	if (!interval_us)
+		interval_us = QDMA_REG_POLL_DFLT_INTERVAL_US;
+	if (!timeout_us)
+		timeout_us = QDMA_REG_POLL_DFLT_TIMEOUT_US;
+
+	count = timeout_us / interval_us;
+	/* Poll at least once: if timeout_us < interval_us the division
+	 * yields 0 and "while (--count)" would wrap negative, spinning
+	 * for a very long time before terminating.
+	 */
+	if (count < 1)
+		count = 1;
+
+	do {
+		v = qdma_reg_read(dev_hndl, reg);
+		if ((v & mask) == val)
+			return QDMA_SUCCESS;
+		qdma_udelay(interval_us);
+	} while (--count);
+
+	/* one last read after the final delay */
+	v = qdma_reg_read(dev_hndl, reg);
+	if ((v & mask) == val)
+		return QDMA_SUCCESS;
+
+	qdma_log_error("%s: Reg read=%u Expected=%u, err:%d\n",
+				   __func__, v, val,
+				   -QDMA_ERR_HWACC_BUSY_TIMEOUT);
+	return -QDMA_ERR_HWACC_BUSY_TIMEOUT;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_get_rtl_version() - Function to get the rtl_version in
+ * string format
+ *
+ * @rtl_version: RTL version enumeration value
+ *
+ * Return: string - success and NULL on failure
+ *****************************************************************************/
+static const char *qdma_get_rtl_version(enum qdma_rtl_version rtl_version)
+{
+	if (rtl_version == QDMA_RTL_PATCH)
+		return "RTL Patch";
+	if (rtl_version == QDMA_RTL_BASE)
+		return "RTL Base";
+
+	qdma_log_error("%s: invalid rtl_version(%d), err:%d\n",
+			__func__, rtl_version, -QDMA_ERR_INV_PARAM);
+	return NULL;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_get_ip_type() - Function to get the ip type in string format
+ *
+ * @ip_type: IP Type
+ *
+ * Return: string - success and NULL on failure
+ *****************************************************************************/
+static const char *qdma_get_ip_type(enum qdma_ip_type ip_type)
+{
+	if (ip_type == QDMA_VERSAL_HARD_IP)
+		return "Versal Hard IP";
+	if (ip_type == QDMA_VERSAL_SOFT_IP)
+		return "Versal Soft IP";
+	if (ip_type == QDMA_SOFT_IP)
+		return "QDMA Soft IP";
+	if (ip_type == EQDMA_SOFT_IP)
+		return "EQDMA Soft IP";
+
+	qdma_log_error("%s: invalid ip type(%d), err:%d\n",
+			__func__, ip_type, -QDMA_ERR_INV_PARAM);
+	return NULL;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_get_device_type() - Function to get the device type in
+ * string format
+ *
+ * @device_type: Device Type
+ *
+ * Return: string - success and NULL on failure
+ *****************************************************************************/
+static const char *qdma_get_device_type(enum qdma_device_type device_type)
+{
+	if (device_type == QDMA_DEVICE_SOFT)
+		return "Soft IP";
+	if (device_type == QDMA_DEVICE_VERSAL)
+		return "Versal S80 Hard IP";
+
+	qdma_log_error("%s: invalid device type(%d), err:%d\n",
+			__func__, device_type, -QDMA_ERR_INV_PARAM);
+	return NULL;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_get_vivado_release_id() - Function to get the vivado release id in
+ * string format
+ *
+ * @vivado_release_id: Vivado release ID
+ *
+ * Return: string - success and NULL on failure
+ *****************************************************************************/
+static const char *qdma_get_vivado_release_id
+				(enum qdma_vivado_release_id vivado_release_id)
+{
+	if (vivado_release_id == QDMA_VIVADO_2018_3)
+		return "vivado 2018.3";
+	if (vivado_release_id == QDMA_VIVADO_2019_1)
+		return "vivado 2019.1";
+	if (vivado_release_id == QDMA_VIVADO_2019_2)
+		return "vivado 2019.2";
+	if (vivado_release_id == QDMA_VIVADO_2020_1)
+		return "vivado 2020.1";
+	if (vivado_release_id == QDMA_VIVADO_2020_2)
+		return "vivado 2020.2";
+
+	qdma_log_error("%s: invalid vivado_release_id(%d), err:%d\n",
+			__func__,
+			vivado_release_id,
+			-QDMA_ERR_INV_PARAM);
+	return NULL;
+}
+
+
+/* Write 'cnt' consecutive CSR values starting at index 'idx' of the
+ * register array based at 'reg_offst'.
+ */
+void qdma_write_csr_values(void *dev_hndl, uint32_t reg_offst,
+		uint32_t idx, uint32_t cnt, const uint32_t *values)
+{
+	uint32_t i;
+
+	for (i = 0; i < cnt; i++)
+		qdma_reg_write(dev_hndl,
+			       reg_offst + ((idx + i) * sizeof(uint32_t)),
+			       values[i]);
+}
+
+/* Read 'cnt' consecutive CSR values starting at index 'idx' of the
+ * register array based at 'reg_offst' into 'values'.
+ */
+void qdma_read_csr_values(void *dev_hndl, uint32_t reg_offst,
+		uint32_t idx, uint32_t cnt, uint32_t *values)
+{
+	uint32_t i;
+	uint32_t base = reg_offst + (idx * sizeof(uint32_t));
+
+	for (i = 0; i < cnt; i++)
+		values[i] = qdma_reg_read(dev_hndl,
+					  base + (i * sizeof(uint32_t)));
+}
+
+/*****************************************************************************/
+/**
+ * qdma_fetch_version_details() - Decode a raw version register value into
+ * the rtl/device/ip/vivado fields of @version_info, including the human
+ * readable strings for each.
+ *
+ * @is_vf:		set when @version_reg_val was read through the VF
+ *			version register (fields sit under different masks)
+ * @version_reg_val:	raw value of the version register
+ * @version_info:	output structure, fully populated on return
+ *****************************************************************************/
+void qdma_fetch_version_details(uint8_t is_vf, uint32_t version_reg_val,
+		struct qdma_hw_version_info *version_info)
+{
+	uint32_t rtl_version, vivado_release_id, ip_type, device_type;
+	const char *version_str;
+
+	/* PF and VF carry the same fields under different bit masks */
+	if (!is_vf) {
+		rtl_version = FIELD_GET(QDMA_GLBL2_RTL_VERSION_MASK,
+				version_reg_val);
+		vivado_release_id =
+			FIELD_GET(QDMA_GLBL2_VIVADO_RELEASE_MASK,
+					version_reg_val);
+		device_type = FIELD_GET(QDMA_GLBL2_DEVICE_ID_MASK,
+				version_reg_val);
+		ip_type = FIELD_GET(QDMA_GLBL2_VERSAL_IP_MASK,
+				version_reg_val);
+	} else {
+		rtl_version =
+			FIELD_GET(QDMA_GLBL2_VF_RTL_VERSION_MASK,
+					version_reg_val);
+		vivado_release_id =
+			FIELD_GET(QDMA_GLBL2_VF_VIVADO_RELEASE_MASK,
+					version_reg_val);
+		device_type = FIELD_GET(QDMA_GLBL2_VF_DEVICE_ID_MASK,
+				version_reg_val);
+		ip_type =
+			FIELD_GET(QDMA_GLBL2_VF_VERSAL_IP_MASK,
+					version_reg_val);
+	}
+
+	/* map raw RTL field to enum qdma_rtl_version */
+	switch (rtl_version) {
+	case 0:
+		version_info->rtl_version = QDMA_RTL_BASE;
+		break;
+	case 1:
+		version_info->rtl_version = QDMA_RTL_PATCH;
+		break;
+	default:
+		version_info->rtl_version = QDMA_RTL_NONE;
+		break;
+	}
+
+	version_str = qdma_get_rtl_version(version_info->rtl_version);
+	if (version_str != NULL)
+		qdma_strncpy(version_info->qdma_rtl_version_str,
+				version_str,
+				QDMA_HW_VERSION_STRING_LEN);
+
+	/* map raw device-id field to enum qdma_device_type */
+	switch (device_type) {
+	case 0:
+		version_info->device_type = QDMA_DEVICE_SOFT;
+		break;
+	case 1:
+		version_info->device_type = QDMA_DEVICE_VERSAL;
+		break;
+	default:
+		version_info->device_type = QDMA_DEVICE_NONE;
+		break;
+	}
+
+	version_str = qdma_get_device_type(version_info->device_type);
+	if (version_str != NULL)
+		qdma_strncpy(version_info->qdma_device_type_str,
+				version_str,
+				QDMA_HW_VERSION_STRING_LEN);
+
+
+	/* the ip-type field encoding depends on the device type */
+	if (version_info->device_type == QDMA_DEVICE_SOFT) {
+		switch (ip_type) {
+		case 0:
+			version_info->ip_type = QDMA_SOFT_IP;
+			break;
+		case 1:
+			version_info->ip_type = EQDMA_SOFT_IP;
+			break;
+		default:
+			version_info->ip_type = QDMA_NONE_IP;
+		}
+	} else {
+		switch (ip_type) {
+		case 0:
+			version_info->ip_type = QDMA_VERSAL_HARD_IP;
+			break;
+		case 1:
+			version_info->ip_type = QDMA_VERSAL_SOFT_IP;
+			break;
+		default:
+			version_info->ip_type = QDMA_NONE_IP;
+		}
+	}
+
+	version_str = qdma_get_ip_type(version_info->ip_type);
+	if (version_str != NULL)
+		qdma_strncpy(version_info->qdma_ip_type_str,
+			version_str,
+			QDMA_HW_VERSION_STRING_LEN);
+
+	/* the vivado-release encoding in turn depends on the ip type */
+	if (version_info->ip_type == QDMA_SOFT_IP) {
+		switch (vivado_release_id) {
+		case 0:
+			version_info->vivado_release = QDMA_VIVADO_2018_3;
+			break;
+		case 1:
+			version_info->vivado_release = QDMA_VIVADO_2019_1;
+			break;
+		case 2:
+			version_info->vivado_release = QDMA_VIVADO_2019_2;
+			break;
+		default:
+			version_info->vivado_release = QDMA_VIVADO_NONE;
+			break;
+		}
+	} else if (version_info->ip_type == EQDMA_SOFT_IP) {
+		switch (vivado_release_id) {
+		case 0:
+			version_info->vivado_release = QDMA_VIVADO_2020_1;
+			break;
+		case 1:
+			version_info->vivado_release = QDMA_VIVADO_2020_2;
+			break;
+		default:
+			version_info->vivado_release = QDMA_VIVADO_NONE;
+			break;
+		}
+	} else { /* Versal case */
+		switch (vivado_release_id) {
+		case 0:
+			version_info->vivado_release = QDMA_VIVADO_2019_2;
+			break;
+		default:
+			version_info->vivado_release = QDMA_VIVADO_NONE;
+			break;
+		}
+	}
+
+	version_str = qdma_get_vivado_release_id
+			(version_info->vivado_release);
+	if (version_str != NULL)
+		qdma_strncpy(version_info->qdma_vivado_release_id_str,
+				version_str,
+				QDMA_HW_VERSION_STRING_LEN);
+}
+
+
+/*
+ * dump_reg() - Helper function to dump register value into string
+ *
+ * @buf:	output buffer
+ * @buf_sz:	size of @buf; must be at least DEBGFS_LINE_SZ
+ * @raddr:	register address printed at the start of the line
+ * @rname:	register name
+ * @rval:	register value, printed in both hex and decimal
+ *
+ * return len - length of the string copied into buffer, or
+ * -QDMA_ERR_INV_PARAM when the buffer is too small for one line
+ */
+int dump_reg(char *buf, int buf_sz, uint32_t raddr,
+		const char *rname, uint32_t rval)
+{
+	/* length of the line should be minimum 80 chars.
+	 * If below print pattern is changed, check for
+	 * new buffer size requirement
+	 */
+	if (buf_sz < DEBGFS_LINE_SZ) {
+		qdma_log_error("%s: buf_sz(%d) < expected(%d): err: %d\n",
+						__func__,
+						buf_sz, DEBGFS_LINE_SZ,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	return QDMA_SNPRINTF_S(buf, buf_sz, DEBGFS_LINE_SZ,
+			"[%#7x] %-47s %#-10x %u\n",
+			raddr, rname, rval, rval);
+}
+
+/* Fill 'size' bytes at 'to' with the byte value 'val' (local memset). */
+void qdma_memset(void *to, uint8_t val, uint32_t size)
+{
+	uint8_t *p = (uint8_t *)to;
+	uint8_t *end = p + size;
+
+	while (p < end)
+		*p++ = val;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_queue_cmpt_cidx_read() - function to read the CMPT CIDX register
+ *
+ * @dev_hndl:	device handle
+ * @is_vf:	Whether PF or VF
+ * @qid:	Queue id relative to the PF/VF calling this API
+ * @reg_info:	pointer to structure to hold the decoded fields
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int qdma_queue_cmpt_cidx_read(void *dev_hndl, uint8_t is_vf,
+		uint16_t qid, struct qdma_q_cmpt_cidx_reg_info *reg_info)
+{
+	uint32_t regval;
+	uint32_t regaddr;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+	if (!reg_info) {
+		qdma_log_error("%s: reg_info is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	regaddr = is_vf ? QDMA_OFFSET_VF_DMAP_SEL_CMPT_CIDX :
+			QDMA_OFFSET_DMAP_SEL_CMPT_CIDX;
+	regaddr += qid * QDMA_CMPT_CIDX_STEP;
+
+	regval = qdma_reg_read(dev_hndl, regaddr);
+
+	/* unpack each field of the CMPT CIDX register */
+	reg_info->trig_mode =
+		(uint8_t)(FIELD_GET(QDMA_DMAP_SEL_CMPT_TRG_MODE_MASK, regval));
+	reg_info->timer_idx =
+		(uint8_t)(FIELD_GET(QDMA_DMAP_SEL_CMPT_TMR_CNT_MASK, regval));
+	reg_info->irq_en =
+		(uint8_t)(FIELD_GET(QDMA_DMAP_SEL_CMPT_IRQ_EN_MASK, regval));
+	reg_info->wrb_en =
+		(uint8_t)(FIELD_GET(QDMA_DMAP_SEL_CMPT_STS_DESC_EN_MASK,
+			regval));
+	reg_info->counter_idx =
+		(uint8_t)(FIELD_GET(QDMA_DMAP_SEL_CMPT_CNT_THRESH_MASK,
+			regval));
+	reg_info->wrb_cidx =
+		FIELD_GET(QDMA_DMAP_SEL_CMPT_WRB_CIDX_MASK, regval);
+
+	return QDMA_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+ * qdma_initiate_flr() - function to initiate Function Level Reset
+ *
+ * @dev_hndl:	device handle
+ * @is_vf:	Whether PF or VF
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int qdma_initiate_flr(void *dev_hndl, uint8_t is_vf)
+{
+	uint32_t regaddr;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	regaddr = is_vf ? QDMA_OFFSET_VF_REG_FLR_STATUS :
+			QDMA_OFFSET_PF_REG_FLR_STATUS;
+	/* writing 1 to the FLR status register kicks off the reset */
+	qdma_reg_write(dev_hndl, regaddr, 1);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_is_flr_done() - function to check whether the FLR is done or not
+ *
+ * @dev_hndl:	device handle
+ * @is_vf:	Whether PF or VF
+ * @done:	set to 1 if the FLR process completed, 0 otherwise
+ *
+ * Return:   0   - success and < 0 - failure
+ *****************************************************************************/
+static int qdma_is_flr_done(void *dev_hndl, uint8_t is_vf, uint8_t *done)
+{
+	int rv;
+	uint32_t regaddr;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+	if (!done) {
+		qdma_log_error("%s: done is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	regaddr = is_vf ? QDMA_OFFSET_VF_REG_FLR_STATUS :
+			QDMA_OFFSET_PF_REG_FLR_STATUS;
+
+	/* wait for the FLR status bit to become zero */
+	rv = hw_monitor_reg(dev_hndl, regaddr, QDMA_FLR_STATUS_MASK,
+			0, 5 * QDMA_REG_POLL_DFLT_INTERVAL_US,
+			QDMA_REG_POLL_DFLT_TIMEOUT_US);
+	*done = (rv < 0) ? 0 : 1;
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_is_config_bar() - function for the config bar verification
+ *
+ * @dev_hndl:	device handle
+ * @is_vf:	Whether PF or VF
+ * @ip:		output, IP flavour detected while probing (EQDMA vs
+ *		QDMA/Versal); only refined on the VF path, otherwise left
+ *		at the caller's initial value
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int qdma_is_config_bar(void *dev_hndl, uint8_t is_vf, enum qdma_ip *ip)
+{
+	uint32_t reg_val = 0;
+	uint32_t reg_addr = (is_vf) ? QDMA_OFFSET_VF_VERSION :
+			QDMA_OFFSET_CONFIG_BLOCK_ID;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	reg_val = qdma_reg_read(dev_hndl, reg_addr);
+
+	/** TODO: Version register for VFs is 0x5014 for EQDMA and
+	 *  0x1014 for QDMA/Versal. First time reading 0x5014 for
+	 *  all the device and based on the upper 16 bits value
+	 *  (i.e. 0x1fd3), finding out whether its EQDMA or QDMA/Versal
+	 *  for EQDMA VFs.
+	 *  Need to modify this logic once the hardware team
+	 *  comes up with a common register for VFs
+	 */
+	if (is_vf) {
+		if (FIELD_GET(QDMA_GLBL2_VF_UNIQUE_ID_MASK, reg_val)
+				!= QDMA_MAGIC_NUMBER) {
+			/* no magic at this offset: treat as EQDMA and
+			 * re-read through the EQDMA VF version register
+			 */
+			*ip = EQDMA_IP;
+			reg_addr = EQDMA_OFFSET_VF_VERSION;
+			reg_val = qdma_reg_read(dev_hndl, reg_addr);
+		} else {
+			/* magic matched: QDMA soft or Versal IP */
+			*ip = QDMA_OR_VERSAL_IP;
+			return QDMA_SUCCESS;
+		}
+	}
+
+	if (FIELD_GET(QDMA_CONFIG_BLOCK_ID_MASK, reg_val)
+			!= QDMA_MAGIC_NUMBER) {
+		qdma_log_error("%s: Invalid config bar, err:%d\n",
+					__func__,
+					-QDMA_ERR_HWACC_INV_CONFIG_BAR);
+		return -QDMA_ERR_HWACC_INV_CONFIG_BAR;
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_reg_dump_buf_len() - Get the buffer length needed for a full
+ * config register dump for the given IP type.
+ *
+ * @dev_hndl:	device handle
+ * @ip_type:	QDMA IP Type
+ * @buflen:	output, required buffer length in bytes
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_reg_dump_buf_len(void *dev_hndl,
+		enum qdma_ip_type ip_type, int *buflen)
+{
+	uint32_t len = 0;
+	int rv = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* validate the output pointer before writing to it, consistent
+	 * with qdma_acc_reg_info_len() (previously *buflen was cleared
+	 * before any check, crashing on a NULL argument)
+	 */
+	if (!buflen) {
+		qdma_log_error("%s: buflen is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	*buflen = 0;
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		len = qdma_soft_reg_dump_buf_len();
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		len = qdma_s80_hard_reg_dump_buf_len();
+		break;
+	case EQDMA_SOFT_IP:
+		len = eqdma_reg_dump_buf_len();
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	*buflen = (int)len;
+	return rv;
+}
+
+/* Report the buffer length and register count for a register-info dump
+ * of the given IP type.
+ */
+int qdma_acc_reg_info_len(void *dev_hndl,
+		enum qdma_ip_type ip_type, int *buflen, int *num_regs)
+{
+	uint32_t dump_len = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!buflen) {
+		qdma_log_error("%s: buflen is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!num_regs) {
+		qdma_log_error("%s: num_regs is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	*buflen = 0;
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		/* register-info dump is not supported for QDMA soft IP */
+		*num_regs = 0;
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		dump_len = qdma_s80_hard_reg_dump_buf_len();
+		*num_regs = (int)((dump_len / REG_DUMP_SIZE_PER_LINE) - 1);
+		break;
+	case EQDMA_SOFT_IP:
+		dump_len = eqdma_reg_dump_buf_len();
+		*num_regs = (int)((dump_len / REG_DUMP_SIZE_PER_LINE) - 1);
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	*buflen = (int)dump_len;
+	return 0;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_context_buf_len() - Get the buffer length needed to dump the
+ * queue context for the given IP type and queue mode/type.
+ *
+ * @dev_hndl:	device handle
+ * @ip_type:	QDMA IP Type
+ * @st:		Queue Mode (ST or MM)
+ * @q_type:	Queue Type
+ * @buflen:	output, required buffer length in bytes
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_context_buf_len(void *dev_hndl,
+		enum qdma_ip_type ip_type, uint8_t st,
+		enum qdma_dev_q_type q_type, uint32_t *buflen)
+{
+	int rv = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* validate the output pointer before writing to it (previously
+	 * *buflen was cleared before any check, crashing on NULL)
+	 */
+	if (!buflen) {
+		qdma_log_error("%s: buflen is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	*buflen = 0;
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		rv = qdma_soft_context_buf_len(st, q_type, buflen);
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		rv = qdma_s80_hard_context_buf_len(st, q_type, buflen);
+		break;
+	case EQDMA_SOFT_IP:
+		rv = eqdma_context_buf_len(st, q_type, buflen);
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	return rv;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_get_num_config_regs() - Get the number of config registers
+ * for the given IP type.
+ *
+ * @dev_hndl:	device handle
+ * @ip_type:	QDMA IP Type
+ * @num_regs:	output, number of config registers
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_get_num_config_regs(void *dev_hndl,
+		enum qdma_ip_type ip_type, uint32_t *num_regs)
+{
+	int rv = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* validate the output pointer before writing to it (previously
+	 * *num_regs was cleared before any check, crashing on NULL)
+	 */
+	if (!num_regs) {
+		qdma_log_error("%s: num_regs is NULL, err:%d\n",
+			__func__, -QDMA_ERR_INV_PARAM);
+
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	*num_regs = 0;
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		rv = qdma_get_config_num_regs();
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		rv = qdma_s80_hard_get_config_num_regs();
+		break;
+	case EQDMA_SOFT_IP:
+		rv = eqdma_get_config_num_regs();
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	*num_regs = rv;
+
+	return 0;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_get_config_regs() - Function to get qdma config registers.
+ *
+ * @dev_hndl:   device handle
+ * @is_vf:      Whether PF or VF (VF is rejected: config regs are PF-only)
+ * @ip_type:	QDMA IP Type
+ * @reg_data:   pointer to register data to be filled; must have room for
+ *		at least (num config regs - 1) uint32_t entries
+ *
+ * Return:	0 - success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_get_config_regs(void *dev_hndl, uint8_t is_vf,
+		enum qdma_ip_type ip_type,
+		uint32_t *reg_data)
+{
+	struct xreg_info *reg_info;
+	uint32_t count = 0;
+	uint32_t num_regs;
+	int rv = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (is_vf) {
+		qdma_log_error("%s: Get Config regs not valid for VF, err:%d\n",
+			__func__,
+			-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (reg_data == NULL) {
+		qdma_log_error("%s: reg_data is NULL, err:%d\n",
+						__func__,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* pick the register table matching the IP flavour */
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		num_regs = qdma_get_config_num_regs();
+		reg_info = qdma_get_config_regs();
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		num_regs = qdma_s80_hard_get_config_num_regs();
+		reg_info = qdma_s80_hard_get_config_regs();
+		break;
+	case EQDMA_SOFT_IP:
+		num_regs = eqdma_get_config_num_regs();
+		reg_info = eqdma_get_config_regs();
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* NOTE(review): the last table entry is intentionally skipped —
+	 * presumably a terminating/sentinel entry; confirm against the
+	 * per-IP register table definitions
+	 */
+	for (count = 0; count < num_regs - 1; count++) {
+		reg_data[count] = qdma_reg_read(dev_hndl,
+				reg_info[count].addr);
+	}
+
+	return rv;
+}
+
+
+/*****************************************************************************/
+/**
+ * qdma_acc_dump_config_regs() - Function to get qdma config register dump in a
+ * buffer
+ *
+ * @dev_hndl:   device handle
+ * @is_vf:      Whether PF or VF
+ * @ip_type:	QDMA IP Type
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_dump_config_regs(void *dev_hndl, uint8_t is_vf,
+		enum qdma_ip_type ip_type,
+		char *buf, uint32_t buflen)
+{
+	/* per-IP dump routines share one signature; dispatch through
+	 * a function pointer
+	 */
+	int (*dump_fn)(void *, uint8_t, char *, uint32_t);
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		dump_fn = qdma_soft_dump_config_regs;
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		dump_fn = qdma_s80_hard_dump_config_regs;
+		break;
+	case EQDMA_SOFT_IP:
+		dump_fn = eqdma_dump_config_regs;
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	return dump_fn(dev_hndl, is_vf, buf, buflen);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_dump_reg_info() - Function to dump fields in
+ * a specified register.
+ *
+ * @dev_hndl:   device handle
+ * @ip_type:	QDMA IP Type
+ * @reg_addr:   address of the first register to dump
+ * @num_regs:   number of consecutive registers to dump
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_dump_reg_info(void *dev_hndl,
+		enum qdma_ip_type ip_type, uint32_t reg_addr,
+		uint32_t num_regs, char *buf, uint32_t buflen)
+{
+	int rv = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	if (!buf || !buflen) {
+		qdma_log_error("%s: Invalid input buffer, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		/* field-level info is not available for the soft IP;
+		 * report that in the caller's buffer instead of failing
+		 */
+		QDMA_SNPRINTF_S(buf, buflen, DEBGFS_LINE_SZ,
+		"QDMA reg field info not supported for QDMA_SOFT_IP\n");
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		rv = qdma_s80_hard_dump_reg_info(dev_hndl, reg_addr,
+				num_regs, buf, buflen);
+		break;
+	case EQDMA_SOFT_IP:
+		rv = eqdma_dump_reg_info(dev_hndl, reg_addr,
+				num_regs, buf, buflen);
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	return rv;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_dump_queue_context() - Function to get qdma queue context dump in a
+ * buffer
+ *
+ * @dev_hndl:   device handle
+ * @ip_type:	QDMA IP Type
+ * @st:		Queue Mode (ST or MM)
+ * @q_type:	Queue Type
+ * @ctxt_data:  Context Data
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_dump_queue_context(void *dev_hndl,
+		enum qdma_ip_type ip_type,
+		uint8_t st,
+		enum qdma_dev_q_type q_type,
+		struct qdma_descq_context *ctxt_data,
+		char *buf, uint32_t buflen)
+{
+	/* per-IP dump routines share one signature; dispatch through
+	 * a function pointer
+	 */
+	int (*dump_fn)(void *, uint8_t, enum qdma_dev_q_type,
+			struct qdma_descq_context *, char *, uint32_t);
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		dump_fn = qdma_soft_dump_queue_context;
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		dump_fn = qdma_s80_hard_dump_queue_context;
+		break;
+	case EQDMA_SOFT_IP:
+		dump_fn = eqdma_dump_queue_context;
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	return dump_fn(dev_hndl, st, q_type, ctxt_data, buf, buflen);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_read_dump_queue_context() - Function to read and dump the queue
+ * context in the user-provided buffer. This API is valid only for PF and
+ * should not be used for VFs. For VF's use qdma_dump_queue_context() API
+ * after reading the context through mailbox.
+ *
+ * @dev_hndl:   device handle
+ * @ip_type:	QDMA IP type
+ * @qid_hw:     queue id
+ * @st:		Queue Mode(ST or MM)
+ * @q_type:	Queue type(H2C/C2H/CMPT)
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_read_dump_queue_context(void *dev_hndl,
+				enum qdma_ip_type ip_type,
+				uint16_t qid_hw,
+				uint8_t st,
+				enum qdma_dev_q_type q_type,
+				char *buf, uint32_t buflen)
+{
+	/* per-IP routines share one signature; dispatch through
+	 * a function pointer
+	 */
+	int (*read_dump_fn)(void *, uint16_t, uint8_t,
+			enum qdma_dev_q_type, char *, uint32_t);
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		read_dump_fn = qdma_soft_read_dump_queue_context;
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		read_dump_fn = qdma_s80_hard_read_dump_queue_context;
+		break;
+	case EQDMA_SOFT_IP:
+		read_dump_fn = eqdma_read_dump_queue_context;
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	return read_dump_fn(dev_hndl, qid_hw, st, q_type, buf, buflen);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_acc_dump_config_reg_list() - Dump the registers
+ *
+ * @dev_hndl:		device handle
+ * @ip_type:		QDMA ip type
+ * @num_regs :		Max registers to read
+ * @reg_list :		array of reg addr and reg values
+ * @buf :		pointer to buffer to be filled
+ * @buflen :		Length of the buffer
+ *
+ * Return: returns the platform specific error code
+ *****************************************************************************/
+int qdma_acc_dump_config_reg_list(void *dev_hndl,
+		enum qdma_ip_type ip_type,
+		uint32_t num_regs,
+		struct qdma_reg_data *reg_list,
+		char *buf, uint32_t buflen)
+{
+	/* per-IP dump routines share one signature; dispatch through
+	 * a function pointer
+	 */
+	int (*dump_fn)(void *, uint32_t, struct qdma_reg_data *,
+			char *, uint32_t);
+
+	switch (ip_type) {
+	case QDMA_SOFT_IP:
+		dump_fn = qdma_soft_dump_config_reg_list;
+		break;
+	case QDMA_VERSAL_HARD_IP:
+		dump_fn = qdma_s80_hard_dump_config_reg_list;
+		break;
+	case EQDMA_SOFT_IP:
+		dump_fn = eqdma_dump_config_reg_list;
+		break;
+	default:
+		qdma_log_error("%s: Invalid version number, err = %d",
+			__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	return dump_fn(dev_hndl, num_regs, reg_list, buf, buflen);
+}
+
+
+/*****************************************************************************/
+/**
+ * qdma_get_function_number() - Function to get the function number
+ *
+ * @dev_hndl:	device handle
+ * @func_id:	pointer to hold the function id
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int qdma_get_function_number(void *dev_hndl, uint8_t *func_id)
+{
+	uint32_t regval;
+
+	if (!dev_hndl || !func_id) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	regval = qdma_reg_read(dev_hndl,
+			QDMA_OFFSET_GLBL2_CHANNEL_FUNC_RET);
+	*func_id = (uint8_t)regval;
+
+	return QDMA_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+ * qdma_hw_error_intr_setup() - Function to set up the qdma error
+ * interrupt by programming the function id and interrupt vector
+ * into the global error interrupt register
+ *
+ * @dev_hndl:	device handle
+ * @func_id:	Function id
+ * @err_intr_index:	Interrupt vector
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int qdma_hw_error_intr_setup(void *dev_hndl, uint16_t func_id,
+		uint8_t err_intr_index)
+{
+	uint32_t reg_val = 0;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* note: the arm bit is not set here; see
+	 * qdma_hw_error_intr_rearm()
+	 */
+	reg_val =
+		FIELD_SET(QDMA_GLBL_ERR_FUNC_MASK, func_id) |
+		FIELD_SET(QDMA_GLBL_ERR_VEC_MASK, err_intr_index);
+
+	qdma_reg_write(dev_hndl, QDMA_OFFSET_GLBL_ERR_INT, reg_val);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_hw_error_intr_rearm() - Function to re-arm the error interrupt
+ *
+ * @dev_hndl: device handle
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+static int qdma_hw_error_intr_rearm(void *dev_hndl)
+{
+	uint32_t regval;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* read-modify-write: set only the arm bit */
+	regval = qdma_reg_read(dev_hndl, QDMA_OFFSET_GLBL_ERR_INT) |
+			FIELD_SET(QDMA_GLBL_ERR_ARM_MASK, 1);
+	qdma_reg_write(dev_hndl, QDMA_OFFSET_GLBL_ERR_INT, regval);
+
+	return QDMA_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_get_error_code() - function to get the qdma access mapped
+ *				error code
+ *
+ * @acc_err_code: qdma access error code
+ *
+ * Return:   returns the platform specific error code
+ *****************************************************************************/
+int qdma_get_error_code(int acc_err_code)
+{
+	/* thin wrapper over the platform layer's error translation */
+	return qdma_get_err_code(acc_err_code);
+}
+
+int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
+				struct qdma_hw_access *hw_access)
+{
+	int rv = QDMA_SUCCESS;
+	enum qdma_ip ip = EQDMA_IP;
+
+	struct qdma_hw_version_info version_info;
+
+	if (!dev_hndl) {
+		qdma_log_error("%s: dev_handle is NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+	if (!hw_access) {
+		qdma_log_error("%s: hw_access is NULL, err:%d\n",
+					   __func__, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = qdma_is_config_bar(dev_hndl, is_vf, &ip);
+	if (rv != QDMA_SUCCESS) {
+		qdma_log_error("%s: config bar passed is INVALID, err:%d\n",
+				__func__, -QDMA_ERR_INV_PARAM);
+		return rv;
+	}
+
+	qdma_memset(hw_access, 0, sizeof(struct qdma_hw_access));
+
+	if (ip == EQDMA_IP)
+		hw_access->qdma_get_version = &eqdma_get_version;
+	else
+		hw_access->qdma_get_version = &qdma_get_version;
+	hw_access->qdma_init_ctxt_memory = &qdma_init_ctxt_memory;
+	hw_access->qdma_fmap_conf = &qdma_fmap_conf;
+	hw_access->qdma_sw_ctx_conf = &qdma_sw_ctx_conf;
+	hw_access->qdma_pfetch_ctx_conf = &qdma_pfetch_ctx_conf;
+	hw_access->qdma_cmpt_ctx_conf = &qdma_cmpt_ctx_conf;
+	hw_access->qdma_hw_ctx_conf = &qdma_hw_ctx_conf;
+	hw_access->qdma_credit_ctx_conf = &qdma_credit_ctx_conf;
+	hw_access->qdma_indirect_intr_ctx_conf = &qdma_indirect_intr_ctx_conf;
+	hw_access->qdma_set_default_global_csr = &qdma_set_default_global_csr;
+	hw_access->qdma_global_csr_conf = &qdma_global_csr_conf;
+	hw_access->qdma_global_writeback_interval_conf =
+					&qdma_global_writeback_interval_conf;
+	hw_access->qdma_queue_pidx_update = &qdma_queue_pidx_update;
+	hw_access->qdma_queue_cmpt_cidx_read = &qdma_queue_cmpt_cidx_read;
+	hw_access->qdma_queue_cmpt_cidx_update = &qdma_queue_cmpt_cidx_update;
+	hw_access->qdma_queue_intr_cidx_update = &qdma_queue_intr_cidx_update;
+	hw_access->qdma_mm_channel_conf = &qdma_mm_channel_conf;
+	hw_access->qdma_get_user_bar = &qdma_get_user_bar;
+	hw_access->qdma_get_function_number = &qdma_get_function_number;
+	hw_access->qdma_get_device_attributes = &qdma_get_device_attributes;
+	hw_access->qdma_hw_error_intr_setup = &qdma_hw_error_intr_setup;
+	hw_access->qdma_hw_error_intr_rearm = &qdma_hw_error_intr_rearm;
+	hw_access->qdma_hw_error_enable = &qdma_hw_error_enable;
+	hw_access->qdma_hw_get_error_name = &qdma_hw_get_error_name;
+	hw_access->qdma_hw_error_process = &qdma_hw_error_process;
+	hw_access->qdma_dump_config_regs = &qdma_soft_dump_config_regs;
+	hw_access->qdma_dump_queue_context = &qdma_soft_dump_queue_context;
+	hw_access->qdma_read_dump_queue_context =
+					&qdma_soft_read_dump_queue_context;
+	hw_access->qdma_dump_intr_context = &qdma_dump_intr_context;
+	hw_access->qdma_is_legacy_intr_pend = &qdma_is_legacy_intr_pend;
+	hw_access->qdma_clear_pend_legacy_intr = &qdma_clear_pend_legacy_intr;
+	hw_access->qdma_legacy_intr_conf = &qdma_legacy_intr_conf;
+	hw_access->qdma_initiate_flr = &qdma_initiate_flr;
+	hw_access->qdma_is_flr_done = &qdma_is_flr_done;
+	hw_access->qdma_get_error_code = &qdma_get_error_code;
+	hw_access->qdma_read_reg_list = &qdma_read_reg_list;
+	hw_access->qdma_dump_config_reg_list =
+			&qdma_soft_dump_config_reg_list;
+	hw_access->qdma_dump_reg_info = &qdma_dump_reg_info;
+	hw_access->mbox_base_pf = QDMA_OFFSET_MBOX_BASE_PF;
+	hw_access->mbox_base_vf = QDMA_OFFSET_MBOX_BASE_VF;
+	hw_access->qdma_max_errors = QDMA_ERRS_ALL;
+
+	rv = hw_access->qdma_get_version(dev_hndl, is_vf, &version_info);
+	if (rv != QDMA_SUCCESS)
+		return rv;
+
+	qdma_log_info("Device Type: %s\n",
+			qdma_get_device_type(version_info.device_type));
+
+	qdma_log_info("IP Type: %s\n",
+			qdma_get_ip_type(version_info.ip_type));
+
+	qdma_log_info("Vivado Release: %s\n",
+		qdma_get_vivado_release_id(version_info.vivado_release));
+
+	if (version_info.ip_type == QDMA_VERSAL_HARD_IP) {
+		hw_access->qdma_init_ctxt_memory =
+				&qdma_s80_hard_init_ctxt_memory;
+		hw_access->qdma_qid2vec_conf = &qdma_s80_hard_qid2vec_conf;
+		hw_access->qdma_fmap_conf = &qdma_s80_hard_fmap_conf;
+		hw_access->qdma_sw_ctx_conf = &qdma_s80_hard_sw_ctx_conf;
+		hw_access->qdma_pfetch_ctx_conf =
+				&qdma_s80_hard_pfetch_ctx_conf;
+		hw_access->qdma_cmpt_ctx_conf = &qdma_s80_hard_cmpt_ctx_conf;
+		hw_access->qdma_hw_ctx_conf = &qdma_s80_hard_hw_ctx_conf;
+		hw_access->qdma_credit_ctx_conf =
+				&qdma_s80_hard_credit_ctx_conf;
+		hw_access->qdma_indirect_intr_ctx_conf =
+				&qdma_s80_hard_indirect_intr_ctx_conf;
+		hw_access->qdma_set_default_global_csr =
+					&qdma_s80_hard_set_default_global_csr;
+		hw_access->qdma_queue_pidx_update =
+				&qdma_s80_hard_queue_pidx_update;
+		hw_access->qdma_queue_cmpt_cidx_update =
+				&qdma_s80_hard_queue_cmpt_cidx_update;
+		hw_access->qdma_queue_intr_cidx_update =
+				&qdma_s80_hard_queue_intr_cidx_update;
+		hw_access->qdma_get_user_bar = &qdma_cmp_get_user_bar;
+		hw_access->qdma_get_device_attributes =
+				&qdma_s80_hard_get_device_attributes;
+		hw_access->qdma_dump_config_regs =
+				&qdma_s80_hard_dump_config_regs;
+		hw_access->qdma_dump_intr_context =
+				&qdma_s80_hard_dump_intr_context;
+		hw_access->qdma_hw_error_enable =
+				&qdma_s80_hard_hw_error_enable;
+		hw_access->qdma_hw_error_process =
+				&qdma_s80_hard_hw_error_process;
+		hw_access->qdma_hw_get_error_name =
+				&qdma_s80_hard_hw_get_error_name;
+		hw_access->qdma_legacy_intr_conf = NULL;
+		hw_access->qdma_read_reg_list = &qdma_s80_hard_read_reg_list;
+		hw_access->qdma_dump_config_reg_list =
+				&qdma_s80_hard_dump_config_reg_list;
+		hw_access->qdma_dump_queue_context =
+				&qdma_s80_hard_dump_queue_context;
+		hw_access->qdma_read_dump_queue_context =
+				&qdma_s80_hard_read_dump_queue_context;
+		hw_access->qdma_dump_reg_info = &qdma_s80_hard_dump_reg_info;
+		hw_access->qdma_max_errors = QDMA_S80_HARD_ERRS_ALL;
+	}
+
+	if (version_info.ip_type == EQDMA_SOFT_IP) {
+		hw_access->qdma_init_ctxt_memory = &eqdma_init_ctxt_memory;
+		hw_access->qdma_sw_ctx_conf = &eqdma_sw_ctx_conf;
+		hw_access->qdma_pfetch_ctx_conf = &eqdma_pfetch_ctx_conf;
+		hw_access->qdma_cmpt_ctx_conf = &eqdma_cmpt_ctx_conf;
+		hw_access->qdma_indirect_intr_ctx_conf =
+				&eqdma_indirect_intr_ctx_conf;
+		hw_access->qdma_dump_config_regs = &eqdma_dump_config_regs;
+		hw_access->qdma_dump_intr_context = &eqdma_dump_intr_context;
+		hw_access->qdma_hw_error_enable = &eqdma_hw_error_enable;
+		hw_access->qdma_hw_error_process = &eqdma_hw_error_process;
+		hw_access->qdma_hw_get_error_name = &eqdma_hw_get_error_name;
+		hw_access->qdma_hw_ctx_conf = &eqdma_hw_ctx_conf;
+		hw_access->qdma_credit_ctx_conf = &eqdma_credit_ctx_conf;
+		hw_access->qdma_set_default_global_csr =
+				&eqdma_set_default_global_csr;
+		hw_access->qdma_get_device_attributes =
+				&eqdma_get_device_attributes;
+		hw_access->qdma_get_user_bar = &eqdma_get_user_bar;
+		hw_access->qdma_read_reg_list = &eqdma_read_reg_list;
+		hw_access->qdma_dump_config_reg_list =
+				&eqdma_dump_config_reg_list;
+		hw_access->qdma_dump_queue_context =
+				&eqdma_dump_queue_context;
+		hw_access->qdma_read_dump_queue_context =
+				&eqdma_read_dump_queue_context;
+		hw_access->qdma_dump_reg_info = &eqdma_dump_reg_info;
+		/* All CSR and Queue space register belongs to Window 0.
+		 * Mailbox and MSIX register belongs to Window 1
+		 * Therefore, Mailbox offsets are different for EQDMA
+		 * Mailbox offset for PF : 128K + original address
+		 * Mailbox offset for VF : 16K + original address
+		 */
+		hw_access->mbox_base_pf = EQDMA_OFFSET_MBOX_BASE_PF;
+		hw_access->mbox_base_vf = EQDMA_OFFSET_MBOX_BASE_VF;
+		hw_access->qdma_max_errors = EQDMA_ERRS_ALL;
+	}
+
+	return QDMA_SUCCESS;
+}
diff --git a/drivers/net/qdma/qdma_access/qdma_access_common.h b/drivers/net/qdma/qdma_access/qdma_access_common.h
new file mode 100644
index 0000000000..a2ca188c65
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_access_common.h
@@ -0,0 +1,888 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __QDMA_ACCESS_COMMON_H_
+#define __QDMA_ACCESS_COMMON_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "qdma_access_export.h"
+#include "qdma_access_errors.h"
+
+/* QDMA HW version string array length */
+#define QDMA_HW_VERSION_STRING_LEN			32
+
+#define ENABLE_INIT_CTXT_MEMORY			1
+
+#ifdef GCC_COMPILER
/*
 * get_trailing_zeros() - count the number of trailing zero bits in @x
 *
 * @x: 64-bit value to scan
 *
 * Return: zero-based index of the least significant set bit; 63 when
 * @x is 0, matching the portable (non-GCC) fallback implementation.
 */
static inline uint32_t get_trailing_zeros(uint64_t x)
{
	/*
	 * __builtin_ffsll(0) is 0, so the unadjusted "ffsll(x) - 1" would
	 * underflow to UINT32_MAX for x == 0 while the portable fallback
	 * returns 63. Handle 0 explicitly so both variants agree.
	 */
	if (x == 0)
		return 63;

	return (uint32_t)__builtin_ffsll(x) - 1;
}
+#else
/*
 * get_trailing_zeros() - count the number of trailing zero bits in @value
 * without relying on compiler builtins.
 *
 * @value: 64-bit value to scan
 *
 * Binary-search for the lowest set bit: at each step, if the low half of
 * the remaining window is all zero, skip past it and accumulate the width.
 *
 * Return: zero-based index of the least significant set bit; 63 when
 * @value is 0.
 */
static inline uint32_t get_trailing_zeros(uint64_t value)
{
	static const uint32_t width[] = { 32, 16, 8, 4, 2, 1 };
	static const uint64_t low_mask[] = {
		0xffffffffULL, 0xffffULL, 0xffULL, 0xfULL, 0x3ULL, 0x1ULL
	};
	uint32_t pos = 0;
	uint32_t step;

	for (step = 0; step < 6; step++) {
		if ((value & low_mask[step]) == 0) {
			pos += width[step];
			value >>= width[step];
		}
	}

	return pos;
}
+#endif
+
+#define FIELD_SHIFT(mask)       get_trailing_zeros(mask)
+#define FIELD_SET(mask, val) (__extension__ ({typeof(mask) (_mask) = (mask); \
+				 (((val) << FIELD_SHIFT(_mask)) & (_mask)); }))
+#define FIELD_GET(mask, reg) (__extension__ ({typeof(mask) (_mask) = (mask); \
+				 (((reg) & (_mask)) >> FIELD_SHIFT(_mask)); }))
+
+
+/* CSR Default values */
+#define DEFAULT_MAX_DSC_FETCH               6
+#define DEFAULT_WRB_INT                     QDMA_WRB_INTERVAL_128
+#define DEFAULT_PFCH_STOP_THRESH            256
+#define DEFAULT_PFCH_NUM_ENTRIES_PER_Q      8
+#define DEFAULT_PFCH_MAX_Q_CNT              16
+#define DEFAULT_C2H_INTR_TIMER_TICK         25
+#define DEFAULT_CMPT_COAL_TIMER_CNT         5
+#define DEFAULT_CMPT_COAL_TIMER_TICK        25
+#define DEFAULT_CMPT_COAL_MAX_BUF_SZ        32
+
+#define QDMA_BAR_NUM                        6
+
+/** Maximum data vectors to be used for each function
+ * TODO: Please note that for 2018.2 only one vector would be used
+ * per pf and only one ring would be created for this vector
+ * It is also assumed that all functions have the same number of data vectors
+ * and currently different number of vectors per PF is not supported
+ */
+#define QDMA_NUM_DATA_VEC_FOR_INTR_CXT  1
+
/* Indirect context command operations (programmed into ind_ctxt_cmd.bits.op) */
enum ind_ctxt_cmd_op {
	QDMA_CTXT_CMD_CLR,	/* clear the selected context */
	QDMA_CTXT_CMD_WR,	/* write the selected context */
	QDMA_CTXT_CMD_RD,	/* read the selected context */
	QDMA_CTXT_CMD_INV	/* invalidate the selected context */
};

/* Indirect context selectors (programmed into ind_ctxt_cmd.bits.sel) */
enum ind_ctxt_cmd_sel {
	QDMA_CTXT_SEL_SW_C2H,		/* C2H software descriptor context */
	QDMA_CTXT_SEL_SW_H2C,		/* H2C software descriptor context */
	QDMA_CTXT_SEL_HW_C2H,		/* C2H hardware descriptor context */
	QDMA_CTXT_SEL_HW_H2C,		/* H2C hardware descriptor context */
	QDMA_CTXT_SEL_CR_C2H,		/* C2H credit context */
	QDMA_CTXT_SEL_CR_H2C,		/* H2C credit context */
	QDMA_CTXT_SEL_CMPT,		/* completion (CMPT) context */
	QDMA_CTXT_SEL_PFTCH,		/* prefetch context */
	QDMA_CTXT_SEL_INT_COAL,		/* interrupt aggregation context */
	QDMA_CTXT_SEL_PASID_RAM_LOW,	/* PASID RAM, low half */
	QDMA_CTXT_SEL_PASID_RAM_HIGH,	/* PASID RAM, high half */
	QDMA_CTXT_SEL_TIMER,		/* timer context */
	QDMA_CTXT_SEL_FMAP,		/* function map (FMAP) context */
};
+
+/* polling a register */
+#define	QDMA_REG_POLL_DFLT_INTERVAL_US	10		    /* 10us per poll */
+#define	QDMA_REG_POLL_DFLT_TIMEOUT_US	(500 * 1000)	/* 500ms */
+
+/** Constants */
+#define QDMA_NUM_RING_SIZES                                 16
+#define QDMA_NUM_C2H_TIMERS                                 16
+#define QDMA_NUM_C2H_BUFFER_SIZES                           16
+#define QDMA_NUM_C2H_COUNTERS                               16
+#define QDMA_MM_CONTROL_RUN                                 0x1
+#define QDMA_MM_CONTROL_STEP                                0x100
+#define QDMA_MAGIC_NUMBER                                   0x1fd3
+#define QDMA_PIDX_STEP                                      0x10
+#define QDMA_CMPT_CIDX_STEP                                 0x10
+#define QDMA_INT_CIDX_STEP                                  0x10
+
+
+/** QDMA_IND_REG_SEL_PFTCH */
+#define QDMA_PFTCH_CTXT_SW_CRDT_GET_H_MASK                  GENMASK(15, 3)
+#define QDMA_PFTCH_CTXT_SW_CRDT_GET_L_MASK                  GENMASK(2, 0)
+
+/** QDMA_IND_REG_SEL_CMPT */
+#define QDMA_COMPL_CTXT_BADDR_GET_H_MASK                    GENMASK_ULL(63, 38)
+#define QDMA_COMPL_CTXT_BADDR_GET_L_MASK                    GENMASK_ULL(37, 12)
+#define QDMA_COMPL_CTXT_PIDX_GET_H_MASK                     GENMASK(15, 4)
+#define QDMA_COMPL_CTXT_PIDX_GET_L_MASK                     GENMASK(3, 0)
+
+#define QDMA_INTR_CTXT_BADDR_GET_H_MASK                     GENMASK_ULL(63, 61)
+#define QDMA_INTR_CTXT_BADDR_GET_M_MASK                     GENMASK_ULL(60, 29)
+#define QDMA_INTR_CTXT_BADDR_GET_L_MASK                     GENMASK_ULL(28, 12)
+
+#define     QDMA_GLBL2_MM_CMPT_EN_MASK                      BIT(2)
+#define     QDMA_GLBL2_FLR_PRESENT_MASK                     BIT(1)
+#define     QDMA_GLBL2_MAILBOX_EN_MASK                      BIT(0)
+
+#define QDMA_REG_IND_CTXT_REG_COUNT                         8
+
+/* ------------------------ indirect register context fields -----------*/
+union qdma_ind_ctxt_cmd {
+	uint32_t word;
+	struct {
+		uint32_t busy:1;
+		uint32_t sel:4;
+		uint32_t op:2;
+		uint32_t qid:11;
+		uint32_t rsvd:14;
+	} bits;
+};
+
+#define QDMA_IND_CTXT_DATA_NUM_REGS                         8
+
+/**
+ * struct qdma_indirect_ctxt_regs - Inirect Context programming registers
+ */
+struct qdma_indirect_ctxt_regs {
+	uint32_t qdma_ind_ctxt_data[QDMA_IND_CTXT_DATA_NUM_REGS];
+	uint32_t qdma_ind_ctxt_mask[QDMA_IND_CTXT_DATA_NUM_REGS];
+	union qdma_ind_ctxt_cmd cmd;
+};
+
+/**
+ * struct qdma_fmap_cfg - fmap config data structure
+ */
+struct qdma_fmap_cfg {
+	/** @qbase - queue base for the function */
+	uint16_t qbase;
+	/** @qmax - maximum queues in the function */
+	uint16_t qmax;
+};
+
+/**
+ * struct qdma_qid2vec - qid to vector mapping data structure
+ */
+struct qdma_qid2vec {
+	/** @c2h_vector - For direct interrupt, it is the interrupt
+	 * vector index of msix table;
+	 * for indirect interrupt, it is the ring index
+	 */
+	uint8_t c2h_vector;
+	/** @c2h_en_coal - C2H Interrupt aggregation enable */
+	uint8_t c2h_en_coal;
+	/** @h2c_vector - For direct interrupt, it is the interrupt
+	 * vector index of msix table;
+	 * for indirect interrupt, it is the ring index
+	 */
+	uint8_t h2c_vector;
+	/** @h2c_en_coal - H2C Interrupt aggregation enable */
+	uint8_t h2c_en_coal;
+};
+
/**
 * struct qdma_descq_sw_ctxt - descq SW context config data structure
 */
struct qdma_descq_sw_ctxt {
	/** @ring_bs_addr - ring base address */
	uint64_t ring_bs_addr;
	/** @vec - vector number */
	uint16_t vec;
	/** @pidx - initial producer index */
	uint16_t pidx;
	/** @irq_arm - Interrupt Arm */
	uint8_t irq_arm;
	/** @fnc_id - Function ID */
	uint8_t fnc_id;
	/** @qen - Indicates that the queue is enabled */
	uint8_t qen;
	/** @frcd_en - Enable fetch credit */
	uint8_t frcd_en;
	/** @wbi_chk - Writeback/Interrupt after pending check */
	uint8_t wbi_chk;
	/** @wbi_intvl_en - Write back/Interrupt interval */
	uint8_t wbi_intvl_en;
	/** @at - Address translation */
	uint8_t at;
	/** @fetch_max - Maximum number of descriptor fetches outstanding */
	uint8_t fetch_max;
	/** @rngsz_idx - Descriptor ring size index */
	uint8_t rngsz_idx;
	/** @desc_sz - Descriptor fetch size */
	uint8_t desc_sz;
	/** @bypass - bypass enable */
	uint8_t bypass;
	/** @mm_chn - MM channel */
	uint8_t mm_chn;
	/** @wbk_en - Writeback enable */
	uint8_t wbk_en;
	/** @irq_en - Interrupt enable */
	uint8_t irq_en;
	/** @port_id - Port ID */
	uint8_t port_id;
	/** @irq_no_last - No interrupt was sent */
	uint8_t irq_no_last;
	/** @err - Error status */
	uint8_t err;
	/** @err_wb_sent - writeback/interrupt was sent for an error */
	uint8_t err_wb_sent;
	/** @irq_req - Interrupt due to error waiting to be sent */
	uint8_t irq_req;
	/** @mrkr_dis - Marker disable */
	uint8_t mrkr_dis;
	/** @is_mm - MM mode */
	uint8_t is_mm;
	/** @intr_aggr - interrupt aggregation enable */
	uint8_t intr_aggr;
	/** @pasid_en - PASID Enable */
	uint8_t pasid_en;
	/** @dis_intr_on_vf - Disable interrupt with VF */
	uint8_t dis_intr_on_vf;
	/** @virtio_en - Queue is in Virtio Mode */
	uint8_t virtio_en;
	/** @pack_byp_out - descs on desc output interface can be packed */
	uint8_t pack_byp_out;
	/** @irq_byp - IRQ Bypass mode */
	uint8_t irq_byp;
	/** @host_id - Host ID */
	uint8_t host_id;
	/** @pasid - PASID */
	uint32_t pasid;
	/** @virtio_dsc_base - Virtio Desc Base Address */
	uint64_t virtio_dsc_base;
};
+
+/**
+ * struct qdma_descq_hw_ctxt - descq hw context config data structure
+ */
+struct qdma_descq_hw_ctxt {
+	/** @cidx - consumer index */
+	uint16_t cidx;
+	/** @crd_use - credits consumed */
+	uint16_t crd_use;
+	/** @dsc_pend - descriptors pending */
+	uint8_t dsc_pend;
+	/** @idl_stp_b -Queue invalid and no descriptors pending */
+	uint8_t idl_stp_b;
+	/** @evt_pnd - Event pending */
+	uint8_t evt_pnd;
+	/** @fetch_pnd -Descriptor fetch pending */
+	uint8_t fetch_pnd;
+};
+
+/**
+ * struct qdma_descq_credit_ctxt - descq credit context config data structure
+ */
+struct qdma_descq_credit_ctxt {
+	/** @credit -Fetch credits received. */
+	uint32_t credit;
+};
+
+/**
+ * struct qdma_descq_prefetch_ctxt - descq pfetch context config data structure
+ */
+struct qdma_descq_prefetch_ctxt {
+	/** @sw_crdt -Software credit */
+	uint16_t sw_crdt;
+	/** @bypass - bypass enable */
+	uint8_t bypass;
+	/** @bufsz_idx - c2h buffer size index */
+	uint8_t bufsz_idx;
+	/** @port_id - port ID */
+	uint8_t port_id;
+	/** @var_desc - Variable Descriptor */
+	uint8_t var_desc;
+	/** @num_pftch - Number of descs prefetched */
+	uint16_t num_pftch;
+	/** @err -Error detected on this queue */
+	uint8_t err;
+	/** @pfch_en - Enable prefetch */
+	uint8_t pfch_en;
+	/** @pfch - Queue is in prefetch */
+	uint8_t pfch;
+	/** @valid - context is valid */
+	uint8_t valid;
+};
+
/**
 * struct qdma_descq_cmpt_ctxt - descq completion context config data structure
 */
struct qdma_descq_cmpt_ctxt {
	/** @bs_addr - completion ring base address */
	uint64_t bs_addr;
	/** @vec - Interrupt Vector */
	uint16_t vec;
	/** @pidx - producer index */
	uint16_t pidx;
	/** @cidx - consumer index */
	uint16_t cidx;
	/** @en_stat_desc - Enable Completion Status writes */
	uint8_t en_stat_desc;
	/** @en_int - Enable Completion interrupts */
	uint8_t en_int;
	/** @trig_mode - Interrupt and Completion Status Write Trigger Mode */
	uint8_t trig_mode;
	/** @fnc_id - Function ID */
	uint8_t fnc_id;
	/** @counter_idx - Index to counter register */
	uint8_t counter_idx;
	/** @timer_idx - Index to timer register */
	uint8_t timer_idx;
	/** @in_st - Interrupt State */
	uint8_t in_st;
	/** @color - initial color bit to be used on Completion */
	uint8_t color;
	/** @ringsz_idx - Completion ring size index to ring size registers */
	uint8_t ringsz_idx;
	/** @desc_sz - descriptor size */
	uint8_t desc_sz;
	/** @valid - context valid */
	uint8_t valid;
	/** @err - error status */
	uint8_t err;
	/**
	 * @user_trig_pend - user logic initiated interrupt is
	 * pending to be generated
	 */
	uint8_t user_trig_pend;
	/** @timer_running - timer is running on this queue */
	uint8_t timer_running;
	/** @full_upd - Full update */
	uint8_t full_upd;
	/** @ovf_chk_dis - Completion Ring Overflow Check Disable */
	uint8_t ovf_chk_dis;
	/** @at - Address Translation */
	uint8_t at;
	/** @int_aggr - Interrupt Aggregation */
	uint8_t int_aggr;
	/** @dis_intr_on_vf - Disable interrupt with VF */
	uint8_t dis_intr_on_vf;
	/** @vio - queue is in VirtIO mode */
	uint8_t vio;
	/** @dir_c2h - DMA direction is C2H */
	uint8_t dir_c2h;
	/** @host_id - Host ID */
	uint8_t host_id;
	/** @pasid - PASID */
	uint32_t pasid;
	/** @pasid_en - PASID Enable */
	uint8_t pasid_en;
	/** @vio_eop - Virtio End-of-packet */
	uint8_t vio_eop;
	/** @sh_cmpt - Shared Completion Queue */
	uint8_t sh_cmpt;
};
+
+/**
+ * struct qdma_indirect_intr_ctxt - indirect interrupt context config data
+ * structure
+ */
+struct qdma_indirect_intr_ctxt {
+	/** @baddr_4k -Base address of Interrupt Aggregation Ring */
+	uint64_t baddr_4k;
+	/** @vec - Interrupt vector index in msix table */
+	uint16_t vec;
+	/** @pidx - Producer Index */
+	uint16_t pidx;
+	/** @valid - context valid */
+	uint8_t valid;
+	/** @int_st -Interrupt State */
+	uint8_t int_st;
+	/** @color - Color bit */
+	uint8_t color;
+	/** @page_size - Interrupt Aggregation Ring size */
+	uint8_t page_size;
+	/** @at - Address translation */
+	uint8_t at;
+	/** @host_id - Host ID */
+	uint8_t host_id;
+	/** @pasid - PASID */
+	uint32_t pasid;
+	/** @pasid_en - PASID Enable */
+	uint8_t pasid_en;
+	/** @func_id - Function ID */
+	uint16_t func_id;
+};
+
/**
 * struct qdma_hw_version_info - QDMA hardware version details
 */
struct qdma_hw_version_info {
	/** @rtl_version - RTL Version */
	enum qdma_rtl_version rtl_version;
	/** @vivado_release - Vivado Release id */
	enum qdma_vivado_release_id vivado_release;
	/** @ip_type - QDMA IP type */
	enum qdma_ip_type ip_type;
	/** @device_type - Device Type */
	enum qdma_device_type device_type;
	/** @qdma_rtl_version_str - RTL Version string */
	char qdma_rtl_version_str[QDMA_HW_VERSION_STRING_LEN];
	/** @qdma_vivado_release_id_str - Vivado Release id string */
	char qdma_vivado_release_id_str[QDMA_HW_VERSION_STRING_LEN];
	/** @qdma_device_type_str - Qdma device type string */
	char qdma_device_type_str[QDMA_HW_VERSION_STRING_LEN];
	/** @qdma_ip_type_str - QDMA IP type string */
	char qdma_ip_type_str[QDMA_HW_VERSION_STRING_LEN];
};
+
+#define CTXT_ENTRY_NAME_SZ        64
+struct qctx_entry {
+	char		name[CTXT_ENTRY_NAME_SZ];
+	uint32_t	value;
+};
+
+/**
+ * @struct - qdma_descq_context
+ * @brief	queue context information
+ */
+struct qdma_descq_context {
+	struct qdma_qid2vec qid2vec;
+	struct qdma_fmap_cfg fmap;
+	struct qdma_descq_sw_ctxt sw_ctxt;
+	struct qdma_descq_hw_ctxt hw_ctxt;
+	struct qdma_descq_credit_ctxt cr_ctxt;
+	struct qdma_descq_prefetch_ctxt pfetch_ctxt;
+	struct qdma_descq_cmpt_ctxt cmpt_ctxt;
+};
+
+/**
+ * struct qdma_q_pidx_reg_info - Software PIDX register fields
+ */
+struct qdma_q_pidx_reg_info {
+	/** @pidx - Producer Index */
+	uint16_t pidx;
+	/** @irq_en - Interrupt enable */
+	uint8_t irq_en;
+};
+
/**
 * struct qdma_intr_cidx_reg_info - Interrupt Ring CIDX register fields
 */
struct qdma_intr_cidx_reg_info {
	/** @sw_cidx - Software Consumer Index */
	uint16_t sw_cidx;
	/** @rng_idx - Ring Index of the Interrupt Aggregation ring */
	uint8_t rng_idx;
};
+
+/**
+ * struct qdma_q_cmpt_cidx_reg_info - CMPT CIDX register fields
+ */
+struct qdma_q_cmpt_cidx_reg_info {
+	/** @wrb_cidx - CMPT Consumer Index */
+	uint16_t wrb_cidx;
+	/** @counter_idx - Counter Threshold Index */
+	uint8_t counter_idx;
+	/** @timer_idx - Timer Count Index */
+	uint8_t timer_idx;
+	/** @trig_mode - Trigger mode */
+	uint8_t trig_mode;
+	/** @wrb_en - Enable status descriptor for CMPT */
+	uint8_t wrb_en;
+	/** @irq_en - Enable Interrupt for CMPT */
+	uint8_t irq_en;
+};
+
+
+/**
+ * struct qdma_csr_info - Global CSR info data structure
+ */
+struct qdma_csr_info {
+	/** @ringsz: ring size values */
+	uint16_t ringsz[QDMA_GLOBAL_CSR_ARRAY_SZ];
+	/** @bufsz: buffer size values */
+	uint16_t bufsz[QDMA_GLOBAL_CSR_ARRAY_SZ];
+	/** @timer_cnt: timer threshold values */
+	uint8_t timer_cnt[QDMA_GLOBAL_CSR_ARRAY_SZ];
+	/** @cnt_thres: counter threshold values */
+	uint8_t cnt_thres[QDMA_GLOBAL_CSR_ARRAY_SZ];
+	/** @wb_intvl: writeback interval */
+	uint8_t wb_intvl;
+};
+
+#define QDMA_MAX_REGISTER_DUMP	14
+
+/**
+ * struct qdma_reg_data - Structure to
+ * hold address value and pair
+ */
+struct qdma_reg_data {
+	/** @reg_addr: register address */
+	uint32_t reg_addr;
+	/** @reg_val: register value */
+	uint32_t reg_val;
+};
+
+/**
+ * enum qdma_hw_access_type - To hold hw access type
+ */
+enum qdma_hw_access_type {
+	QDMA_HW_ACCESS_READ,
+	QDMA_HW_ACCESS_WRITE,
+	QDMA_HW_ACCESS_CLEAR,
+	QDMA_HW_ACCESS_INVALIDATE,
+	QDMA_HW_ACCESS_MAX
+};
+
+/**
+ * enum qdma_global_csr_type - To hold global csr type
+ */
+enum qdma_global_csr_type {
+	QDMA_CSR_RING_SZ,
+	QDMA_CSR_TIMER_CNT,
+	QDMA_CSR_CNT_TH,
+	QDMA_CSR_BUF_SZ,
+	QDMA_CSR_MAX
+};
+
+/**
+ * enum status_type - To hold enable/disable status type
+ */
+enum status_type {
+	DISABLE = 0,
+	ENABLE = 1,
+};
+
+/**
+ * enum qdma_reg_read_type - Indicates reg read type
+ */
+enum qdma_reg_read_type {
+	/** @QDMA_REG_READ_PF_ONLY: Read the register for PFs only */
+	QDMA_REG_READ_PF_ONLY,
+	/** @QDMA_REG_READ_VF_ONLY: Read the register for VFs only */
+	QDMA_REG_READ_VF_ONLY,
+	/** @QDMA_REG_READ_PF_VF: Read the register for both PF and VF */
+	QDMA_REG_READ_PF_VF,
+	/** @QDMA_REG_READ_MAX: Reg read enum max */
+	QDMA_REG_READ_MAX
+};
+
+/**
+ * enum qdma_reg_read_groups - Indicates reg read groups
+ */
+enum qdma_reg_read_groups {
+	/** @QDMA_REG_READ_GROUP_1: Read the register from  0x000 to 0x288 */
+	QDMA_REG_READ_GROUP_1,
+	/** @QDMA_REG_READ_GROUP_2: Read the register from 0x400 to 0xAFC */
+	QDMA_REG_READ_GROUP_2,
+	/** @QDMA_REG_READ_GROUP_3: Read the register from 0xB00 to 0xE28 */
+	QDMA_REG_READ_GROUP_3,
+	/** @QDMA_REG_READ_GROUP_4: Read the register Mailbox Registers */
+	QDMA_REG_READ_GROUP_4,
+	/** @QDMA_REG_READ_GROUP_MAX: Reg read max groups */
+	QDMA_REG_READ_GROUP_MAX
+};
+
+void qdma_write_csr_values(void *dev_hndl, uint32_t reg_offst,
+		uint32_t idx, uint32_t cnt, const uint32_t *values);
+
+void qdma_read_csr_values(void *dev_hndl, uint32_t reg_offst,
+		uint32_t idx, uint32_t cnt, uint32_t *values);
+
+int dump_reg(char *buf, int buf_sz, uint32_t raddr,
+		const char *rname, uint32_t rval);
+
+int hw_monitor_reg(void *dev_hndl, uint32_t reg, uint32_t mask,
+		uint32_t val, uint32_t interval_us,
+		uint32_t timeout_us);
+
+void qdma_memset(void *to, uint8_t val, uint32_t size);
+
+int qdma_acc_reg_dump_buf_len(void *dev_hndl,
+		enum qdma_ip_type ip_type, int *buflen);
+
+int qdma_acc_reg_info_len(void *dev_hndl,
+		enum qdma_ip_type ip_type, int *buflen, int *num_regs);
+
+int qdma_acc_context_buf_len(void *dev_hndl,
+		enum qdma_ip_type ip_type, uint8_t st,
+		enum qdma_dev_q_type q_type, uint32_t *buflen);
+
+int qdma_acc_get_num_config_regs(void *dev_hndl,
+		enum qdma_ip_type ip_type, uint32_t *num_regs);
+
+/*
+ * struct qdma_hw_access - Structure to hold HW access function pointers
+ */
+struct qdma_hw_access {
+	int (*qdma_set_default_global_csr)(void *dev_hndl);
+	int (*qdma_global_csr_conf)(void *dev_hndl, uint8_t index,
+					uint8_t count, uint32_t *csr_val,
+					enum qdma_global_csr_type csr_type,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_global_writeback_interval_conf)(void *dev_hndl,
+					enum qdma_wrb_interval *wb_int,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_init_ctxt_memory)(void *dev_hndl);
+	int (*qdma_qid2vec_conf)(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
+				 struct qdma_qid2vec *ctxt,
+				 enum qdma_hw_access_type access_type);
+	int (*qdma_fmap_conf)(void *dev_hndl, uint16_t func_id,
+					struct qdma_fmap_cfg *config,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_sw_ctx_conf)(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
+					struct qdma_descq_sw_ctxt *ctxt,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_pfetch_ctx_conf)(void *dev_hndl, uint16_t hw_qid,
+					struct qdma_descq_prefetch_ctxt *ctxt,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_cmpt_ctx_conf)(void *dev_hndl, uint16_t hw_qid,
+					struct qdma_descq_cmpt_ctxt *ctxt,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_hw_ctx_conf)(void *dev_hndl, uint8_t c2h, uint16_t hw_qid,
+					struct qdma_descq_hw_ctxt *ctxt,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_credit_ctx_conf)(void *dev_hndl, uint8_t c2h,
+					uint16_t hw_qid,
+					struct qdma_descq_credit_ctxt *ctxt,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_indirect_intr_ctx_conf)(void *dev_hndl, uint16_t ring_index,
+					struct qdma_indirect_intr_ctxt *ctxt,
+					enum qdma_hw_access_type access_type);
+	int (*qdma_queue_pidx_update)(void *dev_hndl, uint8_t is_vf,
+				uint16_t qid,
+				uint8_t is_c2h,
+				const struct qdma_q_pidx_reg_info *reg_info);
+	int (*qdma_queue_cmpt_cidx_read)(void *dev_hndl, uint8_t is_vf,
+				uint16_t qid,
+				struct qdma_q_cmpt_cidx_reg_info *reg_info);
+	int (*qdma_queue_cmpt_cidx_update)(void *dev_hndl, uint8_t is_vf,
+			uint16_t qid,
+			const struct qdma_q_cmpt_cidx_reg_info *reg_info);
+	int (*qdma_queue_intr_cidx_update)(void *dev_hndl, uint8_t is_vf,
+				uint16_t qid,
+				const struct qdma_intr_cidx_reg_info *reg_info);
+	int (*qdma_mm_channel_conf)(void *dev_hndl, uint8_t channel,
+				uint8_t is_c2h, uint8_t enable);
+	int (*qdma_get_user_bar)(void *dev_hndl, uint8_t is_vf,
+				uint8_t func_id, uint8_t *user_bar);
+	int (*qdma_get_function_number)(void *dev_hndl, uint8_t *func_id);
+	int (*qdma_get_version)(void *dev_hndl, uint8_t is_vf,
+				struct qdma_hw_version_info *version_info);
+	int (*qdma_get_device_attributes)(void *dev_hndl,
+					struct qdma_dev_attributes *dev_info);
+	int (*qdma_hw_error_intr_setup)(void *dev_hndl, uint16_t func_id,
+					uint8_t err_intr_index);
+	int (*qdma_hw_error_intr_rearm)(void *dev_hndl);
+	int (*qdma_hw_error_enable)(void *dev_hndl,
+			uint32_t err_idx);
+	const char *(*qdma_hw_get_error_name)(uint32_t err_idx);
+	int (*qdma_hw_error_process)(void *dev_hndl);
+	int (*qdma_dump_config_regs)(void *dev_hndl, uint8_t is_vf, char *buf,
+					uint32_t buflen);
+	int (*qdma_dump_reg_info)(void *dev_hndl, uint32_t reg_addr,
+				  uint32_t num_regs,
+				  char *buf,
+				  uint32_t buflen);
+	int (*qdma_dump_queue_context)(void *dev_hndl,
+			uint8_t st,
+			enum qdma_dev_q_type q_type,
+			struct qdma_descq_context *ctxt_data,
+			char *buf, uint32_t buflen);
+	int (*qdma_read_dump_queue_context)(void *dev_hndl,
+			uint16_t qid_hw,
+			uint8_t st,
+			enum qdma_dev_q_type q_type,
+			char *buf, uint32_t buflen);
+	int (*qdma_dump_intr_context)(void *dev_hndl,
+			struct qdma_indirect_intr_ctxt *intr_ctx,
+			int ring_index,
+			char *buf, uint32_t buflen);
+	int (*qdma_is_legacy_intr_pend)(void *dev_hndl);
+	int (*qdma_clear_pend_legacy_intr)(void *dev_hndl);
+	int (*qdma_legacy_intr_conf)(void *dev_hndl, enum status_type enable);
+	int (*qdma_initiate_flr)(void *dev_hndl, uint8_t is_vf);
+	int (*qdma_is_flr_done)(void *dev_hndl, uint8_t is_vf, uint8_t *done);
+	int (*qdma_get_error_code)(int acc_err_code);
+	int (*qdma_read_reg_list)(void *dev_hndl, uint8_t is_vf,
+			uint16_t reg_rd_group,
+			uint16_t *total_regs,
+			struct qdma_reg_data *reg_list);
+	int (*qdma_dump_config_reg_list)(void *dev_hndl,
+			uint32_t num_regs,
+			struct qdma_reg_data *reg_list,
+			char *buf, uint32_t buflen);
+	uint32_t mbox_base_pf;
+	uint32_t mbox_base_vf;
+	uint32_t qdma_max_errors;
+};
+
+/*****************************************************************************/
+/**
+ * qdma_hw_access_init() - Function to get the QDMA hardware
+ *			access function pointers
+ *	This function should be called once per device from
+ *	device_open()/probe(). Caller shall allocate memory for
+ *	qdma_hw_access structure and store pointer to it in their
+ *	per device structure. Config BAR validation will be done
+ *	inside this function
+ *
+ * @dev_hndl: device handle
+ * @is_vf: Whether PF or VF
+ * @hw_access: qdma_hw_access structure pointer.
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_hw_access_init(void *dev_hndl, uint8_t is_vf,
+				struct qdma_hw_access *hw_access);
+
/*****************************************************************************/
/**
 * qdma_acc_get_config_regs() - Function to read the qdma config registers
 *
 * @dev_hndl:   device handle
 * @is_vf:      Whether PF or VF
 * @ip_type:	QDMA IP Type
 * @reg_data:  pointer to register data to be filled
 *
 * Return:	0 - success and < 0 - failure
 *		(NOTE(review): original comment described the buffer-length
 *		return of the dump variant; confirm against implementation)
 *****************************************************************************/
int qdma_acc_get_config_regs(void *dev_hndl, uint8_t is_vf,
		enum qdma_ip_type ip_type,
		uint32_t *reg_data);
+
+/*****************************************************************************/
+/**
+ * qdma_acc_dump_config_regs() - Function to get qdma config register dump in a
+ * buffer
+ *
+ * @dev_hndl:   device handle
+ * @is_vf:      Whether PF or VF
+ * @ip_type:	QDMA IP Type
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_dump_config_regs(void *dev_hndl, uint8_t is_vf,
+		enum qdma_ip_type ip_type,
+		char *buf, uint32_t buflen);
+
+/*****************************************************************************/
+/**
+ * qdma_acc_dump_reg_info() - Function to get qdma reg info in a buffer
+ *
+ * @dev_hndl:   device handle
+ * @ip_type:	QDMA IP Type
+ * @reg_addr:   Register Address
+ * @num_regs:   Number of Registers
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_dump_reg_info(void *dev_hndl,
+		enum qdma_ip_type ip_type, uint32_t reg_addr,
+		uint32_t num_regs, char *buf, uint32_t buflen);
+
+/*****************************************************************************/
+/**
+ * qdma_acc_dump_queue_context() - Function to dump qdma queue context data in a
+ * buffer where context information is already available in 'ctxt_data'
+ * structure pointer buffer
+ *
+ * @dev_hndl:   device handle
+ * @ip_type:	QDMA IP Type
+ * @st:		ST or MM
+ * @q_type:	Queue Type
+ * @ctxt_data:	Context Data
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_dump_queue_context(void *dev_hndl,
+		enum qdma_ip_type ip_type,
+		uint8_t st,
+		enum qdma_dev_q_type q_type,
+		struct qdma_descq_context *ctxt_data,
+		char *buf, uint32_t buflen);
+
+/*****************************************************************************/
+/**
+ * qdma_acc_read_dump_queue_context() - Function to read and dump the queue
+ * context in a buffer
+ *
+ * @dev_hndl:   device handle
+ * @ip_type:	QDMA IP Type
+ * @qid_hw:     queue id
+ * @st:		ST or MM
+ * @q_type:	Queue Type
+ * @buf :       pointer to buffer to be filled
+ * @buflen :    Length of the buffer
+ *
+ * Return:	Length up-till the buffer is filled -success and < 0 - failure
+ *****************************************************************************/
+int qdma_acc_read_dump_queue_context(void *dev_hndl,
+				enum qdma_ip_type ip_type,
+				uint16_t qid_hw,
+				uint8_t st,
+				enum qdma_dev_q_type q_type,
+				char *buf, uint32_t buflen);
+
+
/*****************************************************************************/
/**
 * qdma_acc_dump_config_reg_list() - Dump the registers
 *
 * @dev_hndl:		device handle
 * @ip_type:		QDMA IP Type
 * @num_regs :		Max registers to read
 * @reg_list :		array of reg addr and reg values
 * @buf :		pointer to buffer to be filled
 * @buflen :		Length of the buffer
 *
 * Return: returns the platform specific error code
 *****************************************************************************/
int qdma_acc_dump_config_reg_list(void *dev_hndl,
		enum qdma_ip_type ip_type,
		uint32_t num_regs,
		struct qdma_reg_data *reg_list,
		char *buf, uint32_t buflen);
+
+/*****************************************************************************/
+/**
+ * qdma_get_error_code() - function to get the qdma access mapped
+ *				error code
+ *
+ * @acc_err_code: qdma access error code
+ *
+ * Return:   returns the platform specific error code
+ *****************************************************************************/
+int qdma_get_error_code(int acc_err_code);
+
+/*****************************************************************************/
+/**
+ * qdma_fetch_version_details() - Function to fetch the version details from the
+ *  version register value
+ *
+ * @is_vf           :    Whether PF or VF
+ * @version_reg_val :    Value of the version register
+ * @version_info :       Pointer to store the version details.
+ *
+ * Return:	Nothing
+ *****************************************************************************/
+void qdma_fetch_version_details(uint8_t is_vf, uint32_t version_reg_val,
+		struct qdma_hw_version_info *version_info);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* QDMA_ACCESS_COMMON_H_ */
diff --git a/drivers/net/qdma/qdma_access/qdma_access_errors.h b/drivers/net/qdma/qdma_access/qdma_access_errors.h
new file mode 100644
index 0000000000..a103c3a7fb
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_access_errors.h
@@ -0,0 +1,60 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __QDMA_ACCESS_ERRORS_H_
+#define __QDMA_ACCESS_ERRORS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * DOC: QDMA common library error codes definitions
+ *
+ * Header file *qdma_access_errors.h* defines error codes for common library
+ */
+
+struct err_code_map {
+	int acc_err_code;
+	int err_code;
+};
+
+#define QDMA_HW_ERR_NOT_DETECTED		1
+
+enum qdma_access_error_codes {
+	QDMA_SUCCESS = 0,
+	QDMA_ERR_INV_PARAM,
+	QDMA_ERR_NO_MEM,
+	QDMA_ERR_HWACC_BUSY_TIMEOUT,
+	QDMA_ERR_HWACC_INV_CONFIG_BAR,
+	QDMA_ERR_HWACC_NO_PEND_LEGCY_INTR,
+	QDMA_ERR_HWACC_BAR_NOT_FOUND,
+	QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED,   /* 7 */
+
+	QDMA_ERR_RM_RES_EXISTS,				/* 8 */
+	QDMA_ERR_RM_RES_NOT_EXISTS,
+	QDMA_ERR_RM_DEV_EXISTS,
+	QDMA_ERR_RM_DEV_NOT_EXISTS,
+	QDMA_ERR_RM_NO_QUEUES_LEFT,
+	QDMA_ERR_RM_QMAX_CONF_REJECTED,		/* 13 */
+
+	QDMA_ERR_MBOX_FMAP_WR_FAILED,		/* 14 */
+	QDMA_ERR_MBOX_NUM_QUEUES,
+	QDMA_ERR_MBOX_INV_QID,
+	QDMA_ERR_MBOX_INV_RINGSZ,
+	QDMA_ERR_MBOX_INV_BUFSZ,
+	QDMA_ERR_MBOX_INV_CNTR_TH,
+	QDMA_ERR_MBOX_INV_TMR_TH,
+	QDMA_ERR_MBOX_INV_MSG,
+	QDMA_ERR_MBOX_SEND_BUSY,
+	QDMA_ERR_MBOX_NO_MSG_IN,
+	QDMA_ERR_MBOX_REG_READ_FAILED,
+	QDMA_ERR_MBOX_ALL_ZERO_MSG,			/* 25 */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __QDMA_ACCESS_ERRORS_H_ */
diff --git a/drivers/net/qdma/qdma_access/qdma_access_export.h b/drivers/net/qdma/qdma_access/qdma_access_export.h
new file mode 100644
index 0000000000..37eaa4cd5e
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_access_export.h
@@ -0,0 +1,243 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __QDMA_ACCESS_EXPORT_H_
+#define __QDMA_ACCESS_EXPORT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "qdma_platform_env.h"
+
+/** QDMA Global CSR array size */
+#define QDMA_GLOBAL_CSR_ARRAY_SZ        16
+
/**
 * struct qdma_dev_attributes - QDMA device attributes
 *
 * First group of fields describes the device configuration; the fields
 * after the marker comment are feature flags derived by qdma_access from
 * the RTL version.
 */
struct qdma_dev_attributes {
	/** @num_pfs - Num of PFs */
	uint8_t num_pfs;
	/** @num_qs - Num of Queues */
	uint16_t num_qs;
	/** @flr_present - FLR present or not? */
	uint8_t flr_present:1;
	/** @st_en - ST mode supported or not? */
	uint8_t st_en:1;
	/** @mm_en - MM mode supported or not? */
	uint8_t mm_en:1;
	/** @mm_cmpt_en - MM with Completions supported or not? */
	uint8_t mm_cmpt_en:1;
	/** @mailbox_en - Mailbox supported or not? */
	uint8_t mailbox_en:1;
	/** @debug_mode - Debug mode is enabled/disabled for IP */
	uint8_t debug_mode:1;
	/** @desc_eng_mode - Descriptor Engine mode
	 * (enum qdma_desc_eng_mode):
	 * Internal only/Bypass only/Internal & Bypass
	 */
	uint8_t desc_eng_mode:2;
	/** @mm_channel_max - Num of MM channels */
	uint8_t mm_channel_max;

	/** Below are the list of HW features which are populated by qdma_access
	 * based on RTL version
	 */
	/** @qid2vec_ctx - To indicate support of qid2vec context */
	uint8_t qid2vec_ctx:1;
	/** @cmpt_ovf_chk_dis - To indicate support of overflow check
	 * disable in CMPT ring
	 */
	uint8_t cmpt_ovf_chk_dis:1;
	/** @mailbox_intr - To indicate support of mailbox interrupt */
	uint8_t mailbox_intr:1;
	/** @sw_desc_64b - To indicate support of 64 bytes C2H/H2C
	 * descriptor format
	 */
	uint8_t sw_desc_64b:1;
	/** @cmpt_desc_64b - To indicate support of 64 bytes CMPT
	 * descriptor format
	 */
	uint8_t cmpt_desc_64b:1;
	/** @dynamic_bar - To indicate support of dynamic bar detection */
	uint8_t dynamic_bar:1;
	/** @legacy_intr - To indicate support of legacy interrupt */
	uint8_t legacy_intr:1;
	/** @cmpt_trig_count_timer - To indicate support of counter + timer
	 * trigger mode
	 */
	uint8_t cmpt_trig_count_timer:1;
};
+
+/** qdma_dev_attributes structure size */
+#define QDMA_DEV_ATTR_STRUCT_SIZE	(sizeof(struct qdma_dev_attributes))
+
+/** global_csr_conf structure size */
+#define QDMA_DEV_GLOBAL_CSR_STRUCT_SIZE	(sizeof(struct global_csr_conf))
+
+/**
+ * enum qdma_dev_type - To hold qdma device type
+ */
+enum qdma_dev_type {
+	QDMA_DEV_PF,
+	QDMA_DEV_VF
+};
+
+/**
+ * enum qdma_dev_q_type: Q type
+ */
+enum qdma_dev_q_type {
+	/** @QDMA_DEV_Q_TYPE_H2C: H2C Q */
+	QDMA_DEV_Q_TYPE_H2C,
+	/** @QDMA_DEV_Q_TYPE_C2H: C2H Q */
+	QDMA_DEV_Q_TYPE_C2H,
+	/** @QDMA_DEV_Q_TYPE_CMPT: CMPT Q */
+	QDMA_DEV_Q_TYPE_CMPT,
+	/** @QDMA_DEV_Q_TYPE_MAX: Total Q types */
+	QDMA_DEV_Q_TYPE_MAX
+};
+
+/**
+ * @enum qdma_desc_size - QDMA queue descriptor size
+ */
+enum qdma_desc_size {
+	/** @QDMA_DESC_SIZE_8B - 8 byte descriptor */
+	QDMA_DESC_SIZE_8B,
+	/** @QDMA_DESC_SIZE_16B - 16 byte descriptor */
+	QDMA_DESC_SIZE_16B,
+	/** @QDMA_DESC_SIZE_32B - 32 byte descriptor */
+	QDMA_DESC_SIZE_32B,
+	/** @QDMA_DESC_SIZE_64B - 64 byte descriptor */
+	QDMA_DESC_SIZE_64B
+};
+
+/**
+ * @enum qdma_cmpt_update_trig_mode - Interrupt and Completion status write
+ * trigger mode
+ */
+enum qdma_cmpt_update_trig_mode {
+	/** @QDMA_CMPT_UPDATE_TRIG_MODE_DIS - disabled */
+	QDMA_CMPT_UPDATE_TRIG_MODE_DIS,
+	/** @QDMA_CMPT_UPDATE_TRIG_MODE_EVERY - every */
+	QDMA_CMPT_UPDATE_TRIG_MODE_EVERY,
+	/** @QDMA_CMPT_UPDATE_TRIG_MODE_USR_CNT - user counter */
+	QDMA_CMPT_UPDATE_TRIG_MODE_USR_CNT,
+	/** @QDMA_CMPT_UPDATE_TRIG_MODE_USR - user */
+	QDMA_CMPT_UPDATE_TRIG_MODE_USR,
+	/** @QDMA_CMPT_UPDATE_TRIG_MODE_USR_TMR - user timer */
+	QDMA_CMPT_UPDATE_TRIG_MODE_USR_TMR,
+	/** @QDMA_CMPT_UPDATE_TRIG_MODE_TMR_CNTR - timer + counter combo */
+	QDMA_CMPT_UPDATE_TRIG_MODE_TMR_CNTR
+};
+
+
+/**
+ * @enum qdma_indirect_intr_ring_size - Indirect interrupt ring size
+ */
+enum qdma_indirect_intr_ring_size {
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_4KB - Accommodates 512 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_4KB,
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_8KB - Accommodates 1024 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_8KB,
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_12KB - Accommodates 1536 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_12KB,
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_16KB - Accommodates 2048 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_16KB,
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_20KB - Accommodates 2560 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_20KB,
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_24KB - Accommodates 3072 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_24KB,
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_28KB - Accommodates 3584 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_28KB,
+	/** @QDMA_INDIRECT_INTR_RING_SIZE_32KB - Accommodates 4096 entries */
+	QDMA_INDIRECT_INTR_RING_SIZE_32KB
+};
+
+/**
+ * @enum qdma_wrb_interval - writeback update interval
+ */
+enum qdma_wrb_interval {
+	/** @QDMA_WRB_INTERVAL_4 - writeback update interval of 4 */
+	QDMA_WRB_INTERVAL_4,
+	/** @QDMA_WRB_INTERVAL_8 - writeback update interval of 8 */
+	QDMA_WRB_INTERVAL_8,
+	/** @QDMA_WRB_INTERVAL_16 - writeback update interval of 16 */
+	QDMA_WRB_INTERVAL_16,
+	/** @QDMA_WRB_INTERVAL_32 - writeback update interval of 32 */
+	QDMA_WRB_INTERVAL_32,
+	/** @QDMA_WRB_INTERVAL_64 - writeback update interval of 64 */
+	QDMA_WRB_INTERVAL_64,
+	/** @QDMA_WRB_INTERVAL_128 - writeback update interval of 128 */
+	QDMA_WRB_INTERVAL_128,
+	/** @QDMA_WRB_INTERVAL_256 - writeback update interval of 256 */
+	QDMA_WRB_INTERVAL_256,
+	/** @QDMA_WRB_INTERVAL_512 - writeback update interval of 512 */
+	QDMA_WRB_INTERVAL_512,
+	/** @QDMA_NUM_WRB_INTERVALS - total number of writeback intervals */
+	QDMA_NUM_WRB_INTERVALS
+};
+
+enum qdma_rtl_version {
+	/** @QDMA_RTL_BASE - RTL Base  */
+	QDMA_RTL_BASE,
+	/** @QDMA_RTL_PATCH - RTL Patch  */
+	QDMA_RTL_PATCH,
+	/** @QDMA_RTL_NONE - Not a valid RTL version */
+	QDMA_RTL_NONE,
+};
+
+enum qdma_vivado_release_id {
+	/** @QDMA_VIVADO_2018_3 - Vivado version 2018.3  */
+	QDMA_VIVADO_2018_3,
+	/** @QDMA_VIVADO_2019_1 - Vivado version 2019.1  */
+	QDMA_VIVADO_2019_1,
+	/** @QDMA_VIVADO_2019_2 - Vivado version 2019.2  */
+	QDMA_VIVADO_2019_2,
+	/** @QDMA_VIVADO_2020_1 - Vivado version 2020.1  */
+	QDMA_VIVADO_2020_1,
+	/** @QDMA_VIVADO_2020_2 - Vivado version 2020.2  */
+	QDMA_VIVADO_2020_2,
+	/** @QDMA_VIVADO_NONE - Not a valid Vivado version*/
+	QDMA_VIVADO_NONE
+};
+
/**
 * enum qdma_ip_type - QDMA IP variant this library is driving
 */
enum qdma_ip_type {
	/** @QDMA_VERSAL_HARD_IP - Versal Hard IP */
	QDMA_VERSAL_HARD_IP,
	/** @QDMA_VERSAL_SOFT_IP - Versal Soft IP */
	QDMA_VERSAL_SOFT_IP,
	/** @QDMA_SOFT_IP - QDMA Soft IP */
	QDMA_SOFT_IP,
	/** @EQDMA_SOFT_IP - EQDMA Soft IP */
	EQDMA_SOFT_IP,
	/** @QDMA_NONE_IP - Not a valid IP type */
	QDMA_NONE_IP
};
+
+
+enum qdma_device_type {
+	/** @QDMA_DEVICE_SOFT - UltraScale+ IP's  */
+	QDMA_DEVICE_SOFT,
+	/** @QDMA_DEVICE_VERSAL -VERSAL IP  */
+	QDMA_DEVICE_VERSAL,
+	/** @QDMA_DEVICE_NONE - Not a valid device  */
+	QDMA_DEVICE_NONE
+};
+
+enum qdma_desc_eng_mode {
+	/** @QDMA_DESC_ENG_INTERNAL_BYPASS - Internal and Bypass mode */
+	QDMA_DESC_ENG_INTERNAL_BYPASS,
+	/** @QDMA_DESC_ENG_BYPASS_ONLY - Only Bypass mode  */
+	QDMA_DESC_ENG_BYPASS_ONLY,
+	/** @QDMA_DESC_ENG_INTERNAL_ONLY - Only Internal mode  */
+	QDMA_DESC_ENG_INTERNAL_ONLY,
+	/** @QDMA_DESC_ENG_MODE_MAX - Max of desc engine modes  */
+	QDMA_DESC_ENG_MODE_MAX
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __QDMA_ACCESS_EXPORT_H_ */
diff --git a/drivers/net/qdma/qdma_access/qdma_access_version.h b/drivers/net/qdma/qdma_access/qdma_access_version.h
new file mode 100644
index 0000000000..d016a2a980
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_access_version.h
@@ -0,0 +1,24 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __QDMA_ACCESS_VERSION_H_
+#define __QDMA_ACCESS_VERSION_H_
+
+
+#define QDMA_VERSION_MAJOR	2020
+#define QDMA_VERSION_MINOR	2
+#define QDMA_VERSION_PATCH	0
+
+#define QDMA_VERSION_STR	\
+	__stringify(QDMA_VERSION_MAJOR) "." \
+	__stringify(QDMA_VERSION_MINOR) "." \
+	__stringify(QDMA_VERSION_PATCH)
+
+#define QDMA_VERSION  \
+	((QDMA_VERSION_MAJOR) * 1000 + \
+	 (QDMA_VERSION_MINOR) * 100 + \
+	  QDMA_VERSION_PATCH)
+
+
+#endif /* __QDMA_ACCESS_VERSION_H_ */
diff --git a/drivers/net/qdma/qdma_access/qdma_list.c b/drivers/net/qdma/qdma_access/qdma_list.c
new file mode 100644
index 0000000000..f53fce20cb
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_list.c
@@ -0,0 +1,51 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#include "qdma_list.h"
+
+void qdma_list_init_head(struct qdma_list_head *head)
+{
+	if (head) {
+		head->prev = head;
+		head->next = head;
+	}
+}
+
+void qdma_list_add_tail(struct qdma_list_head *node,
+			  struct qdma_list_head *head)
+{
+	head->prev->next = node;
+	node->next = head;
+	node->prev = head->prev;
+	head->prev = node;
+}
+
+void qdma_list_insert_before(struct qdma_list_head *new_node,
+				    struct qdma_list_head *node)
+{
+	node->prev->next = new_node;
+	new_node->prev = node->prev;
+	new_node->next = node;
+	node->prev = new_node;
+}
+
+void qdma_list_insert_after(struct qdma_list_head *new_node,
+				   struct qdma_list_head *node)
+{
+	new_node->prev = node;
+	new_node->next = node->next;
+	node->next->prev = new_node;
+	node->next = new_node;
+}
+
+
/*
 * qdma_list_del() - unlink @node from whatever list it is on.
 *
 * Tolerates a NULL @node and NULL neighbour links (a node that was never
 * linked). Note: @node's own prev/next pointers are deliberately left
 * unchanged, so after removal they still point into the old list.
 * Caller must hold the list lock.
 */
void qdma_list_del(struct qdma_list_head *node)
{
	if (node) {
		if (node->prev)
			node->prev->next = node->next;
		if (node->next)
			node->next->prev = node->prev;
	}
}
diff --git a/drivers/net/qdma/qdma_access/qdma_list.h b/drivers/net/qdma/qdma_access/qdma_list.h
new file mode 100644
index 0000000000..0f2789b6b1
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_list.h
@@ -0,0 +1,109 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __QDMA_LIST_H_
+#define __QDMA_LIST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * DOC: QDMA common library provided list implementation definitions
+ *
+ * Header file *qdma_list.h* defines APIs for creating and managing list.
+ */
+
+/**
+ * struct qdma_list_head - data type for creating a list node
+ */
+struct qdma_list_head {
+	struct qdma_list_head *prev;
+	struct qdma_list_head *next;
+	void *priv;
+};
+
+#define QDMA_LIST_GET_DATA(node) ((node)->priv)
+#define QDMA_LIST_SET_DATA(node, data) ((node)->priv = data)
+
+#define qdma_list_for_each_safe(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+		pos = n, n = pos->next)
+
+#define qdma_list_is_last_entry(entry, head) ((entry)->next == (head))
+
+static inline int qdma_list_is_empty(struct qdma_list_head *head)
+{
+	return (head->next == head);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_list_init_head(): Init the list head
+ *
+ * @head:     head of the list
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_list_init_head(struct qdma_list_head *head);
+
+/*****************************************************************************/
+/**
+ * qdma_list_add_tail(): add the given @node at the end of the list with @head
+ *
+ * @node:     new entry which has to be added at the end of the list with @head
+ * @head:     head of the list
+ *
+ * This API needs to be called with holding the lock to the list
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_list_add_tail(struct qdma_list_head *node,
+			  struct qdma_list_head *head);
+
+/*****************************************************************************/
+/**
+ * qdma_list_insert_before(): add the given @new_node before the given @node
+ *
+ * @new_node:     new entry which has to be added before @node
+ * @node:         reference node in the list
+ *
+ * This API needs to be called with holding the lock to the list
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_list_insert_before(struct qdma_list_head *new_node,
+				    struct qdma_list_head *node);
+
+/*****************************************************************************/
+/**
+ * qdma_list_insert_after(): add the given @new_node after the given @node
+ *
+ * @new_node:     new entry which has to be added after @node
+ * @node:         reference node in the list
+ *
+ * This API needs to be called with holding the lock to the list
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_list_insert_after(struct qdma_list_head *new_node,
+				   struct qdma_list_head *node);
+
+/*****************************************************************************/
+/**
+ * qdma_list_del(): delete a node from the list
+ *
+ * @node:     node in a list
+ *
+ * This API needs to be called with holding the lock to the list
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_list_del(struct qdma_list_head *node);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __QDMA_LIST_H_ */
diff --git a/drivers/net/qdma/qdma_access/qdma_mbox_protocol.c b/drivers/net/qdma/qdma_access/qdma_mbox_protocol.c
new file mode 100644
index 0000000000..fb797ca380
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_mbox_protocol.c
@@ -0,0 +1,2107 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#include "qdma_mbox_protocol.h"
+
+/** mailbox function status */
+#define MBOX_FN_STATUS			0x0
+/** shift value for mailbox function status in msg */
+#define		S_MBOX_FN_STATUS_IN_MSG	0
+/** mask value for mailbox function status in msg*/
+#define		M_MBOX_FN_STATUS_IN_MSG	0x1
+/** face value for mailbox function status in msg */
+#define		F_MBOX_FN_STATUS_IN_MSG	0x1
+
+/** shift value for out msg */
+#define		S_MBOX_FN_STATUS_OUT_MSG	1
+/** mask value for out msg */
+#define		M_MBOX_FN_STATUS_OUT_MSG	0x1
+/** face value for out msg */
+#define		F_MBOX_FN_STATUS_OUT_MSG	(1 << S_MBOX_FN_STATUS_OUT_MSG)
+/** shift value for status ack */
+#define		S_MBOX_FN_STATUS_ACK	2	/* PF only, ack status */
+/** mask value for status ack */
+#define		M_MBOX_FN_STATUS_ACK	0x1
+/** face value for status ack */
+#define		F_MBOX_FN_STATUS_ACK	(1 << S_MBOX_FN_STATUS_ACK)
+/** shift value for status src */
+#define		S_MBOX_FN_STATUS_SRC	4	/* PF only, source func.*/
+/** mask value for status src */
+#define		M_MBOX_FN_STATUS_SRC	0xFFF
+/** face value for status src */
+#define		G_MBOX_FN_STATUS_SRC(x)	\
+		(((x) >> S_MBOX_FN_STATUS_SRC) & M_MBOX_FN_STATUS_SRC)
+/** face value for mailbox function status */
+#define MBOX_FN_STATUS_MASK \
+		(F_MBOX_FN_STATUS_IN_MSG | \
+		 F_MBOX_FN_STATUS_OUT_MSG | \
+		 F_MBOX_FN_STATUS_ACK)
+
+/** mailbox function commands register */
+#define MBOX_FN_CMD			0x4
+/** shift value for send command */
+#define		S_MBOX_FN_CMD_SND	0
+/** mask value for send command */
+#define		M_MBOX_FN_CMD_SND	0x1
+/** face value for send command */
+#define		F_MBOX_FN_CMD_SND	(1 << S_MBOX_FN_CMD_SND)
+/** shift value for receive command */
+#define		S_MBOX_FN_CMD_RCV	1
+/** mask value for receive command */
+#define		M_MBOX_FN_CMD_RCV	0x1
+/** face value for receive command */
+#define		F_MBOX_FN_CMD_RCV	(1 << S_MBOX_FN_CMD_RCV)
+/** shift value for vf reset */
+#define		S_MBOX_FN_CMD_VF_RESET	3	/* TBD PF only: reset VF */
+/** mask value for vf reset */
+#define		M_MBOX_FN_CMD_VF_RESET	0x1
+/** mailbox isr vector register */
+#define MBOX_ISR_VEC			0x8
+/** shift value for isr vector */
+#define		S_MBOX_ISR_VEC		0
+/** mask value for isr vector */
+#define		M_MBOX_ISR_VEC		0x1F
+/** face value for isr vector */
+#define		V_MBOX_ISR_VEC(x)	((x) & M_MBOX_ISR_VEC)
+/** mailbox FN target register */
+#define MBOX_FN_TARGET			0xC
+/** shift value for FN target id */
+#define		S_MBOX_FN_TARGET_ID	0
+/** mask value for FN target id */
+#define		M_MBOX_FN_TARGET_ID	0xFFF
+/** face value for FN target id */
+#define		V_MBOX_FN_TARGET_ID(x)	((x) & M_MBOX_FN_TARGET_ID)
+/** mailbox isr enable register */
+#define MBOX_ISR_EN			0x10
+/** shift value for isr enable */
+#define		S_MBOX_ISR_EN		0
+/** mask value for isr enable */
+#define		M_MBOX_ISR_EN		0x1
+/** face value for isr enable */
+#define		F_MBOX_ISR_EN		0x1
+/** pf acknowledge base */
+#define MBOX_PF_ACK_BASE		0x20
+/** pf acknowledge step */
+#define MBOX_PF_ACK_STEP		4
+/** pf acknowledge count */
+#define MBOX_PF_ACK_COUNT		8
+/** mailbox incoming msg base */
+#define MBOX_IN_MSG_BASE		0x800
+/** mailbox outgoing msg base */
+#define MBOX_OUT_MSG_BASE		0xc00
+/** mailbox msg step */
+#define MBOX_MSG_STEP			4
+/** mailbox register max */
+#define MBOX_MSG_REG_MAX		32
+
+/**
+ * enum mbox_msg_op - mailbox messages opcode
+ */
+#define MBOX_MSG_OP_RSP_OFFSET	0x80
+enum mbox_msg_op {
+	/** @MBOX_OP_BYE: vf offline, response not required*/
+	MBOX_OP_VF_BYE,
+	/** @MBOX_OP_HELLO: vf online */
+	MBOX_OP_HELLO,
+	/** @: FMAP programming request */
+	MBOX_OP_FMAP,
+	/** @MBOX_OP_CSR: global CSR registers request */
+	MBOX_OP_CSR,
+	/** @MBOX_OP_QREQ: request queues */
+	MBOX_OP_QREQ,
+	/** @MBOX_OP_QADD: notify of queue addition */
+	MBOX_OP_QNOTIFY_ADD,
+	/** @MBOX_OP_QNOTIFY_DEL: notify of queue deletion */
+	MBOX_OP_QNOTIFY_DEL,
+	/** @MBOX_OP_QACTIVE_CNT: get active q count */
+	MBOX_OP_GET_QACTIVE_CNT,
+	/** @MBOX_OP_QCTXT_WRT: queue context write */
+	MBOX_OP_QCTXT_WRT,
+	/** @MBOX_OP_QCTXT_RD: queue context read */
+	MBOX_OP_QCTXT_RD,
+	/** @MBOX_OP_QCTXT_CLR: queue context clear */
+	MBOX_OP_QCTXT_CLR,
+	/** @MBOX_OP_QCTXT_INV: queue context invalidate */
+	MBOX_OP_QCTXT_INV,
+	/** @MBOX_OP_INTR_CTXT_WRT: interrupt context write */
+	MBOX_OP_INTR_CTXT_WRT,
+	/** @MBOX_OP_INTR_CTXT_RD: interrupt context read */
+	MBOX_OP_INTR_CTXT_RD,
+	/** @MBOX_OP_INTR_CTXT_CLR: interrupt context clear */
+	MBOX_OP_INTR_CTXT_CLR,
+	/** @MBOX_OP_INTR_CTXT_INV: interrupt context invalidate */
+	MBOX_OP_INTR_CTXT_INV,
+	/** @MBOX_OP_RESET_PREPARE: PF to VF message for VF reset*/
+	MBOX_OP_RESET_PREPARE,
+	/** @MBOX_OP_RESET_DONE: PF reset done */
+	MBOX_OP_RESET_DONE,
+	/** @MBOX_OP_REG_LIST_READ: Read the register list */
+	MBOX_OP_REG_LIST_READ,
+	/** @MBOX_OP_PF_BYE: pf offline, response required */
+	MBOX_OP_PF_BYE,
+	/** @MBOX_OP_PF_RESET_VF_BYE: VF reset BYE, response required*/
+	MBOX_OP_PF_RESET_VF_BYE,
+
+	/** @MBOX_OP_HELLO_RESP: response to @MBOX_OP_HELLO */
+	MBOX_OP_HELLO_RESP = 0x81,
+	/** @MBOX_OP_FMAP_RESP: response to @MBOX_OP_FMAP */
+	MBOX_OP_FMAP_RESP,
+	/** @MBOX_OP_CSR_RESP: response to @MBOX_OP_CSR */
+	MBOX_OP_CSR_RESP,
+	/** @MBOX_OP_QREQ_RESP: response to @MBOX_OP_QREQ */
+	MBOX_OP_QREQ_RESP,
+	/** @MBOX_OP_QADD: notify of queue addition */
+	MBOX_OP_QNOTIFY_ADD_RESP,
+	/** @MBOX_OP_QNOTIFY_DEL: notify of queue deletion */
+	MBOX_OP_QNOTIFY_DEL_RESP,
+	/** @MBOX_OP_QACTIVE_CNT_RESP: get active q count */
+	MBOX_OP_GET_QACTIVE_CNT_RESP,
+	/** @MBOX_OP_QCTXT_WRT_RESP: response to @MBOX_OP_QCTXT_WRT */
+	MBOX_OP_QCTXT_WRT_RESP,
+	/** @MBOX_OP_QCTXT_RD_RESP: response to @MBOX_OP_QCTXT_RD */
+	MBOX_OP_QCTXT_RD_RESP,
+	/** @MBOX_OP_QCTXT_CLR_RESP: response to @MBOX_OP_QCTXT_CLR */
+	MBOX_OP_QCTXT_CLR_RESP,
+	/** @MBOX_OP_QCTXT_INV_RESP: response to @MBOX_OP_QCTXT_INV */
+	MBOX_OP_QCTXT_INV_RESP,
+	/** @MBOX_OP_INTR_CTXT_WRT_RESP: response to @MBOX_OP_INTR_CTXT_WRT */
+	MBOX_OP_INTR_CTXT_WRT_RESP,
+	/** @MBOX_OP_INTR_CTXT_RD_RESP: response to @MBOX_OP_INTR_CTXT_RD */
+	MBOX_OP_INTR_CTXT_RD_RESP,
+	/** @MBOX_OP_INTR_CTXT_CLR_RESP: response to @MBOX_OP_INTR_CTXT_CLR */
+	MBOX_OP_INTR_CTXT_CLR_RESP,
+	/** @MBOX_OP_INTR_CTXT_INV_RESP: response to @MBOX_OP_INTR_CTXT_INV */
+	MBOX_OP_INTR_CTXT_INV_RESP,
+	/** @MBOX_OP_RESET_PREPARE_RESP: response to @MBOX_OP_RESET_PREPARE */
+	MBOX_OP_RESET_PREPARE_RESP,
+	/** @MBOX_OP_RESET_DONE_RESP: response to @MBOX_OP_PF_VF_RESET */
+	MBOX_OP_RESET_DONE_RESP,
+	/** @MBOX_OP_REG_LIST_READ_RESP: response to @MBOX_OP_REG_LIST_READ */
+	MBOX_OP_REG_LIST_READ_RESP,
+	/** @MBOX_OP_PF_BYE_RESP: response to @MBOX_OP_PF_BYE */
+	MBOX_OP_PF_BYE_RESP,
+	/** @MBOX_OP_PF_RESET_VF_BYE_RESP:
+	 * response to @MBOX_OP_PF_RESET_VF_BYE
+	 */
+	MBOX_OP_PF_RESET_VF_BYE_RESP,
+	/** @MBOX_OP_MAX: total mbox opcodes*/
+	MBOX_OP_MAX
+};
+
/**
 * struct mbox_msg_hdr - mailbox message header
 */
struct mbox_msg_hdr {
	/** @op: opcode */
	uint8_t op;
	/** @status: execution status (0 on success; this library's error
	 * codes are negative). int8_t rather than plain char because the
	 * signedness of char is implementation-defined, while status must
	 * reliably hold negative values; size and layout are unchanged.
	 */
	int8_t status;
	/** @src_func_id: src function */
	uint16_t src_func_id;
	/** @dst_func_id: dst function */
	uint16_t dst_func_id;
};
+
+/**
+ * struct mbox_msg_fmap - FMAP programming command
+ */
+struct mbox_msg_hello {
+	/** @hdr: mailbox message header */
+	struct mbox_msg_hdr hdr;
+	/** @qbase: start queue number in the queue range */
+	uint32_t qbase;
+	/** @qmax: max queue number in the queue range(0-2k) */
+	uint32_t qmax;
+	/** @dev_cap: device capability */
+	struct qdma_dev_attributes dev_cap;
+	/** @dma_device_index: dma_device_index */
+	uint32_t dma_device_index;
+};
+
+/**
+ * struct mbox_msg_active_qcnt - get active queue count command
+ */
+struct mbox_msg_active_qcnt {
+	/** @hdr: mailbox message header */
+	struct mbox_msg_hdr hdr;
+	/** @h2c_queues: number of h2c queues */
+	uint32_t h2c_queues;
+	/** @c2h_queues: number of c2h queues */
+	uint32_t c2h_queues;
+	/** @cmpt_queues: number of cmpt queues */
+	uint32_t cmpt_queues;
+};
+
+/**
+ * struct mbox_msg_fmap - FMAP programming command
+ */
+struct mbox_msg_fmap {
+	/** @hdr: mailbox message header */
+	struct mbox_msg_hdr hdr;
+	/** @qbase: start queue number in the queue range */
+	int qbase;
+	/** @qmax: max queue number in the queue range(0-2k) */
+	uint32_t qmax;
+};
+
/**
 * struct mbox_msg_csr - mailbox csr reading message
 */
struct mbox_msg_csr {
	/** @hdr - mailbox message header */
	struct mbox_msg_hdr hdr;
	/** @csr_info: csr info data structure */
	struct qdma_csr_info csr_info;
};
+
+/**
+ * struct mbox_msg_q_nitfy - queue add/del notify message
+ */
+struct mbox_msg_q_nitfy {
+	/** @hdr - mailbox message header */
+	struct mbox_msg_hdr hdr;
+	/** @qid_hw: queue ID */
+	uint16_t qid_hw;
+	/** @q_type: type of q */
+	enum qdma_dev_q_type q_type;
+};
+
+/**
+ * @struct - mbox_msg_qctxt
+ * @brief queue context mailbox message header
+ */
+struct mbox_msg_qctxt {
+	/** @hdr: mailbox message header*/
+	struct mbox_msg_hdr hdr;
+	/** @qid_hw: queue ID */
+	uint16_t qid_hw;
+	/** @st: streaming mode */
+	uint8_t st:1;
+	/** @c2h: c2h direction */
+	uint8_t c2h:1;
+	/** @cmpt_ctxt_type: completion context type */
+	enum mbox_cmpt_ctxt_type cmpt_ctxt_type:2;
+	/** @rsvd: reserved */
+	uint8_t rsvd:4;
+	/** union compiled_message - complete hw configuration */
+	union {
+		/** @descq_conf: mailbox message for queue context write*/
+		struct mbox_descq_conf descq_conf;
+		/** @descq_ctxt: mailbox message for queue context read*/
+		struct qdma_descq_context descq_ctxt;
+	};
+};
+
+/**
+ * @struct - mbox_intr_ctxt
+ * @brief queue context mailbox message header
+ */
+struct mbox_intr_ctxt {
+	/** @hdr: mailbox message header*/
+	struct mbox_msg_hdr hdr;
+	/** interrupt context mailbox message */
+	struct mbox_msg_intr_ctxt ctxt;
+};
+
+/**
+ * @struct - mbox_read_reg_list
+ * @brief read register mailbox message header
+ */
+struct mbox_read_reg_list {
+	/** @hdr: mailbox message header*/
+	struct mbox_msg_hdr hdr;
+	/** @group_num: reg group to read */
+	uint16_t group_num;
+	/** @num_regs: number of registers to read */
+	uint16_t num_regs;
+	/** @reg_list: register list */
+	struct qdma_reg_data reg_list[QDMA_MAX_REGISTER_DUMP];
+};
+
/**
 * union qdma_mbox_txrx - overlay of every mailbox message layout.
 * @raw fixes the union at MBOX_MSG_REG_MAX 32-bit words, matching the
 * size of the hardware mailbox message register file.
 */
union qdma_mbox_txrx {
		/** mailbox message header*/
		struct mbox_msg_hdr hdr;
		/** hello mailbox message */
		struct mbox_msg_hello hello;
		/** fmap mailbox message */
		struct mbox_msg_fmap fmap;
		/** interrupt context mailbox message */
		struct mbox_intr_ctxt intr_ctxt;
		/** queue context mailbox message*/
		struct mbox_msg_qctxt qctxt;
		/** global csr mailbox message */
		struct mbox_msg_csr csr;
		/** active queue count message */
		struct mbox_msg_active_qcnt qcnt;
		/** q add/del notify message */
		struct mbox_msg_q_nitfy q_notify;
		/** reg list mailbox message */
		struct mbox_read_reg_list reg_read_list;
		/** buffer to hold raw data between pf and vf */
		uint32_t raw[MBOX_MSG_REG_MAX];
};
+
+
+static inline uint32_t get_mbox_offset(void *dev_hndl, uint8_t is_vf)
+{
+	uint32_t mbox_base;
+	struct qdma_hw_access *hw = NULL;
+
+	qdma_get_hw_access(dev_hndl, &hw);
+	mbox_base = (is_vf) ?
+		hw->mbox_base_vf : hw->mbox_base_pf;
+
+	return mbox_base;
+}
+
/*
 * mbox_pf_hw_clear_func_ack() - clear the PF-side ack bit for @func_id.
 *
 * The ack status is a bitmask spread across MBOX_PF_ACK_COUNT 32-bit
 * registers at MBOX_PF_ACK_BASE: function N maps to bit (N % 32) of
 * register (N / 32). PF mailbox only (is_vf = 0); the register appears
 * to be write-1-to-clear — TODO confirm against the hardware spec.
 */
static inline void mbox_pf_hw_clear_func_ack(void *dev_hndl, uint16_t func_id)
{
	int idx = func_id / 32; /* bitmask, uint32_t reg */
	int bit = func_id % 32;
	uint32_t mbox_base = get_mbox_offset(dev_hndl, 0);

	/* clear the function's ack status */
	qdma_reg_write(dev_hndl,
			mbox_base + MBOX_PF_ACK_BASE + idx * MBOX_PF_ACK_STEP,
			(1 << bit));
}
+
/*
 * qdma_mbox_memcpy() - byte-wise copy of @size bytes (max 255) from
 * @from to @to; regions must not overlap.
 */
static void qdma_mbox_memcpy(void *to, void *from, uint8_t size)
{
	uint8_t *dst = (uint8_t *)to;
	const uint8_t *src = (const uint8_t *)from;
	uint8_t n = size;

	while (n--)
		*dst++ = *src++;
}
+
/*
 * qdma_mbox_memset() - fill @size bytes (max 255) at @to with @val.
 */
static void qdma_mbox_memset(void *to, uint8_t val, uint8_t size)
{
	uint8_t *dst = (uint8_t *)to;
	uint8_t n;

	for (n = 0; n < size; n++)
		dst[n] = val;
}
+
+static int get_ring_idx(void *dev_hndl, uint16_t ring_sz, uint16_t *rng_idx)
+{
+	uint32_t rng_sz[QDMA_GLOBAL_CSR_ARRAY_SZ] = { 0 };
+	int i, rv;
+	struct qdma_hw_access *hw = NULL;
+
+	qdma_get_hw_access(dev_hndl, &hw);
+	rv = hw->qdma_global_csr_conf(dev_hndl, 0,
+			QDMA_GLOBAL_CSR_ARRAY_SZ, rng_sz,
+			QDMA_CSR_RING_SZ, QDMA_HW_ACCESS_READ);
+
+	if (rv)
+		return rv;
+	for (i = 0; i < QDMA_GLOBAL_CSR_ARRAY_SZ; i++) {
+		if (ring_sz == (rng_sz[i] - 1)) {
+			*rng_idx = i;
+			return QDMA_SUCCESS;
+		}
+	}
+
+	qdma_log_error("%s: Ring size not found, err:%d\n",
+				   __func__, -QDMA_ERR_MBOX_INV_RINGSZ);
+	return -QDMA_ERR_MBOX_INV_RINGSZ;
+}
+
/*
 * get_buf_idx() - translate a C2H buffer size into its global CSR index.
 *
 * Reads the QDMA_CSR_BUF_SZ table and returns in @buf_idx the entry that
 * exactly equals @buf_sz.
 *
 * Return: QDMA_SUCCESS, the CSR read error, or -QDMA_ERR_MBOX_INV_BUFSZ
 * when no entry matches.
 */
static int get_buf_idx(void *dev_hndl,  uint16_t buf_sz, uint16_t *buf_idx)
{
	uint32_t c2h_buf_sz[QDMA_GLOBAL_CSR_ARRAY_SZ] = { 0 };
	int i, rv;
	struct qdma_hw_access *hw = NULL;

	qdma_get_hw_access(dev_hndl, &hw);

	rv = hw->qdma_global_csr_conf(dev_hndl, 0,
			QDMA_GLOBAL_CSR_ARRAY_SZ, c2h_buf_sz,
			QDMA_CSR_BUF_SZ, QDMA_HW_ACCESS_READ);
	if (rv)
		return rv;
	for (i = 0; i < QDMA_GLOBAL_CSR_ARRAY_SZ; i++) {
		if (c2h_buf_sz[i] == buf_sz) {
			*buf_idx = i;
			return QDMA_SUCCESS;
		}
	}

	qdma_log_error("%s: Buf index not found, err:%d\n",
				   __func__, -QDMA_ERR_MBOX_INV_BUFSZ);
	return -QDMA_ERR_MBOX_INV_BUFSZ;
}
+
/*
 * get_cntr_idx() - translate a counter threshold into its global CSR index.
 *
 * Reads the QDMA_CSR_CNT_TH table and returns in @cntr_idx the entry that
 * exactly equals @cntr_val.
 *
 * Return: QDMA_SUCCESS, the CSR read error, or -QDMA_ERR_MBOX_INV_CNTR_TH
 * when no entry matches.
 */
static int get_cntr_idx(void *dev_hndl, uint8_t cntr_val, uint8_t *cntr_idx)
{
	uint32_t cntr_th[QDMA_GLOBAL_CSR_ARRAY_SZ] = { 0 };
	int i, rv;
	struct qdma_hw_access *hw = NULL;

	qdma_get_hw_access(dev_hndl, &hw);

	rv = hw->qdma_global_csr_conf(dev_hndl, 0,
			QDMA_GLOBAL_CSR_ARRAY_SZ, cntr_th,
			QDMA_CSR_CNT_TH, QDMA_HW_ACCESS_READ);

	if (rv)
		return rv;
	for (i = 0; i < QDMA_GLOBAL_CSR_ARRAY_SZ; i++) {
		if (cntr_th[i] == cntr_val) {
			*cntr_idx = i;
			return QDMA_SUCCESS;
		}
	}

	qdma_log_error("%s: Counter val not found, err:%d\n",
				   __func__, -QDMA_ERR_MBOX_INV_CNTR_TH);
	return -QDMA_ERR_MBOX_INV_CNTR_TH;
}
+
/*
 * get_tmr_idx() - translate a timer threshold into its global CSR index.
 *
 * Reads the QDMA_CSR_TIMER_CNT table and returns in @tmr_idx the entry
 * that exactly equals @tmr_val.
 *
 * Return: QDMA_SUCCESS, the CSR read error, or -QDMA_ERR_MBOX_INV_TMR_TH
 * when no entry matches.
 */
static int get_tmr_idx(void *dev_hndl, uint8_t tmr_val, uint8_t *tmr_idx)
{
	uint32_t tmr_th[QDMA_GLOBAL_CSR_ARRAY_SZ] = { 0 };
	int i, rv;
	struct qdma_hw_access *hw = NULL;

	qdma_get_hw_access(dev_hndl, &hw);

	rv = hw->qdma_global_csr_conf(dev_hndl, 0,
			QDMA_GLOBAL_CSR_ARRAY_SZ, tmr_th,
			QDMA_CSR_TIMER_CNT, QDMA_HW_ACCESS_READ);
	if (rv)
		return rv;
	for (i = 0; i < QDMA_GLOBAL_CSR_ARRAY_SZ; i++) {
		if (tmr_th[i] == tmr_val) {
			*tmr_idx = i;
			return QDMA_SUCCESS;
		}
	}

	qdma_log_error("%s: Timer val not found, err:%d\n",
				   __func__, -QDMA_ERR_MBOX_INV_TMR_TH);
	return -QDMA_ERR_MBOX_INV_TMR_TH;
}
+
+static int mbox_compose_sw_context(void *dev_hndl,
+				   struct mbox_msg_qctxt *qctxt,
+				   struct qdma_descq_sw_ctxt *sw_ctxt)
+{
+	uint16_t rng_idx = 0;
+	int rv = QDMA_SUCCESS;
+
+	if (!qctxt || !sw_ctxt) {
+		qdma_log_error("%s: qctxt=%p sw_ctxt=%p, err:%d\n",
+						__func__,
+						qctxt, sw_ctxt,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	rv = get_ring_idx(dev_hndl, qctxt->descq_conf.ringsz, &rng_idx);
+	if (rv < 0) {
+		qdma_log_error("%s: failed to get ring index, err:%d\n",
+						__func__, rv);
+		return rv;
+	}
+	/* compose sw context */
+	sw_ctxt->vec = qctxt->descq_conf.intr_id;
+	sw_ctxt->intr_aggr = qctxt->descq_conf.intr_aggr;
+
+	sw_ctxt->ring_bs_addr = qctxt->descq_conf.ring_bs_addr;
+	sw_ctxt->wbi_chk = qctxt->descq_conf.wbi_chk;
+	sw_ctxt->wbi_intvl_en = qctxt->descq_conf.wbi_intvl_en;
+	sw_ctxt->rngsz_idx = rng_idx;
+	sw_ctxt->bypass = qctxt->descq_conf.en_bypass;
+	sw_ctxt->wbk_en = qctxt->descq_conf.wbk_en;
+	sw_ctxt->irq_en = qctxt->descq_conf.irq_en;
+	sw_ctxt->is_mm = ~qctxt->st;
+	sw_ctxt->mm_chn = 0;
+	sw_ctxt->qen = 1;
+	sw_ctxt->frcd_en = qctxt->descq_conf.forced_en;
+
+	sw_ctxt->desc_sz = qctxt->descq_conf.desc_sz;
+
+	/* pidx = 0; irq_ack = 0 */
+	sw_ctxt->fnc_id = qctxt->descq_conf.func_id;
+	sw_ctxt->irq_arm =  qctxt->descq_conf.irq_arm;
+
+	if (qctxt->st && qctxt->c2h) {
+		sw_ctxt->irq_en = 0;
+		sw_ctxt->irq_arm = 0;
+		sw_ctxt->wbk_en = 0;
+		sw_ctxt->wbi_chk = 0;
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/**
+ * mbox_compose_prefetch_context() - build a prefetch context from the
+ * descriptor-queue configuration carried in a mailbox message.
+ *
+ * @dev_hndl: device handle, passed through to the CSR index lookup
+ * @qctxt: received queue-context message (input)
+ * @pfetch_ctxt: prefetch context to populate (output)
+ *
+ * Return: QDMA_SUCCESS on success, -QDMA_ERR_INV_PARAM on NULL inputs,
+ * or the negative error returned by get_buf_idx().
+ */
+static int mbox_compose_prefetch_context(void *dev_hndl,
+					 struct mbox_msg_qctxt *qctxt,
+				 struct qdma_descq_prefetch_ctxt *pfetch_ctxt)
+{
+	uint16_t buf_idx = 0;
+	int rv = QDMA_SUCCESS;
+
+	if (!qctxt || !pfetch_ctxt) {
+		qdma_log_error("%s: qctxt=%p pfetch_ctxt=%p, err:%d\n",
+					   __func__,
+					   qctxt,
+					   pfetch_ctxt,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	/* map the configured buffer size onto a global CSR index */
+	rv = get_buf_idx(dev_hndl, qctxt->descq_conf.bufsz, &buf_idx);
+	if (rv < 0) {
+		/* log the actual failure code, not a generic one */
+		qdma_log_error("%s: failed to get buf index, err:%d\n",
+					   __func__, rv);
+		return rv;
+	}
+	/* prefetch context */
+	pfetch_ctxt->valid = 1;
+	pfetch_ctxt->bypass = qctxt->descq_conf.en_bypass_prefetch;
+	pfetch_ctxt->bufsz_idx = buf_idx;
+	pfetch_ctxt->pfch_en = qctxt->descq_conf.pfch_en;
+
+	return QDMA_SUCCESS;
+}
+
+
+/**
+ * mbox_compose_cmpt_context() - build a completion (writeback) context
+ * from the descriptor-queue configuration carried in a mailbox message.
+ *
+ * @dev_hndl: device handle, passed through to the CSR index lookups
+ * @qctxt: received queue-context message (input)
+ * @cmpt_ctxt: completion context to populate (output)
+ *
+ * Return: QDMA_SUCCESS on success, -QDMA_ERR_INV_PARAM on NULL inputs,
+ * or the negative error from a CSR index lookup.
+ */
+static int mbox_compose_cmpt_context(void *dev_hndl,
+				     struct mbox_msg_qctxt *qctxt,
+				     struct qdma_descq_cmpt_ctxt *cmpt_ctxt)
+{
+	uint16_t rng_idx = 0;
+	uint8_t cntr_idx = 0, tmr_idx = 0;
+	int rv = QDMA_SUCCESS;
+
+	if (!qctxt || !cmpt_ctxt) {
+		qdma_log_error("%s: qctxt=%p cmpt_ctxt=%p, err:%d\n",
+					   __func__, qctxt, cmpt_ctxt,
+					   -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+	/* translate thresholds/sizes into global CSR indexes; log on
+	 * failure for consistency with the other compose helpers
+	 */
+	rv = get_cntr_idx(dev_hndl, qctxt->descq_conf.cnt_thres, &cntr_idx);
+	if (rv < 0) {
+		qdma_log_error("%s: failed to get cntr index, err:%d\n",
+					   __func__, rv);
+		return rv;
+	}
+	rv = get_tmr_idx(dev_hndl, qctxt->descq_conf.timer_thres, &tmr_idx);
+	if (rv < 0) {
+		qdma_log_error("%s: failed to get tmr index, err:%d\n",
+					   __func__, rv);
+		return rv;
+	}
+	rv = get_ring_idx(dev_hndl, qctxt->descq_conf.cmpt_ringsz, &rng_idx);
+	if (rv < 0) {
+		qdma_log_error("%s: failed to get ring index, err:%d\n",
+					   __func__, rv);
+		return rv;
+	}
+	/* writeback context */
+
+	cmpt_ctxt->bs_addr = qctxt->descq_conf.cmpt_ring_bs_addr;
+	cmpt_ctxt->en_stat_desc = qctxt->descq_conf.cmpl_stat_en;
+	cmpt_ctxt->en_int = qctxt->descq_conf.cmpt_int_en;
+	cmpt_ctxt->trig_mode = qctxt->descq_conf.triggermode;
+	cmpt_ctxt->fnc_id = qctxt->descq_conf.func_id;
+	cmpt_ctxt->timer_idx = tmr_idx;
+	cmpt_ctxt->counter_idx = cntr_idx;
+	cmpt_ctxt->color = 1;
+	cmpt_ctxt->ringsz_idx = rng_idx;
+
+	cmpt_ctxt->desc_sz = qctxt->descq_conf.cmpt_desc_sz;
+
+	cmpt_ctxt->valid = 1;
+
+	cmpt_ctxt->ovf_chk_dis = qctxt->descq_conf.dis_overflow_check;
+	cmpt_ctxt->vec = qctxt->descq_conf.intr_id;
+	cmpt_ctxt->int_aggr = qctxt->descq_conf.intr_aggr;
+
+	return QDMA_SUCCESS;
+}
+
+/**
+ * mbox_clear_queue_contexts() - clear the HW contexts of a queue on
+ * behalf of a mailbox request.
+ *
+ * @dev_hndl: device handle
+ * @dma_device_index: DMA device the queue belongs to
+ * @func_id: function that owns the queue
+ * @qid_hw: absolute hardware queue id
+ * @st: streaming-mode flag
+ * @c2h: C2H direction flag
+ * @cmpt_ctxt_type: which completion context (if any) to clear
+ *
+ * Return: QDMA_SUCCESS on success, negative error code on failure.
+ */
+static int mbox_clear_queue_contexts(void *dev_hndl, uint8_t dma_device_index,
+			      uint16_t func_id, uint16_t qid_hw, uint8_t st,
+			      uint8_t c2h,
+			      enum mbox_cmpt_ctxt_type cmpt_ctxt_type)
+{
+	int rv;
+	int qbase;
+	uint32_t qmax;
+	enum qdma_dev_q_range q_range;
+	struct qdma_hw_access *hw = NULL;
+
+	qdma_get_hw_access(dev_hndl, &hw);
+
+	if (cmpt_ctxt_type == QDMA_MBOX_CMPT_CTXT_ONLY) {
+		/* request targets only the completion context */
+		rv = hw->qdma_cmpt_ctx_conf(dev_hndl, qid_hw,
+					    NULL, QDMA_HW_ACCESS_CLEAR);
+		if (rv < 0) {
+			qdma_log_error("%s: clear cmpt ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+	} else {
+		rv = qdma_dev_qinfo_get(dma_device_index,
+				func_id, &qbase, &qmax);
+		if (rv < 0) {
+			qdma_log_error("%s: failed to get qinfo, err:%d\n",
+					__func__, rv);
+			return rv;
+		}
+
+		q_range = qdma_dev_is_queue_in_range(dma_device_index,
+						func_id, qid_hw);
+		if (q_range != QDMA_DEV_Q_IN_RANGE) {
+			/* BUG FIX: rv is 0 here (qinfo_get succeeded), so
+			 * the original logged err:0 and returned success
+			 * on this error path; return a real error code.
+			 */
+			rv = -QDMA_ERR_INV_PARAM;
+			qdma_log_error("%s: q_range invalid, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		rv = hw->qdma_sw_ctx_conf(dev_hndl, c2h, qid_hw,
+					  NULL, QDMA_HW_ACCESS_CLEAR);
+		if (rv < 0) {
+			qdma_log_error("%s: clear sw_ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		rv = hw->qdma_hw_ctx_conf(dev_hndl, c2h, qid_hw, NULL,
+					       QDMA_HW_ACCESS_CLEAR);
+		if (rv < 0) {
+			qdma_log_error("%s: clear hw_ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		rv = hw->qdma_credit_ctx_conf(dev_hndl, c2h, qid_hw, NULL,
+					       QDMA_HW_ACCESS_CLEAR);
+		if (rv < 0) {
+			qdma_log_error("%s: clear cr_ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		/* prefetch context exists only for ST C2H queues */
+		if (st && c2h) {
+			rv = hw->qdma_pfetch_ctx_conf(dev_hndl, qid_hw,
+						       NULL,
+						       QDMA_HW_ACCESS_CLEAR);
+			if (rv < 0) {
+				qdma_log_error("%s:clear pfetch ctxt, err:%d\n",
+						__func__, rv);
+				return rv;
+			}
+		}
+
+		if (cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_MM ||
+		    cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_ST) {
+			rv = hw->qdma_cmpt_ctx_conf(dev_hndl, qid_hw,
+						     NULL,
+						     QDMA_HW_ACCESS_CLEAR);
+			if (rv < 0) {
+				qdma_log_error("%s: clear cmpt ctxt, err:%d\n",
+							__func__, rv);
+				return rv;
+			}
+		}
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/**
+ * mbox_invalidate_queue_contexts() - invalidate the HW contexts of a
+ * queue on behalf of a mailbox request.
+ *
+ * @dev_hndl: device handle
+ * @dma_device_index: DMA device the queue belongs to
+ * @func_id: function that owns the queue
+ * @qid_hw: absolute hardware queue id
+ * @st: streaming-mode flag
+ * @c2h: C2H direction flag
+ * @cmpt_ctxt_type: which completion context (if any) to invalidate
+ *
+ * Return: QDMA_SUCCESS on success, negative error code on failure.
+ */
+static int mbox_invalidate_queue_contexts(void *dev_hndl,
+		uint8_t dma_device_index, uint16_t func_id,
+		uint16_t qid_hw, uint8_t st,
+		uint8_t c2h, enum mbox_cmpt_ctxt_type cmpt_ctxt_type)
+{
+	int rv;
+	int qbase;
+	uint32_t qmax;
+	enum qdma_dev_q_range q_range;
+	struct qdma_hw_access *hw = NULL;
+
+	qdma_get_hw_access(dev_hndl, &hw);
+
+	if (cmpt_ctxt_type == QDMA_MBOX_CMPT_CTXT_ONLY) {
+		/* request targets only the completion context */
+		rv = hw->qdma_cmpt_ctx_conf(dev_hndl, qid_hw, NULL,
+					    QDMA_HW_ACCESS_INVALIDATE);
+		if (rv < 0) {
+			qdma_log_error("%s: inv cmpt ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+	} else {
+		rv = qdma_dev_qinfo_get(dma_device_index, func_id,
+				&qbase, &qmax);
+		if (rv < 0) {
+			qdma_log_error("%s: failed to get qinfo, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		q_range = qdma_dev_is_queue_in_range(dma_device_index,
+						func_id, qid_hw);
+		if (q_range != QDMA_DEV_Q_IN_RANGE) {
+			/* BUG FIX: rv is 0 here (qinfo_get succeeded), so
+			 * the original logged err:0 and returned success
+			 * on this error path; return a real error code.
+			 */
+			rv = -QDMA_ERR_INV_PARAM;
+			qdma_log_error("%s: Invalid qrange, err:%d\n",
+							__func__, rv);
+			return rv;
+		}
+
+		rv = hw->qdma_sw_ctx_conf(dev_hndl, c2h, qid_hw,
+					  NULL, QDMA_HW_ACCESS_INVALIDATE);
+		if (rv < 0) {
+			qdma_log_error("%s: inv sw ctxt, err:%d\n",
+							__func__, rv);
+			return rv;
+		}
+
+		rv = hw->qdma_hw_ctx_conf(dev_hndl, c2h, qid_hw, NULL,
+				QDMA_HW_ACCESS_INVALIDATE);
+		if (rv < 0) {
+			qdma_log_error("%s: clear hw_ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		rv = hw->qdma_credit_ctx_conf(dev_hndl, c2h, qid_hw, NULL,
+				QDMA_HW_ACCESS_INVALIDATE);
+		if (rv < 0) {
+			qdma_log_error("%s: clear cr_ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		/* prefetch context exists only for ST C2H queues */
+		if (st && c2h) {
+			rv = hw->qdma_pfetch_ctx_conf(dev_hndl, qid_hw,
+						NULL,
+						QDMA_HW_ACCESS_INVALIDATE);
+			if (rv < 0) {
+				qdma_log_error("%s: inv pfetch ctxt, err:%d\n",
+						__func__, rv);
+				return rv;
+			}
+		}
+
+		if (cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_MM ||
+		    cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_ST) {
+			rv = hw->qdma_cmpt_ctx_conf(dev_hndl, qid_hw,
+						NULL,
+						QDMA_HW_ACCESS_INVALIDATE);
+			if (rv < 0) {
+				qdma_log_error("%s: inv cmpt ctxt, err:%d\n",
+						__func__, rv);
+				return rv;
+			}
+		}
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/**
+ * mbox_write_queue_contexts() - compose and program the HW contexts for
+ * a queue described by a mailbox context-write request.
+ *
+ * For a completion-only request, clears then writes the cmpt context.
+ * Otherwise composes sw (plus prefetch for ST C2H, plus cmpt when
+ * requested), clears the old contexts, then writes the new ones.
+ *
+ * @dev_hndl: device handle
+ * @dma_device_index: DMA device the queue belongs to
+ * @qctxt: received queue-context message
+ *
+ * Return: QDMA_SUCCESS on success, negative error code on failure.
+ */
+static int mbox_write_queue_contexts(void *dev_hndl, uint8_t dma_device_index,
+				     struct mbox_msg_qctxt *qctxt)
+{
+	int rv;
+	int qbase;
+	uint32_t qmax;
+	enum qdma_dev_q_range q_range;
+	struct qdma_descq_context descq_ctxt;
+	uint16_t qid_hw = qctxt->qid_hw;
+	struct qdma_hw_access *hw = NULL;
+
+	qdma_get_hw_access(dev_hndl, &hw);
+
+	rv = qdma_dev_qinfo_get(dma_device_index, qctxt->descq_conf.func_id,
+				&qbase, &qmax);
+	if (rv < 0)
+		return rv;
+
+	q_range = qdma_dev_is_queue_in_range(dma_device_index,
+					     qctxt->descq_conf.func_id,
+					     qctxt->qid_hw);
+	if (q_range != QDMA_DEV_Q_IN_RANGE) {
+		/* BUG FIX: rv is 0 here (qinfo_get succeeded), so the
+		 * original logged err:0 and returned success on this
+		 * error path; return a real error code.
+		 */
+		rv = -QDMA_ERR_INV_PARAM;
+		qdma_log_error("%s: Invalid qrange, err:%d\n",
+							__func__, rv);
+		return rv;
+	}
+
+	qdma_mbox_memset(&descq_ctxt, 0, sizeof(struct qdma_descq_context));
+
+	if (qctxt->cmpt_ctxt_type == QDMA_MBOX_CMPT_CTXT_ONLY) {
+		rv = mbox_compose_cmpt_context(dev_hndl, qctxt,
+			       &descq_ctxt.cmpt_ctxt);
+		if (rv < 0)
+			return rv;
+
+		/* clear stale state before programming the new context */
+		rv = hw->qdma_cmpt_ctx_conf(dev_hndl, qid_hw,
+					    NULL, QDMA_HW_ACCESS_CLEAR);
+		if (rv < 0) {
+			qdma_log_error("%s: clear cmpt ctxt, err:%d\n",
+								__func__, rv);
+			return rv;
+		}
+
+		rv = hw->qdma_cmpt_ctx_conf(dev_hndl, qid_hw,
+			     &descq_ctxt.cmpt_ctxt, QDMA_HW_ACCESS_WRITE);
+		if (rv < 0) {
+			qdma_log_error("%s: write cmpt ctxt, err:%d\n",
+								__func__, rv);
+			return rv;
+		}
+
+	} else {
+		rv = mbox_compose_sw_context(dev_hndl, qctxt,
+				&descq_ctxt.sw_ctxt);
+		if (rv < 0)
+			return rv;
+
+		if (qctxt->st && qctxt->c2h) {
+			rv = mbox_compose_prefetch_context(dev_hndl, qctxt,
+						&descq_ctxt.pfetch_ctxt);
+			if (rv < 0)
+				return rv;
+		}
+
+		if (qctxt->cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_MM ||
+		    qctxt->cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_ST) {
+			rv = mbox_compose_cmpt_context(dev_hndl, qctxt,
+							&descq_ctxt.cmpt_ctxt);
+			if (rv < 0)
+				return rv;
+		}
+
+		/* clear everything first, then write in one pass */
+		rv = mbox_clear_queue_contexts(dev_hndl, dma_device_index,
+					qctxt->descq_conf.func_id,
+					qctxt->qid_hw,
+					qctxt->st,
+					qctxt->c2h,
+					qctxt->cmpt_ctxt_type);
+		if (rv < 0)
+			return rv;
+		rv = hw->qdma_sw_ctx_conf(dev_hndl, qctxt->c2h, qid_hw,
+					   &descq_ctxt.sw_ctxt,
+					   QDMA_HW_ACCESS_WRITE);
+		if (rv < 0) {
+			qdma_log_error("%s: write sw ctxt, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+
+		if (qctxt->st && qctxt->c2h) {
+			rv = hw->qdma_pfetch_ctx_conf(dev_hndl, qid_hw,
+						       &descq_ctxt.pfetch_ctxt,
+						       QDMA_HW_ACCESS_WRITE);
+			if (rv < 0) {
+				qdma_log_error("%s:write pfetch ctxt, err:%d\n",
+						__func__, rv);
+				return rv;
+			}
+		}
+
+		if (qctxt->cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_MM ||
+		    qctxt->cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_ST) {
+			rv = hw->qdma_cmpt_ctx_conf(dev_hndl, qid_hw,
+						     &descq_ctxt.cmpt_ctxt,
+						     QDMA_HW_ACCESS_WRITE);
+			if (rv < 0) {
+				qdma_log_error("%s: write cmpt ctxt, err:%d\n",
+						__func__, rv);
+				return rv;
+			}
+		}
+	}
+	return QDMA_SUCCESS;
+}
+
+/* Read back the full set of descriptor-queue contexts for @qid_hw on
+ * behalf of a VF context-read request.  The prefetch context is read
+ * only for ST C2H queues; the completion context only when the request
+ * asks for it.  Returns QDMA_SUCCESS or the first negative error.
+ */
+static int mbox_read_queue_contexts(void *dev_hndl, uint16_t qid_hw,
+			uint8_t st, uint8_t c2h,
+			enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+			struct qdma_descq_context *ctxt)
+{
+	int ret;
+	struct qdma_hw_access *hw_ops = NULL;
+
+	qdma_get_hw_access(dev_hndl, &hw_ops);
+
+	/* software context */
+	ret = hw_ops->qdma_sw_ctx_conf(dev_hndl, c2h, qid_hw,
+				       &ctxt->sw_ctxt,
+				       QDMA_HW_ACCESS_READ);
+	if (ret < 0) {
+		qdma_log_error("%s: read sw ctxt, err:%d\n",
+					__func__, ret);
+		return ret;
+	}
+
+	/* hardware context */
+	ret = hw_ops->qdma_hw_ctx_conf(dev_hndl, c2h, qid_hw,
+				       &ctxt->hw_ctxt,
+				       QDMA_HW_ACCESS_READ);
+	if (ret < 0) {
+		qdma_log_error("%s: read hw ctxt, err:%d\n",
+					__func__, ret);
+		return ret;
+	}
+
+	/* credit context */
+	ret = hw_ops->qdma_credit_ctx_conf(dev_hndl, c2h, qid_hw,
+					   &ctxt->cr_ctxt,
+					   QDMA_HW_ACCESS_READ);
+	if (ret < 0) {
+		qdma_log_error("%s: read credit ctxt, err:%d\n",
+					__func__, ret);
+		return ret;
+	}
+
+	if (st && c2h) {
+		ret = hw_ops->qdma_pfetch_ctx_conf(dev_hndl,
+					qid_hw, &ctxt->pfetch_ctxt,
+					QDMA_HW_ACCESS_READ);
+		if (ret < 0) {
+			qdma_log_error("%s: read pfetch ctxt, err:%d\n",
+						__func__, ret);
+			return ret;
+		}
+	}
+
+	if (cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_MM ||
+	    cmpt_ctxt_type == QDMA_MBOX_CMPT_WITH_ST) {
+		ret = hw_ops->qdma_cmpt_ctx_conf(dev_hndl,
+					qid_hw, &ctxt->cmpt_ctxt,
+					QDMA_HW_ACCESS_READ);
+		if (ret < 0) {
+			qdma_log_error("%s: read cmpt ctxt, err:%d\n",
+						__func__, ret);
+			return ret;
+		}
+	}
+
+	return QDMA_SUCCESS;
+}
+
+/**
+ * qdma_mbox_pf_rcv_msg_handler() - PF-side dispatcher for a mailbox
+ * message received from a VF.
+ *
+ * @dev_hndl: device handle
+ * @dma_device_index: DMA device the message arrived on
+ * @func_id: this PF's function id (stamped as the response source)
+ * @rcv_msg: raw received message, interpreted as union qdma_mbox_txrx
+ * @resp_msg: raw buffer the response is composed into
+ *
+ * Per-operation status is written into resp->hdr.status; the function's
+ * own return is 0 or a QDMA_MBOX_* event code (VF online/offline/reset)
+ * for the caller.  A few ops (reset/bye responses) return their event
+ * code directly without composing a response.
+ */
+int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index,
+				 uint16_t func_id, uint32_t *rcv_msg,
+				 uint32_t *resp_msg)
+{
+	union qdma_mbox_txrx *rcv =  (union qdma_mbox_txrx *)rcv_msg;
+	union qdma_mbox_txrx *resp =  (union qdma_mbox_txrx *)resp_msg;
+	struct mbox_msg_hdr *hdr = &rcv->hdr;
+	struct qdma_hw_access *hw = NULL;
+	int rv = QDMA_SUCCESS;
+	int ret = 0;
+
+	/* NOTE(review): 'hdr' is derived from 'rcv' before this NULL
+	 * check, and 'resp' is never checked -- confirm callers always
+	 * pass valid rcv_msg/resp_msg buffers.
+	 */
+	if (!rcv) {
+		qdma_log_error("%s: rcv_msg=%p failure:%d\n",
+						__func__, rcv,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+	qdma_get_hw_access(dev_hndl, &hw);
+
+	switch (rcv->hdr.op) {
+	/* VF going offline: zero its fmap and drop its resource entry */
+	case MBOX_OP_VF_BYE:
+	{
+		struct qdma_fmap_cfg fmap;
+
+		/* NOTE(review): only qbase/qmax are initialized here --
+		 * confirm qdma_fmap_conf() reads no other fields.
+		 */
+		fmap.qbase = 0;
+		fmap.qmax = 0;
+		rv = hw->qdma_fmap_conf(dev_hndl, hdr->src_func_id, &fmap,
+					QDMA_HW_ACCESS_WRITE);
+
+		qdma_dev_entry_destroy(dma_device_index, hdr->src_func_id);
+
+		ret = QDMA_MBOX_VF_OFFLINE;
+	}
+	break;
+	/* same as VF_BYE, but reported as a reset-triggered offline */
+	case MBOX_OP_PF_RESET_VF_BYE:
+	{
+		struct qdma_fmap_cfg fmap;
+
+		fmap.qbase = 0;
+		fmap.qmax = 0;
+		rv = hw->qdma_fmap_conf(dev_hndl, hdr->src_func_id, &fmap,
+					QDMA_HW_ACCESS_WRITE);
+
+		qdma_dev_entry_destroy(dma_device_index, hdr->src_func_id);
+
+		ret = QDMA_MBOX_VF_RESET_BYE;
+	}
+	break;
+	/* VF coming online: look up (or create) its queue-info entry,
+	 * report capabilities back, and zero its fmap
+	 */
+	case MBOX_OP_HELLO:
+	{
+		struct mbox_msg_fmap *fmap = &rcv->fmap;
+		struct qdma_fmap_cfg fmap_cfg;
+		struct mbox_msg_hello *rsp_hello = &resp->hello;
+
+		rv = qdma_dev_qinfo_get(dma_device_index, hdr->src_func_id,
+				&fmap->qbase, &fmap->qmax);
+		if (rv < 0)
+			rv = qdma_dev_entry_create(dma_device_index,
+					hdr->src_func_id);
+
+		if (!rv) {
+			rsp_hello->qbase = fmap->qbase;
+			rsp_hello->qmax = fmap->qmax;
+			rsp_hello->dma_device_index = dma_device_index;
+			hw->qdma_get_device_attributes(dev_hndl,
+						       &rsp_hello->dev_cap);
+		}
+		qdma_mbox_memset(&fmap_cfg, 0,
+				 sizeof(struct qdma_fmap_cfg));
+		hw->qdma_fmap_conf(dev_hndl, hdr->src_func_id, &fmap_cfg,
+				   QDMA_HW_ACCESS_WRITE);
+
+		ret = QDMA_MBOX_VF_ONLINE;
+	}
+	break;
+	/* program the VF's fmap with its requested qbase/qmax */
+	case MBOX_OP_FMAP:
+	{
+		struct mbox_msg_fmap *fmap = &rcv->fmap;
+		struct qdma_fmap_cfg fmap_cfg;
+
+		fmap_cfg.qbase = fmap->qbase;
+		fmap_cfg.qmax = fmap->qmax;
+
+		/* NOTE(review): this is the only case that returns early
+		 * without composing a response header -- confirm the VF
+		 * side does not wait for an FMAP response on failure.
+		 */
+		rv = hw->qdma_fmap_conf(dev_hndl, hdr->src_func_id,
+				     &fmap_cfg, QDMA_HW_ACCESS_WRITE);
+		if (rv < 0) {
+			qdma_log_error("%s: failed to write fmap, err:%d\n",
+						__func__, rv);
+			return rv;
+		}
+	}
+	break;
+	/* return the global CSR tables (ring sizes, buffer sizes, timer
+	 * and counter thresholds), truncated to 16/8 bits per entry
+	 */
+	case MBOX_OP_CSR:
+	{
+		struct mbox_msg_csr *rsp_csr = &resp->csr;
+		struct qdma_dev_attributes dev_cap;
+
+		uint32_t ringsz[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0};
+		uint32_t bufsz[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0};
+		uint32_t tmr_th[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0};
+		uint32_t cntr_th[QDMA_GLOBAL_CSR_ARRAY_SZ] = {0};
+		int i;
+
+		rv = hw->qdma_global_csr_conf(dev_hndl, 0,
+				QDMA_GLOBAL_CSR_ARRAY_SZ, ringsz,
+				QDMA_CSR_RING_SZ, QDMA_HW_ACCESS_READ);
+		if (rv < 0)
+			goto exit_func;
+
+		hw->qdma_get_device_attributes(dev_hndl, &dev_cap);
+
+		/* buffer sizes only exist when streaming is enabled */
+		if (dev_cap.st_en) {
+			rv = hw->qdma_global_csr_conf(dev_hndl, 0,
+				QDMA_GLOBAL_CSR_ARRAY_SZ, bufsz,
+				QDMA_CSR_BUF_SZ, QDMA_HW_ACCESS_READ);
+			if (rv < 0 &&
+				(rv != -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED))
+				goto exit_func;
+		}
+
+		if (dev_cap.st_en || dev_cap.mm_cmpt_en) {
+			rv = hw->qdma_global_csr_conf(dev_hndl, 0,
+				QDMA_GLOBAL_CSR_ARRAY_SZ, tmr_th,
+				QDMA_CSR_TIMER_CNT, QDMA_HW_ACCESS_READ);
+			if (rv < 0 &&
+				(rv != -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED))
+				goto exit_func;
+
+			rv = hw->qdma_global_csr_conf(dev_hndl, 0,
+				QDMA_GLOBAL_CSR_ARRAY_SZ, cntr_th,
+				QDMA_CSR_CNT_TH, QDMA_HW_ACCESS_READ);
+			if (rv < 0 &&
+				(rv != -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED))
+				goto exit_func;
+		}
+
+		/* ring sizes are always copied; the rest only when every
+		 * read above succeeded (rv == 0)
+		 */
+		for (i = 0; i < QDMA_GLOBAL_CSR_ARRAY_SZ; i++) {
+			rsp_csr->csr_info.ringsz[i] = ringsz[i] &
+					0xFFFF;
+			if (!rv) {
+				rsp_csr->csr_info.bufsz[i] = bufsz[i] & 0xFFFF;
+				rsp_csr->csr_info.timer_cnt[i] = tmr_th[i] &
+						0xFF;
+				rsp_csr->csr_info.cnt_thres[i] = cntr_th[i] &
+						0xFF;
+			}
+		}
+
+		/* unsupported-feature reads are not an error to the VF */
+		if (rv == -QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED)
+			rv = QDMA_SUCCESS;
+	}
+	break;
+	/* VF requests a queue range; allocate/update and report back */
+	case MBOX_OP_QREQ:
+	{
+		struct mbox_msg_fmap *fmap = &rcv->fmap;
+
+		rv = qdma_dev_update(dma_device_index,
+					  hdr->src_func_id,
+					  fmap->qmax, &fmap->qbase);
+		if (rv == 0) {
+			rv = qdma_dev_qinfo_get(dma_device_index,
+						hdr->src_func_id,
+						&resp->fmap.qbase,
+						&resp->fmap.qmax);
+		}
+		if (rv < 0) {
+			rv = -QDMA_ERR_MBOX_NUM_QUEUES;
+		} else {
+			struct qdma_fmap_cfg fmap_cfg;
+
+			/* on success the fmap is zeroed; the VF programs
+			 * the real range later via MBOX_OP_FMAP
+			 */
+			qdma_mbox_memset(&fmap_cfg, 0,
+					 sizeof(struct qdma_fmap_cfg));
+			hw->qdma_fmap_conf(dev_hndl, hdr->src_func_id,
+					&fmap_cfg, QDMA_HW_ACCESS_WRITE);
+		}
+	}
+	break;
+	/* VF added a queue: bump the per-function active-queue count */
+	case MBOX_OP_QNOTIFY_ADD:
+	{
+		struct mbox_msg_q_nitfy *q_notify = &rcv->q_notify;
+		enum qdma_dev_q_range q_range;
+
+		q_range = qdma_dev_is_queue_in_range(dma_device_index,
+				q_notify->hdr.src_func_id,
+				q_notify->qid_hw);
+		if (q_range != QDMA_DEV_Q_IN_RANGE)
+			rv = -QDMA_ERR_MBOX_INV_QID;
+		else
+			rv = qdma_dev_increment_active_queue(dma_device_index,
+					q_notify->hdr.src_func_id,
+					q_notify->q_type);
+	}
+	break;
+	/* VF removed a queue: decrement the active-queue count */
+	case MBOX_OP_QNOTIFY_DEL:
+	{
+		struct mbox_msg_q_nitfy *q_notify = &rcv->q_notify;
+		enum qdma_dev_q_range q_range;
+
+		q_range = qdma_dev_is_queue_in_range(dma_device_index,
+				q_notify->hdr.src_func_id,
+				q_notify->qid_hw);
+		if (q_range != QDMA_DEV_Q_IN_RANGE)
+			rv = -QDMA_ERR_MBOX_INV_QID;
+		else
+			rv = qdma_dev_decrement_active_queue(dma_device_index,
+					q_notify->hdr.src_func_id,
+					q_notify->q_type);
+	}
+	break;
+	/* report active H2C/C2H/CMPT queue counts for the caller */
+	case MBOX_OP_GET_QACTIVE_CNT:
+	{
+		/* NOTE(review): rv ends up holding the last (CMPT) count
+		 * and is later stored into resp->hdr.status -- confirm
+		 * the VF side ignores/expects that status value.
+		 */
+		rv = qdma_get_device_active_queue_count(dma_device_index,
+				rcv->hdr.src_func_id,
+				QDMA_DEV_Q_TYPE_H2C);
+
+		resp->qcnt.h2c_queues = rv;
+
+		rv = qdma_get_device_active_queue_count(dma_device_index,
+				rcv->hdr.src_func_id,
+				QDMA_DEV_Q_TYPE_C2H);
+
+		resp->qcnt.c2h_queues = rv;
+
+		rv = qdma_get_device_active_queue_count(dma_device_index,
+				rcv->hdr.src_func_id,
+				QDMA_DEV_Q_TYPE_CMPT);
+
+		resp->qcnt.cmpt_queues = rv;
+	}
+	break;
+	/* clear + write the interrupt-aggregation ring contexts listed
+	 * in the message; only the last failure is reported in status
+	 */
+	case MBOX_OP_INTR_CTXT_WRT:
+	{
+		struct mbox_msg_intr_ctxt *ictxt = &rcv->intr_ctxt.ctxt;
+		struct qdma_indirect_intr_ctxt *ctxt;
+		uint8_t i;
+		uint32_t ring_index;
+
+		for (i = 0; i < ictxt->num_rings; i++) {
+			ring_index = ictxt->ring_index_list[i];
+
+			ctxt = &ictxt->ictxt[i];
+			rv = hw->qdma_indirect_intr_ctx_conf(dev_hndl,
+						      ring_index,
+						      NULL,
+						      QDMA_HW_ACCESS_CLEAR);
+			if (rv < 0)
+				resp->hdr.status = rv;
+			rv = hw->qdma_indirect_intr_ctx_conf(dev_hndl,
+						      ring_index, ctxt,
+						      QDMA_HW_ACCESS_WRITE);
+			if (rv < 0)
+				resp->hdr.status = rv;
+		}
+	}
+	break;
+	/* read the listed interrupt ring contexts into the response */
+	case MBOX_OP_INTR_CTXT_RD:
+	{
+		struct mbox_msg_intr_ctxt *rcv_ictxt = &rcv->intr_ctxt.ctxt;
+		struct mbox_msg_intr_ctxt *rsp_ictxt = &resp->intr_ctxt.ctxt;
+		uint8_t i;
+		uint32_t ring_index;
+
+		for (i = 0; i < rcv_ictxt->num_rings; i++) {
+			ring_index = rcv_ictxt->ring_index_list[i];
+
+			rv = hw->qdma_indirect_intr_ctx_conf(dev_hndl,
+						      ring_index,
+						      &rsp_ictxt->ictxt[i],
+						      QDMA_HW_ACCESS_READ);
+			if (rv < 0)
+				resp->hdr.status = rv;
+		}
+	}
+	break;
+	/* clear the listed interrupt ring contexts */
+	case MBOX_OP_INTR_CTXT_CLR:
+	{
+		int i;
+		struct mbox_msg_intr_ctxt *ictxt = &rcv->intr_ctxt.ctxt;
+
+		for (i = 0; i < ictxt->num_rings; i++) {
+			rv = hw->qdma_indirect_intr_ctx_conf(dev_hndl,
+					ictxt->ring_index_list[i],
+					NULL, QDMA_HW_ACCESS_CLEAR);
+			if (rv < 0)
+				resp->hdr.status = rv;
+		}
+	}
+	break;
+	/* invalidate the listed interrupt ring contexts */
+	case MBOX_OP_INTR_CTXT_INV:
+	{
+		struct mbox_msg_intr_ctxt *ictxt = &rcv->intr_ctxt.ctxt;
+		int i;
+
+		for (i = 0; i < ictxt->num_rings; i++) {
+			rv = hw->qdma_indirect_intr_ctx_conf(dev_hndl,
+					ictxt->ring_index_list[i],
+					NULL, QDMA_HW_ACCESS_INVALIDATE);
+			if (rv < 0)
+				resp->hdr.status = rv;
+		}
+	}
+	break;
+	/* queue-context operations are delegated to the helpers above */
+	case MBOX_OP_QCTXT_INV:
+	{
+		struct mbox_msg_qctxt *qctxt = &rcv->qctxt;
+
+		rv = mbox_invalidate_queue_contexts(dev_hndl,
+							dma_device_index,
+							hdr->src_func_id,
+							qctxt->qid_hw,
+							qctxt->st,
+							qctxt->c2h,
+							qctxt->cmpt_ctxt_type);
+	}
+	break;
+	case MBOX_OP_QCTXT_CLR:
+	{
+		struct mbox_msg_qctxt *qctxt = &rcv->qctxt;
+
+		rv = mbox_clear_queue_contexts(dev_hndl,
+						dma_device_index,
+						hdr->src_func_id,
+						qctxt->qid_hw,
+						qctxt->st,
+						qctxt->c2h,
+						qctxt->cmpt_ctxt_type);
+	}
+	break;
+	case MBOX_OP_QCTXT_RD:
+	{
+		struct mbox_msg_qctxt *qctxt = &rcv->qctxt;
+
+		rv = mbox_read_queue_contexts(dev_hndl, qctxt->qid_hw,
+						qctxt->st,
+						qctxt->c2h,
+						qctxt->cmpt_ctxt_type,
+						&resp->qctxt.descq_ctxt);
+	}
+	break;
+	case MBOX_OP_QCTXT_WRT:
+	{
+		struct mbox_msg_qctxt *qctxt = &rcv->qctxt;
+
+		/* func_id from the trusted header, not the VF payload */
+		qctxt->descq_conf.func_id = hdr->src_func_id;
+		rv = mbox_write_queue_contexts(dev_hndl,
+				dma_device_index, qctxt);
+	}
+	break;
+	/* reset/bye responses return event codes directly: no response
+	 * message is composed for these
+	 */
+	case MBOX_OP_RESET_PREPARE_RESP:
+		return QDMA_MBOX_VF_RESET;
+	case MBOX_OP_RESET_DONE_RESP:
+		return QDMA_MBOX_PF_RESET_DONE;
+	case MBOX_OP_REG_LIST_READ:
+	{
+		struct mbox_read_reg_list *rcv_read_reg_list =
+						&rcv->reg_read_list;
+		struct mbox_read_reg_list *rsp_read_reg_list =
+						&resp->reg_read_list;
+
+		rv = hw->qdma_read_reg_list((void *)dev_hndl, 1,
+				 rcv_read_reg_list->group_num,
+				&rsp_read_reg_list->num_regs,
+				rsp_read_reg_list->reg_list);
+
+		if (rv < 0 || rsp_read_reg_list->num_regs == 0) {
+			rv = -QDMA_ERR_MBOX_REG_READ_FAILED;
+			goto exit_func;
+		}
+	}
+	break;
+	case MBOX_OP_PF_BYE_RESP:
+		return QDMA_MBOX_PF_BYE;
+	default:
+		qdma_log_error("%s: op=%d invalid, err:%d\n",
+						__func__,
+						rcv->hdr.op,
+						-QDMA_ERR_MBOX_INV_MSG);
+		return -QDMA_ERR_MBOX_INV_MSG;
+	break;	/* NOTE(review): unreachable after return */
+	}
+
+exit_func:
+	/* compose the response header: op is the request op plus the
+	 * response offset; per-op status travels in hdr.status
+	 */
+	resp->hdr.op = rcv->hdr.op + MBOX_MSG_OP_RSP_OFFSET;
+	resp->hdr.dst_func_id = rcv->hdr.src_func_id;
+	resp->hdr.src_func_id = func_id;
+
+	resp->hdr.status = rv;
+
+	return ret;
+}
+
+/* Compose a VF "hello" (online) message.  BUG FIX: 'qbase' is
+ * dereferenced below, so it is now validated alongside raw_data
+ * instead of being trusted blindly.
+ * (Note: the "qmda" typo in the name is part of the public API and is
+ * kept for compatibility.)
+ */
+int qmda_mbox_compose_vf_online(uint16_t func_id,
+				uint16_t qmax, int *qbase, uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	if (!raw_data || !qbase) {
+		qdma_log_error("%s: raw_data=%p qbase=%p, err:%d\n",
+						__func__, raw_data, qbase,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_HELLO;
+	msg->hdr.src_func_id = func_id;
+	msg->fmap.qbase = (uint32_t)*qbase;
+	msg->fmap.qmax = qmax;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a VF "bye" (offline) notification for the PF. */
+int qdma_mbox_compose_vf_offline(uint16_t func_id,
+				 uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_VF_BYE;
+	mbox->hdr.src_func_id = func_id;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a reset-triggered VF "bye" notification for the PF. */
+int qdma_mbox_compose_vf_reset_offline(uint16_t func_id,
+				 uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_PF_RESET_VF_BYE;
+	mbox->hdr.src_func_id = func_id;
+
+	return QDMA_SUCCESS;
+}
+
+
+
+/* Compose a VF queue-range request (qbase/qmax) for the PF. */
+int qdma_mbox_compose_vf_qreq(uint16_t func_id,
+			      uint16_t qmax, int qbase, uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_QREQ;
+	mbox->hdr.src_func_id = func_id;
+	mbox->fmap.qbase = qbase;
+	mbox->fmap.qmax = qmax;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a notification that queue @qid_hw of @q_type was added. */
+int qdma_mbox_compose_vf_notify_qadd(uint16_t func_id,
+				     uint16_t qid_hw,
+				     enum qdma_dev_q_type q_type,
+				     uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_QNOTIFY_ADD;
+	mbox->hdr.src_func_id = func_id;
+	mbox->q_notify.qid_hw = qid_hw;
+	mbox->q_notify.q_type = q_type;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a request for the device's active queue counts. */
+int qdma_mbox_compose_vf_get_device_active_qcnt(uint16_t func_id,
+		uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_GET_QACTIVE_CNT;
+	mbox->hdr.src_func_id = func_id;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a notification that queue @qid_hw of @q_type was removed. */
+int qdma_mbox_compose_vf_notify_qdel(uint16_t func_id,
+				     uint16_t qid_hw,
+				     enum qdma_dev_q_type q_type,
+				    uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_QNOTIFY_DEL;
+	mbox->hdr.src_func_id = func_id;
+	mbox->q_notify.qid_hw = qid_hw;
+	mbox->q_notify.q_type = q_type;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a request for the PF to program this VF's fmap. */
+int qdma_mbox_compose_vf_fmap_prog(uint16_t func_id,
+				   uint16_t qmax, int qbase,
+				   uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_FMAP;
+	mbox->hdr.src_func_id = func_id;
+	mbox->fmap.qbase = (uint32_t)qbase;
+	mbox->fmap.qmax = qmax;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a queue-context write request.  BUG FIX: 'descq_conf' is the
+ * source of the memcpy below, so it is now validated alongside
+ * raw_data instead of being trusted blindly.
+ */
+int qdma_mbox_compose_vf_qctxt_write(uint16_t func_id,
+			uint16_t qid_hw, uint8_t st, uint8_t c2h,
+			enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+			struct mbox_descq_conf *descq_conf,
+			uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	if (!raw_data || !descq_conf) {
+		qdma_log_error("%s: raw_data=%p descq_conf=%p, err:%d\n",
+						__func__, raw_data, descq_conf,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_QCTXT_WRT;
+	msg->hdr.src_func_id = func_id;
+	msg->qctxt.qid_hw = qid_hw;
+	msg->qctxt.c2h = c2h;
+	msg->qctxt.st = st;
+	msg->qctxt.cmpt_ctxt_type = cmpt_ctxt_type;
+
+	qdma_mbox_memcpy(&msg->qctxt.descq_conf, descq_conf,
+	       sizeof(struct mbox_descq_conf));
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a queue-context read request for @qid_hw. */
+int qdma_mbox_compose_vf_qctxt_read(uint16_t func_id,
+				uint16_t qid_hw, uint8_t st, uint8_t c2h,
+				enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+				uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_QCTXT_RD;
+	mbox->hdr.src_func_id = func_id;
+	mbox->qctxt.qid_hw = qid_hw;
+	mbox->qctxt.c2h = c2h;
+	mbox->qctxt.st = st;
+	mbox->qctxt.cmpt_ctxt_type = cmpt_ctxt_type;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a queue-context invalidate request for @qid_hw. */
+int qdma_mbox_compose_vf_qctxt_invalidate(uint16_t func_id,
+				uint16_t qid_hw, uint8_t st, uint8_t c2h,
+				enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+				uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_QCTXT_INV;
+	mbox->hdr.src_func_id = func_id;
+	mbox->qctxt.qid_hw = qid_hw;
+	mbox->qctxt.c2h = c2h;
+	mbox->qctxt.st = st;
+	mbox->qctxt.cmpt_ctxt_type = cmpt_ctxt_type;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a queue-context clear request for @qid_hw. */
+int qdma_mbox_compose_vf_qctxt_clear(uint16_t func_id,
+				uint16_t qid_hw, uint8_t st, uint8_t c2h,
+				enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+				uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_QCTXT_CLR;
+	mbox->hdr.src_func_id = func_id;
+	mbox->qctxt.qid_hw = qid_hw;
+	mbox->qctxt.c2h = c2h;
+	mbox->qctxt.st = st;
+	mbox->qctxt.cmpt_ctxt_type = cmpt_ctxt_type;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a request for the PF's global CSR tables. */
+int qdma_mbox_compose_csr_read(uint16_t func_id,
+			       uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_CSR;
+	mbox->hdr.src_func_id = func_id;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose a register-list read request for register group @group_num. */
+int qdma_mbox_compose_reg_read(uint16_t func_id,
+					uint16_t group_num,
+					uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *mbox;
+
+	/* a message buffer is mandatory */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+			       __func__, raw_data, -QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	mbox = (union qdma_mbox_txrx *)raw_data;
+	qdma_mbox_memset(raw_data, 0, sizeof(*mbox));
+
+	mbox->hdr.op = MBOX_OP_REG_LIST_READ;
+	mbox->hdr.src_func_id = func_id;
+	mbox->reg_read_list.group_num = group_num;
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose an interrupt-context write request.  BUG FIX: 'intr_ctxt' is
+ * the source of the memcpy below, so it is now validated alongside
+ * raw_data instead of being trusted blindly.
+ */
+int qdma_mbox_compose_vf_intr_ctxt_write(uint16_t func_id,
+					 struct mbox_msg_intr_ctxt *intr_ctxt,
+					 uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	if (!raw_data || !intr_ctxt) {
+		qdma_log_error("%s: raw_data=%p intr_ctxt=%p, err:%d\n",
+						__func__, raw_data, intr_ctxt,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_INTR_CTXT_WRT;
+	msg->hdr.src_func_id = func_id;
+	qdma_mbox_memcpy(&msg->intr_ctxt.ctxt, intr_ctxt,
+	       sizeof(struct mbox_msg_intr_ctxt));
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose an interrupt-context read request.  BUG FIX: 'intr_ctxt' is
+ * the source of the memcpy below, so it is now validated alongside
+ * raw_data instead of being trusted blindly.
+ */
+int qdma_mbox_compose_vf_intr_ctxt_read(uint16_t func_id,
+					struct mbox_msg_intr_ctxt *intr_ctxt,
+					uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	if (!raw_data || !intr_ctxt) {
+		qdma_log_error("%s: raw_data=%p intr_ctxt=%p, err:%d\n",
+						__func__, raw_data, intr_ctxt,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_INTR_CTXT_RD;
+	msg->hdr.src_func_id = func_id;
+	qdma_mbox_memcpy(&msg->intr_ctxt.ctxt, intr_ctxt,
+	       sizeof(struct mbox_msg_intr_ctxt));
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose an interrupt-context clear request.  BUG FIX: 'intr_ctxt' is
+ * the source of the memcpy below, so it is now validated alongside
+ * raw_data instead of being trusted blindly.
+ */
+int qdma_mbox_compose_vf_intr_ctxt_clear(uint16_t func_id,
+					 struct mbox_msg_intr_ctxt *intr_ctxt,
+					 uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	if (!raw_data || !intr_ctxt) {
+		qdma_log_error("%s: raw_data=%p intr_ctxt=%p, err:%d\n",
+						__func__, raw_data, intr_ctxt,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_INTR_CTXT_CLR;
+	msg->hdr.src_func_id = func_id;
+	qdma_mbox_memcpy(&msg->intr_ctxt.ctxt, intr_ctxt,
+	       sizeof(struct mbox_msg_intr_ctxt));
+
+	return QDMA_SUCCESS;
+}
+
+/* Compose an interrupt-context invalidate request.  BUG FIX:
+ * 'intr_ctxt' is the source of the memcpy below, so it is now
+ * validated alongside raw_data instead of being trusted blindly.
+ */
+int qdma_mbox_compose_vf_intr_ctxt_invalidate(uint16_t func_id,
+				      struct mbox_msg_intr_ctxt *intr_ctxt,
+				      uint32_t *raw_data)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	if (!raw_data || !intr_ctxt) {
+		qdma_log_error("%s: raw_data=%p intr_ctxt=%p, err:%d\n",
+						__func__, raw_data, intr_ctxt,
+						-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_INTR_CTXT_INV;
+	msg->hdr.src_func_id = func_id;
+	qdma_mbox_memcpy(&msg->intr_ctxt.ctxt, intr_ctxt,
+	       sizeof(struct mbox_msg_intr_ctxt));
+
+	return QDMA_SUCCESS;
+}
+
+/* Return 1 if @rcv_data is the response matching request @send_data. */
+uint8_t qdma_mbox_is_msg_response(uint32_t *send_data, uint32_t *rcv_data)
+{
+	union qdma_mbox_txrx *req = (union qdma_mbox_txrx *)send_data;
+	union qdma_mbox_txrx *rsp = (union qdma_mbox_txrx *)rcv_data;
+
+	/* a response op code is the request op plus a fixed offset */
+	if ((req->hdr.op + MBOX_MSG_OP_RSP_OFFSET) == rsp->hdr.op)
+		return 1;
+
+	return 0;
+}
+
+/* Extract the per-op status the PF handler placed in the header. */
+int qdma_mbox_vf_response_status(uint32_t *rcv_data)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+
+	return resp->hdr.status;
+}
+
+/* Return the peer function id carried in a received message: the
+ * destination id when called on a VF, the source id on a PF.
+ *
+ * NOTE(review): func_id is read as uint16_t but the return type is
+ * uint8_t, so ids above 255 are truncated -- confirm whether the
+ * public return type can be widened to uint16_t.
+ */
+uint8_t qdma_mbox_vf_func_id_get(uint32_t *rcv_data, uint8_t is_vf)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)rcv_data;
+	uint16_t func_id;
+
+	if (is_vf)
+		func_id = msg->hdr.dst_func_id;
+	else
+		func_id = msg->hdr.src_func_id;
+
+	return func_id;
+}
+
+/* Pull the active-queue count of @q_type from a QACTIVE_CNT response;
+ * unknown queue types yield 0.
+ */
+int qdma_mbox_vf_active_queues_get(uint32_t *rcv_data,
+		enum qdma_dev_q_type q_type)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+	int active = 0;
+
+	switch (q_type) {
+	case QDMA_DEV_Q_TYPE_H2C:
+		active = resp->qcnt.h2c_queues;
+		break;
+	case QDMA_DEV_Q_TYPE_C2H:
+		active = resp->qcnt.c2h_queues;
+		break;
+	case QDMA_DEV_Q_TYPE_CMPT:
+		active = resp->qcnt.cmpt_queues;
+		break;
+	default:
+		break;
+	}
+
+	return active;
+}
+
+
+/*
+ * qdma_mbox_vf_parent_func_id_get() - return the source function id from a
+ * received message header (the function that sent the message).
+ *
+ * NOTE(review): src_func_id appears to be a 16-bit field elsewhere in this
+ * file, while the return type is uint8_t -- confirm ids fit in 8 bits.
+ */
+uint8_t qdma_mbox_vf_parent_func_id_get(uint32_t *rcv_data)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)rcv_data;
+
+	return msg->hdr.src_func_id;
+}
+
+/*
+ * qdma_mbox_vf_dev_info_get() - copy device capability info and the DMA
+ * device index out of a received hello response.
+ *
+ * Return: the status code from the message header.
+ */
+int qdma_mbox_vf_dev_info_get(uint32_t *rcv_data,
+	struct qdma_dev_attributes *dev_cap, uint32_t *dma_device_index)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+
+	*dev_cap = resp->hello.dev_cap;
+	*dma_device_index = resp->hello.dma_device_index;
+
+	return resp->hdr.status;
+}
+
+/*
+ * qdma_mbox_vf_qinfo_get() - extract the queue base and queue count carried
+ * in a received fmap message.
+ *
+ * Return: the status code from the message header.
+ */
+int qdma_mbox_vf_qinfo_get(uint32_t *rcv_data, int *qbase, uint16_t *qmax)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+
+	*qbase = resp->fmap.qbase;
+	*qmax = resp->fmap.qmax;
+
+	return resp->hdr.status;
+}
+
+/*
+ * qdma_mbox_vf_csr_get() - copy the CSR info block out of a received CSR
+ * response message into @csr.
+ *
+ * Return: the status code from the message header.
+ */
+int qdma_mbox_vf_csr_get(uint32_t *rcv_data, struct qdma_csr_info *csr)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+
+	qdma_mbox_memcpy(csr, &resp->csr.csr_info,
+			sizeof(struct qdma_csr_info));
+
+	return resp->hdr.status;
+}
+
+/*
+ * qdma_mbox_vf_reg_list_get() - copy the register read-back list out of a
+ * received register-read response.  Only the entries the peer reported are
+ * copied; the count is returned through @num_regs.
+ *
+ * Return: the status code from the message header.
+ */
+int qdma_mbox_vf_reg_list_get(uint32_t *rcv_data,
+		uint16_t *num_regs, struct qdma_reg_data *reg_list)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+	uint16_t count = resp->reg_read_list.num_regs;
+
+	*num_regs = count;
+	qdma_mbox_memcpy(reg_list, &resp->reg_read_list.reg_list,
+			count * sizeof(struct qdma_reg_data));
+
+	return resp->hdr.status;
+}
+
+/*
+ * qdma_mbox_vf_context_get() - copy the descriptor queue context out of a
+ * received context-read response into @ctxt.
+ *
+ * Return: the status code from the message header.
+ */
+int qdma_mbox_vf_context_get(uint32_t *rcv_data,
+			     struct qdma_descq_context *ctxt)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+
+	qdma_mbox_memcpy(ctxt, &resp->qctxt.descq_ctxt,
+			sizeof(struct qdma_descq_context));
+
+	return resp->hdr.status;
+}
+
+/*
+ * qdma_mbox_vf_intr_context_get() - copy the interrupt ring context out of
+ * a received interrupt-context response into @ictxt.
+ *
+ * Return: the status code from the message header.
+ */
+int qdma_mbox_vf_intr_context_get(uint32_t *rcv_data,
+				  struct mbox_msg_intr_ctxt *ictxt)
+{
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)rcv_data;
+
+	qdma_mbox_memcpy(ictxt, &resp->intr_ctxt.ctxt,
+			sizeof(struct mbox_msg_intr_ctxt));
+
+	return resp->hdr.status;
+}
+
+/*
+ * qdma_mbox_pf_hw_clear_ack() - clear any pending PF mailbox acknowledge
+ * status.
+ *
+ * If the aggregate ack flag in MBOX_FN_STATUS is clear there is nothing
+ * pending and the register walk is skipped.  Otherwise every non-zero ack
+ * register is written back with its own value (NOTE(review): assumes
+ * write-1-to-clear semantics -- confirm against the QDMA register map).
+ */
+void qdma_mbox_pf_hw_clear_ack(void *dev_hndl)
+{
+	uint32_t v;
+	uint32_t reg;
+	int i;
+	uint32_t mbox_base = get_mbox_offset(dev_hndl, 0);
+
+	reg = mbox_base + MBOX_PF_ACK_BASE;
+
+	/* Nothing to clear when the aggregate ack bit is not set */
+	v = qdma_reg_read(dev_hndl, mbox_base + MBOX_FN_STATUS);
+	if ((v & F_MBOX_FN_STATUS_ACK) == 0)
+		return;
+
+	for (i = 0; i < MBOX_PF_ACK_COUNT; i++, reg += MBOX_PF_ACK_STEP) {
+		v = qdma_reg_read(dev_hndl, reg);
+
+		if (!v)
+			continue;
+
+		/* clear the ack status */
+		qdma_reg_write(dev_hndl, reg, v);
+	}
+}
+
+/*
+ * qdma_mbox_send() - stage a MBOX_MSG_REG_MAX-word message in the outgoing
+ * mailbox registers and trigger transmission.
+ *
+ * Returns -QDMA_ERR_MBOX_SEND_BUSY while a previously sent message has not
+ * yet been consumed, QDMA_SUCCESS once the send command has been issued.
+ */
+int qdma_mbox_send(void *dev_hndl, uint8_t is_vf, uint32_t *raw_data)
+{
+	int i;
+	uint32_t reg = MBOX_OUT_MSG_BASE;
+	uint32_t v;
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+	uint16_t dst_func_id = msg->hdr.dst_func_id;
+	uint32_t mbox_base = get_mbox_offset(dev_hndl, is_vf);
+
+	/* The outgoing slot must be free before a new message is staged */
+	v = qdma_reg_read(dev_hndl, mbox_base + MBOX_FN_STATUS);
+	if (v & F_MBOX_FN_STATUS_OUT_MSG)
+		return -QDMA_ERR_MBOX_SEND_BUSY;
+
+	/* Only the PF programs an explicit target function id */
+	if (!is_vf)
+		qdma_reg_write(dev_hndl, mbox_base + MBOX_FN_TARGET,
+				V_MBOX_FN_TARGET_ID(dst_func_id));
+
+	/* Copy the whole message into the outgoing register window */
+	for (i = 0; i < MBOX_MSG_REG_MAX; i++, reg += MBOX_MSG_STEP)
+		qdma_reg_write(dev_hndl, mbox_base + reg, raw_data[i]);
+
+	/* clear the outgoing ack */
+	if (!is_vf)
+		mbox_pf_hw_clear_func_ack(dev_hndl, dst_func_id);
+
+
+	qdma_log_debug("%s %s tx from_id=%d, to_id=%d, opcode=0x%x\n", __func__,
+			is_vf ? "VF" : "PF", msg->hdr.src_func_id,
+			msg->hdr.dst_func_id, msg->hdr.op);
+	/* Kick the hardware to deliver the staged message */
+	qdma_reg_write(dev_hndl, mbox_base + MBOX_FN_CMD, F_MBOX_FN_CMD_SND);
+
+	return QDMA_SUCCESS;
+}
+
+/*
+ * qdma_mbox_rcv() - read an incoming mailbox message into @raw_data and
+ * acknowledge the sender.
+ *
+ * Returns -QDMA_ERR_MBOX_NO_MSG_IN when no message is pending and
+ * -QDMA_ERR_MBOX_ALL_ZERO_MSG when the received message is entirely zero
+ * (treated as a hardware mailbox malfunction); QDMA_SUCCESS otherwise.
+ */
+int qdma_mbox_rcv(void *dev_hndl, uint8_t is_vf, uint32_t *raw_data)
+{
+	uint32_t reg = MBOX_IN_MSG_BASE;
+	uint32_t v = 0;
+	int all_zero_msg = 1;
+	int i;
+	uint32_t from_id = 0;
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+	uint32_t mbox_base = get_mbox_offset(dev_hndl, is_vf);
+
+	v = qdma_reg_read(dev_hndl, mbox_base + MBOX_FN_STATUS);
+
+	if (!(v & M_MBOX_FN_STATUS_IN_MSG))
+		return -QDMA_ERR_MBOX_NO_MSG_IN;
+
+	/* PF: latch the sender's id from the status register so the ack
+	 * command below is directed at the right function.
+	 */
+	if (!is_vf) {
+		from_id = G_MBOX_FN_STATUS_SRC(v);
+		qdma_reg_write(dev_hndl, mbox_base + MBOX_FN_TARGET, from_id);
+	}
+
+	for (i = 0; i < MBOX_MSG_REG_MAX; i++, reg += MBOX_MSG_STEP) {
+		raw_data[i] = qdma_reg_read(dev_hndl, mbox_base + reg);
+		/* if rcv'ed message is all zero, stop and disable the mbox,
+		 * the h/w mbox is not working properly
+		 */
+		if (raw_data[i])
+			all_zero_msg = 0;
+	}
+
+	/* ack'ed the sender */
+	qdma_reg_write(dev_hndl, mbox_base + MBOX_FN_CMD, F_MBOX_FN_CMD_RCV);
+	if (all_zero_msg) {
+		qdma_log_error("%s: Message recv'd is all zeros. failure:%d\n",
+					__func__,
+					-QDMA_ERR_MBOX_ALL_ZERO_MSG);
+		return -QDMA_ERR_MBOX_ALL_ZERO_MSG;
+	}
+
+
+	qdma_log_debug("%s %s fid=%d, opcode=0x%x\n", __func__,
+				   is_vf ? "VF" : "PF", msg->hdr.dst_func_id,
+				   msg->hdr.op);
+	/* Trust the hardware-reported source id over the message payload */
+	if (!is_vf && from_id != msg->hdr.src_func_id)
+		msg->hdr.src_func_id = from_id;
+
+	return QDMA_SUCCESS;
+}
+
+/*
+ * qdma_mbox_hw_init() - bring the mailbox hardware to a clean state.
+ * A PF drops any stale acknowledge bits; a VF acks and discards any
+ * message left in its incoming slot.
+ */
+void qdma_mbox_hw_init(void *dev_hndl, uint8_t is_vf)
+{
+	uint32_t mbox_base = get_mbox_offset(dev_hndl, is_vf);
+	uint32_t status;
+
+	if (!is_vf) {
+		qdma_mbox_pf_hw_clear_ack(dev_hndl);
+		return;
+	}
+
+	status = qdma_reg_read(dev_hndl, mbox_base + MBOX_FN_STATUS);
+	if (status & M_MBOX_FN_STATUS_IN_MSG)
+		qdma_reg_write(dev_hndl, mbox_base + MBOX_FN_CMD,
+			    F_MBOX_FN_CMD_RCV);
+}
+
+/*
+ * qdma_mbox_enable_interrupts() - route mailbox events to vector 0 and
+ * enable the mailbox interrupt for this function.
+ */
+void qdma_mbox_enable_interrupts(void *dev_hndl, uint8_t is_vf)
+{
+	uint32_t mbox_base = get_mbox_offset(dev_hndl, is_vf);
+
+	qdma_reg_write(dev_hndl, mbox_base + MBOX_ISR_VEC, 0x0);
+	qdma_reg_write(dev_hndl, mbox_base + MBOX_ISR_EN, 0x1);
+}
+
+/*
+ * qdma_mbox_disable_interrupts() - mask the mailbox interrupt for this
+ * function.
+ */
+void qdma_mbox_disable_interrupts(void *dev_hndl, uint8_t is_vf)
+{
+	qdma_reg_write(dev_hndl,
+		       get_mbox_offset(dev_hndl, is_vf) + MBOX_ISR_EN, 0x0);
+}
+
+
+/*
+ * qdma_mbox_compose_vf_reset_message() - build a RESET_PREPARE message in
+ * @raw_data addressed from @src_funcid to @dest_funcid.
+ *
+ * Return: QDMA_SUCCESS, or -QDMA_ERR_INV_PARAM when @raw_data is NULL.
+ */
+int qdma_mbox_compose_vf_reset_message(uint32_t *raw_data, uint8_t src_funcid,
+				uint8_t dest_funcid)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	/* Log and reject a NULL buffer, matching the other compose helpers */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+				__func__, (void *)raw_data,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_RESET_PREPARE;
+	msg->hdr.src_func_id = src_funcid;
+	msg->hdr.dst_func_id = dest_funcid;
+
+	return QDMA_SUCCESS;
+}
+
+/*
+ * qdma_mbox_compose_pf_reset_done_message() - build a RESET_DONE message in
+ * @raw_data addressed from @src_funcid to @dest_funcid.
+ *
+ * Return: QDMA_SUCCESS, or -QDMA_ERR_INV_PARAM when @raw_data is NULL.
+ */
+int qdma_mbox_compose_pf_reset_done_message(uint32_t *raw_data,
+					uint8_t src_funcid, uint8_t dest_funcid)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	/* Log and reject a NULL buffer, matching the other compose helpers */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+				__func__, (void *)raw_data,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_RESET_DONE;
+	msg->hdr.src_func_id = src_funcid;
+	msg->hdr.dst_func_id = dest_funcid;
+
+	return QDMA_SUCCESS;
+}
+
+/*
+ * qdma_mbox_compose_pf_offline() - build a PF_BYE (PF going offline)
+ * message in @raw_data addressed from @src_funcid to @dest_funcid.
+ *
+ * Return: QDMA_SUCCESS, or -QDMA_ERR_INV_PARAM when @raw_data is NULL.
+ */
+int qdma_mbox_compose_pf_offline(uint32_t *raw_data, uint8_t src_funcid,
+				uint8_t dest_funcid)
+{
+	union qdma_mbox_txrx *msg = (union qdma_mbox_txrx *)raw_data;
+
+	/* Log and reject a NULL buffer, matching the other compose helpers */
+	if (!raw_data) {
+		qdma_log_error("%s: raw_data=%p, err:%d\n",
+				__func__, (void *)raw_data,
+				-QDMA_ERR_INV_PARAM);
+		return -QDMA_ERR_INV_PARAM;
+	}
+
+	qdma_mbox_memset(raw_data, 0, sizeof(union qdma_mbox_txrx));
+	msg->hdr.op = MBOX_OP_PF_BYE;
+	msg->hdr.src_func_id = src_funcid;
+	msg->hdr.dst_func_id = dest_funcid;
+
+	return QDMA_SUCCESS;
+}
+
+/*
+ * qdma_mbox_vf_rcv_msg_handler() - handle a PF notification received on a
+ * VF and prepare the acknowledge response.
+ *
+ * Return: the driver-level event code (QDMA_MBOX_VF_RESET,
+ * QDMA_MBOX_PF_RESET_DONE or QDMA_MBOX_PF_BYE), or 0 for any other opcode
+ * (in which case @resp_msg is left untouched).
+ */
+int qdma_mbox_vf_rcv_msg_handler(uint32_t *rcv_msg, uint32_t *resp_msg)
+{
+	union qdma_mbox_txrx *rcv = (union qdma_mbox_txrx *)rcv_msg;
+	union qdma_mbox_txrx *resp = (union qdma_mbox_txrx *)resp_msg;
+	int rv;
+
+	/* Map the PF notification opcode to a driver-level event code */
+	switch (rcv->hdr.op) {
+	case MBOX_OP_RESET_PREPARE:
+		rv = QDMA_MBOX_VF_RESET;
+		break;
+	case MBOX_OP_RESET_DONE:
+		rv = QDMA_MBOX_PF_RESET_DONE;
+		break;
+	case MBOX_OP_PF_BYE:
+		rv = QDMA_MBOX_PF_BYE;
+		break;
+	default:
+		rv = 0;
+		break;
+	}
+
+	/* Acknowledge recognized notifications with a response header that
+	 * mirrors the request, opcode bumped by the response offset.
+	 */
+	if (rv) {
+		resp->hdr.op = rcv->hdr.op + MBOX_MSG_OP_RSP_OFFSET;
+		resp->hdr.dst_func_id = rcv->hdr.src_func_id;
+		resp->hdr.src_func_id = rcv->hdr.dst_func_id;
+	}
+
+	return rv;
+}
+
+/*
+ * qdma_mbox_out_status() - report whether the outgoing mailbox slot still
+ * holds an unconsumed message.
+ *
+ * Return: 1 while a sent message is pending, 0 when the slot is empty.
+ */
+uint8_t qdma_mbox_out_status(void *dev_hndl, uint8_t is_vf)
+{
+	uint32_t mbox_base = get_mbox_offset(dev_hndl, is_vf);
+	uint32_t status;
+
+	status = qdma_reg_read(dev_hndl, mbox_base + MBOX_FN_STATUS);
+
+	return (status & F_MBOX_FN_STATUS_OUT_MSG) ? 1 : 0;
+}
diff --git a/drivers/net/qdma/qdma_access/qdma_mbox_protocol.h b/drivers/net/qdma/qdma_access/qdma_mbox_protocol.h
new file mode 100644
index 0000000000..335e728561
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_mbox_protocol.h
@@ -0,0 +1,681 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __QDMA_MBOX_PROTOCOL_H_
+#define __QDMA_MBOX_PROTOCOL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * DOC: QDMA message box handling interface definitions
+ *
+ * Header file *qdma_mbox_protocol.h* defines data structures and function
+ * signatures exported for QDMA Mbox message handling.
+ */
+
+#include "qdma_platform.h"
+#include "qdma_resource_mgmt.h"
+
+
+#define QDMA_MBOX_VF_ONLINE			(1)
+#define QDMA_MBOX_VF_OFFLINE		(-1)
+#define QDMA_MBOX_VF_RESET			(2)
+#define QDMA_MBOX_PF_RESET_DONE		(3)
+#define QDMA_MBOX_PF_BYE			(4)
+#define QDMA_MBOX_VF_RESET_BYE            (5)
+
+/** mailbox register max */
+#define MBOX_MSG_REG_MAX		32
+
+#define mbox_invalidate_msg(m)	{ (m)->hdr.op = MBOX_OP_NOOP; }
+
+/**
+ * struct mbox_descq_conf - collective bit-fields of all contexts
+ */
+struct mbox_descq_conf {
+	/** @ring_bs_addr: ring base address */
+	uint64_t ring_bs_addr;
+	/** @cmpt_ring_bs_addr: completion ring base address */
+	uint64_t cmpt_ring_bs_addr;
+	/** @forced_en: enable fetch credit */
+	uint32_t forced_en:1;
+	/** @en_bypass: bypass enable */
+	uint32_t en_bypass:1;
+	/** @irq_arm: arm irq */
+	uint32_t irq_arm:1;
+	/** @wbi_intvl_en: writeback interval enable */
+	uint32_t wbi_intvl_en:1;
+	/** @wbi_chk: writeback pending check */
+	uint32_t wbi_chk:1;
+	/** @at: address translation */
+	uint32_t at:1;
+	/** @wbk_en: writeback enable */
+	uint32_t wbk_en:1;
+	/** @irq_en: irq enable */
+	uint32_t irq_en:1;
+	/** @pfch_en: prefetch enable */
+	uint32_t pfch_en:1;
+	/** @en_bypass_prefetch: prefetch bypass enable */
+	uint32_t en_bypass_prefetch:1;
+	/** @dis_overflow_check: disable overflow check */
+	uint32_t dis_overflow_check:1;
+	/** @cmpt_int_en: completion interrupt enable */
+	uint32_t cmpt_int_en:1;
+	/** @cmpt_at: completion address translation */
+	uint32_t cmpt_at:1;
+	/** @cmpt_color: completion ring initial color bit */
+	uint32_t cmpt_color:1;
+	/** @cmpt_full_upd: completion full update */
+	uint32_t cmpt_full_upd:1;
+	/** @cmpl_stat_en: completion status enable */
+	uint32_t cmpl_stat_en:1;
+	/** @desc_sz: descriptor size */
+	uint32_t desc_sz:2;
+	/** @cmpt_desc_sz: completion ring descriptor size */
+	uint32_t cmpt_desc_sz:2;
+	/** @triggermode: trigger mode */
+	uint32_t triggermode:3;
+	/** @rsvd: reserved */
+	uint32_t rsvd:9;
+	/** @func_id: function ID */
+	uint32_t func_id:16;
+	/** @cnt_thres: counter threshold */
+	uint32_t cnt_thres:8;
+	/** @timer_thres: timer threshold */
+	uint32_t timer_thres:8;
+	/** @intr_id: interrupt id */
+	uint16_t intr_id:11;
+	/** @intr_aggr: interrupt aggregation */
+	uint16_t intr_aggr:1;
+	/** @filler: filler bits */
+	uint16_t filler:4;
+	/** @ringsz: ring size */
+	uint16_t ringsz;
+	/** @bufsz: c2h buffer size */
+	uint16_t bufsz;
+	/** @cmpt_ringsz: completion ring size */
+	uint16_t cmpt_ringsz;
+};
+
+/**
+ * @enum - mbox_cmpt_ctxt_type
+ * @brief  specifies whether cmpt is enabled with MM/ST
+ */
+enum mbox_cmpt_ctxt_type {
+	/** @QDMA_MBOX_CMPT_CTXT_ONLY: only cmpt context programming required */
+	QDMA_MBOX_CMPT_CTXT_ONLY,
+	/** @QDMA_MBOX_CMPT_WITH_MM: completion context with MM */
+	QDMA_MBOX_CMPT_WITH_MM,
+	/** @QDMA_MBOX_CMPT_WITH_ST: completion context with ST */
+	QDMA_MBOX_CMPT_WITH_ST,
+	/** @QDMA_MBOX_CMPT_CTXT_NONE: no completion context programming */
+	QDMA_MBOX_CMPT_CTXT_NONE
+};
+
+/**
+ * @struct - mbox_msg_intr_ctxt
+ * @brief	interrupt context mailbox message
+ */
+struct mbox_msg_intr_ctxt {
+	/** @num_rings: number of intr context rings to be assigned
+	 * for the virtual function
+	 */
+	uint8_t num_rings;	/* 1 ~ 8 */
+	/** @ring_index_list: ring index associated for each vector */
+	uint32_t ring_index_list[QDMA_NUM_DATA_VEC_FOR_INTR_CXT];
+	/** @ictxt: interrupt context data for all rings */
+	struct qdma_indirect_intr_ctxt ictxt[QDMA_NUM_DATA_VEC_FOR_INTR_CXT];
+};
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_hw_init(): Initialize the mobx HW
+ *
+ * @dev_hndl:  device handle
+ * @is_vf:  is VF mbox
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_mbox_hw_init(void *dev_hndl, uint8_t is_vf);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_pf_rcv_msg_handler(): handles the raw message received in pf
+ *
+ * @dma_device_index:  pci bus number
+ * @dev_hndl:  device handle
+ * @func_id:   own function id
+ * @rcv_msg:   received raw message
+ * @resp_msg:  raw response message
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_pf_rcv_msg_handler(void *dev_hndl, uint8_t dma_device_index,
+				 uint16_t func_id, uint32_t *rcv_msg,
+				 uint32_t *resp_msg);
+
+/*****************************************************************************/
+/**
+ * qmda_mbox_compose_vf_online(): compose VF online message
+ *
+ * @func_id:   destination function id
+ * @qmax: number of queues being requested
+ * @qbase: q base at which queues are allocated
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qmda_mbox_compose_vf_online(uint16_t func_id,
+				uint16_t qmax, int *qbase, uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_offline(): compose VF offline message
+ *
+ * @func_id:   destination function id
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_offline(uint16_t func_id,
+				 uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_reset_message(): compose VF reset message
+ *
+ * @raw_data:   output raw message to be sent
+ * @src_funcid: own function id
+ * @dest_funcid: destination function id
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_reset_message(uint32_t *raw_data, uint8_t src_funcid,
+				uint8_t dest_funcid);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_reset_offline(): compose VF BYE for PF initiated RESET
+ *
+ * @func_id: own function id
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_reset_offline(uint16_t func_id,
+				uint32_t *raw_data);
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_pf_reset_done_message(): compose PF reset done message
+ *
+ * @raw_data:   output raw message to be sent
+ * @src_funcid: own function id
+ * @dest_funcid: destination function id
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_pf_reset_done_message(uint32_t *raw_data,
+				uint8_t src_funcid, uint8_t dest_funcid);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_pf_offline(): compose PF offline message
+ *
+ * @raw_data:   output raw message to be sent
+ * @src_funcid: own function id
+ * @dest_funcid: destination function id
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_pf_offline(uint32_t *raw_data, uint8_t src_funcid,
+				uint8_t dest_funcid);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_qreq(): compose message to request queues
+ *
+ * @func_id:   destination function id
+ * @qmax: number of queues being requested
+ * @qbase: q base at which queues are allocated
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_qreq(uint16_t func_id,
+			      uint16_t qmax, int qbase, uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_notify_qadd(): compose message to notify queue add
+ *
+ * @func_id:	destination function id
+ * @qid_hw:	number of queues being requested
+ * @q_type:	direction of the of queue
+ * @raw_data:	output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_notify_qadd(uint16_t func_id,
+				     uint16_t qid_hw,
+				     enum qdma_dev_q_type q_type,
+				     uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_notify_qdel(): compose message to notify queue delete
+ *
+ * @func_id:	destination function id
+ * @qid_hw:	number of queues being requested
+ * @q_type:	direction of the of queue
+ * @raw_data:	output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_notify_qdel(uint16_t func_id,
+				     uint16_t qid_hw,
+				     enum qdma_dev_q_type q_type,
+				     uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_get_device_active_qcnt(): compose message to get the active
+ * queue count
+ *
+ * @func_id:	destination function id
+ * @raw_data:	output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_get_device_active_qcnt(uint16_t func_id,
+		uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_fmap_prog(): compose FMAP programming message
+ *
+ * @func_id:   destination function id
+ * @qmax: number of queues being requested
+ * @qbase: q base at which queues are allocated
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_fmap_prog(uint16_t func_id,
+				   uint16_t qmax, int qbase,
+				   uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_qctxt_write(): compose queue configuration data for
+ * compose and program
+ *
+ * @func_id:   destination function id
+ * @qid_hw:   HW queue for which the context has to be read
+ * @st:   is st mode
+ * @c2h:   is c2h direction
+ * @cmpt_ctxt_type:   completion context type
+ * @descq_conf:   pointer to queue config data structure
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_qctxt_write(uint16_t func_id,
+			uint16_t qid_hw, uint8_t st, uint8_t c2h,
+			enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+			struct mbox_descq_conf *descq_conf,
+			uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_qctxt_read(): compose message to read context data of a
+ * queue
+ *
+ * @func_id:   destination function id
+ * @qid_hw:   HW queue for which the context has to be read
+ * @st:   is st mode
+ * @c2h:   is c2h direction
+ * @cmpt_ctxt_type:   completion context type
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_qctxt_read(uint16_t func_id,
+			uint16_t qid_hw, uint8_t st, uint8_t c2h,
+			enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+			uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_qctxt_invalidate(): compose queue context invalidate
+ * message
+ *
+ * @func_id:   destination function id
+ * @qid_hw:   HW queue for which the context has to be invalidated
+ * @st:   is st mode
+ * @c2h:   is c2h direction
+ * @cmpt_ctxt_type:   completion context type
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_qctxt_invalidate(uint16_t func_id,
+			uint16_t qid_hw, uint8_t st, uint8_t c2h,
+			enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+			uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_qctxt_clear(): compose queue context clear message
+ *
+ * @func_id:   destination function id
+ * @qid_hw:   HW queue for which the context has to be cleared
+ * @st:   is st mode
+ * @c2h:   is c2h direction
+ * @cmpt_ctxt_type:   completion context type
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_qctxt_clear(uint16_t func_id,
+			uint16_t qid_hw, uint8_t st, uint8_t c2h,
+			enum mbox_cmpt_ctxt_type cmpt_ctxt_type,
+			uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_csr_read(): compose message to read csr info
+ *
+ * @func_id:   destination function id
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_csr_read(uint16_t func_id,
+			       uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_reg_read(): compose message to read the register values
+ *
+ * @func_id:   destination function id
+ * @group_num:  group number for the registers to read
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_reg_read(uint16_t func_id, uint16_t group_num,
+			       uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_intr_ctxt_write(): compose interrupt ring context
+ * programming message
+ *
+ * @func_id:   destination function id
+ * @intr_ctxt:   pointer to interrupt context data structure
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_intr_ctxt_write(uint16_t func_id,
+					 struct mbox_msg_intr_ctxt *intr_ctxt,
+					 uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_intr_ctxt_read(): compose interrupt context read message
+ *
+ * @func_id:   destination function id
+ * @intr_ctxt:   pointer to interrupt context data structure
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_intr_ctxt_read(uint16_t func_id,
+					struct mbox_msg_intr_ctxt *intr_ctxt,
+					uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_intr_ctxt_clear(): compose interrupt ring context
+ * clear message
+ *
+ * @func_id:   destination function id
+ * @intr_ctxt:   pointer to interrupt context data structure
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_intr_ctxt_clear(uint16_t func_id,
+					 struct mbox_msg_intr_ctxt *intr_ctxt,
+					 uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_compose_vf_intr_ctxt_invalidate(): compose interrupt ring context
+ * invalidate message
+ *
+ * @func_id:   destination function id
+ * @intr_ctxt:   pointer to interrupt context data structure
+ * @raw_data: output raw message to be sent
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_compose_vf_intr_ctxt_invalidate(uint16_t func_id,
+				      struct mbox_msg_intr_ctxt *intr_ctxt,
+				      uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_is_msg_response(): check if the received msg opcode is response
+ *                              sent message opcode
+ *
+ * @send_data: mbox message sent
+ * @rcv_data: mbox message received
+ *
+ * Return:	1  : match and  0: does not match
+ *****************************************************************************/
+uint8_t qdma_mbox_is_msg_response(uint32_t *send_data, uint32_t *rcv_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_response_status(): return the response received for the sent msg
+ *
+ * @rcv_data: mbox message received
+ *
+ * Return:	response status received to the sent message
+ *****************************************************************************/
+int qdma_mbox_vf_response_status(uint32_t *rcv_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_func_id_get(): return the vf function id
+ *
+ * @rcv_data: mbox message received
+ * @is_vf:  is VF mbox
+ *
+ * Return:	vf function id
+ *****************************************************************************/
+uint8_t qdma_mbox_vf_func_id_get(uint32_t *rcv_data, uint8_t is_vf);
+
+int qdma_mbox_vf_active_queues_get(uint32_t *rcv_data,
+		enum qdma_dev_q_type q_type);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_parent_func_id_get(): return the vf parent function id
+ *
+ * @rcv_data: mbox message received
+ *
+ * Return:	vf function id
+ *****************************************************************************/
+uint8_t qdma_mbox_vf_parent_func_id_get(uint32_t *rcv_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_dev_info_get(): get dev info from received message
+ *
+ * @rcv_data: mbox message received
+ * @dev_cap: device capability information
+ * @dma_device_index: DMA Identifier to be read using the mbox.
+ *
+ * Return:	response status with dev info received to the sent message
+ *****************************************************************************/
+int qdma_mbox_vf_dev_info_get(uint32_t *rcv_data,
+		struct qdma_dev_attributes *dev_cap,
+		uint32_t *dma_device_index);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_qinfo_get(): get qinfo from received message
+ *
+ * @rcv_data: mbox message received
+ * @qmax: number of queues
+ * @qbase: q base at which queues are allocated
+ *
+ * Return:	response status received to the sent message
+ *****************************************************************************/
+int qdma_mbox_vf_qinfo_get(uint32_t *rcv_data, int *qbase, uint16_t *qmax);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_csr_get(): get csr info from received message
+ *
+ * @rcv_data: mbox message received
+ * @csr: pointer to the csr info
+ *
+ * Return:	response status received to the sent message
+ *****************************************************************************/
+int qdma_mbox_vf_csr_get(uint32_t *rcv_data, struct qdma_csr_info *csr);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_reg_list_get(): get reg info from received message
+ *
+ * @rcv_data: mbox message received
+ * @num_regs: number of register read
+ * @reg_list: pointer to the register info
+ *
+ * Return:	response status received to the sent message
+ *****************************************************************************/
+int qdma_mbox_vf_reg_list_get(uint32_t *rcv_data,
+		uint16_t *num_regs, struct qdma_reg_data *reg_list);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_context_get(): get queue context info from received message
+ *
+ * @rcv_data: mbox message received
+ * @ctxt: pointer to the queue context info
+ *
+ * Return:	response status received to the sent message
+ *****************************************************************************/
+int qdma_mbox_vf_context_get(uint32_t *rcv_data,
+			     struct qdma_descq_context *ctxt);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_intr_context_get(): get intr context info from received message
+ *
+ * @rcv_data: mbox message received
+ * @ctxt: pointer to the intr context info
+ *
+ * Return:	response status received to the sent message
+ *****************************************************************************/
+int qdma_mbox_vf_intr_context_get(uint32_t *rcv_data,
+				  struct mbox_msg_intr_ctxt *ictxt);
+
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_pf_hw_clear_ack() - clear the HW ack
+ *
+ * @dev_hndl:   device handle
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_mbox_pf_hw_clear_ack(void *dev_hndl);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_send() - function to send raw data via qdma mailbox
+ *
+ * @dev_hndl:   device handle
+ * @is_vf:	     Whether PF or VF
+ * @raw_data:   pointer to message being sent
+ *
+ * The function sends the raw_data to the outgoing mailbox memory and if PF,
+ * then assert the acknowledge status register bit.
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_mbox_send(void *dev_hndl, uint8_t is_vf, uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_rcv() - function to receive raw data via qdma mailbox
+ *
+ * @dev_hndl: device handle
+ * @is_vf: Whether PF or VF
+ * @raw_data:  pointer to the message being received
+ *
+ * The function receives the raw_data from the incoming mailbox memory and
+ * then acknowledge the sender by setting msg_rcv field in the command
+ * register.
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_mbox_rcv(void *dev_hndl, uint8_t is_vf, uint32_t *raw_data);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_enable_interrupts() - Enable the QDMA mailbox interrupt
+ *
+ * @dev_hndl: pointer to xlnx_dma_dev
+ * @is_vf: Whether PF or VF
+ *
+ * @return	none
+ *****************************************************************************/
+void qdma_mbox_enable_interrupts(void *dev_hndl, uint8_t is_vf);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_disable_interrupts() - Disable the QDMA mailbox interrupt
+ *
+ * @dev_hndl: pointer to xlnx_dma_dev
+ * @is_vf: Whether PF or VF
+ *
+ * @return	none
+ *****************************************************************************/
+void qdma_mbox_disable_interrupts(void *dev_hndl, uint8_t is_vf);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_vf_rcv_msg_handler(): handles the raw message received in VF
+ *
+ * @rcv_msg:   received raw message
+ * @resp_msg:  raw response message
+ *
+ * Return:	0  : success and < 0: failure
+ *****************************************************************************/
+int qdma_mbox_vf_rcv_msg_handler(uint32_t *rcv_msg, uint32_t *resp_msg);
+
+/*****************************************************************************/
+/**
+ * qdma_mbox_out_status():
+ *
+ * @dev_hndl: pointer to xlnx_dma_dev
+ * @is_vf: Whether PF or VF
+ *
+ * Return:	0 if MBOX outbox is empty, 1 if MBOX is not empty
+ *****************************************************************************/
+uint8_t qdma_mbox_out_status(void *dev_hndl, uint8_t is_vf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __QDMA_MBOX_PROTOCOL_H_ */
diff --git a/drivers/net/qdma/qdma_access/qdma_platform.c b/drivers/net/qdma/qdma_access/qdma_platform.c
new file mode 100644
index 0000000000..8f9ca2aa78
--- /dev/null
+++ b/drivers/net/qdma/qdma_access/qdma_platform.c
@@ -0,0 +1,224 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#include "qdma_access_common.h"
+#include "qdma_platform.h"
+#include "qdma.h"
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+
+static rte_spinlock_t resource_lock = RTE_SPINLOCK_INITIALIZER;
+static rte_spinlock_t reg_access_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Translation table from internal QDMA access library error codes
+ * (QDMA_ERR_*) to positive errno values used by the rest of the PMD.
+ * NOTE(review): intentionally non-static — presumably scanned by a
+ * lookup helper (e.g. qdma_get_error_code()) in another translation
+ * unit; confirm, otherwise it should be static const.
+ */
+struct err_code_map error_code_map_list[] = {
+	{QDMA_SUCCESS,				0},
+	{QDMA_ERR_INV_PARAM,			EINVAL},
+	{QDMA_ERR_NO_MEM,			ENOMEM},
+	{QDMA_ERR_HWACC_BUSY_TIMEOUT,		EBUSY},
+	{QDMA_ERR_HWACC_INV_CONFIG_BAR,		EINVAL},
+	{QDMA_ERR_HWACC_NO_PEND_LEGCY_INTR,	EINVAL},
+	{QDMA_ERR_HWACC_BAR_NOT_FOUND,		EINVAL},
+	{QDMA_ERR_HWACC_FEATURE_NOT_SUPPORTED,	EINVAL},
+	{QDMA_ERR_RM_RES_EXISTS,		EPERM},
+	{QDMA_ERR_RM_RES_NOT_EXISTS,		EINVAL},
+	{QDMA_ERR_RM_DEV_EXISTS,		EPERM},
+	{QDMA_ERR_RM_DEV_NOT_EXISTS,		EINVAL},
+	{QDMA_ERR_RM_NO_QUEUES_LEFT,		EPERM},
+	{QDMA_ERR_RM_QMAX_CONF_REJECTED,	EPERM},
+	{QDMA_ERR_MBOX_FMAP_WR_FAILED,		EIO},
+	{QDMA_ERR_MBOX_NUM_QUEUES,		EINVAL},
+	{QDMA_ERR_MBOX_INV_QID,			EINVAL},
+	{QDMA_ERR_MBOX_INV_RINGSZ,		EINVAL},
+	{QDMA_ERR_MBOX_INV_BUFSZ,		EINVAL},
+	{QDMA_ERR_MBOX_INV_CNTR_TH,		EINVAL},
+	{QDMA_ERR_MBOX_INV_TMR_TH,		EINVAL},
+	{QDMA_ERR_MBOX_INV_MSG,			EINVAL},
+	{QDMA_ERR_MBOX_SEND_BUSY,		EBUSY},
+	{QDMA_ERR_MBOX_NO_MSG_IN,		EINVAL},
+	{QDMA_ERR_MBOX_ALL_ZERO_MSG,		EINVAL},
+};
+
+/*****************************************************************************/
+/**
+ * qdma_calloc(): allocate memory and initialize with 0
+ *
+ * @num_blocks:  number of blocks of contiguous memory of @size
+ * @size:    size of each chunk of memory
+ *
+ * Return: pointer to the memory block created on success and NULL on failure
+ *****************************************************************************/
+void *qdma_calloc(uint32_t num_blocks, uint32_t size)
+{
+	return rte_calloc(NULL, num_blocks, size, 0);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_memfree(): free the memory
+ *
+ * @memptr:  pointer to the memory block
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_memfree(void *memptr)
+{
+	return rte_free(memptr);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_resource_lock_take() - take lock to access resource management APIs
+ *
+ * @return	None
+ *****************************************************************************/
+void qdma_resource_lock_take(void)
+{
+	rte_spinlock_lock(&resource_lock);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_resource_lock_give() - release lock after accessing
+ *                             resource management APIs
+ *
+ * @return	None
+ *****************************************************************************/
+void qdma_resource_lock_give(void)
+{
+	rte_spinlock_unlock(&resource_lock);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_reg_write() - Register write API.
+ *
+ * @dev_hndl:   device handle
+ * @reg_offst:  QDMA Config bar register offset to write
+ * @val:	value to be written
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_reg_write(void *dev_hndl, uint32_t reg_offst, uint32_t val)
+{
+	struct qdma_pci_dev *qdma_dev;
+	uint64_t bar_addr;
+
+	qdma_dev = ((struct rte_eth_dev *)dev_hndl)->data->dev_private;
+	bar_addr = (uint64_t)qdma_dev->bar_addr[qdma_dev->config_bar_idx];
+	*((volatile uint32_t *)(bar_addr + reg_offst)) = val;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_reg_read() - Register read API.
+ *
+ * @dev_hndl:   device handle
+ * @reg_offst:  QDMA Config bar register offset to be read
+ *
+ * Return: Value read
+ *****************************************************************************/
+uint32_t qdma_reg_read(void *dev_hndl, uint32_t reg_offst)
+{
+	struct qdma_pci_dev *qdma_dev;
+	uint64_t bar_addr;
+	uint32_t val;
+
+	qdma_dev = ((struct rte_eth_dev *)dev_hndl)->data->dev_private;
+	bar_addr = (uint64_t)qdma_dev->bar_addr[qdma_dev->config_bar_idx];
+	val = *((volatile uint32_t *)(bar_addr + reg_offst));
+
+	return val;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_reg_access_lock() - Lock function for Register access
+ *
+ * @dev_hndl:   device handle
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_reg_access_lock(void *dev_hndl)
+{
+	(void)dev_hndl;
+	rte_spinlock_lock(&reg_access_lock);
+	return 0;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_reg_access_release() - Release function for Register access
+ *
+ * @dev_hndl:   device handle
+ *
+ * Return:	0   - success and < 0 - failure
+ *****************************************************************************/
+int qdma_reg_access_release(void *dev_hndl)
+{
+	(void)dev_hndl;
+	rte_spinlock_unlock(&reg_access_lock);
+	return 0;
+}
+
+/*****************************************************************************/
+/**
+ * qdma_udelay() - delay function to be used in the common library
+ *
+ * @delay_usec:   delay in microseconds
+ *
+ * Return:	None
+ *****************************************************************************/
+void qdma_udelay(uint32_t delay_usec)
+{
+	rte_delay_us(delay_usec);
+}
+
+/*****************************************************************************/
+/**
+ * qdma_get_hw_access() - function to get the qdma_hw_access
+ *
+ * @dev_hndl:   device ha