[v5,06/22] event/dlb: add probe
diff mbox series

Message ID 1602961456-17392-7-git-send-email-timothy.mcdaniel@intel.com
State Superseded
Delegated to: Jerin Jacob
Headers show
Series
  • Add DLB PMD
Related show

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Timothy McDaniel Oct. 17, 2020, 7:04 p.m. UTC
The DLB hardware is a PCI device. This commit adds
support for probe and other initialization. The
dlb_iface.[ch] files implement a flexible interface
that supports both the PF PMD and the bifurcated PMD.
The bifurcated PMD will be released in a future
patch set. Note that the flexible interface is only
used for configuration, and is not used in the data
path. The shared code is added in pf/base.
Command line parameters are parsed at config time.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c                      |  483 ++++++
 drivers/event/dlb/dlb_iface.c                |   27 +
 drivers/event/dlb/dlb_iface.h                |   27 +
 drivers/event/dlb/dlb_priv.h                 |    2 +
 drivers/event/dlb/meson.build                |    6 +-
 drivers/event/dlb/pf/base/dlb_hw_types.h     |  334 ++++
 drivers/event/dlb/pf/base/dlb_osdep.h        |  326 ++++
 drivers/event/dlb/pf/base/dlb_osdep_bitmap.h |  441 +++++
 drivers/event/dlb/pf/base/dlb_osdep_list.h   |  131 ++
 drivers/event/dlb/pf/base/dlb_osdep_types.h  |   31 +
 drivers/event/dlb/pf/base/dlb_regs.h         | 2368 ++++++++++++++++++++++++++
 drivers/event/dlb/pf/base/dlb_resource.c     |  302 ++++
 drivers/event/dlb/pf/base/dlb_resource.h     |  876 ++++++++++
 drivers/event/dlb/pf/dlb_main.c              |  591 +++++++
 drivers/event/dlb/pf/dlb_main.h              |   52 +
 drivers/event/dlb/pf/dlb_pf.c                |  232 +++
 16 files changed, 6228 insertions(+), 1 deletion(-)
 create mode 100644 drivers/event/dlb/dlb.c
 create mode 100644 drivers/event/dlb/dlb_iface.c
 create mode 100644 drivers/event/dlb/dlb_iface.h
 create mode 100644 drivers/event/dlb/pf/base/dlb_hw_types.h
 create mode 100644 drivers/event/dlb/pf/base/dlb_osdep.h
 create mode 100644 drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
 create mode 100644 drivers/event/dlb/pf/base/dlb_osdep_list.h
 create mode 100644 drivers/event/dlb/pf/base/dlb_osdep_types.h
 create mode 100644 drivers/event/dlb/pf/base/dlb_regs.h
 create mode 100644 drivers/event/dlb/pf/base/dlb_resource.c
 create mode 100644 drivers/event/dlb/pf/base/dlb_resource.h
 create mode 100644 drivers/event/dlb/pf/dlb_main.c
 create mode 100644 drivers/event/dlb/pf/dlb_main.h
 create mode 100644 drivers/event/dlb/pf/dlb_pf.c

Comments

Jerin Jacob Oct. 18, 2020, 12:49 p.m. UTC | #1
On Sun, Oct 18, 2020 at 12:35 AM Timothy McDaniel
<timothy.mcdaniel@intel.com> wrote:
>
> The DLB hardware is a PCI device. This commit adds
> support for probe and other initialization. The
> dlb_iface.[ch] files implement a flexible interface
> that supports both the PF PMD and the bifurcated PMD.
> The bifurcated PMD will be released in a future
> patch set. Note that the flexible interface is only
> used for configuration, and is not used in the data
> path. The shared code is added in pf/base.
> Command line parameters are parsed at config time.
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>


All the review given for DLB2 is applicable here. Also, there is a build issue.

Please send the next version of DLB and DLB2 drivers.

FAILED: drivers/libtmp_rte_pmd_dlb_event.a.p/event_dlb_pf_dlb_main.c.o
ccache clang -Idrivers/libtmp_rte_pmd_dlb_event.a.p -Idrivers
-I../drivers -Idrivers/event/dlb -I../drivers/event/dlb
-Ilib/librte_eventdev -I../lib/librte_eventdev -I. -I.. -Iconfig
-I../config -Ilib/librte_eal/include -I../lib/librte_eal/
include -Ilib/librte_eal/linux/include
-I../lib/librte_eal/linux/include -Ilib/librte_eal/x86/include
-I../lib/librte_eal/x86/include -Ilib/librte_eal/common
-I../lib/librte_eal/common -Ilib/librte_eal -I../lib/librte_eal
-Ilib/librte_kvarg
s -I../lib/librte_kvargs -Ilib/librte_metrics -I../lib/librte_metrics
-Ilib/librte_telemetry -I../lib/librte_telemetry -Ilib/librte_ring
-I../lib/librte_ring -Ilib/librte_ethdev -I../lib/librte_ethdev
-Ilib/librte_net -I../lib/librte_net -I
lib/librte_mbuf -I../lib/librte_mbuf -Ilib/librte_mempool
-I../lib/librte_mempool -Ilib/librte_meter -I../lib/librte_meter
-Ilib/librte_hash -I../lib/librte_hash -Ilib/librte_timer
-I../lib/librte_timer -Ilib/librte_cryptodev -I../lib/librt
e_cryptodev -Ilib/librte_pci -I../lib/librte_pci -Idrivers/bus/pci
-I../drivers/bus/pci -I../drivers/bus/pci/linux -Xclang
-fcolor-diagnostics -pipe -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch
-Werror -O2 -g -include rte_config.h -Wextra -Wc
ast-qual -Wdeprecated -Wformat-nonliteral -Wformat-security
-Wmissing-declarations -Wmissing-prototypes -Wnested-externs
-Wold-style-definition -Wpointer-arith -Wsign-compare
-Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packe
d-member -Wno-missing-field-initializers -D_GNU_SOURCE -fPIC
-march=native -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API -MD -MQ
drivers/libtmp_rte_pmd_dlb_event.a.p/event_dlb_pf_dlb_main.c.o -MF
drivers/libtmp_rte_pmd_dlb_event.a.p/event_d
lb_pf_dlb_main.c.o.d -o
drivers/libtmp_rte_pmd_dlb_event.a.p/event_dlb_pf_dlb_main.c.o -c
../drivers/event/dlb/pf/dlb_main.c
In file included from ../drivers/event/dlb/pf/dlb_main.c:19:
../drivers/event/dlb/pf/../dlb_priv.h:319:4: error: attribute
'__aligned__' is ignored, place it after "struct" to apply attribute
to type declaration [-Werror,-Wignored-attributes]
}; __rte_cache_aligned
   ^
../lib/librte_eal/include/rte_common.h:377:29: note: expanded from
macro '__rte_cache_aligned'
#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
                            ^
../lib/librte_eal/include/rte_common.h:69:41: note: expanded from
macro '__rte_aligned'
#define __rte_aligned(a) __attribute__((__aligned__(a)))
                                        ^
In file included from ../drivers/event/dlb/pf/dlb_main.c:20:
../drivers/event/dlb/pf/../dlb_inline_fns.h:45:2: error: use of
unknown builtin '__builtin_ia32_movntdq'
[-Wimplicit-function-declaration]
        __builtin_ia32_movntdq((__v2di *)dest + 0, (__v2di)src_data0);
        ^
../drivers/event/dlb/pf/../dlb_inline_fns.h:45:2: note: did you mean
'__builtin_ia32_movntq'?
/usr/lib/clang/10.0.1/include/xmmintrin.h:2122:3: note:
'__builtin_ia32_movntq' declared here
  __builtin_ia32_movntq(__p, __a);
  ^
In file included from ../drivers/event/dlb/pf/dlb_main.c:20:
../drivers/event/dlb/pf/../dlb_inline_fns.h:61:2: error: use of
unknown builtin '__builtin_ia32_movntdq'
[-Wimplicit-function-declaration]
        __builtin_ia32_movntdq((__v2di *)dest, (__v2di)src_data0);
        ^
3 errors generated.
[2239/2479] Compiling C object
drivers/libtmp_rte_pmd_dlb_event.a.p/event_dlb_dlb.c.o
FAILED: drivers/libtmp_rte_pmd_dlb_event.a.p/event_dlb_dlb.c.o

> ---
>  drivers/event/dlb/dlb.c                      |  483 ++++++
>  drivers/event/dlb/dlb_iface.c                |   27 +
>  drivers/event/dlb/dlb_iface.h                |   27 +
>  drivers/event/dlb/dlb_priv.h                 |    2 +
>  drivers/event/dlb/meson.build                |    6 +-
>  drivers/event/dlb/pf/base/dlb_hw_types.h     |  334 ++++
>  drivers/event/dlb/pf/base/dlb_osdep.h        |  326 ++++
>  drivers/event/dlb/pf/base/dlb_osdep_bitmap.h |  441 +++++
>  drivers/event/dlb/pf/base/dlb_osdep_list.h   |  131 ++
>  drivers/event/dlb/pf/base/dlb_osdep_types.h  |   31 +
>  drivers/event/dlb/pf/base/dlb_regs.h         | 2368 ++++++++++++++++++++++++++
>  drivers/event/dlb/pf/base/dlb_resource.c     |  302 ++++
>  drivers/event/dlb/pf/base/dlb_resource.h     |  876 ++++++++++
>  drivers/event/dlb/pf/dlb_main.c              |  591 +++++++
>  drivers/event/dlb/pf/dlb_main.h              |   52 +
>  drivers/event/dlb/pf/dlb_pf.c                |  232 +++
>  16 files changed, 6228 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/event/dlb/dlb.c
>  create mode 100644 drivers/event/dlb/dlb_iface.c
>  create mode 100644 drivers/event/dlb/dlb_iface.h
>  create mode 100644 drivers/event/dlb/pf/base/dlb_hw_types.h
>  create mode 100644 drivers/event/dlb/pf/base/dlb_osdep.h
>  create mode 100644 drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
>  create mode 100644 drivers/event/dlb/pf/base/dlb_osdep_list.h
>  create mode 100644 drivers/event/dlb/pf/base/dlb_osdep_types.h
>  create mode 100644 drivers/event/dlb/pf/base/dlb_regs.h
>  create mode 100644 drivers/event/dlb/pf/base/dlb_resource.c
>  create mode 100644 drivers/event/dlb/pf/base/dlb_resource.h
>  create mode 100644 drivers/event/dlb/pf/dlb_main.c
>  create mode 100644 drivers/event/dlb/pf/dlb_main.h
>  create mode 100644 drivers/event/dlb/pf/dlb_pf.c
>
> diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
> new file mode 100644
> index 0000000..fbec8f1
> --- /dev/null
> +++ b/drivers/event/dlb/dlb.c
> @@ -0,0 +1,483 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#include <assert.h>
> +#include <errno.h>
> +#include <nmmintrin.h>
> +#include <pthread.h>
> +#include <stdbool.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <string.h>
> +#include <sys/fcntl.h>
> +#include <sys/mman.h>
> +#include <unistd.h>
> +
> +#include <rte_common.h>
> +#include <rte_config.h>
> +#include <rte_cycles.h>
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_errno.h>
> +#include <rte_io.h>
> +#include <rte_kvargs.h>
> +#include <rte_log.h>
> +#include <rte_malloc.h>
> +#include <rte_mbuf.h>
> +#include <rte_prefetch.h>
> +#include <rte_ring.h>
> +#include <rte_string_fns.h>
> +
> +#include <rte_eventdev.h>
> +#include <rte_eventdev_pmd.h>
> +
> +#include "dlb_priv.h"
> +#include "dlb_iface.h"
> +#include "dlb_inline_fns.h"
> +
> +/*
> + * Resources exposed to eventdev.
> + */
> +#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
> +#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
> +#endif
> +static struct rte_event_dev_info evdev_dlb_default_info = {
> +       .driver_name = "", /* probe will set */
> +       .min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
> +       .max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
> +#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
> +       .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
> +#else
> +       .max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
> +#endif
> +       .max_event_queue_flows = DLB_MAX_NUM_FLOWS,
> +       .max_event_queue_priority_levels = DLB_QID_PRIORITIES,
> +       .max_event_priority_levels = DLB_QID_PRIORITIES,
> +       .max_event_ports = DLB_MAX_NUM_LDB_PORTS,
> +       .max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
> +       .max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
> +       .max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
> +       .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
> +       .max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
> +       .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
> +                         RTE_EVENT_DEV_CAP_EVENT_QOS |
> +                         RTE_EVENT_DEV_CAP_BURST_MODE |
> +                         RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
> +                         RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
> +                         RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
> +};
> +
> +struct process_local_port_data
> +dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
> +
> +static int
> +dlb_hw_query_resources(struct dlb_eventdev *dlb)
> +{
> +       struct dlb_hw_dev *handle = &dlb->qm_instance;
> +       struct dlb_hw_resource_info *dlb_info = &handle->info;
> +       int ret;
> +
> +       ret = dlb_iface_get_num_resources(handle,
> +                                         &dlb->hw_rsrc_query_results);
> +       if (ret) {
> +               DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
> +               return ret;
> +       }
> +
> +       /* Complete filling in device resource info returned to evdev app,
> +        * overriding any default values.
> +        * The capabilities (CAPs) were set at compile time.
> +        */
> +
> +       evdev_dlb_default_info.max_event_queues =
> +               dlb->hw_rsrc_query_results.num_ldb_queues;
> +
> +       evdev_dlb_default_info.max_event_ports =
> +               dlb->hw_rsrc_query_results.num_ldb_ports;
> +
> +       evdev_dlb_default_info.max_num_events =
> +               dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
> +
> +       /* Save off values used when creating the scheduling domain. */
> +
> +       handle->info.num_sched_domains =
> +               dlb->hw_rsrc_query_results.num_sched_domains;
> +
> +       handle->info.hw_rsrc_max.nb_events_limit =
> +               dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
> +
> +       handle->info.hw_rsrc_max.num_queues =
> +               dlb->hw_rsrc_query_results.num_ldb_queues +
> +               dlb->hw_rsrc_query_results.num_dir_ports;
> +
> +       handle->info.hw_rsrc_max.num_ldb_queues =
> +               dlb->hw_rsrc_query_results.num_ldb_queues;
> +
> +       handle->info.hw_rsrc_max.num_ldb_ports =
> +               dlb->hw_rsrc_query_results.num_ldb_ports;
> +
> +       handle->info.hw_rsrc_max.num_dir_ports =
> +               dlb->hw_rsrc_query_results.num_dir_ports;
> +
> +       handle->info.hw_rsrc_max.reorder_window_size =
> +               dlb->hw_rsrc_query_results.num_hist_list_entries;
> +
> +       rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
> +
> +       return 0;
> +}
> +
> +/* Wrapper for string to int conversion. Substituted for atoi(...), which is
> + * unsafe.
> + */
> +#define RTE_BASE_10 10
> +
> +static int
> +dlb_string_to_int(int *result, const char *str)
> +{
> +       long ret;
> +       char *endstr;
> +
> +       if (str == NULL || result == NULL)
> +               return -EINVAL;
> +
> +       errno = 0;
> +       ret = strtol(str, &endstr, RTE_BASE_10);
> +       if (errno)
> +               return -errno;
> +
> +       /* long int and int may be different width for some architectures */
> +       if (ret < INT_MIN || ret > INT_MAX || endstr == str)
> +               return -EINVAL;
> +
> +       *result = ret;
> +       return 0;
> +}
> +
> +static int
> +set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
> +{
> +       int *socket_id = opaque;
> +       int ret;
> +
> +       ret = dlb_string_to_int(socket_id, value);
> +       if (ret < 0)
> +               return ret;
> +
> +       if (*socket_id > RTE_MAX_NUMA_NODES)
> +               return -EINVAL;
> +
> +       return 0;
> +}
> +
> +static int
> +set_max_num_events(const char *key __rte_unused,
> +                  const char *value,
> +                  void *opaque)
> +{
> +       int *max_num_events = opaque;
> +       int ret;
> +
> +       if (value == NULL || opaque == NULL) {
> +               DLB_LOG_ERR("NULL pointer\n");
> +               return -EINVAL;
> +       }
> +
> +       ret = dlb_string_to_int(max_num_events, value);
> +       if (ret < 0)
> +               return ret;
> +
> +       if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
> +               DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
> +                           DLB_MAX_NUM_LDB_CREDITS);
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
> +static int
> +set_num_dir_credits(const char *key __rte_unused,
> +                   const char *value,
> +                   void *opaque)
> +{
> +       int *num_dir_credits = opaque;
> +       int ret;
> +
> +       if (value == NULL || opaque == NULL) {
> +               DLB_LOG_ERR("NULL pointer\n");
> +               return -EINVAL;
> +       }
> +
> +       ret = dlb_string_to_int(num_dir_credits, value);
> +       if (ret < 0)
> +               return ret;
> +
> +       if (*num_dir_credits < 0 ||
> +           *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
> +               DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
> +                           DLB_MAX_NUM_DIR_CREDITS);
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
> +static int
> +set_dev_id(const char *key __rte_unused,
> +          const char *value,
> +          void *opaque)
> +{
> +       int *dev_id = opaque;
> +       int ret;
> +
> +       if (value == NULL || opaque == NULL) {
> +               DLB_LOG_ERR("NULL pointer\n");
> +               return -EINVAL;
> +       }
> +
> +       ret = dlb_string_to_int(dev_id, value);
> +       if (ret < 0)
> +               return ret;
> +
> +       return 0;
> +}
> +
> +static int
> +set_defer_sched(const char *key __rte_unused,
> +               const char *value,
> +               void *opaque)
> +{
> +       int *defer_sched = opaque;
> +
> +       if (value == NULL || opaque == NULL) {
> +               DLB_LOG_ERR("NULL pointer\n");
> +               return -EINVAL;
> +       }
> +
> +       if (strncmp(value, "on", 2) != 0) {
> +               DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
> +                           value);
> +               return -EINVAL;
> +       }
> +
> +       *defer_sched = 1;
> +
> +       return 0;
> +}
> +
> +static int
> +set_num_atm_inflights(const char *key __rte_unused,
> +                     const char *value,
> +                     void *opaque)
> +{
> +       int *num_atm_inflights = opaque;
> +       int ret;
> +
> +       if (value == NULL || opaque == NULL) {
> +               DLB_LOG_ERR("NULL pointer\n");
> +               return -EINVAL;
> +       }
> +
> +       ret = dlb_string_to_int(num_atm_inflights, value);
> +       if (ret < 0)
> +               return ret;
> +
> +       if (*num_atm_inflights < 0 ||
> +           *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
> +               DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
> +                           DLB_MAX_NUM_ATM_INFLIGHTS);
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
> +void
> +dlb_entry_points_init(struct rte_eventdev *dev)
> +{
> +       static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
> +       };
> +
> +       /* Expose PMD's eventdev interface */
> +       dev->dev_ops = &dlb_eventdev_entry_ops;
> +}
> +
> +int
> +dlb_primary_eventdev_probe(struct rte_eventdev *dev,
> +                          const char *name,
> +                          struct dlb_devargs *dlb_args)
> +{
> +       struct dlb_eventdev *dlb;
> +       int err;
> +
> +       dlb = dev->data->dev_private;
> +
> +       dlb->event_dev = dev; /* backlink */
> +
> +       evdev_dlb_default_info.driver_name = name;
> +
> +       dlb->max_num_events_override = dlb_args->max_num_events;
> +       dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
> +       dlb->defer_sched = dlb_args->defer_sched;
> +       dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
> +
> +       /* Open the interface.
> +        * For vdev mode, this means open the dlb kernel module.
> +        */
> +       err = dlb_iface_open(&dlb->qm_instance, name);
> +       if (err < 0) {
> +               DLB_LOG_ERR("could not open event hardware device, err=%d\n",
> +                           err);
> +               return err;
> +       }
> +
> +       err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
> +       if (err < 0) {
> +               DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
> +                           err);
> +               return err;
> +       }
> +
> +       err = dlb_hw_query_resources(dlb);
> +       if (err) {
> +               DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
> +               return err;
> +       }
> +
> +       err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
> +       if (err < 0) {
> +               DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
> +               return err;
> +       }
> +
> +       rte_spinlock_init(&dlb->qm_instance.resource_lock);
> +
> +       dlb_iface_low_level_io_init(dlb);
> +
> +       dlb_entry_points_init(dev);
> +
> +       return 0;
> +}
> +
> +int
> +dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
> +                            const char *name)
> +{
> +       struct dlb_eventdev *dlb;
> +       int err;
> +
> +       dlb = dev->data->dev_private;
> +
> +       evdev_dlb_default_info.driver_name = name;
> +
> +       err = dlb_iface_open(&dlb->qm_instance, name);
> +       if (err < 0) {
> +               DLB_LOG_ERR("could not open event hardware device, err=%d\n",
> +                           err);
> +               return err;
> +       }
> +
> +       err = dlb_hw_query_resources(dlb);
> +       if (err) {
> +               DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
> +               return err;
> +       }
> +
> +       dlb_iface_low_level_io_init(dlb);
> +
> +       dlb_entry_points_init(dev);
> +
> +       return 0;
> +}
> +
> +int
> +dlb_parse_params(const char *params,
> +                const char *name,
> +                struct dlb_devargs *dlb_args)
> +{
> +       int ret = 0;
> +       static const char * const args[] = { NUMA_NODE_ARG,
> +                                            DLB_MAX_NUM_EVENTS,
> +                                            DLB_NUM_DIR_CREDITS,
> +                                            DEV_ID_ARG,
> +                                            DLB_DEFER_SCHED_ARG,
> +                                            DLB_NUM_ATM_INFLIGHTS_ARG,
> +                                            NULL };
> +
> +       if (params && params[0] != '\0') {
> +               struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
> +
> +               if (kvlist == NULL) {
> +                       DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
> +                                    name);
> +               } else {
> +                       int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
> +                                                    set_numa_node,
> +                                                    &dlb_args->socket_id);
> +                       if (ret != 0) {
> +                               DLB_LOG_ERR("%s: Error parsing numa node parameter",
> +                                           name);
> +                               rte_kvargs_free(kvlist);
> +                               return ret;
> +                       }
> +
> +                       ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
> +                                                set_max_num_events,
> +                                                &dlb_args->max_num_events);
> +                       if (ret != 0) {
> +                               DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
> +                                           name);
> +                               rte_kvargs_free(kvlist);
> +                               return ret;
> +                       }
> +
> +                       ret = rte_kvargs_process(kvlist,
> +                                       DLB_NUM_DIR_CREDITS,
> +                                       set_num_dir_credits,
> +                                       &dlb_args->num_dir_credits_override);
> +                       if (ret != 0) {
> +                               DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
> +                                           name);
> +                               rte_kvargs_free(kvlist);
> +                               return ret;
> +                       }
> +
> +                       ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
> +                                                set_dev_id,
> +                                                &dlb_args->dev_id);
> +                       if (ret != 0) {
> +                               DLB_LOG_ERR("%s: Error parsing dev_id parameter",
> +                                           name);
> +                               rte_kvargs_free(kvlist);
> +                               return ret;
> +                       }
> +
> +                       ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
> +                                                set_defer_sched,
> +                                                &dlb_args->defer_sched);
> +                       if (ret != 0) {
> +                               DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
> +                                           name);
> +                               rte_kvargs_free(kvlist);
> +                               return ret;
> +                       }
> +
> +                       ret = rte_kvargs_process(kvlist,
> +                                                DLB_NUM_ATM_INFLIGHTS_ARG,
> +                                                set_num_atm_inflights,
> +                                                &dlb_args->num_atm_inflights);
> +                       if (ret != 0) {
> +                               DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
> +                                           name);
> +                               rte_kvargs_free(kvlist);
> +                               return ret;
> +                       }
> +
> +                       rte_kvargs_free(kvlist);
> +               }
> +       }
> +       return ret;
> +}
> +RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);
> diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
> new file mode 100644
> index 0000000..dd72120
> --- /dev/null
> +++ b/drivers/event/dlb/dlb_iface.c
> @@ -0,0 +1,27 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#include <stdint.h>
> +
> +#include "dlb_priv.h"
> +
> +/* DLB PMD Internal interface function pointers.
> + * If VDEV (bifurcated PMD), these will resolve to functions that issue ioctls
> + * serviced by DLB kernel module.
> + * If PCI (PF PMD), these will be implemented locally in user mode.
> + */
> +
> +void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
> +
> +int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
> +
> +int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
> +                                   uint8_t *revision);
> +
> +int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
> +                                  struct dlb_get_num_resources_args *rsrcs);
> +
> +int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
> +                                 enum dlb_cq_poll_modes *mode);
> +
> diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
> new file mode 100644
> index 0000000..416d1b3
> --- /dev/null
> +++ b/drivers/event/dlb/dlb_iface.h
> @@ -0,0 +1,27 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef _DLB_IFACE_H
> +#define _DLB_IFACE_H
> +
> +/* DLB PMD Internal interface function pointers.
> + * If VDEV (bifurcated PMD), these will resolve to functions that issue ioctls
> + * serviced by DLB kernel module.
> + * If PCI (PF PMD), these will be implemented locally in user mode.
> + */
> +
> +extern void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
> +
> +extern int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
> +
> +extern int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
> +                                          uint8_t *revision);
> +
> +extern int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
> +                                  struct dlb_get_num_resources_args *rsrcs);
> +
> +extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
> +                                        enum dlb_cq_poll_modes *mode);
> +
> +#endif /* _DLB_IFACE_H */
> diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
> index f695abf..892d55f 100644
> --- a/drivers/event/dlb/dlb_priv.h
> +++ b/drivers/event/dlb/dlb_priv.h
> @@ -505,4 +505,6 @@ int dlb_parse_params(const char *params,
>                      const char *name,
>                      struct dlb_devargs *dlb_args);
>
> +void dlb_entry_points_init(struct rte_eventdev *dev);
> +
>  #endif /* _DLB_PRIV_H_ */
> diff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build
> index 54ba2c8..414b3ed 100644
> --- a/drivers/event/dlb/meson.build
> +++ b/drivers/event/dlb/meson.build
> @@ -1,7 +1,11 @@
>  # SPDX-License-Identifier: BSD-3-Clause
>  # Copyright(c) 2019-2020 Intel Corporation
>
> -sources = files(
> +sources = files('dlb.c',
> +               'dlb_iface.c',
> +               'pf/dlb_main.c',
> +               'pf/dlb_pf.c',
> +               'pf/base/dlb_resource.c'
>  )
>
>  deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
> diff --git a/drivers/event/dlb/pf/base/dlb_hw_types.h b/drivers/event/dlb/pf/base/dlb_hw_types.h
> new file mode 100644
> index 0000000..87b83f8
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_hw_types.h
> @@ -0,0 +1,334 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef __DLB_HW_TYPES_H
> +#define __DLB_HW_TYPES_H
> +
> +#include "../../dlb_user.h"
> +#include "dlb_osdep_types.h"
> +#include "dlb_osdep_list.h"
> +
> +#define DLB_MAX_NUM_DOMAINS 32
> +#define DLB_MAX_NUM_LDB_QUEUES 128
> +#define DLB_MAX_NUM_LDB_PORTS 64
> +#define DLB_MAX_NUM_DIR_PORTS 128
> +#define DLB_MAX_NUM_LDB_CREDITS 16384
> +#define DLB_MAX_NUM_DIR_CREDITS 4096
> +#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
> +#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
> +#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
> +#define DLB_MAX_NUM_AQOS_ENTRIES 2048
> +#define DLB_MAX_NUM_TOTAL_OUTSTANDING_COMPLETIONS 4096
> +#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
> +#define DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS 4
> +#define DLB_MAX_NUM_SEQUENCE_NUMBER_MODES 6
> +#define DLB_QID_PRIORITIES 8
> +#define DLB_NUM_ARB_WEIGHTS 8
> +#define DLB_MAX_WEIGHT 255
> +#define DLB_MAX_PORT_CREDIT_QUANTUM 1023
> +#define DLB_MAX_CQ_COMP_CHECK_LOOPS 409600
> +#define DLB_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
> +#define DLB_HZ 800000000
> +
> +/* Used for DLB A-stepping workaround for hardware write buffer lock up issue */
> +#define DLB_A_STEP_MAX_PORTS 128
> +
> +#define DLB_PF_DEV_ID 0x270B
> +
> +/* Interrupt related macros */
> +#define DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS 8
> +#define DLB_PF_NUM_CQ_INTERRUPT_VECTORS         64
> +#define DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS \
> +       (DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \
> +        DLB_PF_NUM_CQ_INTERRUPT_VECTORS)
> +#define DLB_PF_NUM_COMPRESSED_MODE_VECTORS \
> +       (DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1)
> +#define DLB_PF_NUM_PACKED_MODE_VECTORS  DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS
> +#define DLB_PF_COMPRESSED_MODE_CQ_VECTOR_ID DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS
> +
> +#define DLB_PF_NUM_ALARM_INTERRUPT_VECTORS 4
> +#define DLB_INT_ALARM 0
> +#define DLB_INT_INGRESS_ERROR 3
> +
> +#define DLB_ALARM_HW_SOURCE_SYS 0
> +#define DLB_ALARM_HW_SOURCE_DLB 1
> +
> +#define DLB_ALARM_HW_UNIT_CHP 1
> +#define DLB_ALARM_HW_UNIT_LSP 3
> +
> +#define DLB_ALARM_HW_CHP_AID_OUT_OF_CREDITS 6
> +#define DLB_ALARM_HW_CHP_AID_ILLEGAL_ENQ 7
> +#define DLB_ALARM_HW_LSP_AID_EXCESS_TOKEN_POPS 15
> +#define DLB_ALARM_SYS_AID_ILLEGAL_HCW 0
> +#define DLB_ALARM_SYS_AID_ILLEGAL_QID 3
> +#define DLB_ALARM_SYS_AID_DISABLED_QID 4
> +#define DLB_ALARM_SYS_AID_ILLEGAL_CQID 6
> +
> +/* Hardware-defined base addresses */
> +#define DLB_LDB_PP_BASE 0x2100000
> +#define DLB_LDB_PP_STRIDE 0x1000
> +#define DLB_LDB_PP_BOUND \
> +       (DLB_LDB_PP_BASE + DLB_LDB_PP_STRIDE * DLB_MAX_NUM_LDB_PORTS)
> +#define DLB_DIR_PP_BASE 0x2000000
> +#define DLB_DIR_PP_STRIDE 0x1000
> +#define DLB_DIR_PP_BOUND \
> +       (DLB_DIR_PP_BASE + DLB_DIR_PP_STRIDE * DLB_MAX_NUM_DIR_PORTS)
> +
> +struct dlb_freelist {
> +       u32 base;
> +       u32 bound;
> +       u32 offset;
> +};
> +
> +static inline u32 dlb_freelist_count(struct dlb_freelist *list)
> +{
> +       return (list->bound - list->base) - list->offset;
> +}
> +
> +struct dlb_hcw {
> +       u64 data;
> +       /* Word 3 */
> +       u16 opaque;
> +       u8 qid;
> +       u8 sched_type:2;
> +       u8 priority:3;
> +       u8 msg_type:3;
> +       /* Word 4 */
> +       u16 lock_id;
> +       u8 meas_lat:1;
> +       u8 rsvd1:2;
> +       u8 no_dec:1;
> +       u8 cmp_id:4;
> +       u8 cq_token:1;
> +       u8 qe_comp:1;
> +       u8 qe_frag:1;
> +       u8 qe_valid:1;
> +       u8 int_arm:1;
> +       u8 error:1;
> +       u8 rsvd:2;
> +};
> +
> +struct dlb_ldb_queue {
> +       struct dlb_list_entry domain_list;
> +       struct dlb_list_entry func_list;
> +       u32 id;
> +       u32 domain_id;
> +       u32 num_qid_inflights;
> +       struct dlb_freelist aqed_freelist;
> +       u8 sn_cfg_valid;
> +       u32 sn_group;
> +       u32 sn_slot;
> +       u32 num_mappings;
> +       u8 num_pending_additions;
> +       u8 owned;
> +       u8 configured;
> +};
> +
> +/* Directed ports and queues are paired by nature, so the driver tracks them
> + * with a single data structure.
> + */
> +struct dlb_dir_pq_pair {
> +       struct dlb_list_entry domain_list;
> +       struct dlb_list_entry func_list;
> +       u32 id;
> +       u32 domain_id;
> +       u8 ldb_pool_used;
> +       u8 dir_pool_used;
> +       u8 queue_configured;
> +       u8 port_configured;
> +       u8 owned;
> +       u8 enabled;
> +       u32 ref_cnt;
> +};
> +
> +enum dlb_qid_map_state {
> +       /* The slot doesn't contain a valid queue mapping */
> +       DLB_QUEUE_UNMAPPED,
> +       /* The slot contains a valid queue mapping */
> +       DLB_QUEUE_MAPPED,
> +       /* The driver is mapping a queue into this slot */
> +       DLB_QUEUE_MAP_IN_PROGRESS,
> +       /* The driver is unmapping a queue from this slot */
> +       DLB_QUEUE_UNMAP_IN_PROGRESS,
> +       /* The driver is unmapping a queue from this slot, and once complete
> +        * will replace it with another mapping.
> +        */
> +       DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP,
> +};
> +
> +struct dlb_ldb_port_qid_map {
> +       u16 qid;
> +       u8 priority;
> +       u16 pending_qid;
> +       u8 pending_priority;
> +       enum dlb_qid_map_state state;
> +};
> +
> +struct dlb_ldb_port {
> +       struct dlb_list_entry domain_list;
> +       struct dlb_list_entry func_list;
> +       u32 id;
> +       u32 domain_id;
> +       u8 ldb_pool_used;
> +       u8 dir_pool_used;
> +       u8 init_tkn_cnt;
> +       u32 hist_list_entry_base;
> +       u32 hist_list_entry_limit;
> +       /* The qid_map represents the hardware QID mapping state. */
> +       struct dlb_ldb_port_qid_map qid_map[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
> +       u32 ref_cnt;
> +       u8 num_pending_removals;
> +       u8 num_mappings;
> +       u8 owned;
> +       u8 enabled;
> +       u8 configured;
> +};
> +
> +struct dlb_credit_pool {
> +       struct dlb_list_entry domain_list;
> +       struct dlb_list_entry func_list;
> +       u32 id;
> +       u32 domain_id;
> +       u32 total_credits;
> +       u32 avail_credits;
> +       u8 owned;
> +       u8 configured;
> +};
> +
> +struct dlb_sn_group {
> +       u32 mode;
> +       u32 sequence_numbers_per_queue;
> +       u32 slot_use_bitmap;
> +       u32 id;
> +};
> +
> +static inline bool dlb_sn_group_full(struct dlb_sn_group *group)
> +{
> +       u32 mask[6] = {
> +               0xffffffff,  /* 32 SNs per queue */
> +               0x0000ffff,  /* 64 SNs per queue */
> +               0x000000ff,  /* 128 SNs per queue */
> +               0x0000000f,  /* 256 SNs per queue */
> +               0x00000003,  /* 512 SNs per queue */
> +               0x00000001}; /* 1024 SNs per queue */
> +
> +       return group->slot_use_bitmap == mask[group->mode];
> +}
> +
> +static inline int dlb_sn_group_alloc_slot(struct dlb_sn_group *group)
> +{
> +       int bound[6] = {32, 16, 8, 4, 2, 1};
> +       int i;
> +
> +       for (i = 0; i < bound[group->mode]; i++) {
> +               if (!(group->slot_use_bitmap & (1 << i))) {
> +                       group->slot_use_bitmap |= 1 << i;
> +                       return i;
> +               }
> +       }
> +
> +       return -1;
> +}
> +
> +static inline void dlb_sn_group_free_slot(struct dlb_sn_group *group, int slot)
> +{
> +       group->slot_use_bitmap &= ~(1 << slot);
> +}
> +
> +static inline int dlb_sn_group_used_slots(struct dlb_sn_group *group)
> +{
> +       int i, cnt = 0;
> +
> +       for (i = 0; i < 32; i++)
> +               cnt += !!(group->slot_use_bitmap & (1 << i));
> +
> +       return cnt;
> +}
> +
> +struct dlb_domain {
> +       struct dlb_function_resources *parent_func;
> +       struct dlb_list_entry func_list;
> +       struct dlb_list_head used_ldb_queues;
> +       struct dlb_list_head used_ldb_ports;
> +       struct dlb_list_head used_dir_pq_pairs;
> +       struct dlb_list_head used_ldb_credit_pools;
> +       struct dlb_list_head used_dir_credit_pools;
> +       struct dlb_list_head avail_ldb_queues;
> +       struct dlb_list_head avail_ldb_ports;
> +       struct dlb_list_head avail_dir_pq_pairs;
> +       struct dlb_list_head avail_ldb_credit_pools;
> +       struct dlb_list_head avail_dir_credit_pools;
> +       u32 total_hist_list_entries;
> +       u32 avail_hist_list_entries;
> +       u32 hist_list_entry_base;
> +       u32 hist_list_entry_offset;
> +       struct dlb_freelist qed_freelist;
> +       struct dlb_freelist dqed_freelist;
> +       struct dlb_freelist aqed_freelist;
> +       u32 id;
> +       int num_pending_removals;
> +       int num_pending_additions;
> +       u8 configured;
> +       u8 started;
> +};
> +
> +struct dlb_bitmap;
> +
> +struct dlb_function_resources {
> +       u32 num_avail_domains;
> +       struct dlb_list_head avail_domains;
> +       struct dlb_list_head used_domains;
> +       u32 num_avail_ldb_queues;
> +       struct dlb_list_head avail_ldb_queues;
> +       u32 num_avail_ldb_ports;
> +       struct dlb_list_head avail_ldb_ports;
> +       u32 num_avail_dir_pq_pairs;
> +       struct dlb_list_head avail_dir_pq_pairs;
> +       struct dlb_bitmap *avail_hist_list_entries;
> +       struct dlb_bitmap *avail_qed_freelist_entries;
> +       struct dlb_bitmap *avail_dqed_freelist_entries;
> +       struct dlb_bitmap *avail_aqed_freelist_entries;
> +       u32 num_avail_ldb_credit_pools;
> +       struct dlb_list_head avail_ldb_credit_pools;
> +       u32 num_avail_dir_credit_pools;
> +       struct dlb_list_head avail_dir_credit_pools;
> +       u32 num_enabled_ldb_ports;
> +};
> +
> +/* After initialization, each resource in dlb_hw_resources is located in one of
> + * the following lists:
> + * -- The PF's available resources list. These are unconfigured resources owned
> + *     by the PF and not allocated to a DLB scheduling domain.
> + * -- A domain's available resources list. These are domain-owned unconfigured
> + *     resources.
> + * -- A domain's used resources list. These are domain-owned configured
> + *     resources.
> + *
> + * A resource moves to a new list when a domain is created or destroyed, or
> + * when the resource is configured.
> + */
> +struct dlb_hw_resources {
> +       struct dlb_ldb_queue ldb_queues[DLB_MAX_NUM_LDB_QUEUES];
> +       struct dlb_ldb_port ldb_ports[DLB_MAX_NUM_LDB_PORTS];
> +       struct dlb_dir_pq_pair dir_pq_pairs[DLB_MAX_NUM_DIR_PORTS];
> +       struct dlb_credit_pool ldb_credit_pools[DLB_MAX_NUM_LDB_CREDIT_POOLS];
> +       struct dlb_credit_pool dir_credit_pools[DLB_MAX_NUM_DIR_CREDIT_POOLS];
> +       struct dlb_sn_group sn_groups[DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
> +};
> +
> +struct dlb_hw {
> +       /* BAR 0 address */
> +       void  *csr_kva;
> +       unsigned long csr_phys_addr;
> +       /* BAR 2 address */
> +       void  *func_kva;
> +       unsigned long func_phys_addr;
> +
> +       /* Resource tracking */
> +       struct dlb_hw_resources rsrcs;
> +       struct dlb_function_resources pf;
> +       struct dlb_domain domains[DLB_MAX_NUM_DOMAINS];
> +};
> +
> +#endif /* __DLB_HW_TYPES_H */
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep.h b/drivers/event/dlb/pf/base/dlb_osdep.h
> new file mode 100644
> index 0000000..a6eef2f
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_osdep.h
> @@ -0,0 +1,326 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef __DLB_OSDEP_H__
> +#define __DLB_OSDEP_H__
> +
> +#include <string.h>
> +#include <time.h>
> +#include <unistd.h>
> +#include <cpuid.h>
> +#include <pthread.h>
> +#include <rte_string_fns.h>
> +#include <rte_cycles.h>
> +#include <rte_io.h>
> +#include <rte_log.h>
> +#include <rte_spinlock.h>
> +#include "../dlb_main.h"
> +#include "dlb_resource.h"
> +#include "../../dlb_log.h"
> +#include "../../dlb_user.h"
> +
> +
> +#define DLB_PCI_REG_READ(reg)        rte_read32((void *)reg)
> +#define DLB_PCI_REG_WRITE(reg, val)   rte_write32(val, (void *)reg)
> +
> +#define DLB_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
> +#define DLB_CSR_RD(hw, reg) \
> +       DLB_PCI_REG_READ(DLB_CSR_REG_ADDR((hw), (reg)))
> +#define DLB_CSR_WR(hw, reg, val) \
> +       DLB_PCI_REG_WRITE(DLB_CSR_REG_ADDR((hw), (reg)), (val))
> +
> +#define DLB_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
> +#define DLB_FUNC_RD(hw, reg) \
> +       DLB_PCI_REG_READ(DLB_FUNC_REG_ADDR((hw), (reg)))
> +#define DLB_FUNC_WR(hw, reg, val) \
> +       DLB_PCI_REG_WRITE(DLB_FUNC_REG_ADDR((hw), (reg)), (val))
> +
> +extern unsigned int dlb_unregister_timeout_s;
> +/**
> + * os_queue_unregister_timeout_s() - timeout (in seconds) to wait for queue
> + *                                   unregister acknowledgments.
> + */
> +static inline unsigned int os_queue_unregister_timeout_s(void)
> +{
> +       return dlb_unregister_timeout_s;
> +}
> +
> +static inline size_t os_strlcpy(char *dst, const char *src, size_t sz)
> +{
> +       return rte_strlcpy(dst, src, sz);
> +}
> +
> +/**
> + * os_udelay() - busy-wait for a number of microseconds
> + * @usecs: delay duration.
> + */
> +static inline void os_udelay(int usecs)
> +{
> +       rte_delay_us(usecs);
> +}
> +
> +/**
> + * os_msleep() - sleep for a number of milliseconds
> + * @msecs: delay duration.
> + */
> +
> +static inline void os_msleep(int msecs)
> +{
> +       rte_delay_ms(msecs);
> +}
> +
> +#define DLB_PP_BASE(__is_ldb) ((__is_ldb) ? DLB_LDB_PP_BASE : DLB_DIR_PP_BASE)
> +/**
> + * os_map_producer_port() - map a producer port into the caller's address space
> + * @hw: dlb_hw handle for a particular device.
> + * @port_id: port ID
> + * @is_ldb: true for load-balanced port, false for a directed port
> + *
> + * This function maps the requested producer port memory into the caller's
> + * address space.
> + *
> + * Return:
> + * Returns the base address at which the PP memory was mapped, else NULL.
> + */
> +static inline void *os_map_producer_port(struct dlb_hw *hw,
> +                                        u8 port_id,
> +                                        bool is_ldb)
> +{
> +       uint64_t addr;
> +       uint64_t pp_dma_base;
> +
> +
> +       pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
> +       addr = (pp_dma_base + (PAGE_SIZE * port_id));
> +
> +       return (void *)(uintptr_t)addr;
> +
> +}
> +/**
> + * os_unmap_producer_port() - unmap a producer port
> + * @addr: mapped producer port address
> + *
> + * This function undoes os_map_producer_port() by unmapping the producer port
> + * memory from the caller's address space.
> + *
> + * Return:
> + * None. For the PF PMD the port memory was not actually mapped by us, so
> + * there is nothing to return.
> + */
> +
> +/* PFPMD - Nothing to do here, since memory was not actually mapped by us */
> +static inline void os_unmap_producer_port(struct dlb_hw *hw, void *addr)
> +{
> +       RTE_SET_USED(hw);
> +       RTE_SET_USED(addr);
> +}
> +/**
> + * os_enqueue_four_hcws() - enqueue four HCWs to DLB
> + * @hw: dlb_hw handle for a particular device.
> + * @hcw: pointer to the 64B-aligned contiguous HCW memory
> + * @addr: producer port address
> + */
> +static inline void os_enqueue_four_hcws(struct dlb_hw *hw,
> +                                       struct dlb_hcw *hcw,
> +                                       void *addr)
> +{
> +       struct dlb_dev *dlb_dev;
> +
> +       dlb_dev = container_of(hw, struct dlb_dev, hw);
> +
> +       dlb_dev->enqueue_four(hcw, addr);
> +}
> +
> +/**
> + * os_fence_hcw() - fence an HCW to ensure it arrives at the device
> + * @hw: dlb_hw handle for a particular device.
> + * @pp_addr: producer port address
> + */
> +static inline void os_fence_hcw(struct dlb_hw *hw, u64 *pp_addr)
> +{
> +       RTE_SET_USED(hw);
> +
> +       /* To ensure outstanding HCWs reach the device, read the PP address. IA
> +        * memory ordering prevents reads from passing older writes, and the
> +        * mfence also ensures this.
> +        */
> +       rte_mb();
> +
> +       *(volatile u64 *)pp_addr;
> +}
> +
> +/* Map to PMDs logging interface */
> +#define DLB_ERR(dev, fmt, args...) \
> +       DLB_LOG_ERR(fmt, ## args)
> +
> +#define DLB_INFO(dev, fmt, args...) \
> +       DLB_LOG_INFO(fmt, ## args)
> +
> +#define DLB_DEBUG(dev, fmt, args...) \
> +       DLB_LOG_DEBUG(fmt, ## args)
> +
> +/**
> + * DLB_HW_ERR() - log an error message
> + * @dlb: dlb_hw handle for a particular device.
> + * @...: variable string args.
> + */
> +#define DLB_HW_ERR(dlb, ...) do {      \
> +       RTE_SET_USED(dlb);              \
> +       DLB_ERR(dlb, __VA_ARGS__);      \
> +} while (0)
> +
> +/**
> + * DLB_HW_INFO() - log an info message
> + * @dlb: dlb_hw handle for a particular device.
> + * @...: variable string args.
> + */
> +#define DLB_HW_INFO(dlb, ...) do {     \
> +       RTE_SET_USED(dlb);              \
> +       DLB_INFO(dlb, __VA_ARGS__);     \
> +} while (0)
> +
> +/*** scheduling functions ***/
> +
> +/* The callback runs until it completes all outstanding QID->CQ
> + * map and unmap requests. To prevent deadlock, this function gives other
> + * threads a chance to grab the resource mutex and configure hardware.
> + */
> +static void *dlb_complete_queue_map_unmap(void *__args)
> +{
> +       struct dlb_dev *dlb_dev = (struct dlb_dev *)__args;
> +       int ret;
> +
> +       while (1) {
> +               rte_spinlock_lock(&dlb_dev->resource_mutex);
> +
> +               ret = dlb_finish_unmap_qid_procedures(&dlb_dev->hw);
> +               ret += dlb_finish_map_qid_procedures(&dlb_dev->hw);
> +
> +               if (ret != 0) {
> +                       rte_spinlock_unlock(&dlb_dev->resource_mutex);
> +                       /* Relinquish the CPU so the application can process
> +                        * its CQs, so this function does not deadlock.
> +                        */
> +                       sched_yield();
> +               } else
> +                       break;
> +       }
> +
> +       dlb_dev->worker_launched = false;
> +
> +       rte_spinlock_unlock(&dlb_dev->resource_mutex);
> +
> +       return NULL;
> +}
> +
> +
> +/**
> + * os_schedule_work() - launch a thread to process pending map and unmap work
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function launches a thread that will run until all pending
> + * map and unmap procedures are complete.
> + */
> +static inline void os_schedule_work(struct dlb_hw *hw)
> +{
> +       struct dlb_dev *dlb_dev;
> +       pthread_t complete_queue_map_unmap_thread;
> +       int ret;
> +
> +       dlb_dev = container_of(hw, struct dlb_dev, hw);
> +
> +       ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
> +                                    "dlb_queue_unmap_waiter",
> +                                    NULL,
> +                                    dlb_complete_queue_map_unmap,
> +                                    dlb_dev);
> +       if (ret)
> +               DLB_ERR(dlb_dev,
> +               "Could not create queue complete map/unmap thread, err=%d\n",
> +                         ret);
> +       else
> +               dlb_dev->worker_launched = true;
> +}
> +
> +/**
> + * os_worker_active() - query whether the map/unmap worker thread is active
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function returns a boolean indicating whether a thread (launched by
> + * os_schedule_work()) is active. This function is used to determine
> + * whether or not to launch a worker thread.
> + */
> +static inline bool os_worker_active(struct dlb_hw *hw)
> +{
> +       struct dlb_dev *dlb_dev;
> +
> +       dlb_dev = container_of(hw, struct dlb_dev, hw);
> +
> +       return dlb_dev->worker_launched;
> +}
> +
> +/**
> + * os_notify_user_space() - notify user space
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: ID of domain to notify.
> + * @alert_id: alert ID.
> + * @aux_alert_data: additional alert data.
> + *
> + * This function notifies user space of an alert (such as a remote queue
> + * unregister or hardware alarm).
> + *
> + * Return:
> + * Returns 0 upon success, <0 otherwise.
> + */
> +static inline int os_notify_user_space(struct dlb_hw *hw,
> +                                      u32 domain_id,
> +                                      u64 alert_id,
> +                                      u64 aux_alert_data)
> +{
> +       RTE_SET_USED(hw);
> +       RTE_SET_USED(domain_id);
> +       RTE_SET_USED(alert_id);
> +       RTE_SET_USED(aux_alert_data);
> +
> +       /* Not called for PF PMD */
> +       return -1;
> +}
> +
> +enum dlb_dev_revision {
> +       DLB_A0,
> +       DLB_A1,
> +       DLB_A2,
> +       DLB_A3,
> +       DLB_B0,
> +};
> +
> +/**
> + * os_get_dev_revision() - query the device_revision
> + * @hw: dlb_hw handle for a particular device.
> + */
> +static inline enum dlb_dev_revision os_get_dev_revision(struct dlb_hw *hw)
> +{
> +       uint32_t a, b, c, d, stepping;
> +
> +       RTE_SET_USED(hw);
> +
> +       __cpuid(0x1, a, b, c, d);
> +
> +       stepping = a & 0xf;
> +
> +       switch (stepping) {
> +       case 0:
> +               return DLB_A0;
> +       case 1:
> +               return DLB_A1;
> +       case 2:
> +               return DLB_A2;
> +       case 3:
> +               return DLB_A3;
> +       default:
> +               /* Treat all revisions >= 4 as B0 */
> +               return DLB_B0;
> +       }
> +}
> +
> +#endif /*  __DLB_OSDEP_H__ */
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h b/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
> new file mode 100644
> index 0000000..00ab732
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
> @@ -0,0 +1,441 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef __DLB_OSDEP_BITMAP_H__
> +#define __DLB_OSDEP_BITMAP_H__
> +
> +#include <stdint.h>
> +#include <stdbool.h>
> +#include <stdio.h>
> +#include <unistd.h>
> +#include <rte_bitmap.h>
> +#include <rte_string_fns.h>
> +#include <rte_malloc.h>
> +#include <rte_errno.h>
> +#include "../dlb_main.h"
> +
> +/*************************/
> +/*** Bitmap operations ***/
> +/*************************/
> +struct dlb_bitmap {
> +       struct rte_bitmap *map;
> +       unsigned int len;
> +       struct dlb_hw *hw;
> +};
> +
> +/**
> + * dlb_bitmap_alloc() - alloc a bitmap data structure
> + * @bitmap: pointer to dlb_bitmap structure pointer.
> + * @len: number of entries in the bitmap.
> + *
> + * This function allocates a bitmap and initializes it with length @len. All
> + * entries are initially zero.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or len is 0.
> + * ENOMEM - could not allocate memory for the bitmap data structure.
> + */
> +static inline int dlb_bitmap_alloc(struct dlb_hw *hw,
> +                                  struct dlb_bitmap **bitmap,
> +                                  unsigned int len)
> +{
> +       struct dlb_bitmap *bm;
> +       void *mem;
> +       uint32_t alloc_size;
> +       uint32_t nbits = (uint32_t) len;
> +       RTE_SET_USED(hw);
> +
> +       if (bitmap == NULL || nbits == 0)
> +               return -EINVAL;
> +
> +       /* Allocate DLB bitmap control struct */
> +       bm = rte_malloc("DLB_PF",
> +               sizeof(struct dlb_bitmap),
> +               RTE_CACHE_LINE_SIZE);
> +
> +       if (bm == NULL)
> +               return -ENOMEM;
> +
> +       /* Allocate bitmap memory */
> +       alloc_size = rte_bitmap_get_memory_footprint(nbits);
> +       mem = rte_malloc("DLB_PF_BITMAP", alloc_size, RTE_CACHE_LINE_SIZE);
> +       if (mem == NULL) {
> +               rte_free(bm);
> +               return -ENOMEM;
> +       }
> +
> +       bm->map = rte_bitmap_init(len, mem, alloc_size);
> +       if (bm->map == NULL) {
> +               rte_free(mem);
> +               rte_free(bm);
> +               return -ENOMEM;
> +       }
> +
> +       bm->len = len;
> +
> +       *bitmap = bm;
> +
> +       return 0;
> +}
> +
> +/**
> + * dlb_bitmap_free() - free a previously allocated bitmap data structure
> + * @bitmap: pointer to dlb_bitmap structure.
> + *
> + * This function frees a bitmap that was allocated with dlb_bitmap_alloc().
> + */
> +static inline void dlb_bitmap_free(struct dlb_bitmap *bitmap)
> +{
> +       if (bitmap == NULL)
> +               return;
> +
> +       rte_free(bitmap->map);
> +       rte_free(bitmap);
> +}
> +
> +/**
> + * dlb_bitmap_fill() - fill a bitmap with all 1s
> + * @bitmap: pointer to dlb_bitmap structure.
> + *
> + * This function sets all bitmap values to 1.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized.
> + */
> +static inline int dlb_bitmap_fill(struct dlb_bitmap *bitmap)
> +{
> +       unsigned int i;
> +
> +       if (bitmap  == NULL || bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       for (i = 0; i != bitmap->len; i++)
> +               rte_bitmap_set(bitmap->map, i);
> +
> +       return 0;
> +}
> +
> +/**
> + * dlb_bitmap_zero() - fill a bitmap with all 0s
> + * @bitmap: pointer to dlb_bitmap structure.
> + *
> + * This function sets all bitmap values to 0.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized.
> + */
> +static inline int dlb_bitmap_zero(struct dlb_bitmap *bitmap)
> +{
> +       if (bitmap  == NULL || bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       rte_bitmap_reset(bitmap->map);
> +
> +       return 0;
> +}
> +
> +/**
> + * dlb_bitmap_set() - set a bitmap entry
> + * @bitmap: pointer to dlb_bitmap structure.
> + * @bit: bit index.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
> + *         bitmap length.
> + */
> +static inline int dlb_bitmap_set(struct dlb_bitmap *bitmap,
> +                                unsigned int bit)
> +{
> +       if (bitmap  == NULL || bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       if (bitmap->len <= bit)
> +               return -EINVAL;
> +
> +       rte_bitmap_set(bitmap->map, bit);
> +
> +       return 0;
> +}
> +
> +/**
> + * dlb_bitmap_set_range() - set a range of bitmap entries
> + * @bitmap: pointer to dlb_bitmap structure.
> + * @bit: starting bit index.
> + * @len: length of the range.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
> + *         length.
> + */
> +static inline int dlb_bitmap_set_range(struct dlb_bitmap *bitmap,
> +                                      unsigned int bit,
> +                                      unsigned int len)
> +{
> +       unsigned int i;
> +
> +       if (bitmap  == NULL || bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       if (bitmap->len <= bit)
> +               return -EINVAL;
> +
> +       for (i = 0; i != len; i++)
> +               rte_bitmap_set(bitmap->map, bit + i);
> +
> +       return 0;
> +}
> +
> +/**
> + * dlb_bitmap_clear() - clear a bitmap entry
> + * @bitmap: pointer to dlb_bitmap structure.
> + * @bit: bit index.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
> + *         bitmap length.
> + */
> +static inline int dlb_bitmap_clear(struct dlb_bitmap *bitmap,
> +                                  unsigned int bit)
> +{
> +       if (bitmap  == NULL || bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       if (bitmap->len <= bit)
> +               return -EINVAL;
> +
> +       rte_bitmap_clear(bitmap->map, bit);
> +
> +       return 0;
> +}
> +
> +/**
> + * dlb_bitmap_clear_range() - clear a range of bitmap entries
> + * @bitmap: pointer to dlb_bitmap structure.
> + * @bit: starting bit index.
> + * @len: length of the range.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
> + *         length.
> + */
> +static inline int dlb_bitmap_clear_range(struct dlb_bitmap *bitmap,
> +                                        unsigned int bit,
> +                                        unsigned int len)
> +{
> +       unsigned int i;
> +
> +       if (bitmap  == NULL || bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       if (bitmap->len <= bit)
> +               return -EINVAL;
> +
> +       for (i = 0; i != len; i++)
> +               rte_bitmap_clear(bitmap->map, bit + i);
> +
> +       return 0;
> +}
> +
> +/**
> + * dlb_bitmap_find_set_bit_range() - find a range of set bits
> + * @bitmap: pointer to dlb_bitmap structure.
> + * @len: length of the range.
> + *
> + * This function looks for a range of set bits of length @len.
> + *
> + * Return:
> + * Returns the base bit index upon success, < 0 otherwise.
> + *
> + * Errors:
> + * ENOENT - unable to find a length *len* range of set bits.
> + * EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
> + */
> +static inline int dlb_bitmap_find_set_bit_range(struct dlb_bitmap *bitmap,
> +                                               unsigned int len)
> +{
> +       unsigned int i, j = 0;
> +
> +       if (bitmap  == NULL || bitmap->map  == NULL || len == 0)
> +               return -EINVAL;
> +
> +       if (bitmap->len < len)
> +               return -ENOENT;
> +
> +       for (i = 0; i != bitmap->len; i++) {
> +               if  (rte_bitmap_get(bitmap->map, i)) {
> +                       if (++j == len)
> +                               return i - j + 1;
> +               } else
> +                       j = 0;
> +       }
> +
> +       /* No set bit range of length len? */
> +       return -ENOENT;
> +}
> +
> +/**
> + * dlb_bitmap_find_set_bit() - find the first set bit
> + * @bitmap: pointer to dlb_bitmap structure.
> + *
> + * This function looks for a single set bit.
> + *
> + * Return:
> + * Returns the base bit index upon success, < 0 otherwise.
> + *
> + * Errors:
> + * ENOENT - the bitmap contains no set bits.
> + * EINVAL - bitmap is NULL or is uninitialized.
> + */
> +static inline int dlb_bitmap_find_set_bit(struct dlb_bitmap *bitmap)
> +{
> +       unsigned int i;
> +
> +       if (bitmap == NULL)
> +               return -EINVAL;
> +
> +       if (bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       for (i = 0; i != bitmap->len; i++) {
> +               if  (rte_bitmap_get(bitmap->map, i))
> +                       return i;
> +       }
> +
> +       return -ENOENT;
> +}
> +
> +/**
> + * dlb_bitmap_count() - returns the number of set bits
> + * @bitmap: pointer to dlb_bitmap structure.
> + *
> + * This function counts the number of set bits in the bitmap.
> + *
> + * Return:
> + * Returns the number of set bits upon success, <0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized.
> + */
> +static inline int dlb_bitmap_count(struct dlb_bitmap *bitmap)
> +{
> +       int weight = 0;
> +       unsigned int i;
> +
> +       if (bitmap == NULL)
> +               return -EINVAL;
> +
> +       if (bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       for (i = 0; i != bitmap->len; i++) {
> +               if  (rte_bitmap_get(bitmap->map, i))
> +                       weight++;
> +       }
> +       return weight;
> +}
> +
> +/**
> + * dlb_bitmap_longest_set_range() - returns longest contiguous range of set bits
> + * @bitmap: pointer to dlb_bitmap structure.
> + *
> + * Return:
> + * Returns the bitmap's longest contiguous range of set bits upon success,
> + * <0 otherwise.
> + *
> + * Errors:
> + * EINVAL - bitmap is NULL or is uninitialized.
> + */
> +static inline int dlb_bitmap_longest_set_range(struct dlb_bitmap *bitmap)
> +{
> +       int max_len = 0, len = 0;
> +       unsigned int i;
> +
> +       if (bitmap == NULL)
> +               return -EINVAL;
> +
> +       if (bitmap->map == NULL)
> +               return -EINVAL;
> +
> +       for (i = 0; i != bitmap->len; i++) {
> +               if  (rte_bitmap_get(bitmap->map, i)) {
> +                       len++;
> +               } else {
> +                       if (len > max_len)
> +                               max_len = len;
> +                       len = 0;
> +               }
> +       }
> +
> +       if (len > max_len)
> +               max_len = len;
> +
> +       return max_len;
> +}
> +
> +/**
> + * dlb_bitmap_or() - store the logical 'or' of two bitmaps into a third
> + * @dest: pointer to dlb_bitmap structure, which will contain the results of
> + *       the 'or' of src1 and src2.
> + * @src1: pointer to dlb_bitmap structure, will be 'or'ed with src2.
> + * @src2: pointer to dlb_bitmap structure, will be 'or'ed with src1.
> + *
> + * This function 'or's two bitmaps together and stores the result in a third
> + * bitmap. The source and destination bitmaps can be the same.
> + *
> + * Return:
> + * Returns the number of set bits upon success, <0 otherwise.
> + *
> + * Errors:
> + * EINVAL - One of the bitmaps is NULL or is uninitialized.
> + */
> +static inline int dlb_bitmap_or(struct dlb_bitmap *dest,
> +                               struct dlb_bitmap *src1,
> +                               struct dlb_bitmap *src2)
> +{
> +       unsigned int i, min;
> +       int numset = 0;
> +
> +       if (dest  == NULL || dest->map == NULL ||
> +           src1 == NULL || src1->map == NULL ||
> +           src2  == NULL || src2->map == NULL)
> +               return -EINVAL;
> +
> +       min = dest->len;
> +       min = (min > src1->len) ? src1->len : min;
> +       min = (min > src2->len) ? src2->len : min;
> +
> +       for (i = 0; i != min; i++) {
> +               if  (rte_bitmap_get(src1->map, i) ||
> +                               rte_bitmap_get(src2->map, i)) {
> +                       rte_bitmap_set(dest->map, i);
> +                       numset++;
> +               } else
> +                       rte_bitmap_clear(dest->map, i);
> +       }
> +
> +       return numset;
> +}
> +
> +#endif /*  __DLB_OSDEP_BITMAP_H__ */
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep_list.h b/drivers/event/dlb/pf/base/dlb_osdep_list.h
> new file mode 100644
> index 0000000..a53b362
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_osdep_list.h
> @@ -0,0 +1,131 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef __DLB_OSDEP_LIST_H__
> +#define __DLB_OSDEP_LIST_H__
> +
> +#include <rte_tailq.h>
> +
> +struct dlb_list_entry {
> +       TAILQ_ENTRY(dlb_list_entry) node;
> +};
> +
> +/* Dummy - just a struct definition */
> +TAILQ_HEAD(dlb_list_head, dlb_list_entry);
> +
> +/* =================
> + * TAILQ Supplements
> + * =================
> + */
> +
> +#ifndef TAILQ_FOREACH_ENTRY
> +#define TAILQ_FOREACH_ENTRY(ptr, head, name, iter)             \
> +       for ((iter) = TAILQ_FIRST(&head);                       \
> +           (iter)                                              \
> +               && (ptr = container_of(iter, typeof(*(ptr)), name)); \
> +           (iter) = TAILQ_NEXT((iter), node))
> +#endif
> +
> +#ifndef TAILQ_FOREACH_ENTRY_SAFE
> +#define TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, iter, tvar)  \
> +       for ((iter) = TAILQ_FIRST(&head);                       \
> +           (iter) &&                                           \
> +               (ptr = container_of(iter, typeof(*(ptr)), name)) &&\
> +               ((tvar) = TAILQ_NEXT((iter), node), 1); \
> +           (iter) = (tvar))
> +#endif
> +
> +/* =========
> + * DLB Lists
> + * =========
> + */
> +
> +/**
> + * dlb_list_init_head() - initialize the head of a list
> + * @head: list head
> + */
> +static inline void dlb_list_init_head(struct dlb_list_head *head)
> +{
> +       TAILQ_INIT(head);
> +}
> +
> +/**
> + * dlb_list_add() - add an entry to a list
> + * @head: new entry will be added after this list header
> + * @entry: new list entry to be added
> + */
> +static inline void dlb_list_add(struct dlb_list_head *head,
> +                               struct dlb_list_entry *entry)
> +{
> +       TAILQ_INSERT_TAIL(head, entry, node);
> +}
> +
> +/**
> + * dlb_list_del() - delete an entry from a list
> + * @head: list head
> + * @entry: list entry to be deleted
> + */
> +static inline void dlb_list_del(struct dlb_list_head *head,
> +                               struct dlb_list_entry *entry)
> +{
> +       TAILQ_REMOVE(head, entry, node);
> +}
> +
> +/**
> + * dlb_list_empty() - check if a list is empty
> + * @head: list head
> + *
> + * Return:
> + * Returns true if empty, false if not.
> + */
> +static inline bool dlb_list_empty(struct dlb_list_head *head)
> +{
> +       return TAILQ_EMPTY(head);
> +}
> +
> +/**
> + * dlb_list_splice() - splice one list into another
> + * @src_head: list to be spliced in
> + * @head: list head where src_head will be inserted
> + */
> +static inline void dlb_list_splice(struct dlb_list_head *src_head,
> +                                  struct dlb_list_head *head)
> +{
> +       TAILQ_CONCAT(head, src_head, node);
> +}
> +
> +/**
> + * DLB_LIST_HEAD() - retrieve the head of the list
> + * @head: list head
> + * @type: type of the list variable
> + * @name: name of the dlb_list within the struct
> + */
> +#define DLB_LIST_HEAD(head, type, name)                                \
> +       (TAILQ_FIRST(&head) ?                                   \
> +               container_of(TAILQ_FIRST(&head), type, name) :  \
> +               NULL)
> +
> +/**
> + * DLB_LIST_FOR_EACH() - iterate over a list
> + * @head: list head
> + * @ptr: pointer to struct containing a struct dlb_list_entry
> + * @name: name of the dlb_list_entry field within the containing struct
> + * @tmp_iter: iterator variable
> + */
> +#define DLB_LIST_FOR_EACH(head, ptr, name, tmp_iter) \
> +       TAILQ_FOREACH_ENTRY(ptr, head, name, tmp_iter)
> +
> +/**
> + * DLB_LIST_FOR_EACH_SAFE() - iterate over a list. This loop works even if
> + * an element is removed from the list while processing it.
> + * @ptr: pointer to struct containing a struct dlb_list_entry
> + * @ptr_tmp: pointer to struct containing a struct dlb_list_entry (temporary)
> + * @head: list head
> + * @name: name of the dlb_list_entry field within the containing struct
> + * @tmp_iter: iterator variable
> + * @saf_iter: iterator variable (temporary)
> + */
> +#define DLB_LIST_FOR_EACH_SAFE(head, ptr, ptr_tmp, name, tmp_iter, saf_iter) \
> +       TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, tmp_iter, saf_iter)
> +
> +#endif /*  __DLB_OSDEP_LIST_H__ */
> diff --git a/drivers/event/dlb/pf/base/dlb_osdep_types.h b/drivers/event/dlb/pf/base/dlb_osdep_types.h
> new file mode 100644
> index 0000000..2e9d7d8
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_osdep_types.h
> @@ -0,0 +1,31 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef __DLB_OSDEP_TYPES_H
> +#define __DLB_OSDEP_TYPES_H
> +
> +#include <linux/types.h>
> +
> +#include <inttypes.h>
> +#include <ctype.h>
> +#include <stdint.h>
> +#include <stdbool.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <errno.h>
> +
> +/* Types for user mode PF PMD */
> +typedef uint8_t         u8;
> +typedef int8_t          s8;
> +typedef uint16_t        u16;
> +typedef int16_t         s16;
> +typedef uint32_t        u32;
> +typedef int32_t         s32;
> +typedef uint64_t        u64;
> +
> +#define __iomem
> +
> +/* END types for user mode PF PMD */
> +
> +#endif /* __DLB_OSDEP_TYPES_H */
> diff --git a/drivers/event/dlb/pf/base/dlb_regs.h b/drivers/event/dlb/pf/base/dlb_regs.h
> new file mode 100644
> index 0000000..a1c63f3
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_regs.h
> @@ -0,0 +1,2368 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef __DLB_REGS_H
> +#define __DLB_REGS_H
> +
> +#include "dlb_osdep_types.h"
> +
> +#define DLB_MSIX_MEM_VECTOR_CTRL(x) \
> +       (0x100000c + (x) * 0x10)
> +#define DLB_MSIX_MEM_VECTOR_CTRL_RST 0x1
> +union dlb_msix_mem_vector_ctrl {
> +       struct {
> +               u32 vec_mask : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_TOTAL_VAS 0x124
> +#define DLB_SYS_TOTAL_VAS_RST 0x20
> +union dlb_sys_total_vas {
> +       struct {
> +               u32 total_vas : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_ALARM_PF_SYND2 0x508
> +#define DLB_SYS_ALARM_PF_SYND2_RST 0x0
> +union dlb_sys_alarm_pf_synd2 {
> +       struct {
> +               u32 lock_id : 16;
> +               u32 meas : 1;
> +               u32 debug : 7;
> +               u32 cq_pop : 1;
> +               u32 qe_uhl : 1;
> +               u32 qe_orsp : 1;
> +               u32 qe_valid : 1;
> +               u32 cq_int_rearm : 1;
> +               u32 dsi_error : 1;
> +               u32 rsvd0 : 2;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_ALARM_PF_SYND1 0x504
> +#define DLB_SYS_ALARM_PF_SYND1_RST 0x0
> +union dlb_sys_alarm_pf_synd1 {
> +       struct {
> +               u32 dsi : 16;
> +               u32 qid : 8;
> +               u32 qtype : 2;
> +               u32 qpri : 3;
> +               u32 msg_type : 3;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_ALARM_PF_SYND0 0x500
> +#define DLB_SYS_ALARM_PF_SYND0_RST 0x0
> +union dlb_sys_alarm_pf_synd0 {
> +       struct {
> +               u32 syndrome : 8;
> +               u32 rtype : 2;
> +               u32 rsvd0 : 2;
> +               u32 from_dmv : 1;
> +               u32 is_ldb : 1;
> +               u32 cls : 2;
> +               u32 aid : 6;
> +               u32 unit : 4;
> +               u32 source : 4;
> +               u32 more : 1;
> +               u32 valid : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_VASQID_V(x) \
> +       (0xf60 + (x) * 0x1000)
> +#define DLB_SYS_LDB_VASQID_V_RST 0x0
> +union dlb_sys_ldb_vasqid_v {
> +       struct {
> +               u32 vasqid_v : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_VASQID_V(x) \
> +       (0xf68 + (x) * 0x1000)
> +#define DLB_SYS_DIR_VASQID_V_RST 0x0
> +union dlb_sys_dir_vasqid_v {
> +       struct {
> +               u32 vasqid_v : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_WBUF_DIR_FLAGS(x) \
> +       (0xf70 + (x) * 0x1000)
> +#define DLB_SYS_WBUF_DIR_FLAGS_RST 0x0
> +union dlb_sys_wbuf_dir_flags {
> +       struct {
> +               u32 wb_v : 4;
> +               u32 cl : 1;
> +               u32 busy : 1;
> +               u32 opt : 1;
> +               u32 rsvd0 : 25;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_WBUF_LDB_FLAGS(x) \
> +       (0xf78 + (x) * 0x1000)
> +#define DLB_SYS_WBUF_LDB_FLAGS_RST 0x0
> +union dlb_sys_wbuf_ldb_flags {
> +       struct {
> +               u32 wb_v : 4;
> +               u32 cl : 1;
> +               u32 busy : 1;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_QID_V(x) \
> +       (0x8000034 + (x) * 0x1000)
> +#define DLB_SYS_LDB_QID_V_RST 0x0
> +union dlb_sys_ldb_qid_v {
> +       struct {
> +               u32 qid_v : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_QID_CFG_V(x) \
> +       (0x8000030 + (x) * 0x1000)
> +#define DLB_SYS_LDB_QID_CFG_V_RST 0x0
> +union dlb_sys_ldb_qid_cfg_v {
> +       struct {
> +               u32 sn_cfg_v : 1;
> +               u32 fid_cfg_v : 1;
> +               u32 rsvd0 : 30;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_QID_V(x) \
> +       (0x8000040 + (x) * 0x1000)
> +#define DLB_SYS_DIR_QID_V_RST 0x0
> +union dlb_sys_dir_qid_v {
> +       struct {
> +               u32 qid_v : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_POOL_ENBLD(x) \
> +       (0x8000070 + (x) * 0x1000)
> +#define DLB_SYS_LDB_POOL_ENBLD_RST 0x0
> +union dlb_sys_ldb_pool_enbld {
> +       struct {
> +               u32 pool_enabled : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_POOL_ENBLD(x) \
> +       (0x8000080 + (x) * 0x1000)
> +#define DLB_SYS_DIR_POOL_ENBLD_RST 0x0
> +union dlb_sys_dir_pool_enbld {
> +       struct {
> +               u32 pool_enabled : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP2VPP(x) \
> +       (0x8000090 + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP2VPP_RST 0x0
> +union dlb_sys_ldb_pp2vpp {
> +       struct {
> +               u32 vpp : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP2VPP(x) \
> +       (0x8000094 + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP2VPP_RST 0x0
> +union dlb_sys_dir_pp2vpp {
> +       struct {
> +               u32 vpp : 7;
> +               u32 rsvd0 : 25;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP_V(x) \
> +       (0x8000128 + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP_V_RST 0x0
> +union dlb_sys_ldb_pp_v {
> +       struct {
> +               u32 pp_v : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_CQ_ISR(x) \
> +       (0x8000124 + (x) * 0x1000)
> +#define DLB_SYS_LDB_CQ_ISR_RST 0x0
> +/* CQ Interrupt Modes */
> +#define DLB_CQ_ISR_MODE_DIS  0
> +#define DLB_CQ_ISR_MODE_MSI  1
> +#define DLB_CQ_ISR_MODE_MSIX 2
> +union dlb_sys_ldb_cq_isr {
> +       struct {
> +               u32 vector : 6;
> +               u32 vf : 4;
> +               u32 en_code : 2;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_CQ2VF_PF(x) \
> +       (0x8000120 + (x) * 0x1000)
> +#define DLB_SYS_LDB_CQ2VF_PF_RST 0x0
> +union dlb_sys_ldb_cq2vf_pf {
> +       struct {
> +               u32 vf : 4;
> +               u32 is_pf : 1;
> +               u32 rsvd0 : 27;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP2VAS(x) \
> +       (0x800011c + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP2VAS_RST 0x0
> +union dlb_sys_ldb_pp2vas {
> +       struct {
> +               u32 vas : 5;
> +               u32 rsvd0 : 27;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP2LDBPOOL(x) \
> +       (0x8000118 + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP2LDBPOOL_RST 0x0
> +union dlb_sys_ldb_pp2ldbpool {
> +       struct {
> +               u32 ldbpool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP2DIRPOOL(x) \
> +       (0x8000114 + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP2DIRPOOL_RST 0x0
> +union dlb_sys_ldb_pp2dirpool {
> +       struct {
> +               u32 dirpool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP2VF_PF(x) \
> +       (0x8000110 + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP2VF_PF_RST 0x0
> +union dlb_sys_ldb_pp2vf_pf {
> +       struct {
> +               u32 vf : 4;
> +               u32 is_pf : 1;
> +               u32 rsvd0 : 27;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP_ADDR_U(x) \
> +       (0x800010c + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP_ADDR_U_RST 0x0
> +union dlb_sys_ldb_pp_addr_u {
> +       struct {
> +               u32 addr_u : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_PP_ADDR_L(x) \
> +       (0x8000108 + (x) * 0x1000)
> +#define DLB_SYS_LDB_PP_ADDR_L_RST 0x0
> +union dlb_sys_ldb_pp_addr_l {
> +       struct {
> +               u32 rsvd0 : 7;
> +               u32 addr_l : 25;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_CQ_ADDR_U(x) \
> +       (0x8000104 + (x) * 0x1000)
> +#define DLB_SYS_LDB_CQ_ADDR_U_RST 0x0
> +union dlb_sys_ldb_cq_addr_u {
> +       struct {
> +               u32 addr_u : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_CQ_ADDR_L(x) \
> +       (0x8000100 + (x) * 0x1000)
> +#define DLB_SYS_LDB_CQ_ADDR_L_RST 0x0
> +union dlb_sys_ldb_cq_addr_l {
> +       struct {
> +               u32 rsvd0 : 6;
> +               u32 addr_l : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP_V(x) \
> +       (0x8000228 + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP_V_RST 0x0
> +union dlb_sys_dir_pp_v {
> +       struct {
> +               u32 pp_v : 1;
> +               u32 mb_dm : 1;
> +               u32 rsvd0 : 30;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ_ISR(x) \
> +       (0x8000224 + (x) * 0x1000)
> +#define DLB_SYS_DIR_CQ_ISR_RST 0x0
> +union dlb_sys_dir_cq_isr {
> +       struct {
> +               u32 vector : 6;
> +               u32 vf : 4;
> +               u32 en_code : 2;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ2VF_PF(x) \
> +       (0x8000220 + (x) * 0x1000)
> +#define DLB_SYS_DIR_CQ2VF_PF_RST 0x0
> +union dlb_sys_dir_cq2vf_pf {
> +       struct {
> +               u32 vf : 4;
> +               u32 is_pf : 1;
> +               u32 rsvd0 : 27;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP2VAS(x) \
> +       (0x800021c + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP2VAS_RST 0x0
> +union dlb_sys_dir_pp2vas {
> +       struct {
> +               u32 vas : 5;
> +               u32 rsvd0 : 27;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP2LDBPOOL(x) \
> +       (0x8000218 + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP2LDBPOOL_RST 0x0
> +union dlb_sys_dir_pp2ldbpool {
> +       struct {
> +               u32 ldbpool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP2DIRPOOL(x) \
> +       (0x8000214 + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP2DIRPOOL_RST 0x0
> +union dlb_sys_dir_pp2dirpool {
> +       struct {
> +               u32 dirpool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP2VF_PF(x) \
> +       (0x8000210 + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP2VF_PF_RST 0x0
> +union dlb_sys_dir_pp2vf_pf {
> +       struct {
> +               u32 vf : 4;
> +               u32 is_pf : 1;
> +               u32 is_hw_dsi : 1;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP_ADDR_U(x) \
> +       (0x800020c + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP_ADDR_U_RST 0x0
> +union dlb_sys_dir_pp_addr_u {
> +       struct {
> +               u32 addr_u : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_PP_ADDR_L(x) \
> +       (0x8000208 + (x) * 0x1000)
> +#define DLB_SYS_DIR_PP_ADDR_L_RST 0x0
> +union dlb_sys_dir_pp_addr_l {
> +       struct {
> +               u32 rsvd0 : 7;
> +               u32 addr_l : 25;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ_ADDR_U(x) \
> +       (0x8000204 + (x) * 0x1000)
> +#define DLB_SYS_DIR_CQ_ADDR_U_RST 0x0
> +union dlb_sys_dir_cq_addr_u {
> +       struct {
> +               u32 addr_u : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ_ADDR_L(x) \
> +       (0x8000200 + (x) * 0x1000)
> +#define DLB_SYS_DIR_CQ_ADDR_L_RST 0x0
> +union dlb_sys_dir_cq_addr_l {
> +       struct {
> +               u32 rsvd0 : 6;
> +               u32 addr_l : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_INGRESS_ALARM_ENBL 0x300
> +#define DLB_SYS_INGRESS_ALARM_ENBL_RST 0x0
> +union dlb_sys_ingress_alarm_enbl {
> +       struct {
> +               u32 illegal_hcw : 1;
> +               u32 illegal_pp : 1;
> +               u32 disabled_pp : 1;
> +               u32 illegal_qid : 1;
> +               u32 disabled_qid : 1;
> +               u32 illegal_ldb_qid_cfg : 1;
> +               u32 illegal_cqid : 1;
> +               u32 rsvd0 : 25;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_CQ_MODE 0x30c
> +#define DLB_SYS_CQ_MODE_RST 0x0
> +union dlb_sys_cq_mode {
> +       struct {
> +               u32 ldb_cq64 : 1;
> +               u32 dir_cq64 : 1;
> +               u32 rsvd0 : 30;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_MSIX_ACK 0x400
> +#define DLB_SYS_MSIX_ACK_RST 0x0
> +union dlb_sys_msix_ack {
> +       struct {
> +               u32 msix_0_ack : 1;
> +               u32 msix_1_ack : 1;
> +               u32 msix_2_ack : 1;
> +               u32 msix_3_ack : 1;
> +               u32 msix_4_ack : 1;
> +               u32 msix_5_ack : 1;
> +               u32 msix_6_ack : 1;
> +               u32 msix_7_ack : 1;
> +               u32 msix_8_ack : 1;
> +               u32 rsvd0 : 23;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_MSIX_PASSTHRU 0x404
> +#define DLB_SYS_MSIX_PASSTHRU_RST 0x0
> +union dlb_sys_msix_passthru {
> +       struct {
> +               u32 msix_0_passthru : 1;
> +               u32 msix_1_passthru : 1;
> +               u32 msix_2_passthru : 1;
> +               u32 msix_3_passthru : 1;
> +               u32 msix_4_passthru : 1;
> +               u32 msix_5_passthru : 1;
> +               u32 msix_6_passthru : 1;
> +               u32 msix_7_passthru : 1;
> +               u32 msix_8_passthru : 1;
> +               u32 rsvd0 : 23;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_MSIX_MODE 0x408
> +#define DLB_SYS_MSIX_MODE_RST 0x0
> +/* MSI-X Modes */
> +#define DLB_MSIX_MODE_PACKED     0
> +#define DLB_MSIX_MODE_COMPRESSED 1
> +union dlb_sys_msix_mode {
> +       struct {
> +               u32 mode : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS 0x440
> +#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS_RST 0x0
> +union dlb_sys_dir_cq_31_0_occ_int_sts {
> +       struct {
> +               u32 cq_0_occ_int : 1;
> +               u32 cq_1_occ_int : 1;
> +               u32 cq_2_occ_int : 1;
> +               u32 cq_3_occ_int : 1;
> +               u32 cq_4_occ_int : 1;
> +               u32 cq_5_occ_int : 1;
> +               u32 cq_6_occ_int : 1;
> +               u32 cq_7_occ_int : 1;
> +               u32 cq_8_occ_int : 1;
> +               u32 cq_9_occ_int : 1;
> +               u32 cq_10_occ_int : 1;
> +               u32 cq_11_occ_int : 1;
> +               u32 cq_12_occ_int : 1;
> +               u32 cq_13_occ_int : 1;
> +               u32 cq_14_occ_int : 1;
> +               u32 cq_15_occ_int : 1;
> +               u32 cq_16_occ_int : 1;
> +               u32 cq_17_occ_int : 1;
> +               u32 cq_18_occ_int : 1;
> +               u32 cq_19_occ_int : 1;
> +               u32 cq_20_occ_int : 1;
> +               u32 cq_21_occ_int : 1;
> +               u32 cq_22_occ_int : 1;
> +               u32 cq_23_occ_int : 1;
> +               u32 cq_24_occ_int : 1;
> +               u32 cq_25_occ_int : 1;
> +               u32 cq_26_occ_int : 1;
> +               u32 cq_27_occ_int : 1;
> +               u32 cq_28_occ_int : 1;
> +               u32 cq_29_occ_int : 1;
> +               u32 cq_30_occ_int : 1;
> +               u32 cq_31_occ_int : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS 0x444
> +#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS_RST 0x0
> +union dlb_sys_dir_cq_63_32_occ_int_sts {
> +       struct {
> +               u32 cq_32_occ_int : 1;
> +               u32 cq_33_occ_int : 1;
> +               u32 cq_34_occ_int : 1;
> +               u32 cq_35_occ_int : 1;
> +               u32 cq_36_occ_int : 1;
> +               u32 cq_37_occ_int : 1;
> +               u32 cq_38_occ_int : 1;
> +               u32 cq_39_occ_int : 1;
> +               u32 cq_40_occ_int : 1;
> +               u32 cq_41_occ_int : 1;
> +               u32 cq_42_occ_int : 1;
> +               u32 cq_43_occ_int : 1;
> +               u32 cq_44_occ_int : 1;
> +               u32 cq_45_occ_int : 1;
> +               u32 cq_46_occ_int : 1;
> +               u32 cq_47_occ_int : 1;
> +               u32 cq_48_occ_int : 1;
> +               u32 cq_49_occ_int : 1;
> +               u32 cq_50_occ_int : 1;
> +               u32 cq_51_occ_int : 1;
> +               u32 cq_52_occ_int : 1;
> +               u32 cq_53_occ_int : 1;
> +               u32 cq_54_occ_int : 1;
> +               u32 cq_55_occ_int : 1;
> +               u32 cq_56_occ_int : 1;
> +               u32 cq_57_occ_int : 1;
> +               u32 cq_58_occ_int : 1;
> +               u32 cq_59_occ_int : 1;
> +               u32 cq_60_occ_int : 1;
> +               u32 cq_61_occ_int : 1;
> +               u32 cq_62_occ_int : 1;
> +               u32 cq_63_occ_int : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS 0x448
> +#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS_RST 0x0
> +union dlb_sys_dir_cq_95_64_occ_int_sts {
> +       struct {
> +               u32 cq_64_occ_int : 1;
> +               u32 cq_65_occ_int : 1;
> +               u32 cq_66_occ_int : 1;
> +               u32 cq_67_occ_int : 1;
> +               u32 cq_68_occ_int : 1;
> +               u32 cq_69_occ_int : 1;
> +               u32 cq_70_occ_int : 1;
> +               u32 cq_71_occ_int : 1;
> +               u32 cq_72_occ_int : 1;
> +               u32 cq_73_occ_int : 1;
> +               u32 cq_74_occ_int : 1;
> +               u32 cq_75_occ_int : 1;
> +               u32 cq_76_occ_int : 1;
> +               u32 cq_77_occ_int : 1;
> +               u32 cq_78_occ_int : 1;
> +               u32 cq_79_occ_int : 1;
> +               u32 cq_80_occ_int : 1;
> +               u32 cq_81_occ_int : 1;
> +               u32 cq_82_occ_int : 1;
> +               u32 cq_83_occ_int : 1;
> +               u32 cq_84_occ_int : 1;
> +               u32 cq_85_occ_int : 1;
> +               u32 cq_86_occ_int : 1;
> +               u32 cq_87_occ_int : 1;
> +               u32 cq_88_occ_int : 1;
> +               u32 cq_89_occ_int : 1;
> +               u32 cq_90_occ_int : 1;
> +               u32 cq_91_occ_int : 1;
> +               u32 cq_92_occ_int : 1;
> +               u32 cq_93_occ_int : 1;
> +               u32 cq_94_occ_int : 1;
> +               u32 cq_95_occ_int : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS 0x44c
> +#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS_RST 0x0
> +union dlb_sys_dir_cq_127_96_occ_int_sts {
> +       struct {
> +               u32 cq_96_occ_int : 1;
> +               u32 cq_97_occ_int : 1;
> +               u32 cq_98_occ_int : 1;
> +               u32 cq_99_occ_int : 1;
> +               u32 cq_100_occ_int : 1;
> +               u32 cq_101_occ_int : 1;
> +               u32 cq_102_occ_int : 1;
> +               u32 cq_103_occ_int : 1;
> +               u32 cq_104_occ_int : 1;
> +               u32 cq_105_occ_int : 1;
> +               u32 cq_106_occ_int : 1;
> +               u32 cq_107_occ_int : 1;
> +               u32 cq_108_occ_int : 1;
> +               u32 cq_109_occ_int : 1;
> +               u32 cq_110_occ_int : 1;
> +               u32 cq_111_occ_int : 1;
> +               u32 cq_112_occ_int : 1;
> +               u32 cq_113_occ_int : 1;
> +               u32 cq_114_occ_int : 1;
> +               u32 cq_115_occ_int : 1;
> +               u32 cq_116_occ_int : 1;
> +               u32 cq_117_occ_int : 1;
> +               u32 cq_118_occ_int : 1;
> +               u32 cq_119_occ_int : 1;
> +               u32 cq_120_occ_int : 1;
> +               u32 cq_121_occ_int : 1;
> +               u32 cq_122_occ_int : 1;
> +               u32 cq_123_occ_int : 1;
> +               u32 cq_124_occ_int : 1;
> +               u32 cq_125_occ_int : 1;
> +               u32 cq_126_occ_int : 1;
> +               u32 cq_127_occ_int : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS 0x460
> +#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS_RST 0x0
> +union dlb_sys_ldb_cq_31_0_occ_int_sts {
> +       struct {
> +               u32 cq_0_occ_int : 1;
> +               u32 cq_1_occ_int : 1;
> +               u32 cq_2_occ_int : 1;
> +               u32 cq_3_occ_int : 1;
> +               u32 cq_4_occ_int : 1;
> +               u32 cq_5_occ_int : 1;
> +               u32 cq_6_occ_int : 1;
> +               u32 cq_7_occ_int : 1;
> +               u32 cq_8_occ_int : 1;
> +               u32 cq_9_occ_int : 1;
> +               u32 cq_10_occ_int : 1;
> +               u32 cq_11_occ_int : 1;
> +               u32 cq_12_occ_int : 1;
> +               u32 cq_13_occ_int : 1;
> +               u32 cq_14_occ_int : 1;
> +               u32 cq_15_occ_int : 1;
> +               u32 cq_16_occ_int : 1;
> +               u32 cq_17_occ_int : 1;
> +               u32 cq_18_occ_int : 1;
> +               u32 cq_19_occ_int : 1;
> +               u32 cq_20_occ_int : 1;
> +               u32 cq_21_occ_int : 1;
> +               u32 cq_22_occ_int : 1;
> +               u32 cq_23_occ_int : 1;
> +               u32 cq_24_occ_int : 1;
> +               u32 cq_25_occ_int : 1;
> +               u32 cq_26_occ_int : 1;
> +               u32 cq_27_occ_int : 1;
> +               u32 cq_28_occ_int : 1;
> +               u32 cq_29_occ_int : 1;
> +               u32 cq_30_occ_int : 1;
> +               u32 cq_31_occ_int : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS 0x464
> +#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS_RST 0x0
> +union dlb_sys_ldb_cq_63_32_occ_int_sts {
> +       struct {
> +               u32 cq_32_occ_int : 1;
> +               u32 cq_33_occ_int : 1;
> +               u32 cq_34_occ_int : 1;
> +               u32 cq_35_occ_int : 1;
> +               u32 cq_36_occ_int : 1;
> +               u32 cq_37_occ_int : 1;
> +               u32 cq_38_occ_int : 1;
> +               u32 cq_39_occ_int : 1;
> +               u32 cq_40_occ_int : 1;
> +               u32 cq_41_occ_int : 1;
> +               u32 cq_42_occ_int : 1;
> +               u32 cq_43_occ_int : 1;
> +               u32 cq_44_occ_int : 1;
> +               u32 cq_45_occ_int : 1;
> +               u32 cq_46_occ_int : 1;
> +               u32 cq_47_occ_int : 1;
> +               u32 cq_48_occ_int : 1;
> +               u32 cq_49_occ_int : 1;
> +               u32 cq_50_occ_int : 1;
> +               u32 cq_51_occ_int : 1;
> +               u32 cq_52_occ_int : 1;
> +               u32 cq_53_occ_int : 1;
> +               u32 cq_54_occ_int : 1;
> +               u32 cq_55_occ_int : 1;
> +               u32 cq_56_occ_int : 1;
> +               u32 cq_57_occ_int : 1;
> +               u32 cq_58_occ_int : 1;
> +               u32 cq_59_occ_int : 1;
> +               u32 cq_60_occ_int : 1;
> +               u32 cq_61_occ_int : 1;
> +               u32 cq_62_occ_int : 1;
> +               u32 cq_63_occ_int : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_ALARM_HW_SYND 0x50c
> +#define DLB_SYS_ALARM_HW_SYND_RST 0x0
> +union dlb_sys_alarm_hw_synd {
> +       struct {
> +               u32 syndrome : 8;
> +               u32 rtype : 2;
> +               u32 rsvd0 : 2;
> +               u32 from_dmv : 1;
> +               u32 is_ldb : 1;
> +               u32 cls : 2;
> +               u32 aid : 6;
> +               u32 unit : 4;
> +               u32 source : 4;
> +               u32 more : 1;
> +               u32 valid : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_SYS_SYS_ALARM_INT_ENABLE 0xc001048
> +#define DLB_SYS_SYS_ALARM_INT_ENABLE_RST 0x7fffff
> +union dlb_sys_sys_alarm_int_enable {
> +       struct {
> +               u32 cq_addr_overflow_error : 1;
> +               u32 ingress_perr : 1;
> +               u32 egress_perr : 1;
> +               u32 alarm_perr : 1;
> +               u32 vf_to_pf_isr_pend_error : 1;
> +               u32 pf_to_vf_isr_pend_error : 1;
> +               u32 timeout_error : 1;
> +               u32 dmvw_sm_error : 1;
> +               u32 pptr_sm_par_error : 1;
> +               u32 pptr_sm_len_error : 1;
> +               u32 sch_sm_error : 1;
> +               u32 wbuf_flag_error : 1;
> +               u32 dmvw_cl_error : 1;
> +               u32 dmvr_cl_error : 1;
> +               u32 cmpl_data_error : 1;
> +               u32 cmpl_error : 1;
> +               u32 fifo_underflow : 1;
> +               u32 fifo_overflow : 1;
> +               u32 sb_ep_parity_err : 1;
> +               u32 ti_parity_err : 1;
> +               u32 ri_parity_err : 1;
> +               u32 cfgm_ppw_err : 1;
> +               u32 system_csr_perr : 1;
> +               u32 rsvd0 : 9;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * LSP per-CQ registers, load-balanced (LDB) ports: scheduling counters,
> + * enable/disable, token depth selection and CQ->QID mapping.
> + * (x) selects the CQ instance with a 0x1000 byte stride; every union
> + * overlays one 32-bit register (bit-fields sum to 32 in each).
> + * NOTE(review): offsets and _RST reset values are transcribed from the
> + * DLB hardware spec and cannot be cross-checked from this file alone.
> + */
> +#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(x) \
> +       (0x20000000 + (x) * 0x1000)
> +#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST 0x0
> +union dlb_lsp_cq_ldb_tot_sch_cnt_ctrl {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_LDB_DSBL(x) \
> +       (0x20000124 + (x) * 0x1000)
> +/* reset value 1: a CQ comes out of reset disabled */
> +#define DLB_LSP_CQ_LDB_DSBL_RST 0x1
> +union dlb_lsp_cq_ldb_dsbl {
> +       struct {
> +               u32 disabled : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +/* 64-bit total-scheduled counter, split into high/low 32-bit halves */
> +#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH(x) \
> +       (0x20000120 + (x) * 0x1000)
> +#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH_RST 0x0
> +union dlb_lsp_cq_ldb_tot_sch_cnth {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL(x) \
> +       (0x2000011c + (x) * 0x1000)
> +#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL_RST 0x0
> +union dlb_lsp_cq_ldb_tot_sch_cntl {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(x) \
> +       (0x20000118 + (x) * 0x1000)
> +#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST 0x0
> +union dlb_lsp_cq_ldb_tkn_depth_sel {
> +       struct {
> +               u32 token_depth_select : 4;
> +               u32 ignore_depth : 1;
> +               u32 enab_shallow_cq : 1;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_LDB_TKN_CNT(x) \
> +       (0x20000114 + (x) * 0x1000)
> +#define DLB_LSP_CQ_LDB_TKN_CNT_RST 0x0
> +union dlb_lsp_cq_ldb_tkn_cnt {
> +       struct {
> +               u32 token_count : 11;
> +               u32 rsvd0 : 21;
> +       } field;
> +       u32 val;
> +};
> +
> +/* per-CQ inflight-event limit and current count (13-bit each) */
> +#define DLB_LSP_CQ_LDB_INFL_LIM(x) \
> +       (0x20000110 + (x) * 0x1000)
> +#define DLB_LSP_CQ_LDB_INFL_LIM_RST 0x0
> +union dlb_lsp_cq_ldb_infl_lim {
> +       struct {
> +               u32 limit : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_LDB_INFL_CNT(x) \
> +       (0x2000010c + (x) * 0x1000)
> +#define DLB_LSP_CQ_LDB_INFL_CNT_RST 0x0
> +union dlb_lsp_cq_ldb_infl_cnt {
> +       struct {
> +               u32 count : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +/* four 7-bit QID slots packed per 32-bit register; (y) selects the word */
> +#define DLB_LSP_CQ2QID(x, y) \
> +       (0x20000104 + (x) * 0x1000 + (y) * 0x4)
> +#define DLB_LSP_CQ2QID_RST 0x0
> +union dlb_lsp_cq2qid {
> +       struct {
> +               u32 qid_p0 : 7;
> +               u32 rsvd3 : 1;
> +               u32 qid_p1 : 7;
> +               u32 rsvd2 : 1;
> +               u32 qid_p2 : 7;
> +               u32 rsvd1 : 1;
> +               u32 qid_p3 : 7;
> +               u32 rsvd0 : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +/* per-slot priority (24 bits) and valid flags (8 bits); presumably
> + * 3 bits of priority + 1 valid bit per QID slot -- confirm vs. spec
> + */
> +#define DLB_LSP_CQ2PRIOV(x) \
> +       (0x20000100 + (x) * 0x1000)
> +#define DLB_LSP_CQ2PRIOV_RST 0x0
> +union dlb_lsp_cq2priov {
> +       struct {
> +               u32 prio : 24;
> +               u32 v : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * LSP per-CQ registers, directed (DIR) ports: mirror of the LDB CQ set
> + * above (disable, token depth, 64-bit scheduled counter, token count),
> + * 0x1000 stride per CQ instance.
> + */
> +#define DLB_LSP_CQ_DIR_DSBL(x) \
> +       (0x20000310 + (x) * 0x1000)
> +/* reset value 1: a CQ comes out of reset disabled */
> +#define DLB_LSP_CQ_DIR_DSBL_RST 0x1
> +union dlb_lsp_cq_dir_dsbl {
> +       struct {
> +               u32 disabled : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(x) \
> +       (0x2000030c + (x) * 0x1000)
> +#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST 0x0
> +union dlb_lsp_cq_dir_tkn_depth_sel_dsi {
> +       struct {
> +               u32 token_depth_select : 4;
> +               u32 disable_wb_opt : 1;
> +               u32 ignore_depth : 1;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +/* 64-bit total-scheduled counter, high/low halves */
> +#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH(x) \
> +       (0x20000308 + (x) * 0x1000)
> +#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH_RST 0x0
> +union dlb_lsp_cq_dir_tot_sch_cnth {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL(x) \
> +       (0x20000304 + (x) * 0x1000)
> +#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL_RST 0x0
> +union dlb_lsp_cq_dir_tot_sch_cntl {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CQ_DIR_TKN_CNT(x) \
> +       (0x20000300 + (x) * 0x1000)
> +#define DLB_LSP_CQ_DIR_TKN_CNT_RST 0x0
> +union dlb_lsp_cq_dir_tkn_cnt {
> +       struct {
> +               u32 count : 11;
> +               u32 rsvd0 : 21;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * LSP per-QID registers: QID->CQ index maps (four 8-bit CQ slots per
> + * word, (y) selecting the word) and per-QID enqueue/inflight/active
> + * counters and limits. (x) selects the QID with a 0x1000 stride.
> + */
> +#define DLB_LSP_QID_LDB_QID2CQIDX(x, y) \
> +       (0x20000400 + (x) * 0x1000 + (y) * 0x4)
> +#define DLB_LSP_QID_LDB_QID2CQIDX_RST 0x0
> +union dlb_lsp_qid_ldb_qid2cqidx {
> +       struct {
> +               u32 cq_p0 : 8;
> +               u32 cq_p1 : 8;
> +               u32 cq_p2 : 8;
> +               u32 cq_p3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +/* second QID->CQ index bank; same layout as the first */
> +#define DLB_LSP_QID_LDB_QID2CQIDX2(x, y) \
> +       (0x20000500 + (x) * 0x1000 + (y) * 0x4)
> +#define DLB_LSP_QID_LDB_QID2CQIDX2_RST 0x0
> +union dlb_lsp_qid_ldb_qid2cqidx2 {
> +       struct {
> +               u32 cq_p0 : 8;
> +               u32 cq_p1 : 8;
> +               u32 cq_p2 : 8;
> +               u32 cq_p3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_QID_ATQ_ENQUEUE_CNT(x) \
> +       (0x2000066c + (x) * 0x1000)
> +#define DLB_LSP_QID_ATQ_ENQUEUE_CNT_RST 0x0
> +union dlb_lsp_qid_atq_enqueue_cnt {
> +       struct {
> +               u32 count : 15;
> +               u32 rsvd0 : 17;
> +       } field;
> +       u32 val;
> +};
> +
> +/* per-QID inflight limit/count pair (13-bit) */
> +#define DLB_LSP_QID_LDB_INFL_LIM(x) \
> +       (0x2000064c + (x) * 0x1000)
> +#define DLB_LSP_QID_LDB_INFL_LIM_RST 0x0
> +union dlb_lsp_qid_ldb_infl_lim {
> +       struct {
> +               u32 limit : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_QID_LDB_INFL_CNT(x) \
> +       (0x2000062c + (x) * 0x1000)
> +#define DLB_LSP_QID_LDB_INFL_CNT_RST 0x0
> +union dlb_lsp_qid_ldb_infl_cnt {
> +       struct {
> +               u32 count : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +/* atomic-QED active-entry limit/count pair (12-bit) */
> +#define DLB_LSP_QID_AQED_ACTIVE_LIM(x) \
> +       (0x20000628 + (x) * 0x1000)
> +#define DLB_LSP_QID_AQED_ACTIVE_LIM_RST 0x0
> +union dlb_lsp_qid_aqed_active_lim {
> +       struct {
> +               u32 limit : 12;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_QID_AQED_ACTIVE_CNT(x) \
> +       (0x20000624 + (x) * 0x1000)
> +#define DLB_LSP_QID_AQED_ACTIVE_CNT_RST 0x0
> +union dlb_lsp_qid_aqed_active_cnt {
> +       struct {
> +               u32 count : 12;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_QID_LDB_ENQUEUE_CNT(x) \
> +       (0x20000604 + (x) * 0x1000)
> +#define DLB_LSP_QID_LDB_ENQUEUE_CNT_RST 0x0
> +union dlb_lsp_qid_ldb_enqueue_cnt {
> +       struct {
> +               u32 count : 15;
> +               u32 rsvd0 : 17;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_QID_LDB_REPLAY_CNT(x) \
> +       (0x20000600 + (x) * 0x1000)
> +#define DLB_LSP_QID_LDB_REPLAY_CNT_RST 0x0
> +union dlb_lsp_qid_ldb_replay_cnt {
> +       struct {
> +               u32 count : 15;
> +               u32 rsvd0 : 17;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_QID_DIR_ENQUEUE_CNT(x) \
> +       (0x20000700 + (x) * 0x1000)
> +#define DLB_LSP_QID_DIR_ENQUEUE_CNT_RST 0x0
> +union dlb_lsp_qid_dir_enqueue_cnt {
> +       struct {
> +               u32 count : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * LSP global configuration: arbitration mode/thresholds, 8-slot
> + * arbitration weight tables (two registers of four 8-bit weights each),
> + * a scheduler control register, and global 64-bit schedule counters.
> + */
> +#define DLB_LSP_CTRL_CONFIG_0 0x2800002c
> +#define DLB_LSP_CTRL_CONFIG_0_RST 0x12cc
> +union dlb_lsp_ctrl_config_0 {
> +       struct {
> +               u32 atm_cq_qid_priority_prot : 1;
> +               u32 ldb_arb_ignore_empty : 1;
> +               u32 ldb_arb_mode : 2;
> +               u32 ldb_arb_threshold : 18;
> +               u32 cfg_cq_sla_upd_always : 1;
> +               u32 cfg_cq_wcn_upd_always : 1;
> +               u32 spare : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +/* weight tables: _1 holds slots 4-7, _0 holds slots 0-3 */
> +#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1 0x28000028
> +#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1_RST 0x0
> +union dlb_lsp_cfg_arb_weight_atm_nalb_qid_1 {
> +       struct {
> +               u32 slot4_weight : 8;
> +               u32 slot5_weight : 8;
> +               u32 slot6_weight : 8;
> +               u32 slot7_weight : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0 0x28000024
> +#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_RST 0x0
> +union dlb_lsp_cfg_arb_weight_atm_nalb_qid_0 {
> +       struct {
> +               u32 slot0_weight : 8;
> +               u32 slot1_weight : 8;
> +               u32 slot2_weight : 8;
> +               u32 slot3_weight : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1 0x28000020
> +#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1_RST 0x0
> +union dlb_lsp_cfg_arb_weight_ldb_qid_1 {
> +       struct {
> +               u32 slot4_weight : 8;
> +               u32 slot5_weight : 8;
> +               u32 slot6_weight : 8;
> +               u32 slot7_weight : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0 0x2800001c
> +#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0_RST 0x0
> +union dlb_lsp_cfg_arb_weight_ldb_qid_0 {
> +       struct {
> +               u32 slot0_weight : 8;
> +               u32 slot1_weight : 8;
> +               u32 slot2_weight : 8;
> +               u32 slot3_weight : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_LDB_SCHED_CTRL 0x28100000
> +#define DLB_LSP_LDB_SCHED_CTRL_RST 0x0
> +union dlb_lsp_ldb_sched_ctrl {
> +       struct {
> +               u32 cq : 8;
> +               u32 qidix : 3;
> +               u32 value : 1;
> +               u32 nalb_haswork_v : 1;
> +               u32 rlist_haswork_v : 1;
> +               u32 slist_haswork_v : 1;
> +               u32 inflight_ok_v : 1;
> +               u32 aqed_nfull_v : 1;
> +               u32 spare0 : 15;
> +       } field;
> +       u32 val;
> +};
> +
> +/* global 64-bit scheduled-event counters, high/low halves */
> +#define DLB_LSP_DIR_SCH_CNT_H 0x2820000c
> +#define DLB_LSP_DIR_SCH_CNT_H_RST 0x0
> +union dlb_lsp_dir_sch_cnt_h {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_DIR_SCH_CNT_L 0x28200008
> +#define DLB_LSP_DIR_SCH_CNT_L_RST 0x0
> +union dlb_lsp_dir_sch_cnt_l {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_LDB_SCH_CNT_H 0x28200004
> +#define DLB_LSP_LDB_SCH_CNT_H_RST 0x0
> +union dlb_lsp_ldb_sch_cnt_h {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_LSP_LDB_SCH_CNT_L 0x28200000
> +#define DLB_LSP_LDB_SCH_CNT_L_RST 0x0
> +union dlb_lsp_ldb_sch_cnt_l {
> +       struct {
> +               u32 count : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * DP (directed pipe) registers: CSR/interrupt-disable control and
> + * 8-priority arbitration weight tables (two words of four 8-bit
> + * weights). Non-zero _RST values give ascending default weights.
> + */
> +#define DLB_DP_DIR_CSR_CTRL 0x38000018
> +#define DLB_DP_DIR_CSR_CTRL_RST 0xc0000000
> +union dlb_dp_dir_csr_ctrl {
> +       struct {
> +               u32 cfg_int_dis : 1;
> +               u32 cfg_int_dis_sbe : 1;
> +               u32 cfg_int_dis_mbe : 1;
> +               u32 spare0 : 27;
> +               u32 cfg_vasr_dis : 1;
> +               u32 cfg_int_dis_synd : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1 0x38000014
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1_RST 0xfffefdfc
> +union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_1 {
> +       struct {
> +               u32 pri4 : 8;
> +               u32 pri5 : 8;
> +               u32 pri6 : 8;
> +               u32 pri7 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0 0x38000010
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0_RST 0xfbfaf9f8
> +union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_0 {
> +       struct {
> +               u32 pri0 : 8;
> +               u32 pri1 : 8;
> +               u32 pri2 : 8;
> +               u32 pri3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x3800000c
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
> +union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_1 {
> +       struct {
> +               u32 pri4 : 8;
> +               u32 pri5 : 8;
> +               u32 pri6 : 8;
> +               u32 pri7 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x38000008
> +#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
> +union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_0 {
> +       struct {
> +               u32 pri0 : 8;
> +               u32 pri1 : 8;
> +               u32 pri2 : 8;
> +               u32 pri3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * NALB (non-atomic load-balanced) pipe registers: 8-priority
> + * arbitration weight tables for the NALB, ATQ and replay queues.
> + * Same two-word/four-weights layout as the DP tables above.
> + */
> +#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1 0x6800001c
> +#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1_RST 0xfffefdfc
> +union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_1 {
> +       struct {
> +               u32 pri4 : 8;
> +               u32 pri5 : 8;
> +               u32 pri6 : 8;
> +               u32 pri7 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0 0x68000018
> +#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0_RST 0xfbfaf9f8
> +union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_0 {
> +       struct {
> +               u32 pri0 : 8;
> +               u32 pri1 : 8;
> +               u32 pri2 : 8;
> +               u32 pri3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1 0x68000014
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1_RST 0xfffefdfc
> +union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_1 {
> +       struct {
> +               u32 pri4 : 8;
> +               u32 pri5 : 8;
> +               u32 pri6 : 8;
> +               u32 pri7 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0 0x68000010
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0_RST 0xfbfaf9f8
> +union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_0 {
> +       struct {
> +               u32 pri0 : 8;
> +               u32 pri1 : 8;
> +               u32 pri2 : 8;
> +               u32 pri3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x6800000c
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
> +union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_1 {
> +       struct {
> +               u32 pri4 : 8;
> +               u32 pri5 : 8;
> +               u32 pri6 : 8;
> +               u32 pri7 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x68000008
> +#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
> +union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_0 {
> +       struct {
> +               u32 pri0 : 8;
> +               u32 pri1 : 8;
> +               u32 pri2 : 8;
> +               u32 pri3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * ATM (atomic) pipe registers: per-QID CQ-index map (four 8-bit slots
> + * per word, (y) selecting the word) and 4-bin arbitration weights.
> + */
> +#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX(x, y) \
> +       (0x70000000 + (x) * 0x1000 + (y) * 0x4)
> +#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX_RST 0x0
> +union dlb_atm_pipe_qid_ldb_qid2cqidx {
> +       struct {
> +               u32 cq_p0 : 8;
> +               u32 cq_p1 : 8;
> +               u32 cq_p2 : 8;
> +               u32 cq_p3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN 0x7800000c
> +#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN_RST 0xfffefdfc
> +union dlb_atm_pipe_cfg_ctrl_arb_weights_sched_bin {
> +       struct {
> +               u32 bin0 : 8;
> +               u32 bin1 : 8;
> +               u32 bin2 : 8;
> +               u32 bin3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN 0x78000008
> +#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN_RST 0xfffefdfc
> +union dlb_atm_pipe_ctrl_arb_weights_rdy_bin {
> +       struct {
> +               u32 bin0 : 8;
> +               u32 bin1 : 8;
> +               u32 bin2 : 8;
> +               u32 bin3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * AQED (atomic QED) pipe registers: per-QID flow-ID limit and the
> + * free-list ring (pop/push pointers with a wrap "generation" bit,
> + * base and limit), plus the ATM arbitration weight word.
> + */
> +#define DLB_AQED_PIPE_QID_FID_LIM(x) \
> +       (0x80000014 + (x) * 0x1000)
> +#define DLB_AQED_PIPE_QID_FID_LIM_RST 0x7ff
> +union dlb_aqed_pipe_qid_fid_lim {
> +       struct {
> +               u32 qid_fid_limit : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_AQED_PIPE_FL_POP_PTR(x) \
> +       (0x80000010 + (x) * 0x1000)
> +#define DLB_AQED_PIPE_FL_POP_PTR_RST 0x0
> +union dlb_aqed_pipe_fl_pop_ptr {
> +       struct {
> +               u32 pop_ptr : 11;
> +               u32 generation : 1;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_AQED_PIPE_FL_PUSH_PTR(x) \
> +       (0x8000000c + (x) * 0x1000)
> +#define DLB_AQED_PIPE_FL_PUSH_PTR_RST 0x0
> +union dlb_aqed_pipe_fl_push_ptr {
> +       struct {
> +               u32 push_ptr : 11;
> +               u32 generation : 1;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_AQED_PIPE_FL_BASE(x) \
> +       (0x80000008 + (x) * 0x1000)
> +#define DLB_AQED_PIPE_FL_BASE_RST 0x0
> +union dlb_aqed_pipe_fl_base {
> +       struct {
> +               u32 base : 11;
> +               u32 rsvd0 : 21;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_AQED_PIPE_FL_LIM(x) \
> +       (0x80000004 + (x) * 0x1000)
> +#define DLB_AQED_PIPE_FL_LIM_RST 0x800
> +union dlb_aqed_pipe_fl_lim {
> +       struct {
> +               u32 limit : 11;
> +               u32 freelist_disable : 1;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0 0x88000008
> +#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0_RST 0xfffe
> +union dlb_aqed_pipe_cfg_ctrl_arb_weights_tqpri_atm_0 {
> +       struct {
> +               u32 pri0 : 8;
> +               u32 pri1 : 8;
> +               u32 pri2 : 8;
> +               u32 pri3 : 8;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * RO (reorder) pipe registers: per-QID sequence-number group/slot
> + * assignment and the per-group sequence-number mode word (four 3-bit
> + * modes, one per group).
> + */
> +#define DLB_RO_PIPE_QID2GRPSLT(x) \
> +       (0x90000000 + (x) * 0x1000)
> +#define DLB_RO_PIPE_QID2GRPSLT_RST 0x0
> +union dlb_ro_pipe_qid2grpslt {
> +       struct {
> +               u32 slot : 5;
> +               u32 rsvd1 : 3;
> +               u32 group : 2;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_RO_PIPE_GRP_SN_MODE 0x98000008
> +#define DLB_RO_PIPE_GRP_SN_MODE_RST 0x0
> +union dlb_ro_pipe_grp_sn_mode {
> +       struct {
> +               u32 sn_mode_0 : 3;
> +               u32 reserved0 : 5;
> +               u32 sn_mode_1 : 3;
> +               u32 reserved1 : 5;
> +               u32 sn_mode_2 : 3;
> +               u32 reserved2 : 5;
> +               u32 sn_mode_3 : 3;
> +               u32 reserved3 : 5;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * CHP configuration registers for directed producer ports (PP) and
> + * their CQs: alarm/watchdog enables, credit-pool bindings, credit
> + * counts, interrupt thresholds and credit low/high watermarks.
> + * (x) selects the port/CQ instance with a 0x1000 stride.
> + */
> +#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN(x) \
> +       (0xa000003c + (x) * 0x1000)
> +/* reset value 1: software alarms enabled by default */
> +#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN_RST 0x1
> +union dlb_chp_cfg_dir_pp_sw_alarm_en {
> +       struct {
> +               u32 alarm_enable : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_WD_ENB(x) \
> +       (0xa0000038 + (x) * 0x1000)
> +#define DLB_CHP_DIR_CQ_WD_ENB_RST 0x0
> +union dlb_chp_dir_cq_wd_enb {
> +       struct {
> +               u32 wd_enable : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +/* credit-pool selectors: 6-bit pool id for LDB and DIR credits */
> +#define DLB_CHP_DIR_LDB_PP2POOL(x) \
> +       (0xa0000034 + (x) * 0x1000)
> +#define DLB_CHP_DIR_LDB_PP2POOL_RST 0x0
> +union dlb_chp_dir_ldb_pp2pool {
> +       struct {
> +               u32 pool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_DIR_PP2POOL(x) \
> +       (0xa0000030 + (x) * 0x1000)
> +#define DLB_CHP_DIR_DIR_PP2POOL_RST 0x0
> +union dlb_chp_dir_dir_pp2pool {
> +       struct {
> +               u32 pool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +/* credit counts: 16 bits for LDB credits, 14 bits for DIR credits */
> +#define DLB_CHP_DIR_PP_LDB_CRD_CNT(x) \
> +       (0xa000002c + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_LDB_CRD_CNT_RST 0x0
> +union dlb_chp_dir_pp_ldb_crd_cnt {
> +       struct {
> +               u32 count : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_DIR_CRD_CNT(x) \
> +       (0xa0000028 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_DIR_CRD_CNT_RST 0x0
> +union dlb_chp_dir_pp_dir_crd_cnt {
> +       struct {
> +               u32 count : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_TMR_THRESHOLD(x) \
> +       (0xa0000024 + (x) * 0x1000)
> +#define DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST 0x0
> +union dlb_chp_dir_cq_tmr_threshold {
> +       struct {
> +               u32 timer_thrsh : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +/* CQ interrupt enables: timer-based and depth-based */
> +#define DLB_CHP_DIR_CQ_INT_ENB(x) \
> +       (0xa0000020 + (x) * 0x1000)
> +#define DLB_CHP_DIR_CQ_INT_ENB_RST 0x0
> +union dlb_chp_dir_cq_int_enb {
> +       struct {
> +               u32 en_tim : 1;
> +               u32 en_depth : 1;
> +               u32 rsvd0 : 30;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(x) \
> +       (0xa000001c + (x) * 0x1000)
> +#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST 0x0
> +union dlb_chp_dir_cq_int_depth_thrsh {
> +       struct {
> +               u32 depth_threshold : 12;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(x) \
> +       (0xa0000018 + (x) * 0x1000)
> +#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST 0x0
> +union dlb_chp_dir_cq_tkn_depth_sel {
> +       struct {
> +               u32 token_depth_select : 4;
> +               u32 rsvd0 : 28;
> +       } field;
> +       u32 val;
> +};
> +
> +/* minimum credit quanta; reset value 1 (smallest non-zero quantum) */
> +#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(x) \
> +       (0xa0000014 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST 0x1
> +union dlb_chp_dir_pp_ldb_min_crd_qnt {
> +       struct {
> +               u32 quanta : 10;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(x) \
> +       (0xa0000010 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST 0x1
> +union dlb_chp_dir_pp_dir_min_crd_qnt {
> +       struct {
> +               u32 quanta : 10;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +/* low/high watermarks; widths match the credit-count registers above */
> +#define DLB_CHP_DIR_PP_LDB_CRD_LWM(x) \
> +       (0xa000000c + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_LDB_CRD_LWM_RST 0x0
> +union dlb_chp_dir_pp_ldb_crd_lwm {
> +       struct {
> +               u32 lwm : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_LDB_CRD_HWM(x) \
> +       (0xa0000008 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_LDB_CRD_HWM_RST 0x0
> +union dlb_chp_dir_pp_ldb_crd_hwm {
> +       struct {
> +               u32 hwm : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_DIR_CRD_LWM(x) \
> +       (0xa0000004 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_DIR_CRD_LWM_RST 0x0
> +union dlb_chp_dir_pp_dir_crd_lwm {
> +       struct {
> +               u32 lwm : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_DIR_CRD_HWM(x) \
> +       (0xa0000000 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_DIR_CRD_HWM_RST 0x0
> +union dlb_chp_dir_pp_dir_crd_hwm {
> +       struct {
> +               u32 hwm : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * CHP configuration registers for load-balanced producer ports and
> + * CQs: the LDB analogue of the directed-port block above, plus the
> + * sequence-number check enable and the per-port history-list window
> + * (base/limit). 0x1000 stride per instance.
> + */
> +#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN(x) \
> +       (0xa0000148 + (x) * 0x1000)
> +/* reset value 1: software alarms enabled by default */
> +#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN_RST 0x1
> +union dlb_chp_cfg_ldb_pp_sw_alarm_en {
> +       struct {
> +               u32 alarm_enable : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_CQ_WD_ENB(x) \
> +       (0xa0000144 + (x) * 0x1000)
> +#define DLB_CHP_LDB_CQ_WD_ENB_RST 0x0
> +union dlb_chp_ldb_cq_wd_enb {
> +       struct {
> +               u32 wd_enable : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_SN_CHK_ENBL(x) \
> +       (0xa0000140 + (x) * 0x1000)
> +#define DLB_CHP_SN_CHK_ENBL_RST 0x0
> +union dlb_chp_sn_chk_enbl {
> +       struct {
> +               u32 en : 1;
> +               u32 rsvd0 : 31;
> +       } field;
> +       u32 val;
> +};
> +
> +/* history-list window: 13-bit base and limit */
> +#define DLB_CHP_HIST_LIST_BASE(x) \
> +       (0xa000013c + (x) * 0x1000)
> +#define DLB_CHP_HIST_LIST_BASE_RST 0x0
> +union dlb_chp_hist_list_base {
> +       struct {
> +               u32 base : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_HIST_LIST_LIM(x) \
> +       (0xa0000138 + (x) * 0x1000)
> +#define DLB_CHP_HIST_LIST_LIM_RST 0x0
> +union dlb_chp_hist_list_lim {
> +       struct {
> +               u32 limit : 13;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +/* credit-pool selectors (6-bit pool id) */
> +#define DLB_CHP_LDB_LDB_PP2POOL(x) \
> +       (0xa0000134 + (x) * 0x1000)
> +#define DLB_CHP_LDB_LDB_PP2POOL_RST 0x0
> +union dlb_chp_ldb_ldb_pp2pool {
> +       struct {
> +               u32 pool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_DIR_PP2POOL(x) \
> +       (0xa0000130 + (x) * 0x1000)
> +#define DLB_CHP_LDB_DIR_PP2POOL_RST 0x0
> +union dlb_chp_ldb_dir_pp2pool {
> +       struct {
> +               u32 pool : 6;
> +               u32 rsvd0 : 26;
> +       } field;
> +       u32 val;
> +};
> +
> +/* credit counts: 16 bits for LDB credits, 14 bits for DIR credits */
> +#define DLB_CHP_LDB_PP_LDB_CRD_CNT(x) \
> +       (0xa000012c + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_LDB_CRD_CNT_RST 0x0
> +union dlb_chp_ldb_pp_ldb_crd_cnt {
> +       struct {
> +               u32 count : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_DIR_CRD_CNT(x) \
> +       (0xa0000128 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_DIR_CRD_CNT_RST 0x0
> +union dlb_chp_ldb_pp_dir_crd_cnt {
> +       struct {
> +               u32 count : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_CQ_TMR_THRESHOLD(x) \
> +       (0xa0000124 + (x) * 0x1000)
> +#define DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST 0x0
> +union dlb_chp_ldb_cq_tmr_threshold {
> +       struct {
> +               u32 thrsh : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +/* CQ interrupt enables: timer-based and depth-based */
> +#define DLB_CHP_LDB_CQ_INT_ENB(x) \
> +       (0xa0000120 + (x) * 0x1000)
> +#define DLB_CHP_LDB_CQ_INT_ENB_RST 0x0
> +union dlb_chp_ldb_cq_int_enb {
> +       struct {
> +               u32 en_tim : 1;
> +               u32 en_depth : 1;
> +               u32 rsvd0 : 30;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(x) \
> +       (0xa000011c + (x) * 0x1000)
> +#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST 0x0
> +union dlb_chp_ldb_cq_int_depth_thrsh {
> +       struct {
> +               u32 depth_threshold : 12;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(x) \
> +       (0xa0000118 + (x) * 0x1000)
> +#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST 0x0
> +union dlb_chp_ldb_cq_tkn_depth_sel {
> +       struct {
> +               u32 token_depth_select : 4;
> +               u32 rsvd0 : 28;
> +       } field;
> +       u32 val;
> +};
> +
> +/* minimum credit quanta; reset value 1 (smallest non-zero quantum) */
> +#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(x) \
> +       (0xa0000114 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST 0x1
> +union dlb_chp_ldb_pp_ldb_min_crd_qnt {
> +       struct {
> +               u32 quanta : 10;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(x) \
> +       (0xa0000110 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST 0x1
> +union dlb_chp_ldb_pp_dir_min_crd_qnt {
> +       struct {
> +               u32 quanta : 10;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +/* low/high watermarks; widths match the credit-count registers above */
> +#define DLB_CHP_LDB_PP_LDB_CRD_LWM(x) \
> +       (0xa000010c + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_LDB_CRD_LWM_RST 0x0
> +union dlb_chp_ldb_pp_ldb_crd_lwm {
> +       struct {
> +               u32 lwm : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_LDB_CRD_HWM(x) \
> +       (0xa0000108 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_LDB_CRD_HWM_RST 0x0
> +union dlb_chp_ldb_pp_ldb_crd_hwm {
> +       struct {
> +               u32 hwm : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_DIR_CRD_LWM(x) \
> +       (0xa0000104 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_DIR_CRD_LWM_RST 0x0
> +union dlb_chp_ldb_pp_dir_crd_lwm {
> +       struct {
> +               u32 lwm : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_DIR_CRD_HWM(x) \
> +       (0xa0000100 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_DIR_CRD_HWM_RST 0x0
> +union dlb_chp_ldb_pp_dir_crd_hwm {
> +       struct {
> +               u32 hwm : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * CHP runtime-state registers for directed CQs/ports: CQ depth and
> + * write pointer, push-pointer state, and the port-state reset /
> + * credit-request state machine words.
> + */
> +#define DLB_CHP_DIR_CQ_DEPTH(x) \
> +       (0xa0000218 + (x) * 0x1000)
> +#define DLB_CHP_DIR_CQ_DEPTH_RST 0x0
> +union dlb_chp_dir_cq_depth {
> +       struct {
> +               u32 cq_depth : 11;
> +               u32 rsvd0 : 21;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_WPTR(x) \
> +       (0xa0000214 + (x) * 0x1000)
> +#define DLB_CHP_DIR_CQ_WPTR_RST 0x0
> +union dlb_chp_dir_cq_wptr {
> +       struct {
> +               u32 write_pointer : 10;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_LDB_PUSH_PTR(x) \
> +       (0xa0000210 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST 0x0
> +union dlb_chp_dir_pp_ldb_push_ptr {
> +       struct {
> +               u32 push_pointer : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_DIR_PUSH_PTR(x) \
> +       (0xa000020c + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST 0x0
> +union dlb_chp_dir_pp_dir_push_ptr {
> +       struct {
> +               u32 push_pointer : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +/* NOTE(review): offset 0xa0000208 is skipped here -- presumably an
> + * unused/undocumented register; confirm against the hardware spec
> + */
> +#define DLB_CHP_DIR_PP_STATE_RESET(x) \
> +       (0xa0000204 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_STATE_RESET_RST 0x0
> +union dlb_chp_dir_pp_state_reset {
> +       struct {
> +               u32 rsvd1 : 7;
> +               u32 dir_type : 1;
> +               u32 rsvd0 : 23;
> +               u32 reset_pp_state : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_PP_CRD_REQ_STATE(x) \
> +       (0xa0000200 + (x) * 0x1000)
> +#define DLB_CHP_DIR_PP_CRD_REQ_STATE_RST 0x0
> +union dlb_chp_dir_pp_crd_req_state {
> +       struct {
> +               u32 dir_crd_req_active_valid : 1;
> +               u32 dir_crd_req_active_check : 1;
> +               u32 dir_crd_req_active_busy : 1;
> +               u32 rsvd1 : 1;
> +               u32 ldb_crd_req_active_valid : 1;
> +               u32 ldb_crd_req_active_check : 1;
> +               u32 ldb_crd_req_active_busy : 1;
> +               u32 rsvd0 : 1;
> +               u32 no_pp_credit_update : 1;
> +               u32 crd_req_state : 23;
> +       } field;
> +       u32 val;
> +};
> +
> +/*
> + * CHP runtime-state registers for load-balanced CQs/ports: CQ depth
> + * and write pointer, push pointers, history-list pop/push pointers
> + * (with wrap "generation" bit), and state-reset / credit-request
> + * state words mirroring the directed-port block above.
> + */
> +#define DLB_CHP_LDB_CQ_DEPTH(x) \
> +       (0xa0000320 + (x) * 0x1000)
> +#define DLB_CHP_LDB_CQ_DEPTH_RST 0x0
> +union dlb_chp_ldb_cq_depth {
> +       struct {
> +               u32 depth : 11;
> +               u32 reserved : 2;
> +               u32 rsvd0 : 19;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_CQ_WPTR(x) \
> +       (0xa000031c + (x) * 0x1000)
> +#define DLB_CHP_LDB_CQ_WPTR_RST 0x0
> +union dlb_chp_ldb_cq_wptr {
> +       struct {
> +               u32 write_pointer : 10;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_LDB_PUSH_PTR(x) \
> +       (0xa0000318 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST 0x0
> +union dlb_chp_ldb_pp_ldb_push_ptr {
> +       struct {
> +               u32 push_pointer : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_DIR_PUSH_PTR(x) \
> +       (0xa0000314 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST 0x0
> +union dlb_chp_ldb_pp_dir_push_ptr {
> +       struct {
> +               u32 push_pointer : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +/* NOTE(review): offset 0xa0000310 is skipped here -- presumably an
> + * unused/undocumented register; confirm against the hardware spec
> + */
> +#define DLB_CHP_HIST_LIST_POP_PTR(x) \
> +       (0xa000030c + (x) * 0x1000)
> +#define DLB_CHP_HIST_LIST_POP_PTR_RST 0x0
> +union dlb_chp_hist_list_pop_ptr {
> +       struct {
> +               u32 pop_ptr : 13;
> +               u32 generation : 1;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_HIST_LIST_PUSH_PTR(x) \
> +       (0xa0000308 + (x) * 0x1000)
> +#define DLB_CHP_HIST_LIST_PUSH_PTR_RST 0x0
> +union dlb_chp_hist_list_push_ptr {
> +       struct {
> +               u32 push_ptr : 13;
> +               u32 generation : 1;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_STATE_RESET(x) \
> +       (0xa0000304 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_STATE_RESET_RST 0x0
> +union dlb_chp_ldb_pp_state_reset {
> +       struct {
> +               u32 rsvd1 : 7;
> +               u32 dir_type : 1;
> +               u32 rsvd0 : 23;
> +               u32 reset_pp_state : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_PP_CRD_REQ_STATE(x) \
> +       (0xa0000300 + (x) * 0x1000)
> +#define DLB_CHP_LDB_PP_CRD_REQ_STATE_RST 0x0
> +union dlb_chp_ldb_pp_crd_req_state {
> +       struct {
> +               u32 dir_crd_req_active_valid : 1;
> +               u32 dir_crd_req_active_check : 1;
> +               u32 dir_crd_req_active_busy : 1;
> +               u32 rsvd1 : 1;
> +               u32 ldb_crd_req_active_valid : 1;
> +               u32 ldb_crd_req_active_check : 1;
> +               u32 ldb_crd_req_active_busy : 1;
> +               u32 rsvd0 : 1;
> +               u32 no_pp_credit_update : 1;
> +               u32 crd_req_state : 23;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_ORD_QID_SN(x) \
> +       (0xa0000408 + (x) * 0x1000)
> +#define DLB_CHP_ORD_QID_SN_RST 0x0
> +union dlb_chp_ord_qid_sn {
> +       struct {
> +               u32 sn : 12;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_ORD_QID_SN_MAP(x) \
> +       (0xa0000404 + (x) * 0x1000)
> +#define DLB_CHP_ORD_QID_SN_MAP_RST 0x0
> +union dlb_chp_ord_qid_sn_map {
> +       struct {
> +               u32 mode : 3;
> +               u32 slot : 5;
> +               u32 grp : 2;
> +               u32 rsvd0 : 22;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_POOL_CRD_CNT(x) \
> +       (0xa000050c + (x) * 0x1000)
> +#define DLB_CHP_LDB_POOL_CRD_CNT_RST 0x0
> +union dlb_chp_ldb_pool_crd_cnt {
> +       struct {
> +               u32 count : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_QED_FL_BASE(x) \
> +       (0xa0000508 + (x) * 0x1000)
> +#define DLB_CHP_QED_FL_BASE_RST 0x0
> +union dlb_chp_qed_fl_base {
> +       struct {
> +               u32 base : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_QED_FL_LIM(x) \
> +       (0xa0000504 + (x) * 0x1000)
> +#define DLB_CHP_QED_FL_LIM_RST 0x8000
> +union dlb_chp_qed_fl_lim {
> +       struct {
> +               u32 limit : 14;
> +               u32 rsvd1 : 1;
> +               u32 freelist_disable : 1;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_POOL_CRD_LIM(x) \
> +       (0xa0000500 + (x) * 0x1000)
> +#define DLB_CHP_LDB_POOL_CRD_LIM_RST 0x0
> +union dlb_chp_ldb_pool_crd_lim {
> +       struct {
> +               u32 limit : 16;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_QED_FL_POP_PTR(x) \
> +       (0xa0000604 + (x) * 0x1000)
> +#define DLB_CHP_QED_FL_POP_PTR_RST 0x0
> +union dlb_chp_qed_fl_pop_ptr {
> +       struct {
> +               u32 pop_ptr : 14;
> +               u32 reserved0 : 1;
> +               u32 generation : 1;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_QED_FL_PUSH_PTR(x) \
> +       (0xa0000600 + (x) * 0x1000)
> +#define DLB_CHP_QED_FL_PUSH_PTR_RST 0x0
> +union dlb_chp_qed_fl_push_ptr {
> +       struct {
> +               u32 push_ptr : 14;
> +               u32 reserved0 : 1;
> +               u32 generation : 1;
> +               u32 rsvd0 : 16;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_POOL_CRD_CNT(x) \
> +       (0xa000070c + (x) * 0x1000)
> +#define DLB_CHP_DIR_POOL_CRD_CNT_RST 0x0
> +union dlb_chp_dir_pool_crd_cnt {
> +       struct {
> +               u32 count : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DQED_FL_BASE(x) \
> +       (0xa0000708 + (x) * 0x1000)
> +#define DLB_CHP_DQED_FL_BASE_RST 0x0
> +union dlb_chp_dqed_fl_base {
> +       struct {
> +               u32 base : 12;
> +               u32 rsvd0 : 20;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DQED_FL_LIM(x) \
> +       (0xa0000704 + (x) * 0x1000)
> +#define DLB_CHP_DQED_FL_LIM_RST 0x2000
> +union dlb_chp_dqed_fl_lim {
> +       struct {
> +               u32 limit : 12;
> +               u32 rsvd1 : 1;
> +               u32 freelist_disable : 1;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_POOL_CRD_LIM(x) \
> +       (0xa0000700 + (x) * 0x1000)
> +#define DLB_CHP_DIR_POOL_CRD_LIM_RST 0x0
> +union dlb_chp_dir_pool_crd_lim {
> +       struct {
> +               u32 limit : 14;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DQED_FL_POP_PTR(x) \
> +       (0xa0000804 + (x) * 0x1000)
> +#define DLB_CHP_DQED_FL_POP_PTR_RST 0x0
> +union dlb_chp_dqed_fl_pop_ptr {
> +       struct {
> +               u32 pop_ptr : 12;
> +               u32 reserved0 : 1;
> +               u32 generation : 1;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DQED_FL_PUSH_PTR(x) \
> +       (0xa0000800 + (x) * 0x1000)
> +#define DLB_CHP_DQED_FL_PUSH_PTR_RST 0x0
> +union dlb_chp_dqed_fl_push_ptr {
> +       struct {
> +               u32 push_ptr : 12;
> +               u32 reserved0 : 1;
> +               u32 generation : 1;
> +               u32 rsvd0 : 18;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_CTRL_DIAG_02 0xa8000154
> +#define DLB_CHP_CTRL_DIAG_02_RST 0x0
> +union dlb_chp_ctrl_diag_02 {
> +       struct {
> +               u32 control : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_CFG_CHP_CSR_CTRL 0xa8000130
> +#define DLB_CHP_CFG_CHP_CSR_CTRL_RST 0xc0003fff
> +#define DLB_CHP_CFG_EXCESS_TOKENS_SHIFT 12
> +union dlb_chp_cfg_chp_csr_ctrl {
> +       struct {
> +               u32 int_inf_alarm_enable_0 : 1;
> +               u32 int_inf_alarm_enable_1 : 1;
> +               u32 int_inf_alarm_enable_2 : 1;
> +               u32 int_inf_alarm_enable_3 : 1;
> +               u32 int_inf_alarm_enable_4 : 1;
> +               u32 int_inf_alarm_enable_5 : 1;
> +               u32 int_inf_alarm_enable_6 : 1;
> +               u32 int_inf_alarm_enable_7 : 1;
> +               u32 int_inf_alarm_enable_8 : 1;
> +               u32 int_inf_alarm_enable_9 : 1;
> +               u32 int_inf_alarm_enable_10 : 1;
> +               u32 int_inf_alarm_enable_11 : 1;
> +               u32 int_inf_alarm_enable_12 : 1;
> +               u32 int_cor_alarm_enable : 1;
> +               u32 csr_control_spare : 14;
> +               u32 cfg_vasr_dis : 1;
> +               u32 counter_clear : 1;
> +               u32 blk_cor_report : 1;
> +               u32 blk_cor_synd : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_CQ_INTR_ARMED1 0xa8000068
> +#define DLB_CHP_LDB_CQ_INTR_ARMED1_RST 0x0
> +union dlb_chp_ldb_cq_intr_armed1 {
> +       struct {
> +               u32 armed : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_LDB_CQ_INTR_ARMED0 0xa8000064
> +#define DLB_CHP_LDB_CQ_INTR_ARMED0_RST 0x0
> +union dlb_chp_ldb_cq_intr_armed0 {
> +       struct {
> +               u32 armed : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_INTR_ARMED3 0xa8000024
> +#define DLB_CHP_DIR_CQ_INTR_ARMED3_RST 0x0
> +union dlb_chp_dir_cq_intr_armed3 {
> +       struct {
> +               u32 armed : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_INTR_ARMED2 0xa8000020
> +#define DLB_CHP_DIR_CQ_INTR_ARMED2_RST 0x0
> +union dlb_chp_dir_cq_intr_armed2 {
> +       struct {
> +               u32 armed : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_INTR_ARMED1 0xa800001c
> +#define DLB_CHP_DIR_CQ_INTR_ARMED1_RST 0x0
> +union dlb_chp_dir_cq_intr_armed1 {
> +       struct {
> +               u32 armed : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CHP_DIR_CQ_INTR_ARMED0 0xa8000018
> +#define DLB_CHP_DIR_CQ_INTR_ARMED0_RST 0x0
> +union dlb_chp_dir_cq_intr_armed0 {
> +       struct {
> +               u32 armed : 32;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CFG_MSTR_DIAG_RESET_STS 0xb8000004
> +#define DLB_CFG_MSTR_DIAG_RESET_STS_RST 0x1ff
> +union dlb_cfg_mstr_diag_reset_sts {
> +       struct {
> +               u32 chp_pf_reset_done : 1;
> +               u32 rop_pf_reset_done : 1;
> +               u32 lsp_pf_reset_done : 1;
> +               u32 nalb_pf_reset_done : 1;
> +               u32 ap_pf_reset_done : 1;
> +               u32 dp_pf_reset_done : 1;
> +               u32 qed_pf_reset_done : 1;
> +               u32 dqed_pf_reset_done : 1;
> +               u32 aqed_pf_reset_done : 1;
> +               u32 rsvd1 : 6;
> +               u32 pf_reset_active : 1;
> +               u32 chp_vf_reset_done : 1;
> +               u32 rop_vf_reset_done : 1;
> +               u32 lsp_vf_reset_done : 1;
> +               u32 nalb_vf_reset_done : 1;
> +               u32 ap_vf_reset_done : 1;
> +               u32 dp_vf_reset_done : 1;
> +               u32 qed_vf_reset_done : 1;
> +               u32 dqed_vf_reset_done : 1;
> +               u32 aqed_vf_reset_done : 1;
> +               u32 rsvd0 : 6;
> +               u32 vf_reset_active : 1;
> +       } field;
> +       u32 val;
> +};
> +
> +#define DLB_CFG_MSTR_BCAST_RESET_VF_START 0xc8100000
> +#define DLB_CFG_MSTR_BCAST_RESET_VF_START_RST 0x0
> +/* HW Reset Types */
> +#define VF_RST_TYPE_CQ_LDB   0
> +#define VF_RST_TYPE_QID_LDB  1
> +#define VF_RST_TYPE_POOL_LDB 2
> +#define VF_RST_TYPE_CQ_DIR   8
> +#define VF_RST_TYPE_QID_DIR  9
> +#define VF_RST_TYPE_POOL_DIR 10
> +union dlb_cfg_mstr_bcast_reset_vf_start {
> +       struct {
> +               u32 vf_reset_start : 1;
> +               u32 reserved : 3;
> +               u32 vf_reset_type : 4;
> +               u32 vf_reset_id : 24;
> +       } field;
> +       u32 val;
> +};
> +
> +#endif /* __DLB_REGS_H */
> diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
> new file mode 100644
> index 0000000..9c4267b
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_resource.c
> @@ -0,0 +1,302 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#include "dlb_hw_types.h"
> +#include "../../dlb_user.h"
> +#include "dlb_resource.h"
> +#include "dlb_osdep.h"
> +#include "dlb_osdep_bitmap.h"
> +#include "dlb_osdep_types.h"
> +#include "dlb_regs.h"
> +
> +void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)
> +{
> +       union dlb_dp_dir_csr_ctrl r0;
> +
> +       r0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);
> +
> +       r0.field.cfg_vasr_dis = 1;
> +
> +       DLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);
> +}
> +
> +void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)
> +{
> +       union dlb_chp_cfg_chp_csr_ctrl r0;
> +
> +       r0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);
> +
> +       r0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;
> +
> +       DLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);
> +}
> +
> +void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)
> +{
> +       union dlb_sys_cq_mode r0;
> +
> +       r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
> +
> +       r0.field.ldb_cq64 = 1;
> +
> +       DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
> +}
> +
> +void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)
> +{
> +       union dlb_sys_cq_mode r0;
> +
> +       r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
> +
> +       r0.field.dir_cq64 = 1;
> +
> +       DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
> +}
> +
> +void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)
> +{
> +       union dlb_sys_sys_alarm_int_enable r0;
> +
> +       r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
> +
> +       r0.field.pf_to_vf_isr_pend_error = 0;
> +
> +       DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
> +}
> +
> +void dlb_hw_get_num_resources(struct dlb_hw *hw,
> +                             struct dlb_get_num_resources_args *arg)
> +{
> +       struct dlb_function_resources *rsrcs;
> +       struct dlb_bitmap *map;
> +
> +       rsrcs = &hw->pf;
> +
> +       arg->num_sched_domains = rsrcs->num_avail_domains;
> +
> +       arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
> +
> +       arg->num_ldb_ports = rsrcs->num_avail_ldb_ports;
> +
> +       arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
> +
> +       map = rsrcs->avail_aqed_freelist_entries;
> +
> +       arg->num_atomic_inflights = dlb_bitmap_count(map);
> +
> +       arg->max_contiguous_atomic_inflights =
> +               dlb_bitmap_longest_set_range(map);
> +
> +       map = rsrcs->avail_hist_list_entries;
> +
> +       arg->num_hist_list_entries = dlb_bitmap_count(map);
> +
> +       arg->max_contiguous_hist_list_entries =
> +               dlb_bitmap_longest_set_range(map);
> +
> +       map = rsrcs->avail_qed_freelist_entries;
> +
> +       arg->num_ldb_credits = dlb_bitmap_count(map);
> +
> +       arg->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);
> +
> +       map = rsrcs->avail_dqed_freelist_entries;
> +
> +       arg->num_dir_credits = dlb_bitmap_count(map);
> +
> +       arg->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);
> +
> +       arg->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;
> +
> +       arg->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;
> +}
> +
/* Initialize every function-level (PF) resource list head to empty. Called
 * before resources are handed to the PF in dlb_resource_init().
 */
static void dlb_init_fn_rsrc_lists(struct dlb_function_resources *rsrc)
{
	dlb_list_init_head(&rsrc->avail_domains);
	dlb_list_init_head(&rsrc->used_domains);
	dlb_list_init_head(&rsrc->avail_ldb_queues);
	dlb_list_init_head(&rsrc->avail_ldb_ports);
	dlb_list_init_head(&rsrc->avail_dir_pq_pairs);
	dlb_list_init_head(&rsrc->avail_ldb_credit_pools);
	dlb_list_init_head(&rsrc->avail_dir_credit_pools);
}
> +
/* Initialize every per-domain resource list head (both the used- and
 * avail- lists) to empty. Called for each domain in dlb_resource_init().
 */
static void dlb_init_domain_rsrc_lists(struct dlb_domain *domain)
{
	dlb_list_init_head(&domain->used_ldb_queues);
	dlb_list_init_head(&domain->used_ldb_ports);
	dlb_list_init_head(&domain->used_dir_pq_pairs);
	dlb_list_init_head(&domain->used_ldb_credit_pools);
	dlb_list_init_head(&domain->used_dir_credit_pools);
	dlb_list_init_head(&domain->avail_ldb_queues);
	dlb_list_init_head(&domain->avail_ldb_ports);
	dlb_list_init_head(&domain->avail_dir_pq_pairs);
	dlb_list_init_head(&domain->avail_ldb_credit_pools);
	dlb_list_init_head(&domain->avail_dir_credit_pools);
}
> +
> +int dlb_resource_init(struct dlb_hw *hw)
> +{
> +       struct dlb_list_entry *list;
> +       unsigned int i;
> +
> +       /* For optimal load-balancing, ports that map to one or more QIDs in
> +        * common should not be in numerical sequence. This is application
> +        * dependent, but the driver interleaves port IDs as much as possible
> +        * to reduce the likelihood of this. This initial allocation maximizes
> +        * the average distance between an ID and its immediate neighbors (i.e.
> +        * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
> +        * 3, etc.).
> +        */
> +       u32 init_ldb_port_allocation[DLB_MAX_NUM_LDB_PORTS] = {
> +               0,  31, 62, 29, 60, 27, 58, 25, 56, 23, 54, 21, 52, 19, 50, 17,
> +               48, 15, 46, 13, 44, 11, 42,  9, 40,  7, 38,  5, 36,  3, 34, 1,
> +               32, 63, 30, 61, 28, 59, 26, 57, 24, 55, 22, 53, 20, 51, 18, 49,
> +               16, 47, 14, 45, 12, 43, 10, 41,  8, 39,  6, 37,  4, 35,  2, 33
> +       };
> +
> +       /* Zero-out resource tracking data structures */
> +       memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
> +       memset(&hw->pf, 0, sizeof(hw->pf));
> +
> +       dlb_init_fn_rsrc_lists(&hw->pf);
> +
> +       for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
> +               memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
> +               dlb_init_domain_rsrc_lists(&hw->domains[i]);
> +               hw->domains[i].parent_func = &hw->pf;
> +       }
> +
> +       /* Give all resources to the PF driver */
> +       hw->pf.num_avail_domains = DLB_MAX_NUM_DOMAINS;
> +       for (i = 0; i < hw->pf.num_avail_domains; i++) {
> +               list = &hw->domains[i].func_list;
> +
> +               dlb_list_add(&hw->pf.avail_domains, list);
> +       }
> +
> +       hw->pf.num_avail_ldb_queues = DLB_MAX_NUM_LDB_QUEUES;
> +       for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
> +               list = &hw->rsrcs.ldb_queues[i].func_list;
> +
> +               dlb_list_add(&hw->pf.avail_ldb_queues, list);
> +       }
> +
> +       hw->pf.num_avail_ldb_ports = DLB_MAX_NUM_LDB_PORTS;
> +       for (i = 0; i < hw->pf.num_avail_ldb_ports; i++) {
> +               struct dlb_ldb_port *port;
> +
> +               port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
> +
> +               dlb_list_add(&hw->pf.avail_ldb_ports, &port->func_list);
> +       }
> +
> +       hw->pf.num_avail_dir_pq_pairs = DLB_MAX_NUM_DIR_PORTS;
> +       for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
> +               list = &hw->rsrcs.dir_pq_pairs[i].func_list;
> +
> +               dlb_list_add(&hw->pf.avail_dir_pq_pairs, list);
> +       }
> +
> +       hw->pf.num_avail_ldb_credit_pools = DLB_MAX_NUM_LDB_CREDIT_POOLS;
> +       for (i = 0; i < hw->pf.num_avail_ldb_credit_pools; i++) {
> +               list = &hw->rsrcs.ldb_credit_pools[i].func_list;
> +
> +               dlb_list_add(&hw->pf.avail_ldb_credit_pools, list);
> +       }
> +
> +       hw->pf.num_avail_dir_credit_pools = DLB_MAX_NUM_DIR_CREDIT_POOLS;
> +       for (i = 0; i < hw->pf.num_avail_dir_credit_pools; i++) {
> +               list = &hw->rsrcs.dir_credit_pools[i].func_list;
> +
> +               dlb_list_add(&hw->pf.avail_dir_credit_pools, list);
> +       }
> +
> +       /* There are 5120 history list entries, which allows us to overprovision
> +        * the inflight limit (4096) by 1k.
> +        */
> +       if (dlb_bitmap_alloc(hw,
> +                            &hw->pf.avail_hist_list_entries,
> +                            DLB_MAX_NUM_HIST_LIST_ENTRIES))
> +               return -1;
> +
> +       if (dlb_bitmap_fill(hw->pf.avail_hist_list_entries))
> +               return -1;
> +
> +       if (dlb_bitmap_alloc(hw,
> +                            &hw->pf.avail_qed_freelist_entries,
> +                            DLB_MAX_NUM_LDB_CREDITS))
> +               return -1;
> +
> +       if (dlb_bitmap_fill(hw->pf.avail_qed_freelist_entries))
> +               return -1;
> +
> +       if (dlb_bitmap_alloc(hw,
> +                            &hw->pf.avail_dqed_freelist_entries,
> +                            DLB_MAX_NUM_DIR_CREDITS))
> +               return -1;
> +
> +       if (dlb_bitmap_fill(hw->pf.avail_dqed_freelist_entries))
> +               return -1;
> +
> +       if (dlb_bitmap_alloc(hw,
> +                            &hw->pf.avail_aqed_freelist_entries,
> +                            DLB_MAX_NUM_AQOS_ENTRIES))
> +               return -1;
> +
> +       if (dlb_bitmap_fill(hw->pf.avail_aqed_freelist_entries))
> +               return -1;
> +
> +       /* Initialize the hardware resource IDs */
> +       for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++)
> +               hw->domains[i].id = i;
> +
> +       for (i = 0; i < DLB_MAX_NUM_LDB_QUEUES; i++)
> +               hw->rsrcs.ldb_queues[i].id = i;
> +
> +       for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
> +               hw->rsrcs.ldb_ports[i].id = i;
> +
> +       for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
> +               hw->rsrcs.dir_pq_pairs[i].id = i;
> +
> +       for (i = 0; i < DLB_MAX_NUM_LDB_CREDIT_POOLS; i++)
> +               hw->rsrcs.ldb_credit_pools[i].id = i;
> +
> +       for (i = 0; i < DLB_MAX_NUM_DIR_CREDIT_POOLS; i++)
> +               hw->rsrcs.dir_credit_pools[i].id = i;
> +
> +       for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
> +               hw->rsrcs.sn_groups[i].id = i;
> +               /* Default mode (0) is 32 sequence numbers per queue */
> +               hw->rsrcs.sn_groups[i].mode = 0;
> +               hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 32;
> +               hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
> +       }
> +
> +       return 0;
> +}
> +
> +void dlb_resource_free(struct dlb_hw *hw)
> +{
> +       dlb_bitmap_free(hw->pf.avail_hist_list_entries);
> +
> +       dlb_bitmap_free(hw->pf.avail_qed_freelist_entries);
> +
> +       dlb_bitmap_free(hw->pf.avail_dqed_freelist_entries);
> +
> +       dlb_bitmap_free(hw->pf.avail_aqed_freelist_entries);
> +}
> +
> +void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)
> +{
> +       union dlb_sys_sys_alarm_int_enable r0;
> +
> +       r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
> +
> +       r0.field.vf_to_pf_isr_pend_error = 0;
> +
> +       DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
> +}
> diff --git a/drivers/event/dlb/pf/base/dlb_resource.h b/drivers/event/dlb/pf/base/dlb_resource.h
> new file mode 100644
> index 0000000..4f48b73
> --- /dev/null
> +++ b/drivers/event/dlb/pf/base/dlb_resource.h
> @@ -0,0 +1,876 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#ifndef __DLB_RESOURCE_H
> +#define __DLB_RESOURCE_H
> +
> +#include "dlb_hw_types.h"
> +#include "dlb_osdep_types.h"
> +
> +/**
> + * dlb_resource_init() - initialize the device
> + * @hw: pointer to struct dlb_hw.
> + *
> + * This function initializes the device's software state (pointed to by the hw
> + * argument) and programs global scheduling QoS registers. This function should
> + * be called during driver initialization.
> + *
> + * The dlb_hw struct must be unique per DLB device and persist until the device
> + * is reset.
> + *
> + * Return:
> + * Returns 0 upon success, -1 otherwise.
> + */
> +int dlb_resource_init(struct dlb_hw *hw);
> +
> +/**
> + * dlb_resource_free() - free device state memory
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function frees software state pointed to by dlb_hw. This function
> + * should be called when resetting the device or unloading the driver.
> + */
> +void dlb_resource_free(struct dlb_hw *hw);
> +
> +/**
> + * dlb_resource_reset() - reset in-use resources to their initial state
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function resets in-use resources, and makes them available for use.
> + */
> +void dlb_resource_reset(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_create_sched_domain() - create a scheduling domain
> + * @hw: dlb_hw handle for a particular device.
> + * @args: scheduling domain creation arguments.
> + * @resp: response structure.
> + *
> + * This function creates a scheduling domain containing the resources specified
> + * in args. The individual resources (queues, ports, credit pools) can be
> + * configured after creating a scheduling domain.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the domain ID.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, or the requested domain name
> + *         is already in use.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_create_sched_domain(struct dlb_hw *hw,
> +                              struct dlb_create_sched_domain_args *args,
> +                              struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_create_ldb_pool() - create a load-balanced credit pool
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: credit pool creation arguments.
> + * @resp: response structure.
> + *
> + * This function creates a load-balanced credit pool containing the number of
> + * requested credits.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the pool ID.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, the domain is not configured,
> + *         or the domain has already been started.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
> +                          u32 domain_id,
> +                          struct dlb_create_ldb_pool_args *args,
> +                          struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_create_dir_pool() - create a directed credit pool
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: credit pool creation arguments.
> + * @resp: response structure.
> + *
> + * This function creates a directed credit pool containing the number of
> + * requested credits.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the pool ID.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, the domain is not configured,
> + *         or the domain has already been started.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_create_dir_pool(struct dlb_hw *hw,
> +                          u32 domain_id,
> +                          struct dlb_create_dir_pool_args *args,
> +                          struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_create_ldb_queue() - create a load-balanced queue
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: queue creation arguments.
> + * @resp: response structure.
> + *
> + * This function creates a load-balanced queue.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the queue ID.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, the domain is not configured,
> + *         the domain has already been started, or the requested queue name is
> + *         already in use.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
> +                           u32 domain_id,
> +                           struct dlb_create_ldb_queue_args *args,
> +                           struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_create_dir_queue() - create a directed queue
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: queue creation arguments.
> + * @resp: response structure.
> + *
> + * This function creates a directed queue.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the queue ID.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, the domain is not configured,
> + *         or the domain has already been started.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_create_dir_queue(struct dlb_hw *hw,
> +                           u32 domain_id,
> +                           struct dlb_create_dir_queue_args *args,
> +                           struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_create_dir_port() - create a directed port
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: port creation arguments.
> + * @pop_count_dma_base: base address of the pop count memory. This can be
> + *                     a PA or an IOVA.
> + * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
> + * @resp: response structure.
> + *
> + * This function creates a directed port.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the port ID.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
> + *         pool ID is invalid, a pointer address is not properly aligned, the
> + *         domain is not configured, or the domain has already been started.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_create_dir_port(struct dlb_hw *hw,
> +                          u32 domain_id,
> +                          struct dlb_create_dir_port_args *args,
> +                          u64 pop_count_dma_base,
> +                          u64 cq_dma_base,
> +                          struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_create_ldb_port() - create a load-balanced port
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: port creation arguments.
> + * @pop_count_dma_base: base address of the pop count memory. This can be
> + *                      a PA or an IOVA.
> + * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
> + * @resp: response structure.
> + *
> + * This function creates a load-balanced port.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the port ID.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
> + *         pool ID is invalid, a pointer address is not properly aligned, the
> + *         domain is not configured, or the domain has already been started.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_create_ldb_port(struct dlb_hw *hw,
> +                          u32 domain_id,
> +                          struct dlb_create_ldb_port_args *args,
> +                          u64 pop_count_dma_base,
> +                          u64 cq_dma_base,
> +                          struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_start_domain() - start a scheduling domain
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: start domain arguments.
> + * @resp: response structure.
> + *
> + * This function starts a scheduling domain, which allows applications to send
> + * traffic through it. Once a domain is started, its resources can no longer be
> + * configured (besides QID remapping and port enable/disable).
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error.
> + *
> + * Errors:
> + * EINVAL - the domain is not configured, or the domain is already started.
> + */
> +int dlb_hw_start_domain(struct dlb_hw *hw,
> +                       u32 domain_id,
> +                       struct dlb_start_domain_args *args,
> +                       struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_map_qid() - map a load-balanced queue to a load-balanced port
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: map QID arguments.
> + * @resp: response structure.
> + *
> + * This function configures the DLB to schedule QEs from the specified queue to
> + * the specified port. Each load-balanced port can be mapped to up to 8 queues;
> + * each load-balanced queue can potentially map to all the load-balanced ports.
> + *
> + * A successful return does not necessarily mean the mapping was configured. If
> + * this function is unable to immediately map the queue to the port, it will
> + * add the requested operation to a per-port list of pending map/unmap
> + * operations, and (if it's not already running) launch a kernel thread that
> + * periodically attempts to process all pending operations. In a sense, this is
> + * an asynchronous function.
> + *
> + * This asynchronicity creates two views of the state of hardware: the actual
> + * hardware state and the requested state (as if every request completed
> + * immediately). If there are any pending map/unmap operations, the requested
> + * state will differ from the actual state. All validation is performed with
> + * respect to the pending state; for instance, if there are 8 pending map
> + * operations for port X, a request for a 9th will fail because a load-balanced
> + * port can only map up to 8 queues.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
> + *         the domain is not configured.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_map_qid(struct dlb_hw *hw,
> +                  u32 domain_id,
> +                  struct dlb_map_qid_args *args,
> +                  struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: unmap QID arguments.
> + * @resp: response structure.
> + *
> + * This function configures the DLB to stop scheduling QEs from the specified
> + * queue to the specified port.
> + *
> + * A successful return does not necessarily mean the mapping was removed. If
> + * this function is unable to immediately unmap the queue from the port, it
> + * will add the requested operation to a per-port list of pending map/unmap
> + * operations, and (if it's not already running) launch a kernel thread that
> + * periodically attempts to process all pending operations. See
> + * dlb_hw_map_qid() for more details.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error.
> + *
> + * Errors:
> + * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
> + *         the domain is not configured.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_unmap_qid(struct dlb_hw *hw,
> +                    u32 domain_id,
> +                    struct dlb_unmap_qid_args *args,
> +                    struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_finish_unmap_qid_procedures() - finish any pending unmap procedures
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function attempts to finish any outstanding unmap procedures.
> + * This function should be called by the kernel thread responsible for
> + * finishing map/unmap procedures.
> + *
> + * Return:
> + * Returns the number of procedures that weren't completed.
> + */
> +unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw);
> +
> +/**
> + * dlb_finish_map_qid_procedures() - finish any pending map procedures
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function attempts to finish any outstanding map procedures.
> + * This function should be called by the kernel thread responsible for
> + * finishing map/unmap procedures.
> + *
> + * Return:
> + * Returns the number of procedures that weren't completed.
> + */
> +unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_enable_ldb_port() - enable a load-balanced port for scheduling
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: port enable arguments.
> + * @resp: response structure.
> + *
> + * This function configures the DLB to schedule QEs to a load-balanced port.
> + * Ports are enabled by default.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error.
> + *
> + * Errors:
> + * EINVAL - The port ID is invalid or the domain is not configured.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_enable_ldb_port(struct dlb_hw *hw,
> +                          u32 domain_id,
> +                          struct dlb_enable_ldb_port_args *args,
> +                          struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_disable_ldb_port() - disable a load-balanced port for scheduling
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: port disable arguments.
> + * @resp: response structure.
> + *
> + * This function configures the DLB to stop scheduling QEs to a load-balanced
> + * port. Ports are enabled by default.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error.
> + *
> + * Errors:
> + * EINVAL - The port ID is invalid or the domain is not configured.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_disable_ldb_port(struct dlb_hw *hw,
> +                           u32 domain_id,
> +                           struct dlb_disable_ldb_port_args *args,
> +                           struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_enable_dir_port() - enable a directed port for scheduling
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: port enable arguments.
> + * @resp: response structure.
> + *
> + * This function configures the DLB to schedule QEs to a directed port.
> + * Ports are enabled by default.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error.
> + *
> + * Errors:
> + * EINVAL - The port ID is invalid or the domain is not configured.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_enable_dir_port(struct dlb_hw *hw,
> +                          u32 domain_id,
> +                          struct dlb_enable_dir_port_args *args,
> +                          struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_disable_dir_port() - disable a directed port for scheduling
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: port disable arguments.
> + * @resp: response structure.
> + *
> + * This function configures the DLB to stop scheduling QEs to a directed port.
> + * Ports are enabled by default.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error.
> + *
> + * Errors:
> + * EINVAL - The port ID is invalid or the domain is not configured.
> + * EFAULT - Internal error (resp->status not set).
> + */
> +int dlb_hw_disable_dir_port(struct dlb_hw *hw,
> +                           u32 domain_id,
> +                           struct dlb_disable_dir_port_args *args,
> +                           struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_configure_ldb_cq_interrupt() - configure load-balanced CQ for interrupts
> + * @hw: dlb_hw handle for a particular device.
> + * @port_id: load-balanced port ID.
> + * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
> + *         else a value up to 64.
> + * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
> + * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
> + *     greater than 0.
> + *
> + * This function configures the DLB registers for load-balanced CQ's interrupts.
> + * This doesn't enable the CQ's interrupt; that can be done with
> + * dlb_arm_cq_interrupt() or through an interrupt arm QE.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - The port ID is invalid.
> + */
> +int dlb_configure_ldb_cq_interrupt(struct dlb_hw *hw,
> +                                  int port_id,
> +                                  int vector,
> +                                  int mode,
> +                                  u16 threshold);
> +
> +/**
> + * dlb_configure_dir_cq_interrupt() - configure directed CQ for interrupts
> + * @hw: dlb_hw handle for a particular device.
> + * @port_id: directed port ID.
> + * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
> + *         else a value up to 64.
> + * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
> + * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
> + *     greater than 0.
> + *
> + * This function configures the DLB registers for directed CQ's interrupts.
> + * This doesn't enable the CQ's interrupt; that can be done with
> + * dlb_arm_cq_interrupt() or through an interrupt arm QE.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise.
> + *
> + * Errors:
> + * EINVAL - The port ID is invalid.
> + */
> +int dlb_configure_dir_cq_interrupt(struct dlb_hw *hw,
> +                                  int port_id,
> +                                  int vector,
> +                                  int mode,
> +                                  u16 threshold);
> +
> +/**
> + * dlb_enable_alarm_interrupts() - enable certain hardware alarm interrupts
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function configures the ingress error alarm. (Other alarms are enabled
> + * by default.)
> + */
> +void dlb_enable_alarm_interrupts(struct dlb_hw *hw);
> +
> +/**
> + * dlb_disable_alarm_interrupts() - disable certain hardware alarm interrupts
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function configures the ingress error alarm. (Other alarms are disabled
> + * by default.)
> + */
> +void dlb_disable_alarm_interrupts(struct dlb_hw *hw);
> +
> +/**
> + * dlb_set_msix_mode() - configure the hardware MSI-X mode
> + * @hw: dlb_hw handle for a particular device.
> + * @mode: MSI-X mode (DLB_MSIX_MODE_PACKED or DLB_MSIX_MODE_COMPRESSED)
> + *
> + * This function configures the hardware to use either packed or compressed
> + * mode. This function should not be called if using MSI interrupts.
> + */
> +void dlb_set_msix_mode(struct dlb_hw *hw, int mode);
> +
> +/**
> + * dlb_arm_cq_interrupt() - arm a CQ's interrupt
> + * @hw: dlb_hw handle for a particular device.
> + * @port_id: port ID
> + * @is_ldb: true for load-balanced port, false for a directed port
> + *
> + * This function arms the CQ's interrupt. The CQ must be configured prior to
> + * calling this function.
> + *
> + * The function does no parameter validation; that is the caller's
> + * responsibility.
> + *
> + * Return: returns 0 upon success, <0 otherwise.
> + *
> + * EINVAL - Invalid port ID.
> + */
> +int dlb_arm_cq_interrupt(struct dlb_hw *hw, int port_id, bool is_ldb);
> +
> +/**
> + * dlb_read_compressed_cq_intr_status() - read compressed CQ interrupt status
> + * @hw: dlb_hw handle for a particular device.
> + * @ldb_interrupts: 2-entry array of u32 bitmaps
> + * @dir_interrupts: 4-entry array of u32 bitmaps
> + *
> + * This function can be called from a compressed CQ interrupt handler to
> + * determine which CQ interrupts have fired. The caller should take appropriate
> + * action (such as waking threads blocked on a CQ's interrupt) and then ack the
> + * interrupts with dlb_ack_compressed_cq_intr().
> + */
> +void dlb_read_compressed_cq_intr_status(struct dlb_hw *hw,
> +                                       u32 *ldb_interrupts,
> +                                       u32 *dir_interrupts);
> +
> +/**
> + * dlb_ack_compressed_cq_intr() - ack compressed CQ interrupts
> + * @hw: dlb_hw handle for a particular device.
> + * @ldb_interrupts: 2-entry array of u32 bitmaps
> + * @dir_interrupts: 4-entry array of u32 bitmaps
> + *
> + * This function ACKs compressed CQ interrupts. Its arguments should be the
> + * same ones passed to dlb_read_compressed_cq_intr_status().
> + */
> +void dlb_ack_compressed_cq_intr(struct dlb_hw *hw,
> +                               u32 *ldb_interrupts,
> +                               u32 *dir_interrupts);
> +
> +/**
> + * dlb_process_alarm_interrupt() - process an alarm interrupt
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function reads the alarm syndrome, logs it, and acks the interrupt.
> + * This function should be called from the alarm interrupt handler when
> + * interrupt vector DLB_INT_ALARM fires.
> + */
> +void dlb_process_alarm_interrupt(struct dlb_hw *hw);
> +
> +/**
> + * dlb_process_ingress_error_interrupt() - process ingress error interrupts
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function reads the alarm syndrome, logs it, notifies user-space, and
> + * acks the interrupt. This function should be called from the alarm interrupt
> + * handler when interrupt vector DLB_INT_INGRESS_ERROR fires.
> + */
> +void dlb_process_ingress_error_interrupt(struct dlb_hw *hw);
> +
> +/**
> + * dlb_get_group_sequence_numbers() - return a group's number of SNs per queue
> + * @hw: dlb_hw handle for a particular device.
> + * @group_id: sequence number group ID.
> + *
> + * This function returns the configured number of sequence numbers per queue
> + * for the specified group.
> + *
> + * Return:
> + * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
> + */
> +int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id);
> +
> +/**
> + * dlb_get_group_sequence_number_occupancy() - return a group's in-use slots
> + * @hw: dlb_hw handle for a particular device.
> + * @group_id: sequence number group ID.
> + *
> + * This function returns the group's number of in-use slots (i.e. load-balanced
> + * queues using the specified group).
> + *
> + * Return:
> + * Returns -EINVAL if group_id is invalid, else the group's occupancy.
> + */
> +int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
> +                                           unsigned int group_id);
> +
> +/**
> + * dlb_set_group_sequence_numbers() - assign a group's number of SNs per queue
> + * @hw: dlb_hw handle for a particular device.
> + * @group_id: sequence number group ID.
> + * @val: requested amount of sequence numbers per queue.
> + *
> + * This function configures the group's number of sequence numbers per queue.
> + * val can be a power-of-two between 32 and 1024, inclusive. This setting can
> + * be configured until the first ordered load-balanced queue is configured, at
> + * which point the configuration is locked.
> + *
> + * Return:
> + * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
> + * ordered queue is configured.
> + */
> +int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
> +                                  unsigned int group_id,
> +                                  unsigned long val);
> +
> +/**
> + * dlb_reset_domain() - reset a scheduling domain
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + *
> + * This function resets and frees a DLB scheduling domain and its associated
> + * resources.
> + *
> + * Pre-condition: the driver must ensure software has stopped sending QEs
> + * through this domain's producer ports before invoking this function, or
> + * undefined behavior will result.
> + *
> + * Return:
> + * Returns 0 upon success, -1 otherwise.
> + *
> + * EINVAL - Invalid domain ID, or the domain is not configured.
> + * EFAULT - Internal error. (Possibly caused if the pre-condition is not
> + *         met.)
> + * ETIMEDOUT - Hardware component didn't reset in the expected time.
> + */
> +int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id);
> +
> +/**
> + * dlb_ldb_port_owned_by_domain() - query whether a port is owned by a domain
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @port_id: port ID.
> + *
> + * This function returns whether a load-balanced port is owned by a specified
> + * domain.
> + *
> + * Return:
> + * Returns 0 if false, 1 if true, <0 otherwise.
> + *
> + * EINVAL - Invalid domain or port ID, or the domain is not configured.
> + */
> +int dlb_ldb_port_owned_by_domain(struct dlb_hw *hw,
> +                                u32 domain_id,
> +                                u32 port_id);
> +
> +/**
> + * dlb_dir_port_owned_by_domain() - query whether a port is owned by a domain
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @port_id: port ID.
> + *
> + * This function returns whether a directed port is owned by a specified
> + * domain.
> + *
> + * Return:
> + * Returns 0 if false, 1 if true, <0 otherwise.
> + *
> + * EINVAL - Invalid domain or port ID, or the domain is not configured.
> + */
> +int dlb_dir_port_owned_by_domain(struct dlb_hw *hw,
> +                                u32 domain_id,
> +                                u32 port_id);
> +
> +/**
> + * dlb_hw_get_num_resources() - query the PCI function's available resources
> + * @arg: pointer to resource counts.
> + *
> + * This function returns the number of available resources for the PF.
> + */
> +void dlb_hw_get_num_resources(struct dlb_hw *hw,
> +                             struct dlb_get_num_resources_args *arg);
> +
> +/**
> + * dlb_hw_get_num_used_resources() - query the PCI function's used resources
> + * @arg: pointer to resource counts.
> + *
> + * This function returns the number of resources in use by the PF. It fills in
> + * the fields that args points to, except the following:
> + * - max_contiguous_atomic_inflights
> + * - max_contiguous_hist_list_entries
> + * - max_contiguous_ldb_credits
> + * - max_contiguous_dir_credits
> + */
> +void dlb_hw_get_num_used_resources(struct dlb_hw *hw,
> +                                  struct dlb_get_num_resources_args *arg);
> +
> +/**
> + * dlb_disable_dp_vasr_feature() - disable directed pipe VAS reset hardware
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function disables certain hardware in the directed pipe,
> + * necessary to workaround a DLB VAS reset issue.
> + */
> +void dlb_disable_dp_vasr_feature(struct dlb_hw *hw);
> +
> +/**
> + * dlb_enable_excess_tokens_alarm() - enable interrupts for the excess token
> + * pop alarm
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function enables the PF ingress error alarm interrupt to fire when an
> + * excess token pop occurs.
> + */
> +void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw);
> +
> +/**
> + * dlb_disable_excess_tokens_alarm() - disable interrupts for the excess token
> + * pop alarm
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function disables the PF ingress error alarm interrupt to fire when an
> + * excess token pop occurs.
> + */
> +void dlb_disable_excess_tokens_alarm(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: queue depth args
> + *
> + * This function returns the depth of a load-balanced queue.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the depth.
> + *
> + * Errors:
> + * EINVAL - Invalid domain ID or queue ID.
> + */
> +int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
> +                              u32 domain_id,
> +                              struct dlb_get_ldb_queue_depth_args *args,
> +                              struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_get_dir_queue_depth() - returns the depth of a directed queue
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: queue depth args
> + *
> + * This function returns the depth of a directed queue.
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the depth.
> + *
> + * Errors:
> + * EINVAL - Invalid domain ID or queue ID.
> + */
> +int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
> +                              u32 domain_id,
> +                              struct dlb_get_dir_queue_depth_args *args,
> +                              struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_pending_port_unmaps() - returns the number of unmap operations in
> + *     progress for a load-balanced port.
> + * @hw: dlb_hw handle for a particular device.
> + * @domain_id: domain ID.
> + * @args: number of unmaps in progress args
> + *
> + * Return:
> + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
> + * assigned a detailed error code from enum dlb_error. If successful, resp->id
> + * contains the number of unmaps in progress.
> + *
> + * Errors:
> + * EINVAL - Invalid port ID.
> + */
> +int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
> +                              u32 domain_id,
> +                              struct dlb_pending_port_unmaps_args *args,
> +                              struct dlb_cmd_response *resp);
> +
> +/**
> + * dlb_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
> + *     ports.
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function must be called prior to configuring scheduling domains.
> + */
> +void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports
> + * @hw: dlb_hw handle for a particular device.
> + *
> + * This function must be called prior to configuring scheduling domains.
> + */
> +void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_set_qe_arbiter_weights() - program QE arbiter weights
> + * @hw: dlb_hw handle for a particular device.
> + * @weight: 8-entry array of arbiter weights.
> + *
> + * weight[N] programs priority N's weight. In cases where the 8 priorities are
> + * reduced to 4 bins, the mapping is:
> + * - weight[1] programs bin 0
> + * - weight[3] programs bin 1
> + * - weight[5] programs bin 2
> + * - weight[7] programs bin 3
> + */
> +void dlb_hw_set_qe_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
> +
> +/**
> + * dlb_hw_set_qid_arbiter_weights() - program QID arbiter weights
> + * @hw: dlb_hw handle for a particular device.
> + * @weight: 8-entry array of arbiter weights.
> + *
> + * weight[N] programs priority N's weight. In cases where the 8 priorities are
> + * reduced to 4 bins, the mapping is:
> + * - weight[1] programs bin 0
> + * - weight[3] programs bin 1
> + * - weight[5] programs bin 2
> + * - weight[7] programs bin 3
> + */
> +void dlb_hw_set_qid_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
> +
> +/**
> + * dlb_hw_enable_pp_sw_alarms() - enable out-of-credit alarm for all producer
> + * ports
> + * @hw: dlb_hw handle for a particular device.
> + */
> +void dlb_hw_enable_pp_sw_alarms(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_disable_pp_sw_alarms() - disable out-of-credit alarm for all producer
> + * ports
> + * @hw: dlb_hw handle for a particular device.
> + */
> +void dlb_hw_disable_pp_sw_alarms(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_disable_pf_to_vf_isr_pend_err() - disable alarm triggered by PF
> + *     access to VF's ISR pending register
> + * @hw: dlb_hw handle for a particular device.
> + */
> +void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw);
> +
> +/**
> + * dlb_hw_disable_vf_to_pf_isr_pend_err() - disable alarm triggered by VF
> + *     access to PF's ISR pending register
> + * @hw: dlb_hw handle for a particular device.
> + */
> +void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw);
> +
> +#endif /* __DLB_RESOURCE_H */
> diff --git a/drivers/event/dlb/pf/dlb_main.c b/drivers/event/dlb/pf/dlb_main.c
> new file mode 100644
> index 0000000..714ae5a
> --- /dev/null
> +++ b/drivers/event/dlb/pf/dlb_main.c
> @@ -0,0 +1,591 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#include <stdint.h>
> +#include <stdbool.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <assert.h>
> +#include <unistd.h>
> +#include <string.h>
> +
> +#include <rte_malloc.h>
> +#include <rte_errno.h>
> +
> +#include "base/dlb_resource.h"
> +#include "base/dlb_osdep.h"
> +#include "base/dlb_regs.h"
> +#include "../dlb_priv.h"
> +#include "../dlb_inline_fns.h"
> +#include "../dlb_user.h"
> +#include "dlb_main.h"
> +
> +unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
> +
> +#define DLB_PCI_CFG_SPACE_SIZE 256
> +#define DLB_PCI_CAP_POINTER 0x34
> +#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
> +#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
> +#define DLB_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)
> +#define DLB_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)
> +#define DLB_PCI_EXT_CAP_ID_ERR 1
> +#define DLB_PCI_ERR_UNCOR_MASK 8
> +#define DLB_PCI_ERR_UNC_UNSUP  0x00100000
> +
> +#define DLB_PCI_EXP_DEVCTL 8
> +#define DLB_PCI_LNKCTL 16
> +#define DLB_PCI_SLTCTL 24
> +#define DLB_PCI_RTCTL 28
> +#define DLB_PCI_EXP_DEVCTL2 40
> +#define DLB_PCI_LNKCTL2 48
> +#define DLB_PCI_SLTCTL2 56
> +#define DLB_PCI_CMD 4
> +#define DLB_PCI_X_CMD 2
> +#define DLB_PCI_EXP_DEVSTA 10
> +#define DLB_PCI_EXP_DEVSTA_TRPND 0x20
> +#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000
> +#define DLB_PCI_PASID_CTRL 6
> +#define DLB_PCI_PASID_CAP 4
> +
> +#define DLB_PCI_CAP_ID_EXP       0x10
> +#define DLB_PCI_CAP_ID_MSIX      0x11
> +#define DLB_PCI_EXT_CAP_ID_PAS   0x1B
> +#define DLB_PCI_EXT_CAP_ID_PRI   0x13
> +#define DLB_PCI_EXT_CAP_ID_ACS   0xD
> +
> +#define DLB_PCI_PASID_CAP_EXEC          0x2
> +#define DLB_PCI_PASID_CAP_PRIV          0x4
> +#define DLB_PCI_PASID_CTRL_ENABLE       0x1
> +#define DLB_PCI_PRI_CTRL_ENABLE         0x1
> +#define DLB_PCI_PRI_ALLOC_REQ           0xC
> +#define DLB_PCI_PRI_CTRL                0x4
> +#define DLB_PCI_MSIX_FLAGS              0x2
> +#define DLB_PCI_MSIX_FLAGS_ENABLE       0x8000
> +#define DLB_PCI_MSIX_FLAGS_MASKALL      0x4000
> +#define DLB_PCI_ERR_ROOT_STATUS         0x30
> +#define DLB_PCI_ERR_COR_STATUS          0x10
> +#define DLB_PCI_ERR_UNCOR_STATUS        0x4
> +#define DLB_PCI_COMMAND_INTX_DISABLE    0x400
> +#define DLB_PCI_ACS_CAP                 0x4
> +#define DLB_PCI_ACS_CTRL                0x6
> +#define DLB_PCI_ACS_SV                  0x1
> +#define DLB_PCI_ACS_RR                  0x4
> +#define DLB_PCI_ACS_CR                  0x8
> +#define DLB_PCI_ACS_UF                  0x10
> +#define DLB_PCI_ACS_EC                  0x20
> +
> +/* Walk the PCIe extended capability list looking for capability @id.
> + *
> + * The extended capability chain begins at config-space offset 0x100
> + * (DLB_PCI_CFG_SPACE_SIZE); each 32-bit header encodes the capability ID
> + * and the offset of the next entry. A next pointer of 0 (or any value
> + * <= 0xFF, i.e. inside the standard header) terminates the walk.
> + *
> + * Return: config-space offset of the capability, or -1 if it is not
> + * found or a config read fails.
> + */
> +static int dlb_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
> +{
> +       uint32_t hdr;
> +       size_t sz;
> +       int pos;
> +
> +       /* Extended capabilities live above the 256-byte standard header */
> +       pos = DLB_PCI_CFG_SPACE_SIZE;
> +       sz = sizeof(hdr);
> +
> +       while (pos > 0xFF) {
> +               if (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)
> +                       return -1;
> +
> +               if (DLB_PCI_EXT_CAP_ID(hdr) == id)
> +                       return pos;
> +
> +               pos = DLB_PCI_EXT_CAP_NEXT(hdr);
> +       }
> +
> +       return -1;
> +}
> +
> +/* Walk the standard PCI capability list looking for capability @id.
> + *
> + * The list head is read from config offset 0x34 (DLB_PCI_CAP_POINTER);
> + * each entry's 16-bit header holds the capability ID in the low byte and
> + * the next pointer in the high byte. An offset below 0x40 (inside the
> + * standard header) or an ID of 0xFF terminates the walk.
> + *
> + * Return: config-space offset of the capability, or -1 if it is not
> + * found or a config read fails.
> + */
> +static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
> +{
> +       uint8_t pos;
> +       int ret;
> +       uint16_t hdr;
> +
> +       ret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);
> +       /* NOTE(review): pos is masked before ret is checked; if the read
> +        * failed, pos may be uninitialized here. Harmless, since the error
> +        * return below is taken before pos is used, but the check should
> +        * ideally precede the mask.
> +        */
> +       pos &= 0xFC;
> +
> +       if (ret != 1)
> +               return -1;
> +
> +       while (pos > 0x3F) {
> +               ret = rte_pci_read_config(pdev, &hdr, 2, pos);
> +               if (ret != 2)
> +                       return -1;
> +
> +               if (DLB_PCI_CAP_ID(hdr) == id)
> +                       return pos;
> +
> +               if (DLB_PCI_CAP_ID(hdr) == 0xFF)
> +                       return -1;
> +
> +               pos = DLB_PCI_CAP_NEXT(hdr);
> +       }
> +
> +       return -1;
> +}
> +
> +/* Mask "Unsupported Request" reporting in the device's AER uncorrectable
> + * error mask register.
> + *
> + * Locates the AER extended capability, reads the uncorrectable error mask
> + * register (offset DLB_PCI_ERR_UNCOR_MASK within the capability), sets the
> + * UR bit (DLB_PCI_ERR_UNC_UNSUP), and writes the register back. Called by
> + * dlb_probe() because the device can incorrectly generate URs for certain
> + * messages (see the comment at the call site).
> + *
> + * Return: 0 on success, negative on failure (capability not found or a
> + * config-space access failed).
> + */
> +static int dlb_mask_ur_err(struct rte_pci_device *pdev)
> +{
> +       uint32_t mask;
> +       size_t sz = sizeof(mask);
> +       int pos = dlb_pci_find_ext_capability(pdev, DLB_PCI_EXT_CAP_ID_ERR);
> +
> +       if (pos < 0) {
> +               printf("[%s()] failed to find the aer capability\n",
> +                      __func__);
> +               return pos;
> +       }
> +
> +       pos += DLB_PCI_ERR_UNCOR_MASK;
> +
> +       if (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {
> +               printf("[%s()] Failed to read uncorrectable error mask reg\n",
> +                      __func__);
> +               return -1;
> +       }
> +
> +       /* Mask Unsupported Request errors */
> +       mask |= DLB_PCI_ERR_UNC_UNSUP;
> +
> +       if (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {
> +               printf("[%s()] Failed to write uncorrectable error mask reg at offset %d\n",
> +                      __func__, pos);
> +               return -1;
> +       }
> +
> +       return 0;
> +}
> +
> +/* dlb_probe() - allocate and initialize a PF device structure.
> + *
> + * Allocates the dlb_dev, records the BAR 0/BAR 2 mappings already set up
> + * by the PCI bus driver, resets the function, masks UR errors, and
> + * initializes driver state, hardware resources, and hardware defaults.
> + *
> + * Return: the new dlb_dev on success; NULL on failure with rte_errno set
> + * to a negative error code.
> + */
> +struct dlb_dev *
> +dlb_probe(struct rte_pci_device *pdev)
> +{
> +       struct dlb_dev *dlb_dev;
> +       int ret = 0;
> +
> +       dlb_dev = rte_malloc("DLB_PF", sizeof(struct dlb_dev),
> +                            RTE_CACHE_LINE_SIZE);
> +
> +       if (dlb_dev == NULL) {
> +               ret = -ENOMEM;
> +               goto dlb_dev_malloc_fail;
> +       }
> +
> +       /* Log only after allocation succeeds: dlb_dev was previously passed
> +        * to DLB_INFO while still uninitialized.
> +        */
> +       DLB_INFO(dlb_dev, "probe\n");
> +
> +       /* PCI Bus driver has already mapped bar space into process.
> +        * Save off our IO register and FUNC addresses.
> +        */
> +
> +       /* BAR 0 */
> +       if (pdev->mem_resource[0].addr == NULL) {
> +               DLB_ERR(dlb_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
> +               ret = -EINVAL;
> +               goto pci_mmap_bad_addr;
> +       }
> +       dlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
> +       dlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
> +
> +       DLB_INFO(dlb_dev, "DLB FUNC VA=%p, PA=%p, len=%"PRIu64"\n",
> +                (void *)dlb_dev->hw.func_kva,
> +                (void *)dlb_dev->hw.func_phys_addr,
> +                pdev->mem_resource[0].len);
> +
> +       /* BAR 2 */
> +       if (pdev->mem_resource[2].addr == NULL) {
> +               DLB_ERR(dlb_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
> +               ret = -EINVAL;
> +               goto pci_mmap_bad_addr;
> +       }
> +       dlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
> +       dlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
> +
> +       DLB_INFO(dlb_dev, "DLB CSR VA=%p, PA=%p, len=%"PRIu64"\n",
> +                (void *)dlb_dev->hw.csr_kva,
> +                (void *)dlb_dev->hw.csr_phys_addr,
> +                pdev->mem_resource[2].len);
> +
> +       dlb_dev->pdev = pdev;
> +
> +       ret = dlb_pf_reset(dlb_dev);
> +       if (ret)
> +               goto dlb_reset_fail;
> +
> +       /* DLB incorrectly sends URs in response to certain messages. Mask UR
> +        * errors to prevent these from being propagated to the MCA.
> +        */
> +       ret = dlb_mask_ur_err(pdev);
> +       if (ret)
> +               goto mask_ur_err_fail;
> +
> +       ret = dlb_pf_init_driver_state(dlb_dev);
> +       if (ret)
> +               goto init_driver_state_fail;
> +
> +       ret = dlb_resource_init(&dlb_dev->hw);
> +       if (ret)
> +               goto resource_init_fail;
> +
> +       dlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);
> +
> +       dlb_pf_init_hardware(dlb_dev);
> +
> +       return dlb_dev;
> +
> +resource_init_fail:
> +       dlb_resource_free(&dlb_dev->hw);
> +init_driver_state_fail:
> +mask_ur_err_fail:
> +dlb_reset_fail:
> +pci_mmap_bad_addr:
> +       rte_free(dlb_dev);
> +dlb_dev_malloc_fail:
> +       rte_errno = ret;
> +       return NULL;
> +}
> +
> +/* dlb_pf_reset() - issue an FLR (Function Level Reset) of the DLB PF.
> + *
> + * Saves the PCI config header and the PCIe/PRI capability control words,
> + * clears the command register, waits for pending transactions to drain,
> + * triggers the FLR, then restores the saved state, clears sticky AER
> + * status, re-enables MSI-X and programs ACS.
> + *
> + * Return: 0 on success, negative value on failure.
> + *
> + * Fixes vs v4: the config-space read failure paths previously did
> + * "return ret" where ret was either still 0 (reported success) or a
> + * positive short-read byte count (also treated as success by the
> + * caller); they now return -1.
> + *
> + * NOTE(review): error paths use printf(); consider converting them to
> + * the driver's DLB_ERR()/rte_log infrastructure for consistency.
> + */
> +int
> +dlb_pf_reset(struct dlb_dev *dlb_dev)
> +{
> +       int msix_cap_offset, err_cap_offset, acs_cap_offset, wait_count;
> +       uint16_t dev_ctl_word, dev_ctl2_word, lnk_word, lnk_word2;
> +       uint16_t rt_ctl_word, pri_reqs_dword,  pri_ctrl_word;
> +       struct rte_pci_device *pdev = dlb_dev->pdev;
> +       uint16_t devsta_busy_word, devctl_word;
> +       int pcie_cap_offset, pri_cap_offset;
> +       uint16_t slt_word, slt_word2, cmd;
> +       int ret = 0, i = 0;
> +       uint32_t dword[16];
> +       off_t off;
> +
> +       /* Save PCI config state */
> +
> +       for (i = 0; i < 16; i++) {
> +               if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
> +                       return -1; /* was "return ret" with ret still 0 */
> +       }
> +
> +       pcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);
> +
> +       if (pcie_cap_offset < 0) {
> +               printf("[%s()] failed to find the pcie capability\n",
> +                      __func__);
> +               return pcie_cap_offset;
> +       }
> +
> +       /* Reads below are best-effort: on failure the saved value is
> +        * zeroed rather than aborting the reset.
> +        */
> +       off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
> +       if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
> +               dev_ctl_word = 0;
> +
> +       off = pcie_cap_offset + DLB_PCI_LNKCTL;
> +       if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
> +               lnk_word = 0;
> +
> +       off = pcie_cap_offset + DLB_PCI_SLTCTL;
> +       if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
> +               slt_word = 0;
> +
> +       off = pcie_cap_offset + DLB_PCI_RTCTL;
> +       if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
> +               rt_ctl_word = 0;
> +
> +       off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
> +       if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
> +               dev_ctl2_word = 0;
> +
> +       off = pcie_cap_offset + DLB_PCI_LNKCTL2;
> +       if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
> +               lnk_word2 = 0;
> +
> +       off = pcie_cap_offset + DLB_PCI_SLTCTL2;
> +       if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
> +               slt_word2 = 0;
> +
> +       pri_cap_offset = dlb_pci_find_ext_capability(pdev,
> +                                                    DLB_PCI_EXT_CAP_ID_PRI);
> +       if (pri_cap_offset >= 0) {
> +               off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
> +               if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
> +                       pri_reqs_dword = 0;
> +       }
> +
> +       /* clear the PCI command register before issuing the FLR */
> +
> +       off = DLB_PCI_CMD;
> +       cmd = 0;
> +       if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> +               printf("[%s()] failed to write pci config space at offset %d\n",
> +                      __func__, (int)off);
> +               return -1;
> +       }
> +
> +       /* issue the FLR: wait (with exponential backoff, up to ~1.5s
> +        * total) for pending transactions to drain first.
> +        */
> +       for (wait_count = 0; wait_count < 4; wait_count++) {
> +               int sleep_time;
> +
> +               off = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;
> +               ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
> +               if (ret != 2) {
> +                       printf("[%s()] failed to read the pci device status\n",
> +                              __func__);
> +                       return -1; /* ret may be a positive short-read count */
> +               }
> +
> +               if (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))
> +                       break;
> +
> +               sleep_time = (1 << (wait_count)) * 100;
> +               rte_delay_ms(sleep_time);
> +       }
> +
> +       if (wait_count == 4) {
> +               printf("[%s()] wait for pci pending transactions timed out\n",
> +                      __func__);
> +               return -1;
> +       }
> +
> +       off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
> +       ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
> +       if (ret != 2) {
> +               printf("[%s()] failed to read the pcie device control\n",
> +                      __func__);
> +               return -1; /* ret may be a positive short-read count */
> +       }
> +
> +       devctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;
> +
> +       if (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {
> +               printf("[%s()] failed to write the pcie device control at offset %d\n",
> +                      __func__, (int)off);
> +               return -1;
> +       }
> +
> +       /* give the device time to complete the FLR */
> +       rte_delay_ms(100);
> +
> +       /* Restore PCI config state */
> +
> +       if (pcie_cap_offset >= 0) {
> +               off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
> +               if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
> +                       printf("[%s()] failed to write the pcie device control at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = pcie_cap_offset + DLB_PCI_LNKCTL;
> +               if (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = pcie_cap_offset + DLB_PCI_SLTCTL;
> +               if (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = pcie_cap_offset + DLB_PCI_RTCTL;
> +               if (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
> +               if (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = pcie_cap_offset + DLB_PCI_LNKCTL2;
> +               if (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = pcie_cap_offset + DLB_PCI_SLTCTL2;
> +               if (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +       }
> +
> +       if (pri_cap_offset >= 0) {
> +               pri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;
> +
> +               off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
> +               if (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = pri_cap_offset + DLB_PCI_PRI_CTRL;
> +               if (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +       }
> +
> +       /* Clear sticky AER status (write-1-to-clear) left over from the
> +        * reset.
> +        */
> +       err_cap_offset = dlb_pci_find_ext_capability(pdev,
> +                                                    DLB_PCI_EXT_CAP_ID_ERR);
> +       if (err_cap_offset >= 0) {
> +               uint32_t tmp;
> +
> +               off = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;
> +               if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
> +                       tmp = 0;
> +
> +               if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = err_cap_offset + DLB_PCI_ERR_COR_STATUS;
> +               if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
> +                       tmp = 0;
> +
> +               if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;
> +               if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
> +                       tmp = 0;
> +
> +               if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +       }
> +
> +       /* Restore the saved config header, highest dword first so the
> +        * command/BAR registers come back last.
> +        */
> +       for (i = 16; i > 0; i--) {
> +               off = (i - 1) * 4;
> +               if (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +       }
> +
> +       off = DLB_PCI_CMD;
> +       if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
> +               cmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;
> +               if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space\n",
> +                              __func__);
> +                       return -1;
> +               }
> +       }
> +
> +       /* Re-enable MSI-X: enable with all vectors masked, then unmask */
> +       msix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);
> +       if (msix_cap_offset >= 0) {
> +               off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
> +               if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
> +                       cmd |= DLB_PCI_MSIX_FLAGS_ENABLE;
> +                       cmd |= DLB_PCI_MSIX_FLAGS_MASKALL;
> +                       if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> +                               printf("[%s()] failed to write msix flags\n",
> +                                      __func__);
> +                               return -1;
> +                       }
> +               }
> +
> +               off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
> +               if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
> +                       cmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;
> +                       if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
> +                               printf("[%s()] failed to write msix flags\n",
> +                                      __func__);
> +                               return -1;
> +                       }
> +               }
> +       }
> +
> +       /* Enable the supported ACS controls, then clear the ones that
> +        * would redirect/block peer-to-peer requests.
> +        */
> +       acs_cap_offset = dlb_pci_find_ext_capability(pdev,
> +                                                    DLB_PCI_EXT_CAP_ID_ACS);
> +       if (acs_cap_offset >= 0) {
> +               uint16_t acs_cap, acs_ctrl, acs_mask;
> +               off = acs_cap_offset + DLB_PCI_ACS_CAP;
> +               if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
> +                       acs_cap = 0;
> +
> +               off = acs_cap_offset + DLB_PCI_ACS_CTRL;
> +               if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
> +                       acs_ctrl = 0;
> +
> +               acs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;
> +               acs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);
> +               acs_ctrl |= (acs_cap & acs_mask);
> +
> +               if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +
> +               off = acs_cap_offset + DLB_PCI_ACS_CTRL;
> +               if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
> +                       acs_ctrl = 0;
> +
> +               acs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;
> +               acs_ctrl &= ~acs_mask;
> +
> +               off = acs_cap_offset + DLB_PCI_ACS_CTRL;
> +               if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
> +                       printf("[%s()] failed to write pci config space at offset %d\n",
> +                              __func__, (int)off);
> +                       return -1;
> +               }
> +       }
> +
> +       return 0;
> +}
> +
> +/*******************************/
> +/****** Driver management ******/
> +/*******************************/
> +
> +/* dlb_pf_init_driver_state() - initialize per-device software state.
> + *
> + * Selects the 64-byte ("four HCW") enqueue routine -- movdir64b when the
> + * CPU advertises it, otherwise the movntdq fallback -- and initializes
> + * the driver's spinlocks.
> + *
> + * Return: always 0.
> + */
> +int
> +dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)
> +{
> +       if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MOVDIR64B))
> +               dlb_dev->enqueue_four = dlb_movdir64b;
> +       else
> +               dlb_dev->enqueue_four = dlb_movntdq;
> +
> +       /* Initialize software state */
> +       rte_spinlock_init(&dlb_dev->resource_mutex);
> +       rte_spinlock_init(&dlb_dev->measurement_lock);
> +
> +       return 0;
> +}
> +
> +/* dlb_pf_init_hardware() - one-time hardware configuration after probe.
> + *
> + * Disables the data-path VAS-reset feature, enables the excess-tokens
> + * alarm, and applies B0-and-later settings. The two identical
> + * "revision >= DLB_REV_B0" checks from v4 are merged into one.
> + */
> +void
> +dlb_pf_init_hardware(struct dlb_dev *dlb_dev)
> +{
> +       dlb_disable_dp_vasr_feature(&dlb_dev->hw);
> +
> +       dlb_enable_excess_tokens_alarm(&dlb_dev->hw);
> +
> +       if (dlb_dev->revision >= DLB_REV_B0) {
> +               /* B0+: sparse CQ mode, and silence the PF<->VF ISR
> +                * pending-error alarms.
> +                */
> +               dlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);
> +               dlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);
> +               dlb_hw_disable_pf_to_vf_isr_pend_err(&dlb_dev->hw);
> +               dlb_hw_disable_vf_to_pf_isr_pend_err(&dlb_dev->hw);
> +       }
> +}
> diff --git a/drivers/event/dlb/pf/dlb_main.h b/drivers/event/dlb/pf/dlb_main.h
> new file mode 100644
> index 0000000..abe9c02
> --- /dev/null
> +++ b/drivers/event/dlb/pf/dlb_main.h
> @@ -0,0 +1,52 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +/* NOTE(review): identifiers starting with a double underscore are
> + * reserved by the C standard; consider renaming the guard DLB_MAIN_H.
> + */
> +#ifndef __DLB_MAIN_H
> +#define __DLB_MAIN_H
> +
> +#include <rte_debug.h>
> +#include <rte_log.h>
> +#include <rte_spinlock.h>
> +#include <rte_pci.h>
> +#include <rte_bus_pci.h>
> +
> +#ifndef PAGE_SIZE
> +#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> +#endif
> +
> +#include "base/dlb_hw_types.h"
> +#include "../dlb_user.h"
> +
> +#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5
> +
> +/* Per-PCI-device PF-mode driver state, created by dlb_probe(). */
> +struct dlb_dev {
> +       struct rte_pci_device *pdev;
> +       struct dlb_hw hw;
> +       /* struct list_head list; */
> +       struct device *dlb_device;
> +       /* The enqueue_four function enqueues four HCWs (one cache-line worth)
> +        * to the DLB, using whichever mechanism is supported by the platform
> +        * on which this driver is running.
> +        */
> +       void (*enqueue_four)(void *qe4, void *pp_addr);
> +       bool domain_reset_failed;
> +       /* The resource mutex serializes access to driver data structures and
> +        * hardware registers.
> +        */
> +       rte_spinlock_t resource_mutex;
> +       rte_spinlock_t measurement_lock;
> +       bool worker_launched;
> +       u8 revision;
> +};
> +
> +struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
> +void dlb_reset_done(struct dlb_dev *dlb_dev);
> +
> +/* pf_ops: configuration-path hooks implemented by the PF PMD */
> +int dlb_pf_init_driver_state(struct dlb_dev *dev);
> +void dlb_pf_free_driver_state(struct dlb_dev *dev);
> +void dlb_pf_init_hardware(struct dlb_dev *dev);
> +int dlb_pf_reset(struct dlb_dev *dlb_dev);
> +
> +#endif /* __DLB_MAIN_H */
> diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
> new file mode 100644
> index 0000000..7fc85e9
> --- /dev/null
> +++ b/drivers/event/dlb/pf/dlb_pf.c
> @@ -0,0 +1,232 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2020 Intel Corporation
> + */
> +
> +#include <stdint.h>
> +#include <stdbool.h>
> +#include <stdio.h>
> +#include <sys/mman.h>
> +#include <sys/fcntl.h>
> +#include <sys/time.h>
> +#include <errno.h>
> +#include <assert.h>
> +#include <unistd.h>
> +#include <string.h>
> +#include <rte_debug.h>
> +#include <rte_log.h>
> +#include <rte_dev.h>
> +#include <rte_devargs.h>
> +#include <rte_mbuf.h>
> +#include <rte_ring.h>
> +#include <rte_errno.h>
> +#include <rte_kvargs.h>
> +#include <rte_malloc.h>
> +#include <rte_cycles.h>
> +#include <rte_io.h>
> +#include <rte_memory.h>
> +#include <rte_string_fns.h>
> +
> +#include "../dlb_priv.h"
> +#include "../dlb_iface.h"
> +#include "../dlb_inline_fns.h"
> +#include "dlb_main.h"
> +#include "base/dlb_hw_types.h"
> +#include "base/dlb_osdep.h"
> +#include "base/dlb_resource.h"
> +
> +/* Reset the process-local port-mapping table. The real PP/CQ/popcount
> + * addresses are filled in later, at port-create time.
> + */
> +static void
> +dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
> +{
> +       int i;
> +
> +       /* Addresses will be initialized at port create */
> +       for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
> +               /* First directed ports */
> +
> +               /* producer port */
> +               dlb_port[i][DLB_DIR].pp_addr = NULL;
> +
> +               /* popcount */
> +               dlb_port[i][DLB_DIR].ldb_popcount = NULL;
> +               dlb_port[i][DLB_DIR].dir_popcount = NULL;
> +
> +               /* consumer queue */
> +               dlb_port[i][DLB_DIR].cq_base = NULL;
> +               dlb_port[i][DLB_DIR].mmaped = true;
> +
> +               /* Now load balanced ports */
> +
> +               /* producer port */
> +               dlb_port[i][DLB_LDB].pp_addr = NULL;
> +
> +               /* popcount */
> +               dlb_port[i][DLB_LDB].ldb_popcount = NULL;
> +               dlb_port[i][DLB_LDB].dir_popcount = NULL;
> +
> +               /* consumer queue */
> +               dlb_port[i][DLB_LDB].cq_base = NULL;
> +               dlb_port[i][DLB_LDB].mmaped = true;
> +       }
> +}
> +
> +/* dlb_iface "open" hook. PF mode needs no device file or handshake, so
> + * this is a stub; the bifurcated PMD supplies a real implementation.
> + */
> +static int
> +dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
> +{
> +       RTE_SET_USED(handle);
> +       RTE_SET_USED(name);
> +
> +       return 0;
> +}
> +
> +/* dlb_iface hook: report the hardware revision cached at probe time. */
> +static int
> +dlb_pf_get_device_version(struct dlb_hw_dev *handle,
> +                         uint8_t *revision)
> +{
> +       struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> +
> +       *revision = dlb_dev->revision;
> +
> +       return 0;
> +}
> +
> +/* dlb_iface hook: query the hardware resource counts into *rsrcs. */
> +static int
> +dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
> +                        struct dlb_get_num_resources_args *rsrcs)
> +{
> +       struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> +
> +       dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);
> +
> +       return 0;
> +}
> +
> +/* dlb_iface hook: select the CQ poll mode. B0 and later hardware uses
> + * sparse CQ mode (one QE per cache line); earlier steppings use the
> + * standard packed mode.
> + */
> +static int
> +dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
> +                       enum dlb_cq_poll_modes *mode)
> +{
> +       struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> +
> +       if (dlb_dev->revision >= DLB_REV_B0)
> +               *mode = DLB_CQ_POLL_MODE_SPARSE;
> +       else
> +               *mode = DLB_CQ_POLL_MODE_STD;
> +
> +       return 0;
> +}
> +
> +/* Install the PF implementations of the dlb_iface configuration hooks.
> + * These function pointers are shared with the (future) bifurcated PMD
> + * and are used only on the configuration path, never in the data path.
> + */
> +static void
> +dlb_pf_iface_fn_ptrs_init(void)
> +{
> +       dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
> +       dlb_iface_open = dlb_pf_open;
> +       dlb_iface_get_device_version = dlb_pf_get_device_version;
> +       dlb_iface_get_num_resources = dlb_pf_get_num_resources;
> +       dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
> +}
> +
> +/* PCI DEV HOOKS */
> +/* Eventdev init callback invoked from rte_event_pmd_pci_probe_named().
> + *
> + * Primary process: probes the PF layer, parses any devargs (which
> + * override the defaults in dlb_args), then completes the eventdev
> + * probe. Secondary processes only attach to the already-probed device.
> + *
> + * Return: 0 on success, negative value on failure.
> + */
> +static int
> +dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
> +{
> +       int ret = 0;
> +       struct rte_pci_device *pci_dev;
> +       struct dlb_devargs dlb_args = {
> +               .socket_id = rte_socket_id(),
> +               .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
> +               .num_dir_credits_override = -1,
> +               .defer_sched = 0,
> +               .num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
> +       };
> +       struct dlb_eventdev *dlb;
> +
> +       DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
> +                   eventdev->data->dev_id, eventdev->data->socket_id);
> +
> +       dlb_entry_points_init(eventdev);
> +
> +       dlb_pf_iface_fn_ptrs_init();
> +
> +       pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
> +
> +       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +               dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
> +
> +               /* Probe the DLB PF layer */
> +               dlb->qm_instance.pf_dev = dlb_probe(pci_dev);
> +
> +               if (dlb->qm_instance.pf_dev == NULL) {
> +                       DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
> +                                   rte_errno);
> +                       ret = -rte_errno;
> +                       goto dlb_probe_failed;
> +               }
> +
> +               /* Were we invoked with runtime parameters? */
> +               if (pci_dev->device.devargs) {
> +                       ret = dlb_parse_params(pci_dev->device.devargs->args,
> +                                              pci_dev->device.devargs->name,
> +                                              &dlb_args);
> +                       if (ret) {
> +                               DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
> +                                           ret, rte_errno);
> +                               goto dlb_probe_failed;
> +                       }
> +               }
> +
> +               ret = dlb_primary_eventdev_probe(eventdev,
> +                                                EVDEV_DLB_NAME_PMD_STR,
> +                                                &dlb_args);
> +       } else {
> +               ret = dlb_secondary_eventdev_probe(eventdev,
> +                                                  EVDEV_DLB_NAME_PMD_STR);
> +       }
> +       if (ret)
> +               goto dlb_probe_failed;
> +
> +       DLB_LOG_INFO("DLB PF Probe success\n");
> +
> +       return 0;
> +
> +dlb_probe_failed:
> +
> +       DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);
> +
> +       return ret;
> +}
> +
> +#define EVENTDEV_INTEL_VENDOR_ID 0x8086
> +
> +/* PCI id table: one entry for the DLB PF device, zero-terminated. */
> +static const struct rte_pci_id pci_id_dlb_map[] = {
> +       {
> +               RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
> +                              DLB_PF_DEV_ID)
> +       },
> +       {
> +               .vendor_id = 0,
> +       },
> +};
> +
> +/* PCI bus probe hook: allocate the eventdev and run the PMD init. */
> +static int
> +event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
> +                   struct rte_pci_device *pci_dev)
> +{
> +       return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
> +               sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
> +               EVDEV_DLB_NAME_PMD_STR);
> +}
> +
> +/* PCI bus remove hook. */
> +static int
> +event_dlb_pci_remove(struct rte_pci_device *pci_dev)
> +{
> +       return rte_event_pmd_pci_remove(pci_dev, NULL);
> +}
> +
> +static struct rte_pci_driver pci_eventdev_dlb_pmd = {
> +       .id_table = pci_id_dlb_map,
> +       .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
> +       .probe = event_dlb_pci_probe,
> +       .remove = event_dlb_pci_remove,
> +};
> +
> +RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
> +RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);
> --
> 2.6.4
>
Eads, Gage Oct. 20, 2020, 8:06 p.m. UTC | #2
> -----Original Message-----
> From: McDaniel, Timothy <timothy.mcdaniel@intel.com>
> Sent: Saturday, October 17, 2020 2:04 PM
> To: Burakov, Anatoly <anatoly.burakov@intel.com>
> Cc: dev@dpdk.org; Carrillo, Erik G <erik.g.carrillo@intel.com>; Eads, Gage
> <gage.eads@intel.com>; Van Haaren, Harry <harry.van.haaren@intel.com>;
> jerinj@marvell.com
> Subject: [PATCH v5 06/22] event/dlb: add probe
> 
> The DLB hardware is a PCI device. This commit adds
> support for probe and other initialization. The
> dlb_iface.[ch] files implement a flexible interface
> that supports both the PF PMD and the bifurcated PMD.
> The bifurcated PMD will be released in a future
> patch set. Note that the flexible interface is only
> used for configuration, and is not used in the data
> path. The shared code is added in pf/base.
> Command line parameters are parsed at config time.
> 
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>

Looks like my suggestions were addressed, but I think Jerin's comments
in the dlb2 "add probe" patch are applicable here as well.

Thanks,
Gage

Patch
diff mbox series

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
new file mode 100644
index 0000000..fbec8f1
--- /dev/null
+++ b/drivers/event/dlb/dlb.c
@@ -0,0 +1,483 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <nmmintrin.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_io.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+#include <rte_ring.h>
+#include <rte_string_fns.h>
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+
+#include "dlb_priv.h"
+#include "dlb_iface.h"
+#include "dlb_inline_fns.h"
+
+/*
+ * Resources exposed to eventdev.
+ */
+#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
+#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
+#endif
+/* Compile-time defaults reported to the eventdev layer; the queue, port
+ * and credit maxima are overwritten with probed values in
+ * dlb_hw_query_resources().
+ */
+static struct rte_event_dev_info evdev_dlb_default_info = {
+	.driver_name = "", /* probe will set */
+	.min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
+	.max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
+#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
+	.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+#else
+	.max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
+#endif
+	.max_event_queue_flows = DLB_MAX_NUM_FLOWS,
+	.max_event_queue_priority_levels = DLB_QID_PRIORITIES,
+	.max_event_priority_levels = DLB_QID_PRIORITIES,
+	.max_event_ports = DLB_MAX_NUM_LDB_PORTS,
+	.max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
+	.max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
+	.max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
+	.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
+	.max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
+	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+			  RTE_EVENT_DEV_CAP_EVENT_QOS |
+			  RTE_EVENT_DEV_CAP_BURST_MODE |
+			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
+			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
+};
+
+/* Per-process port address table, populated at port-create time. */
+struct process_local_port_data
+dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
+
+/* Query the hardware for its resource counts and fold the results into
+ * the evdev defaults (queues/ports/events maxima) and into the handle's
+ * cached limits used when creating the scheduling domain.
+ *
+ * Return: 0 on success, negative value from the iface hook on failure.
+ */
+static int
+dlb_hw_query_resources(struct dlb_eventdev *dlb)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_hw_resource_info *dlb_info = &handle->info;
+	int ret;
+
+	ret = dlb_iface_get_num_resources(handle,
+					  &dlb->hw_rsrc_query_results);
+	if (ret) {
+		DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
+		return ret;
+	}
+
+	/* Complete filling in device resource info returned to evdev app,
+	 * overriding any default values.
+	 * The capabilities (CAPs) were set at compile time.
+	 */
+
+	evdev_dlb_default_info.max_event_queues =
+		dlb->hw_rsrc_query_results.num_ldb_queues;
+
+	evdev_dlb_default_info.max_event_ports =
+		dlb->hw_rsrc_query_results.num_ldb_ports;
+
+	evdev_dlb_default_info.max_num_events =
+		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
+
+	/* Save off values used when creating the scheduling domain. */
+
+	handle->info.num_sched_domains =
+		dlb->hw_rsrc_query_results.num_sched_domains;
+
+	handle->info.hw_rsrc_max.nb_events_limit =
+		dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
+
+	handle->info.hw_rsrc_max.num_queues =
+		dlb->hw_rsrc_query_results.num_ldb_queues +
+		dlb->hw_rsrc_query_results.num_dir_ports;
+
+	handle->info.hw_rsrc_max.num_ldb_queues =
+		dlb->hw_rsrc_query_results.num_ldb_queues;
+
+	handle->info.hw_rsrc_max.num_ldb_ports =
+		dlb->hw_rsrc_query_results.num_ldb_ports;
+
+	handle->info.hw_rsrc_max.num_dir_ports =
+		dlb->hw_rsrc_query_results.num_dir_ports;
+
+	handle->info.hw_rsrc_max.reorder_window_size =
+		dlb->hw_rsrc_query_results.num_hist_list_entries;
+
+	rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
+
+	return 0;
+}
+
+/* Wrapper for string to int conversion. Substituted for atoi(...), which is
+ * unsafe.
+ *
+ * Return: 0 on success (result written), -EINVAL on NULL input, a value
+ * outside [INT_MIN, INT_MAX], or a string that is empty or contains
+ * trailing non-numeric characters; -errno if strtol() itself fails.
+ *
+ * Renamed RTE_BASE_10 -> DLB_BASE_10: the RTE_ prefix is reserved for
+ * the DPDK core namespace.
+ */
+#define DLB_BASE_10 10
+
+static int
+dlb_string_to_int(int *result, const char *str)
+{
+	long ret;
+	char *endstr;
+
+	if (str == NULL || result == NULL)
+		return -EINVAL;
+
+	errno = 0;
+	ret = strtol(str, &endstr, DLB_BASE_10);
+	if (errno)
+		return -errno;
+
+	/* long int and int may be different width for some architectures */
+	if (ret < INT_MIN || ret > INT_MAX || endstr == str)
+		return -EINVAL;
+
+	/* Reject trailing garbage (e.g. "5x"), which v4 silently accepted */
+	if (*endstr != '\0')
+		return -EINVAL;
+
+	*result = ret;
+	return 0;
+}
+
+/* Devargs callback for the socket_id parameter.
+ *
+ * Return: 0 on success, negative value on NULL/unparsable/out-of-range
+ * input.
+ */
+static int
+set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *socket_id = opaque;
+	int ret;
+
+	/* Guard against NULL input, matching the other devargs callbacks */
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(socket_id, value);
+	if (ret < 0)
+		return ret;
+
+	/* Valid node ids are 0 .. RTE_MAX_NUMA_NODES - 1; the v4 ">"
+	 * comparison accepted RTE_MAX_NUMA_NODES itself (off-by-one).
+	 */
+	if (*socket_id >= RTE_MAX_NUMA_NODES)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Devargs callback for max_num_events: bounded by the number of
+ * load-balanced credits the device supports.
+ *
+ * Return: 0 on success, negative value on NULL/unparsable/out-of-range
+ * input.
+ */
+static int
+set_max_num_events(const char *key __rte_unused,
+		   const char *value,
+		   void *opaque)
+{
+	int *max_num_events = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(max_num_events, value);
+	if (ret < 0)
+		return ret;
+
+	if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
+		DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
+			    DLB_MAX_NUM_LDB_CREDITS);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Devargs callback for num_dir_credits_override: bounded by the number
+ * of directed credits the device supports.
+ *
+ * Return: 0 on success, negative value on NULL/unparsable/out-of-range
+ * input.
+ */
+static int
+set_num_dir_credits(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int *num_dir_credits = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(num_dir_credits, value);
+	if (ret < 0)
+		return ret;
+
+	if (*num_dir_credits < 0 ||
+	    *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
+		DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
+			    DLB_MAX_NUM_DIR_CREDITS);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Devargs callback for the device id parameter. Unlike the other
+ * callbacks this performs no range check beyond successful parsing.
+ *
+ * Return: 0 on success, negative value on NULL or unparsable input.
+ */
+static int
+set_dev_id(const char *key __rte_unused,
+	   const char *value,
+	   void *opaque)
+{
+	int *dev_id = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(dev_id, value);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* Devargs callback for defer_sched: the only accepted value is "on".
+ *
+ * Return: 0 on success, -EINVAL on NULL input or any value other than
+ * exactly "on".
+ */
+static int
+set_defer_sched(const char *key __rte_unused,
+		const char *value,
+		void *opaque)
+{
+	int *defer_sched = opaque;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	/* Require an exact match: the v4 strncmp(value, "on", 2) also
+	 * accepted any string merely starting with "on" (e.g. "online").
+	 */
+	if (strcmp(value, "on") != 0) {
+		DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
+			    value);
+		return -EINVAL;
+	}
+
+	*defer_sched = 1;
+
+	return 0;
+}
+
+/* devargs callback: parse the per-queue atomic inflights value and
+ * range-check it against the hardware maximum (DLB_MAX_NUM_ATM_INFLIGHTS).
+ */
+static int
+set_num_atm_inflights(const char *key __rte_unused,
+		      const char *value,
+		      void *opaque)
+{
+	int *num_atm_inflights = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(num_atm_inflights, value);
+	if (ret < 0)
+		return ret;
+
+	if (*num_atm_inflights < 0 ||
+	    *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
+		DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
+			    DLB_MAX_NUM_ATM_INFLIGHTS);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Install the PMD's eventdev ops table on the device. The table is empty in
+ * this commit; presumably the entry points are filled in by later patches in
+ * the series.
+ */
+void
+dlb_entry_points_init(struct rte_eventdev *dev)
+{
+	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
+	};
+
+	/* Expose PMD's eventdev interface */
+	dev->dev_ops = &dlb_eventdev_entry_ops;
+}
+
+/* Primary-process probe: copy the parsed devargs into the device private
+ * data, open the device interface, query the hardware revision and
+ * resources, select the CQ poll mode, and install the eventdev entry
+ * points. Returns 0 on success or the negative error from the failing step.
+ */
+int
+dlb_primary_eventdev_probe(struct rte_eventdev *dev,
+			   const char *name,
+			   struct dlb_devargs *dlb_args)
+{
+	struct dlb_eventdev *dlb;
+	int err;
+
+	dlb = dev->data->dev_private;
+
+	dlb->event_dev = dev; /* backlink */
+
+	evdev_dlb_default_info.driver_name = name;
+
+	/* Record devargs-supplied overrides for use at configure time. */
+	dlb->max_num_events_override = dlb_args->max_num_events;
+	dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
+	dlb->defer_sched = dlb_args->defer_sched;
+	dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
+
+	/* Open the interface.
+	 * For vdev mode, this means open the dlb kernel module.
+	 */
+	err = dlb_iface_open(&dlb->qm_instance, name);
+	if (err < 0) {
+		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
+			    err);
+		return err;
+	}
+
+	err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
+	if (err < 0) {
+		DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
+			    err);
+		return err;
+	}
+
+	err = dlb_hw_query_resources(dlb);
+	if (err) {
+		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
+		return err;
+	}
+
+	err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
+	if (err < 0) {
+		DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
+		return err;
+	}
+
+	rte_spinlock_init(&dlb->qm_instance.resource_lock);
+
+	dlb_iface_low_level_io_init(dlb);
+
+	dlb_entry_points_init(dev);
+
+	return 0;
+}
+
+/* Secondary-process probe: open the device interface and query resources.
+ * Unlike the primary probe, devargs overrides, device-version query, and
+ * poll-mode selection are not performed here.
+ */
+int
+dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
+			     const char *name)
+{
+	struct dlb_eventdev *dlb;
+	int err;
+
+	dlb = dev->data->dev_private;
+
+	evdev_dlb_default_info.driver_name = name;
+
+	err = dlb_iface_open(&dlb->qm_instance, name);
+	if (err < 0) {
+		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
+			    err);
+		return err;
+	}
+
+	err = dlb_hw_query_resources(dlb);
+	if (err) {
+		DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
+		return err;
+	}
+
+	dlb_iface_low_level_io_init(dlb);
+
+	dlb_entry_points_init(dev);
+
+	return 0;
+}
+
+/* Parse the devargs string for this device, filling in dlb_args via the
+ * set_* callbacks above. Unrecognized keys cause the whole kvlist to be
+ * ignored (with an INFO log); a recognized key that fails to parse returns
+ * that callback's negative error code. Returns 0 otherwise.
+ */
+int
+dlb_parse_params(const char *params,
+		 const char *name,
+		 struct dlb_devargs *dlb_args)
+{
+	int ret = 0;
+	static const char * const args[] = { NUMA_NODE_ARG,
+					     DLB_MAX_NUM_EVENTS,
+					     DLB_NUM_DIR_CREDITS,
+					     DEV_ID_ARG,
+					     DLB_DEFER_SCHED_ARG,
+					     DLB_NUM_ATM_INFLIGHTS_ARG,
+					     NULL };
+
+	if (params && params[0] != '\0') {
+		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+		if (kvlist == NULL) {
+			DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
+				     name);
+		} else {
+			/* Assign the function-scope ret; the original code
+			 * declared a second "int ret" here, shadowing it.
+			 */
+			ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+						 set_numa_node,
+						 &dlb_args->socket_id);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing numa node parameter",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
+						 set_max_num_events,
+						 &dlb_args->max_num_events);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist,
+					DLB_NUM_DIR_CREDITS,
+					set_num_dir_credits,
+					&dlb_args->num_dir_credits_override);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
+						 set_dev_id,
+						 &dlb_args->dev_id);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing dev_id parameter",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
+						 set_defer_sched,
+						 &dlb_args->defer_sched);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist,
+						 DLB_NUM_ATM_INFLIGHTS_ARG,
+						 set_num_atm_inflights,
+						 &dlb_args->num_atm_inflights);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			rte_kvargs_free(kvlist);
+		}
+	}
+	return ret;
+}
+RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);
diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
new file mode 100644
index 0000000..dd72120
--- /dev/null
+++ b/drivers/event/dlb/dlb_iface.c
@@ -0,0 +1,27 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+
+#include "dlb_priv.h"
+
+/* DLB PMD Internal interface function pointers.
+ * If VDEV (bifurcated PMD),  these will resolve to functions that issue ioctls
+ * serviced by DLB kernel module.
+ * If PCI (PF PMD),  these will be implemented locally in user mode.
+ */
+
+void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
+
+int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
+
+int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
+				    uint8_t *revision);
+
+int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
+				   struct dlb_get_num_resources_args *rsrcs);
+
+int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
+				  enum dlb_cq_poll_modes *mode);
+
diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
new file mode 100644
index 0000000..416d1b3
--- /dev/null
+++ b/drivers/event/dlb/dlb_iface.h
@@ -0,0 +1,27 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef _DLB_IFACE_H
+#define _DLB_IFACE_H
+
+/* DLB PMD Internal interface function pointers.
+ * If VDEV (bifurcated PMD), these will resolve to functions that issue ioctls
+ * serviced by DLB kernel module.
+ * If PCI (PF PMD), these will be implemented locally in user mode.
+ */
+
+extern void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);
+
+extern int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);
+
+extern int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
+					   uint8_t *revision);
+
+extern int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
+				   struct dlb_get_num_resources_args *rsrcs);
+
+extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
+					 enum dlb_cq_poll_modes *mode);
+
+#endif /* _DLB_IFACE_H */
diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
index f695abf..892d55f 100644
--- a/drivers/event/dlb/dlb_priv.h
+++ b/drivers/event/dlb/dlb_priv.h
@@ -505,4 +505,6 @@  int dlb_parse_params(const char *params,
 		     const char *name,
 		     struct dlb_devargs *dlb_args);
 
+void dlb_entry_points_init(struct rte_eventdev *dev);
+
 #endif	/* _DLB_PRIV_H_ */
diff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build
index 54ba2c8..414b3ed 100644
--- a/drivers/event/dlb/meson.build
+++ b/drivers/event/dlb/meson.build
@@ -1,7 +1,11 @@ 
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2019-2020 Intel Corporation
 
-sources = files(
+sources = files('dlb.c',
+		'dlb_iface.c',
+		'pf/dlb_main.c',
+		'pf/dlb_pf.c',
+		'pf/base/dlb_resource.c'
 )
 
 deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
diff --git a/drivers/event/dlb/pf/base/dlb_hw_types.h b/drivers/event/dlb/pf/base/dlb_hw_types.h
new file mode 100644
index 0000000..87b83f8
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_hw_types.h
@@ -0,0 +1,334 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_HW_TYPES_H
+#define __DLB_HW_TYPES_H
+
+#include "../../dlb_user.h"
+#include "dlb_osdep_types.h"
+#include "dlb_osdep_list.h"
+
+#define DLB_MAX_NUM_DOMAINS 32
+#define DLB_MAX_NUM_LDB_QUEUES 128
+#define DLB_MAX_NUM_LDB_PORTS 64
+#define DLB_MAX_NUM_DIR_PORTS 128
+#define DLB_MAX_NUM_LDB_CREDITS 16384
+#define DLB_MAX_NUM_DIR_CREDITS 4096
+#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
+#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
+#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
+#define DLB_MAX_NUM_AQOS_ENTRIES 2048
+#define DLB_MAX_NUM_TOTAL_OUTSTANDING_COMPLETIONS 4096
+#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
+#define DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS 4
+#define DLB_MAX_NUM_SEQUENCE_NUMBER_MODES 6
+#define DLB_QID_PRIORITIES 8
+#define DLB_NUM_ARB_WEIGHTS 8
+#define DLB_MAX_WEIGHT 255
+#define DLB_MAX_PORT_CREDIT_QUANTUM 1023
+#define DLB_MAX_CQ_COMP_CHECK_LOOPS 409600
+#define DLB_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
+#define DLB_HZ 800000000
+
+/* Used for DLB A-stepping workaround for hardware write buffer lock up issue */
+#define DLB_A_STEP_MAX_PORTS 128
+
+#define DLB_PF_DEV_ID 0x270B
+
+/* Interrupt related macros */
+#define DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS 8
+#define DLB_PF_NUM_CQ_INTERRUPT_VECTORS	 64
+#define DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS \
+	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \
+	 DLB_PF_NUM_CQ_INTERRUPT_VECTORS)
+#define DLB_PF_NUM_COMPRESSED_MODE_VECTORS \
+	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1)
+#define DLB_PF_NUM_PACKED_MODE_VECTORS	 DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS
+#define DLB_PF_COMPRESSED_MODE_CQ_VECTOR_ID DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS
+
+#define DLB_PF_NUM_ALARM_INTERRUPT_VECTORS 4
+#define DLB_INT_ALARM 0
+#define DLB_INT_INGRESS_ERROR 3
+
+#define DLB_ALARM_HW_SOURCE_SYS 0
+#define DLB_ALARM_HW_SOURCE_DLB 1
+
+#define DLB_ALARM_HW_UNIT_CHP 1
+#define DLB_ALARM_HW_UNIT_LSP 3
+
+#define DLB_ALARM_HW_CHP_AID_OUT_OF_CREDITS 6
+#define DLB_ALARM_HW_CHP_AID_ILLEGAL_ENQ 7
+#define DLB_ALARM_HW_LSP_AID_EXCESS_TOKEN_POPS 15
+#define DLB_ALARM_SYS_AID_ILLEGAL_HCW 0
+#define DLB_ALARM_SYS_AID_ILLEGAL_QID 3
+#define DLB_ALARM_SYS_AID_DISABLED_QID 4
+#define DLB_ALARM_SYS_AID_ILLEGAL_CQID 6
+
+/* Hardware-defined base addresses */
+#define DLB_LDB_PP_BASE 0x2100000
+#define DLB_LDB_PP_STRIDE 0x1000
+#define DLB_LDB_PP_BOUND \
+	(DLB_LDB_PP_BASE + DLB_LDB_PP_STRIDE * DLB_MAX_NUM_LDB_PORTS)
+#define DLB_DIR_PP_BASE 0x2000000
+#define DLB_DIR_PP_STRIDE 0x1000
+#define DLB_DIR_PP_BOUND \
+	(DLB_DIR_PP_BASE + DLB_DIR_PP_STRIDE * DLB_MAX_NUM_DIR_PORTS)
+
+struct dlb_freelist {
+	u32 base;
+	u32 bound;
+	u32 offset;
+};
+
+/* Number of entries still available in the freelist: the [base, bound)
+ * span minus the entries already consumed (offset).
+ */
+static inline u32 dlb_freelist_count(struct dlb_freelist *list)
+{
+	return (list->bound - list->base) - list->offset;
+}
+
+struct dlb_hcw {
+	u64 data;
+	/* Word 3 */
+	u16 opaque;
+	u8 qid;
+	u8 sched_type:2;
+	u8 priority:3;
+	u8 msg_type:3;
+	/* Word 4 */
+	u16 lock_id;
+	u8 meas_lat:1;
+	u8 rsvd1:2;
+	u8 no_dec:1;
+	u8 cmp_id:4;
+	u8 cq_token:1;
+	u8 qe_comp:1;
+	u8 qe_frag:1;
+	u8 qe_valid:1;
+	u8 int_arm:1;
+	u8 error:1;
+	u8 rsvd:2;
+};
+
+struct dlb_ldb_queue {
+	struct dlb_list_entry domain_list;
+	struct dlb_list_entry func_list;
+	u32 id;
+	u32 domain_id;
+	u32 num_qid_inflights;
+	struct dlb_freelist aqed_freelist;
+	u8 sn_cfg_valid;
+	u32 sn_group;
+	u32 sn_slot;
+	u32 num_mappings;
+	u8 num_pending_additions;
+	u8 owned;
+	u8 configured;
+};
+
+/* Directed ports and queues are paired by nature, so the driver tracks them
+ * with a single data structure.
+ */
+struct dlb_dir_pq_pair {
+	struct dlb_list_entry domain_list;
+	struct dlb_list_entry func_list;
+	u32 id;
+	u32 domain_id;
+	u8 ldb_pool_used;
+	u8 dir_pool_used;
+	u8 queue_configured;
+	u8 port_configured;
+	u8 owned;
+	u8 enabled;
+	u32 ref_cnt;
+};
+
+enum dlb_qid_map_state {
+	/* The slot doesn't contain a valid queue mapping */
+	DLB_QUEUE_UNMAPPED,
+	/* The slot contains a valid queue mapping */
+	DLB_QUEUE_MAPPED,
+	/* The driver is mapping a queue into this slot */
+	DLB_QUEUE_MAP_IN_PROGRESS,
+	/* The driver is unmapping a queue from this slot */
+	DLB_QUEUE_UNMAP_IN_PROGRESS,
+	/* The driver is unmapping a queue from this slot, and once complete
+	 * will replace it with another mapping.
+	 */
+	DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP,
+};
+
+struct dlb_ldb_port_qid_map {
+	u16 qid;
+	u8 priority;
+	u16 pending_qid;
+	u8 pending_priority;
+	enum dlb_qid_map_state state;
+};
+
+struct dlb_ldb_port {
+	struct dlb_list_entry domain_list;
+	struct dlb_list_entry func_list;
+	u32 id;
+	u32 domain_id;
+	u8 ldb_pool_used;
+	u8 dir_pool_used;
+	u8 init_tkn_cnt;
+	u32 hist_list_entry_base;
+	u32 hist_list_entry_limit;
+	/* The qid_map represents the hardware QID mapping state. */
+	struct dlb_ldb_port_qid_map qid_map[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
+	u32 ref_cnt;
+	u8 num_pending_removals;
+	u8 num_mappings;
+	u8 owned;
+	u8 enabled;
+	u8 configured;
+};
+
+struct dlb_credit_pool {
+	struct dlb_list_entry domain_list;
+	struct dlb_list_entry func_list;
+	u32 id;
+	u32 domain_id;
+	u32 total_credits;
+	u32 avail_credits;
+	u8 owned;
+	u8 configured;
+};
+
+struct dlb_sn_group {
+	u32 mode;
+	u32 sequence_numbers_per_queue;
+	u32 slot_use_bitmap;
+	u32 id;
+};
+
+/* Return true if every sequence-number slot in @group is in use. The
+ * all-used bitmap value depends on the group's mode (slots per queue).
+ */
+static inline bool dlb_sn_group_full(struct dlb_sn_group *group)
+{
+	/* static const: built once at compile time instead of being
+	 * re-initialized on the stack for every call.
+	 */
+	static const u32 mask[] = {
+		0xffffffff,  /* 32 SNs per queue */
+		0x0000ffff,  /* 64 SNs per queue */
+		0x000000ff,  /* 128 SNs per queue */
+		0x0000000f,  /* 256 SNs per queue */
+		0x00000003,  /* 512 SNs per queue */
+		0x00000001}; /* 1024 SNs per queue */
+
+	return group->slot_use_bitmap == mask[group->mode];
+}
+
+/* Claim the first free sequence-number slot in @group, marking it used.
+ * Returns the slot index, or -1 if all slots for the group's mode are taken.
+ */
+static inline int dlb_sn_group_alloc_slot(struct dlb_sn_group *group)
+{
+	/* Slots available per mode; static const so the table is not
+	 * re-initialized on every call.
+	 */
+	static const int bound[] = {32, 16, 8, 4, 2, 1};
+	int i;
+
+	for (i = 0; i < bound[group->mode]; i++) {
+		if (!(group->slot_use_bitmap & (1 << i))) {
+			group->slot_use_bitmap |= 1 << i;
+			return i;
+		}
+	}
+
+	return -1;
+}
+
+/* Return sequence-number slot @slot to @group by clearing its use bit. */
+static inline void dlb_sn_group_free_slot(struct dlb_sn_group *group, int slot)
+{
+	group->slot_use_bitmap &= ~(1 << slot);
+}
+
+/* Count the sequence-number slots currently in use in @group. */
+static inline int dlb_sn_group_used_slots(struct dlb_sn_group *group)
+{
+	/* Population count of the 32-bit use bitmap; replaces the original
+	 * 32-iteration bit-test loop.
+	 */
+	return __builtin_popcount(group->slot_use_bitmap);
+}
+
+struct dlb_domain {
+	struct dlb_function_resources *parent_func;
+	struct dlb_list_entry func_list;
+	struct dlb_list_head used_ldb_queues;
+	struct dlb_list_head used_ldb_ports;
+	struct dlb_list_head used_dir_pq_pairs;
+	struct dlb_list_head used_ldb_credit_pools;
+	struct dlb_list_head used_dir_credit_pools;
+	struct dlb_list_head avail_ldb_queues;
+	struct dlb_list_head avail_ldb_ports;
+	struct dlb_list_head avail_dir_pq_pairs;
+	struct dlb_list_head avail_ldb_credit_pools;
+	struct dlb_list_head avail_dir_credit_pools;
+	u32 total_hist_list_entries;
+	u32 avail_hist_list_entries;
+	u32 hist_list_entry_base;
+	u32 hist_list_entry_offset;
+	struct dlb_freelist qed_freelist;
+	struct dlb_freelist dqed_freelist;
+	struct dlb_freelist aqed_freelist;
+	u32 id;
+	int num_pending_removals;
+	int num_pending_additions;
+	u8 configured;
+	u8 started;
+};
+
+struct dlb_bitmap;
+
+struct dlb_function_resources {
+	u32 num_avail_domains;
+	struct dlb_list_head avail_domains;
+	struct dlb_list_head used_domains;
+	u32 num_avail_ldb_queues;
+	struct dlb_list_head avail_ldb_queues;
+	u32 num_avail_ldb_ports;
+	struct dlb_list_head avail_ldb_ports;
+	u32 num_avail_dir_pq_pairs;
+	struct dlb_list_head avail_dir_pq_pairs;
+	struct dlb_bitmap *avail_hist_list_entries;
+	struct dlb_bitmap *avail_qed_freelist_entries;
+	struct dlb_bitmap *avail_dqed_freelist_entries;
+	struct dlb_bitmap *avail_aqed_freelist_entries;
+	u32 num_avail_ldb_credit_pools;
+	struct dlb_list_head avail_ldb_credit_pools;
+	u32 num_avail_dir_credit_pools;
+	struct dlb_list_head avail_dir_credit_pools;
+	u32 num_enabled_ldb_ports;
+};
+
+/* After initialization, each resource in dlb_hw_resources is located in one of
+ * the following lists:
+ * -- The PF's available resources list. These are unconfigured resources owned
+ *	by the PF and not allocated to a DLB scheduling domain.
+ * -- A domain's available resources list. These are domain-owned unconfigured
+ *	resources.
+ * -- A domain's used resources list. These are are domain-owned configured
+ *	resources.
+ *
+ * A resource moves to a new list when a domain is created or destroyed, or
+ * when the resource is configured.
+ */
+struct dlb_hw_resources {
+	struct dlb_ldb_queue ldb_queues[DLB_MAX_NUM_LDB_QUEUES];
+	struct dlb_ldb_port ldb_ports[DLB_MAX_NUM_LDB_PORTS];
+	struct dlb_dir_pq_pair dir_pq_pairs[DLB_MAX_NUM_DIR_PORTS];
+	struct dlb_credit_pool ldb_credit_pools[DLB_MAX_NUM_LDB_CREDIT_POOLS];
+	struct dlb_credit_pool dir_credit_pools[DLB_MAX_NUM_DIR_CREDIT_POOLS];
+	struct dlb_sn_group sn_groups[DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
+};
+
+struct dlb_hw {
+	/* BAR 0 address */
+	void  *csr_kva;
+	unsigned long csr_phys_addr;
+	/* BAR 2 address */
+	void  *func_kva;
+	unsigned long func_phys_addr;
+
+	/* Resource tracking */
+	struct dlb_hw_resources rsrcs;
+	struct dlb_function_resources pf;
+	struct dlb_domain domains[DLB_MAX_NUM_DOMAINS];
+};
+
+#endif /* __DLB_HW_TYPES_H */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep.h b/drivers/event/dlb/pf/base/dlb_osdep.h
new file mode 100644
index 0000000..a6eef2f
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_osdep.h
@@ -0,0 +1,326 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_OSDEP_H__
+#define __DLB_OSDEP_H__
+
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <cpuid.h>
+#include <pthread.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_log.h>
+#include <rte_spinlock.h>
+#include "../dlb_main.h"
+#include "dlb_resource.h"
+#include "../../dlb_log.h"
+#include "../../dlb_user.h"
+
+
+#define DLB_PCI_REG_READ(reg)        rte_read32((void *)reg)
+#define DLB_PCI_REG_WRITE(reg, val)   rte_write32(val, (void *)reg)
+
+#define DLB_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
+#define DLB_CSR_RD(hw, reg) \
+	DLB_PCI_REG_READ(DLB_CSR_REG_ADDR((hw), (reg)))
+#define DLB_CSR_WR(hw, reg, val) \
+	DLB_PCI_REG_WRITE(DLB_CSR_REG_ADDR((hw), (reg)), (val))
+
+#define DLB_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
+#define DLB_FUNC_RD(hw, reg) \
+	DLB_PCI_REG_READ(DLB_FUNC_REG_ADDR((hw), (reg)))
+#define DLB_FUNC_WR(hw, reg, val) \
+	DLB_PCI_REG_WRITE(DLB_FUNC_REG_ADDR((hw), (reg)), (val))
+
+extern unsigned int dlb_unregister_timeout_s;
+/**
+ * os_queue_unregister_timeout_s() - timeout (in seconds) to wait for queue
+ *                                   unregister acknowledgments.
+ */
+static inline unsigned int os_queue_unregister_timeout_s(void)
+{
+	return dlb_unregister_timeout_s;
+}
+
+static inline size_t os_strlcpy(char *dst, const char *src, size_t sz)
+{
+	return rte_strlcpy(dst, src, sz);
+}
+
+/**
+ * os_udelay() - busy-wait for a number of microseconds
+ * @usecs: delay duration.
+ */
+static inline void os_udelay(int usecs)
+{
+	rte_delay_us(usecs);
+}
+
+/**
+ * os_msleep() - sleep for a number of milliseconds
+ * @msecs: delay duration.
+ */
+
+static inline void os_msleep(int msecs)
+{
+	rte_delay_ms(msecs);
+}
+
+#define DLB_PP_BASE(__is_ldb) ((__is_ldb) ? DLB_LDB_PP_BASE : DLB_DIR_PP_BASE)
+/**
+ * os_map_producer_port() - map a producer port into the caller's address space
+ * @hw: dlb_hw handle for a particular device.
+ * @port_id: port ID
+ * @is_ldb: true for load-balanced port, false for a directed port
+ *
+ * This function maps the requested producer port memory into the caller's
+ * address space.
+ *
+ * Return:
+ * Returns the base address at which the PP memory was mapped, else NULL.
+ */
+static inline void *os_map_producer_port(struct dlb_hw *hw,
+					 u8 port_id,
+					 bool is_ldb)
+{
+	uint64_t addr;
+	uint64_t pp_dma_base;
+
+
+	pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
+	addr = (pp_dma_base + (PAGE_SIZE * port_id));
+
+	return (void *)(uintptr_t)addr;
+
+}
+/**
+ * os_unmap_producer_port() - unmap a producer port
+ * @hw: dlb_hw handle for a particular device.
+ * @addr: mapped producer port address
+ *
+ * This function undoes os_map_producer_port() by unmapping the producer port
+ * memory from the caller's address space. It has no return value (the
+ * original comment incorrectly documented one).
+ */
+
+/* PFPMD - Nothing to do here, since memory was not actually mapped by us */
+static inline void os_unmap_producer_port(struct dlb_hw *hw, void *addr)
+{
+	RTE_SET_USED(hw);
+	RTE_SET_USED(addr);
+}
+/**
+ * os_enqueue_four_hcws() - enqueue four HCWs to DLB
+ * @hw: dlb_hw handle for a particular device.
+ * @hcw: pointer to the 64B-aligned contiguous HCW memory
+ * @addr: producer port address
+ */
+static inline void os_enqueue_four_hcws(struct dlb_hw *hw,
+					struct dlb_hcw *hcw,
+					void *addr)
+{
+	struct dlb_dev *dlb_dev;
+
+	dlb_dev = container_of(hw, struct dlb_dev, hw);
+
+	dlb_dev->enqueue_four(hcw, addr);
+}
+
+/**
+ * os_fence_hcw() - fence an HCW to ensure it arrives at the device
+ * @hw: dlb_hw handle for a particular device.
+ * @pp_addr: producer port address
+ */
+static inline void os_fence_hcw(struct dlb_hw *hw, u64 *pp_addr)
+{
+	RTE_SET_USED(hw);
+
+	/* To ensure outstanding HCWs reach the device, read the PP address. IA
+	 * memory ordering prevents reads from passing older writes, and the
+	 * mfence also ensures this.
+	 */
+	rte_mb();
+
+	*(volatile u64 *)pp_addr;
+}
+
+/* Map to PMDs logging interface */
+#define DLB_ERR(dev, fmt, args...) \
+	DLB_LOG_ERR(fmt, ## args)
+
+#define DLB_INFO(dev, fmt, args...) \
+	DLB_LOG_INFO(fmt, ## args)
+
+#define DLB_DEBUG(dev, fmt, args...) \
+	DLB_LOG_DEBUG(fmt, ## args)
+
+/**
+ * DLB_HW_ERR() - log an error message
+ * @dlb: dlb_hw handle for a particular device.
+ * @...: variable string args.
+ */
+#define DLB_HW_ERR(dlb, ...) do {	\
+	RTE_SET_USED(dlb);		\
+	DLB_ERR(dlb, __VA_ARGS__);	\
+} while (0)
+
+/**
+ * DLB_HW_INFO() - log an info message
+ * @dlb: dlb_hw handle for a particular device.
+ * @...: variable string args.
+ */
+#define DLB_HW_INFO(dlb, ...) do {	\
+	RTE_SET_USED(dlb);		\
+	DLB_INFO(dlb, __VA_ARGS__);	\
+} while (0)
+
+/*** scheduling functions ***/
+
+/* The callback runs until it completes all outstanding QID->CQ
+ * map and unmap requests. To prevent deadlock, this function gives other
+ * threads a chance to grab the resource mutex and configure hardware.
+ */
+/* Worker-thread body: loop until all outstanding QID->CQ map and unmap
+ * procedures complete, releasing the resource mutex and yielding between
+ * attempts so other threads (and the application's CQ processing) can make
+ * progress. Clears worker_launched before exiting.
+ */
+static void *dlb_complete_queue_map_unmap(void *__args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)__args;
+	int ret;
+
+	while (1) {
+		rte_spinlock_lock(&dlb_dev->resource_mutex);
+
+		ret = dlb_finish_unmap_qid_procedures(&dlb_dev->hw);
+		ret += dlb_finish_map_qid_procedures(&dlb_dev->hw);
+
+		/* Brace both branches (the original left the else unbraced,
+		 * an unbalanced-brace style defect).
+		 */
+		if (ret != 0) {
+			rte_spinlock_unlock(&dlb_dev->resource_mutex);
+			/* Relinquish the CPU so the application can process
+			 * its CQs, so this function does not deadlock.
+			 */
+			sched_yield();
+		} else {
+			break;
+		}
+	}
+
+	dlb_dev->worker_launched = false;
+
+	rte_spinlock_unlock(&dlb_dev->resource_mutex);
+
+	return NULL;
+}
+
+
+/**
+ * os_schedule_work() - launch a thread to process pending map and unmap work
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function launches a thread that will run until all pending
+ * map and unmap procedures are complete.
+ */
+static inline void os_schedule_work(struct dlb_hw *hw)
+{
+	pthread_t waiter_thread;
+	struct dlb_dev *dlb_dev;
+	int err;
+
+	dlb_dev = container_of(hw, struct dlb_dev, hw);
+
+	err = rte_ctrl_thread_create(&waiter_thread,
+				     "dlb_queue_unmap_waiter",
+				     NULL,
+				     dlb_complete_queue_map_unmap,
+				     dlb_dev);
+	if (err)
+		DLB_ERR(dlb_dev,
+			"Could not create queue complete map/unmap thread, err=%d\n",
+			err);
+	else
+		dlb_dev->worker_launched = true;
+}
+
+/**
+ * os_worker_active() - query whether the map/unmap worker thread is active
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function returns a boolean indicating whether a thread (launched by
+ * os_schedule_work()) is active. This function is used to determine
+ * whether or not to launch a worker thread.
+ */
+static inline bool os_worker_active(struct dlb_hw *hw)
+{
+	struct dlb_dev *dlb_dev;
+
+	dlb_dev = container_of(hw, struct dlb_dev, hw);
+
+	return dlb_dev->worker_launched;
+}
+
+/**
+ * os_notify_user_space() - notify user space
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: ID of domain to notify.
+ * @alert_id: alert ID.
+ * @aux_alert_data: additional alert data.
+ *
+ * This function notifies user space of an alert (such as a remote queue
+ * unregister or hardware alarm).
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ */
+static inline int os_notify_user_space(struct dlb_hw *hw,
+				       u32 domain_id,
+				       u64 alert_id,
+				       u64 aux_alert_data)
+{
+	RTE_SET_USED(hw);
+	RTE_SET_USED(domain_id);
+	RTE_SET_USED(alert_id);
+	RTE_SET_USED(aux_alert_data);
+
+	/* Not called for PF PMD */
+	return -1;
+}
+
+enum dlb_dev_revision {
+	DLB_A0,
+	DLB_A1,
+	DLB_A2,
+	DLB_A3,
+	DLB_B0,
+};
+
+/**
+ * os_get_dev_revision() - query the device_revision
+ * @hw: dlb_hw handle for a particular device.
+ */
+static inline enum dlb_dev_revision os_get_dev_revision(struct dlb_hw *hw)
+{
+	uint32_t a, b, c, d, stepping;
+
+	RTE_SET_USED(hw);
+
+	__cpuid(0x1, a, b, c, d);
+
+	stepping = a & 0xf;
+
+	switch (stepping) {
+	case 0:
+		return DLB_A0;
+	case 1:
+		return DLB_A1;
+	case 2:
+		return DLB_A2;
+	case 3:
+		return DLB_A3;
+	default:
+		/* Treat all revisions >= 4 as B0 */
+		return DLB_B0;
+	}
+}
+
+#endif /*  __DLB_OSDEP_H__ */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h b/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
new file mode 100644
index 0000000..00ab732
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_osdep_bitmap.h
@@ -0,0 +1,441 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_OSDEP_BITMAP_H__
+#define __DLB_OSDEP_BITMAP_H__
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <rte_bitmap.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include "../dlb_main.h"
+
+/*************************/
+/*** Bitmap operations ***/
+/*************************/
+struct dlb_bitmap {
+	struct rte_bitmap *map;
+	unsigned int len;
+	struct dlb_hw *hw;
+};
+
+/**
+ * dlb_bitmap_alloc() - alloc a bitmap data structure
+ * @bitmap: pointer to dlb_bitmap structure pointer.
+ * @len: number of entries in the bitmap.
+ *
+ * This function allocates a bitmap and initializes it with length @len. All
+ * entries are initially zero.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or len is 0.
+ * ENOMEM - could not allocate memory for the bitmap data structure.
+ */
+static inline int dlb_bitmap_alloc(struct dlb_hw *hw,
+				   struct dlb_bitmap **bitmap,
+				   unsigned int len)
+{
+	struct dlb_bitmap *bm;
+	void *mem;
+	uint32_t alloc_size;
+	uint32_t nbits = (uint32_t) len;
+	RTE_SET_USED(hw);
+
+	if (bitmap == NULL || nbits == 0)
+		return -EINVAL;
+
+	/* Allocate DLB bitmap control struct */
+	bm = rte_malloc("DLB_PF",
+		sizeof(struct dlb_bitmap),
+		RTE_CACHE_LINE_SIZE);
+
+	if (bm == NULL)
+		return -ENOMEM;
+
+	/* Allocate bitmap memory */
+	alloc_size = rte_bitmap_get_memory_footprint(nbits);
+	mem = rte_malloc("DLB_PF_BITMAP", alloc_size, RTE_CACHE_LINE_SIZE);
+	if (mem == NULL) {
+		rte_free(bm);
+		return -ENOMEM;
+	}
+
+	bm->map = rte_bitmap_init(len, mem, alloc_size);
+	if (bm->map == NULL) {
+		rte_free(mem);
+		rte_free(bm);
+		return -ENOMEM;
+	}
+
+	bm->len = len;
+
+	*bitmap = bm;
+
+	return 0;
+}
+
+/**
+ * dlb_bitmap_free() - free a previously allocated bitmap data structure
+ * @bitmap: pointer to dlb_bitmap structure.
+ *
+ * This function frees a bitmap that was allocated with dlb_bitmap_alloc().
+ */
+static inline void dlb_bitmap_free(struct dlb_bitmap *bitmap)
+{
+	if (bitmap == NULL)
+		return;
+
+	rte_free(bitmap->map);
+	rte_free(bitmap);
+}
+
+/**
+ * dlb_bitmap_fill() - fill a bitmap with all 1s
+ * @bitmap: pointer to dlb_bitmap structure.
+ *
+ * This function sets all bitmap values to 1.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int dlb_bitmap_fill(struct dlb_bitmap *bitmap)
+{
+	unsigned int i;
+
+	if (bitmap  == NULL || bitmap->map == NULL)
+		return -EINVAL;
+
+	for (i = 0; i != bitmap->len; i++)
+		rte_bitmap_set(bitmap->map, i);
+
+	return 0;
+}
+
+/**
+ * dlb_bitmap_zero() - fill a bitmap with all 0s
+ * @bitmap: pointer to dlb_bitmap structure.
+ *
+ * This function sets all bitmap values to 0.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int dlb_bitmap_zero(struct dlb_bitmap *bitmap)
+{
+	if (bitmap  == NULL || bitmap->map == NULL)
+		return -EINVAL;
+
+	rte_bitmap_reset(bitmap->map);
+
+	return 0;
+}
+
+/**
+ * dlb_bitmap_set() - set a bitmap entry
+ * @bitmap: pointer to dlb_bitmap structure.
+ * @bit: bit index.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
+ *	    bitmap length.
+ */
+static inline int dlb_bitmap_set(struct dlb_bitmap *bitmap,
+				 unsigned int bit)
+{
+	if (bitmap  == NULL || bitmap->map == NULL)
+		return -EINVAL;
+
+	if (bitmap->len <= bit)
+		return -EINVAL;
+
+	rte_bitmap_set(bitmap->map, bit);
+
+	return 0;
+}
+
+/**
+ * dlb_bitmap_set_range() - set a range of bitmap entries
+ * @bitmap: pointer to dlb_bitmap structure.
+ * @bit: starting bit index.
+ * @len: length of the range.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
+ *	    length.
+ */
+static inline int dlb_bitmap_set_range(struct dlb_bitmap *bitmap,
+				       unsigned int bit,
+				       unsigned int len)
+{
+	unsigned int i;
+
+	if (bitmap == NULL || bitmap->map == NULL)
+		return -EINVAL;
+
+	/* Validate the end of the range too: the original only checked the
+	 * starting bit, so bit + len could run past the bitmap, contradicting
+	 * the documented EINVAL for a range exceeding the bitmap length.
+	 * (len - bit form avoids unsigned overflow in bit + len.)
+	 */
+	if (bitmap->len <= bit || bitmap->len - bit < len)
+		return -EINVAL;
+
+	for (i = 0; i != len; i++)
+		rte_bitmap_set(bitmap->map, bit + i);
+
+	return 0;
+}
+
+/**
+ * dlb_bitmap_clear() - clear a bitmap entry
+ * @bitmap: pointer to dlb_bitmap structure.
+ * @bit: bit index.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
+ *	    bitmap length.
+ */
+static inline int dlb_bitmap_clear(struct dlb_bitmap *bitmap,
+				   unsigned int bit)
+{
+	if (bitmap  == NULL || bitmap->map == NULL)
+		return -EINVAL;
+
+	if (bitmap->len <= bit)
+		return -EINVAL;
+
+	rte_bitmap_clear(bitmap->map, bit);
+
+	return 0;
+}
+
+/**
+ * dlb_bitmap_clear_range() - clear a range of bitmap entries
+ * @bitmap: pointer to dlb_bitmap structure.
+ * @bit: starting bit index.
+ * @len: length of the range.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
+ *	    length.
+ */
+static inline int dlb_bitmap_clear_range(struct dlb_bitmap *bitmap,
+					 unsigned int bit,
+					 unsigned int len)
+{
+	unsigned int i;
+
+	if (bitmap == NULL || bitmap->map == NULL)
+		return -EINVAL;
+
+	/* Validate the end of the range too: the original only checked the
+	 * starting bit, so bit + len could run past the bitmap, contradicting
+	 * the documented EINVAL for a range exceeding the bitmap length.
+	 */
+	if (bitmap->len <= bit || bitmap->len - bit < len)
+		return -EINVAL;
+
+	for (i = 0; i != len; i++)
+		rte_bitmap_clear(bitmap->map, bit + i);
+
+	return 0;
+}
+
+/**
+ * dlb_bitmap_find_set_bit_range() - find a range of set bits
+ * @bitmap: pointer to dlb_bitmap structure.
+ * @len: length of the range.
+ *
+ * This function looks for a range of @len consecutive set bits.
+ *
+ * Return:
+ * Returns the base bit index upon success, < 0 otherwise.
+ *
+ * Errors:
+ * ENOENT - unable to find a length *len* range of set bits.
+ * EINVAL - bitmap is NULL or is uninitialized, or len is zero.
+ */
+static inline int dlb_bitmap_find_set_bit_range(struct dlb_bitmap *bitmap,
+						unsigned int len)
+{
+	/* j tracks the length of the current run of consecutive set bits */
+	unsigned int i, j = 0;
+
+	if (bitmap  == NULL || bitmap->map  == NULL || len == 0)
+		return -EINVAL;
+
+	if (bitmap->len < len)
+		return -ENOENT;
+
+	for (i = 0; i != bitmap->len; i++) {
+		if  (rte_bitmap_get(bitmap->map, i)) {
+			/* Run is long enough: return its first bit's index */
+			if (++j == len)
+				return i - j + 1;
+		} else
+			j = 0;
+	}
+
+	/* No set bit range of length len? */
+	return -ENOENT;
+}
+
+/**
+ * dlb_bitmap_find_set_bit() - find the first set bit
+ * @bitmap: pointer to dlb_bitmap structure.
+ *
+ * This function looks for a single set bit.
+ *
+ * Return:
+ * Returns the index of the lowest set bit upon success, < 0 otherwise.
+ *
+ * Errors:
+ * ENOENT - the bitmap contains no set bits.
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int dlb_bitmap_find_set_bit(struct dlb_bitmap *bitmap)
+{
+	unsigned int i;
+
+	if (bitmap == NULL)
+		return -EINVAL;
+
+	if (bitmap->map == NULL)
+		return -EINVAL;
+
+	for (i = 0; i != bitmap->len; i++) {
+		if  (rte_bitmap_get(bitmap->map, i))
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * dlb_bitmap_count() - returns the number of set bits
+ * @bitmap: pointer to dlb_bitmap structure.
+ *
+ * This function counts the number of set bits in the bitmap.
+ *
+ * Return:
+ * Returns the number of set bits upon success, <0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int dlb_bitmap_count(struct dlb_bitmap *bitmap)
+{
+	int weight = 0;
+	unsigned int i;
+
+	if (bitmap == NULL)
+		return -EINVAL;
+
+	if (bitmap->map == NULL)
+		return -EINVAL;
+
+	for (i = 0; i != bitmap->len; i++) {
+		if  (rte_bitmap_get(bitmap->map, i))
+			weight++;
+	}
+	return weight;
+}
+
+/**
+ * dlb_bitmap_longest_set_range() - returns longest contiguous range of set bits
+ * @bitmap: pointer to dlb_bitmap structure.
+ *
+ * Return:
+ * Returns the length of the bitmap's longest contiguous range of set bits
+ * upon success, <0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int dlb_bitmap_longest_set_range(struct dlb_bitmap *bitmap)
+{
+	int max_len = 0, len = 0;
+	unsigned int i;
+
+	if (bitmap == NULL)
+		return -EINVAL;
+
+	if (bitmap->map == NULL)
+		return -EINVAL;
+
+	for (i = 0; i != bitmap->len; i++) {
+		if  (rte_bitmap_get(bitmap->map, i)) {
+			len++;
+		} else {
+			if (len > max_len)
+				max_len = len;
+			len = 0;
+		}
+	}
+
+	/* Account for a run of set bits that extends to the final bit */
+	if (len > max_len)
+		max_len = len;
+
+	return max_len;
+}
+
+/**
+ * dlb_bitmap_or() - store the logical 'or' of two bitmaps into a third
+ * @dest: pointer to dlb_bitmap structure, which will contain the results of
+ *	  the 'or' of src1 and src2.
+ * @src1: pointer to dlb_bitmap structure, will be 'or'ed with src2.
+ * @src2: pointer to dlb_bitmap structure, will be 'or'ed with src1.
+ *
+ * This function 'or's two bitmaps together and stores the result in a third
+ * bitmap. The source and destination bitmaps can be the same. Only the first
+ * min(dest->len, src1->len, src2->len) bits are computed; any remaining bits
+ * of dest are left unmodified.
+ *
+ * Return:
+ * Returns the number of set bits in the computed range upon success,
+ * <0 otherwise.
+ *
+ * Errors:
+ * EINVAL - One of the bitmaps is NULL or is uninitialized.
+ */
+static inline int dlb_bitmap_or(struct dlb_bitmap *dest,
+				struct dlb_bitmap *src1,
+				struct dlb_bitmap *src2)
+{
+	unsigned int i, min;
+	int numset = 0;
+
+	if (dest  == NULL || dest->map == NULL ||
+	    src1 == NULL || src1->map == NULL ||
+	    src2  == NULL || src2->map == NULL)
+		return -EINVAL;
+
+	/* Clamp the operation to the shortest of the three bitmaps */
+	min = dest->len;
+	min = (min > src1->len) ? src1->len : min;
+	min = (min > src2->len) ? src2->len : min;
+
+	for (i = 0; i != min; i++) {
+		if  (rte_bitmap_get(src1->map, i) ||
+				rte_bitmap_get(src2->map, i)) {
+			rte_bitmap_set(dest->map, i);
+			numset++;
+		} else
+			rte_bitmap_clear(dest->map, i);
+	}
+
+	return numset;
+}
+
+#endif /*  __DLB_OSDEP_BITMAP_H__ */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep_list.h b/drivers/event/dlb/pf/base/dlb_osdep_list.h
new file mode 100644
index 0000000..a53b362
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_osdep_list.h
@@ -0,0 +1,131 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_OSDEP_LIST_H__
+#define __DLB_OSDEP_LIST_H__
+
+#include <rte_tailq.h>
+
+struct dlb_list_entry {
+	TAILQ_ENTRY(dlb_list_entry) node;
+};
+
+/* Dummy - just a struct definition */
+TAILQ_HEAD(dlb_list_head, dlb_list_entry);
+
+/* =================
+ * TAILQ Supplements
+ * =================
+ */
+
+#ifndef TAILQ_FOREACH_ENTRY
+#define TAILQ_FOREACH_ENTRY(ptr, head, name, iter)		\
+	for ((iter) = TAILQ_FIRST(&head);			\
+	    (iter)						\
+		&& (ptr = container_of(iter, typeof(*(ptr)), name)); \
+	    (iter) = TAILQ_NEXT((iter), node))
+#endif
+
+#ifndef TAILQ_FOREACH_ENTRY_SAFE
+#define TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, iter, tvar)	\
+	for ((iter) = TAILQ_FIRST(&head);			\
+	    (iter) &&						\
+		(ptr = container_of(iter, typeof(*(ptr)), name)) &&\
+		((tvar) = TAILQ_NEXT((iter), node), 1);	\
+	    (iter) = (tvar))
+#endif
+
+/* =========
+ * DLB Lists
+ * =========
+ */
+
+/**
+ * dlb_list_init_head() - initialize the head of a list
+ * @head: list head to initialize; the list becomes empty.
+ */
+static inline void dlb_list_init_head(struct dlb_list_head *head)
+{
+	TAILQ_INIT(head);
+}
+
+/**
+ * dlb_list_add() - add an entry to a list
+ * @head: list the new entry will be appended to.
+ * @entry: new list entry to be added.
+ *
+ * The entry is inserted at the tail of the list.
+ */
+static inline void dlb_list_add(struct dlb_list_head *head,
+				struct dlb_list_entry *entry)
+{
+	TAILQ_INSERT_TAIL(head, entry, node);
+}
+
+/**
+ * dlb_list_del() - delete an entry from a list
+ * @head: list head
+ * @entry: list entry to be deleted
+ */
+static inline void dlb_list_del(struct dlb_list_head *head,
+				struct dlb_list_entry *entry)
+{
+	TAILQ_REMOVE(head, entry, node);
+}
+
+/**
+ * dlb_list_empty() - check if a list is empty
+ * @head: list head
+ *
+ * Return:
+ * Returns true if the list is empty, false otherwise.
+ */
+static inline bool dlb_list_empty(struct dlb_list_head *head)
+{
+	return TAILQ_EMPTY(head);
+}
+
+/**
+ * dlb_list_splice() - splice one list onto the tail of another
+ * @src_head: list to be added; emptied by the operation.
+ * @head: list that src_head will be appended to.
+ */
+static inline void dlb_list_splice(struct dlb_list_head *src_head,
+				   struct dlb_list_head *head)
+{
+	TAILQ_CONCAT(head, src_head, node);
+}
+
+/**
+ * DLB_LIST_HEAD() - retrieve the head of the list
+ * @head: list head
+ * @type: type of the list variable
+ * @name: name of the dlb_list within the struct
+ */
+#define DLB_LIST_HEAD(head, type, name)				\
+	(TAILQ_FIRST(&head) ?					\
+		container_of(TAILQ_FIRST(&head), type, name) :	\
+		NULL)
+
+/**
+ * DLB_LIST_FOR_EACH() - iterate over a list
+ * @head: list head
+ * @ptr: pointer to struct containing a struct dlb_list_entry
+ * @name: name of the dlb_list_entry field within the containing struct
+ * @tmp_iter: iterator variable
+ */
+#define DLB_LIST_FOR_EACH(head, ptr, name, tmp_iter) \
+	TAILQ_FOREACH_ENTRY(ptr, head, name, tmp_iter)
+
+/**
+ * DLB_LIST_FOR_EACH_SAFE() - iterate over a list. This loop works even if
+ * an element is removed from the list while processing it.
+ * @ptr: pointer to struct containing a struct dlb_list_entry
+ * @ptr_tmp: pointer to struct containing a struct dlb_list_entry (temporary)
+ * @head: list head
+ * @name: name of the dlb_list_entry field within the containing struct
+ * @tmp_iter: iterator variable
+ * @saf_iter: iterator variable (temporary)
+ */
+#define DLB_LIST_FOR_EACH_SAFE(head, ptr, ptr_tmp, name, tmp_iter, saf_iter) \
+	TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, tmp_iter, saf_iter)
+
+#endif /*  __DLB_OSDEP_LIST_H__ */
diff --git a/drivers/event/dlb/pf/base/dlb_osdep_types.h b/drivers/event/dlb/pf/base/dlb_osdep_types.h
new file mode 100644
index 0000000..2e9d7d8
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_osdep_types.h
@@ -0,0 +1,31 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_OSDEP_TYPES_H
+#define __DLB_OSDEP_TYPES_H
+
+#include <linux/types.h>
+
+#include <inttypes.h>
+#include <ctype.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+/* Types for user mode PF PMD */
+typedef uint8_t         u8;
+typedef int8_t          s8;
+typedef uint16_t        u16;
+typedef int16_t         s16;
+typedef uint32_t        u32;
+typedef int32_t         s32;
+typedef uint64_t        u64;
+
+#define __iomem
+
+/* END types for user mode PF PMD */
+
+#endif /* __DLB_OSDEP_TYPES_H */
diff --git a/drivers/event/dlb/pf/base/dlb_regs.h b/drivers/event/dlb/pf/base/dlb_regs.h
new file mode 100644
index 0000000..a1c63f3
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_regs.h
@@ -0,0 +1,2368 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_REGS_H
+#define __DLB_REGS_H
+
+#include "dlb_osdep_types.h"
+
+#define DLB_MSIX_MEM_VECTOR_CTRL(x) \
+	(0x100000c + (x) * 0x10)
+#define DLB_MSIX_MEM_VECTOR_CTRL_RST 0x1
+union dlb_msix_mem_vector_ctrl {
+	struct {
+		u32 vec_mask : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_TOTAL_VAS 0x124
+#define DLB_SYS_TOTAL_VAS_RST 0x20
+union dlb_sys_total_vas {
+	struct {
+		u32 total_vas : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_ALARM_PF_SYND2 0x508
+#define DLB_SYS_ALARM_PF_SYND2_RST 0x0
+union dlb_sys_alarm_pf_synd2 {
+	struct {
+		u32 lock_id : 16;
+		u32 meas : 1;
+		u32 debug : 7;
+		u32 cq_pop : 1;
+		u32 qe_uhl : 1;
+		u32 qe_orsp : 1;
+		u32 qe_valid : 1;
+		u32 cq_int_rearm : 1;
+		u32 dsi_error : 1;
+		u32 rsvd0 : 2;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_ALARM_PF_SYND1 0x504
+#define DLB_SYS_ALARM_PF_SYND1_RST 0x0
+union dlb_sys_alarm_pf_synd1 {
+	struct {
+		u32 dsi : 16;
+		u32 qid : 8;
+		u32 qtype : 2;
+		u32 qpri : 3;
+		u32 msg_type : 3;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_ALARM_PF_SYND0 0x500
+#define DLB_SYS_ALARM_PF_SYND0_RST 0x0
+union dlb_sys_alarm_pf_synd0 {
+	struct {
+		u32 syndrome : 8;
+		u32 rtype : 2;
+		u32 rsvd0 : 2;
+		u32 from_dmv : 1;
+		u32 is_ldb : 1;
+		u32 cls : 2;
+		u32 aid : 6;
+		u32 unit : 4;
+		u32 source : 4;
+		u32 more : 1;
+		u32 valid : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_VASQID_V(x) \
+	(0xf60 + (x) * 0x1000)
+#define DLB_SYS_LDB_VASQID_V_RST 0x0
+union dlb_sys_ldb_vasqid_v {
+	struct {
+		u32 vasqid_v : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_VASQID_V(x) \
+	(0xf68 + (x) * 0x1000)
+#define DLB_SYS_DIR_VASQID_V_RST 0x0
+union dlb_sys_dir_vasqid_v {
+	struct {
+		u32 vasqid_v : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_WBUF_DIR_FLAGS(x) \
+	(0xf70 + (x) * 0x1000)
+#define DLB_SYS_WBUF_DIR_FLAGS_RST 0x0
+union dlb_sys_wbuf_dir_flags {
+	struct {
+		u32 wb_v : 4;
+		u32 cl : 1;
+		u32 busy : 1;
+		u32 opt : 1;
+		u32 rsvd0 : 25;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_WBUF_LDB_FLAGS(x) \
+	(0xf78 + (x) * 0x1000)
+#define DLB_SYS_WBUF_LDB_FLAGS_RST 0x0
+union dlb_sys_wbuf_ldb_flags {
+	struct {
+		u32 wb_v : 4;
+		u32 cl : 1;
+		u32 busy : 1;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_QID_V(x) \
+	(0x8000034 + (x) * 0x1000)
+#define DLB_SYS_LDB_QID_V_RST 0x0
+union dlb_sys_ldb_qid_v {
+	struct {
+		u32 qid_v : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_QID_CFG_V(x) \
+	(0x8000030 + (x) * 0x1000)
+#define DLB_SYS_LDB_QID_CFG_V_RST 0x0
+union dlb_sys_ldb_qid_cfg_v {
+	struct {
+		u32 sn_cfg_v : 1;
+		u32 fid_cfg_v : 1;
+		u32 rsvd0 : 30;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_QID_V(x) \
+	(0x8000040 + (x) * 0x1000)
+#define DLB_SYS_DIR_QID_V_RST 0x0
+union dlb_sys_dir_qid_v {
+	struct {
+		u32 qid_v : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_POOL_ENBLD(x) \
+	(0x8000070 + (x) * 0x1000)
+#define DLB_SYS_LDB_POOL_ENBLD_RST 0x0
+union dlb_sys_ldb_pool_enbld {
+	struct {
+		u32 pool_enabled : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_POOL_ENBLD(x) \
+	(0x8000080 + (x) * 0x1000)
+#define DLB_SYS_DIR_POOL_ENBLD_RST 0x0
+union dlb_sys_dir_pool_enbld {
+	struct {
+		u32 pool_enabled : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP2VPP(x) \
+	(0x8000090 + (x) * 0x1000)
+#define DLB_SYS_LDB_PP2VPP_RST 0x0
+union dlb_sys_ldb_pp2vpp {
+	struct {
+		u32 vpp : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP2VPP(x) \
+	(0x8000094 + (x) * 0x1000)
+#define DLB_SYS_DIR_PP2VPP_RST 0x0
+union dlb_sys_dir_pp2vpp {
+	struct {
+		u32 vpp : 7;
+		u32 rsvd0 : 25;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP_V(x) \
+	(0x8000128 + (x) * 0x1000)
+#define DLB_SYS_LDB_PP_V_RST 0x0
+union dlb_sys_ldb_pp_v {
+	struct {
+		u32 pp_v : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_CQ_ISR(x) \
+	(0x8000124 + (x) * 0x1000)
+#define DLB_SYS_LDB_CQ_ISR_RST 0x0
+/* CQ Interrupt Modes */
+#define DLB_CQ_ISR_MODE_DIS  0
+#define DLB_CQ_ISR_MODE_MSI  1
+#define DLB_CQ_ISR_MODE_MSIX 2
+union dlb_sys_ldb_cq_isr {
+	struct {
+		u32 vector : 6;
+		u32 vf : 4;
+		u32 en_code : 2;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_CQ2VF_PF(x) \
+	(0x8000120 + (x) * 0x1000)
+#define DLB_SYS_LDB_CQ2VF_PF_RST 0x0
+union dlb_sys_ldb_cq2vf_pf {
+	struct {
+		u32 vf : 4;
+		u32 is_pf : 1;
+		u32 rsvd0 : 27;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP2VAS(x) \
+	(0x800011c + (x) * 0x1000)
+#define DLB_SYS_LDB_PP2VAS_RST 0x0
+union dlb_sys_ldb_pp2vas {
+	struct {
+		u32 vas : 5;
+		u32 rsvd0 : 27;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP2LDBPOOL(x) \
+	(0x8000118 + (x) * 0x1000)
+#define DLB_SYS_LDB_PP2LDBPOOL_RST 0x0
+union dlb_sys_ldb_pp2ldbpool {
+	struct {
+		u32 ldbpool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP2DIRPOOL(x) \
+	(0x8000114 + (x) * 0x1000)
+#define DLB_SYS_LDB_PP2DIRPOOL_RST 0x0
+union dlb_sys_ldb_pp2dirpool {
+	struct {
+		u32 dirpool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP2VF_PF(x) \
+	(0x8000110 + (x) * 0x1000)
+#define DLB_SYS_LDB_PP2VF_PF_RST 0x0
+union dlb_sys_ldb_pp2vf_pf {
+	struct {
+		u32 vf : 4;
+		u32 is_pf : 1;
+		u32 rsvd0 : 27;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP_ADDR_U(x) \
+	(0x800010c + (x) * 0x1000)
+#define DLB_SYS_LDB_PP_ADDR_U_RST 0x0
+union dlb_sys_ldb_pp_addr_u {
+	struct {
+		u32 addr_u : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_PP_ADDR_L(x) \
+	(0x8000108 + (x) * 0x1000)
+#define DLB_SYS_LDB_PP_ADDR_L_RST 0x0
+union dlb_sys_ldb_pp_addr_l {
+	struct {
+		u32 rsvd0 : 7;
+		u32 addr_l : 25;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_CQ_ADDR_U(x) \
+	(0x8000104 + (x) * 0x1000)
+#define DLB_SYS_LDB_CQ_ADDR_U_RST 0x0
+union dlb_sys_ldb_cq_addr_u {
+	struct {
+		u32 addr_u : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_CQ_ADDR_L(x) \
+	(0x8000100 + (x) * 0x1000)
+#define DLB_SYS_LDB_CQ_ADDR_L_RST 0x0
+union dlb_sys_ldb_cq_addr_l {
+	struct {
+		u32 rsvd0 : 6;
+		u32 addr_l : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP_V(x) \
+	(0x8000228 + (x) * 0x1000)
+#define DLB_SYS_DIR_PP_V_RST 0x0
+union dlb_sys_dir_pp_v {
+	struct {
+		u32 pp_v : 1;
+		u32 mb_dm : 1;
+		u32 rsvd0 : 30;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ_ISR(x) \
+	(0x8000224 + (x) * 0x1000)
+#define DLB_SYS_DIR_CQ_ISR_RST 0x0
+union dlb_sys_dir_cq_isr {
+	struct {
+		u32 vector : 6;
+		u32 vf : 4;
+		u32 en_code : 2;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ2VF_PF(x) \
+	(0x8000220 + (x) * 0x1000)
+#define DLB_SYS_DIR_CQ2VF_PF_RST 0x0
+union dlb_sys_dir_cq2vf_pf {
+	struct {
+		u32 vf : 4;
+		u32 is_pf : 1;
+		u32 rsvd0 : 27;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP2VAS(x) \
+	(0x800021c + (x) * 0x1000)
+#define DLB_SYS_DIR_PP2VAS_RST 0x0
+union dlb_sys_dir_pp2vas {
+	struct {
+		u32 vas : 5;
+		u32 rsvd0 : 27;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP2LDBPOOL(x) \
+	(0x8000218 + (x) * 0x1000)
+#define DLB_SYS_DIR_PP2LDBPOOL_RST 0x0
+union dlb_sys_dir_pp2ldbpool {
+	struct {
+		u32 ldbpool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP2DIRPOOL(x) \
+	(0x8000214 + (x) * 0x1000)
+#define DLB_SYS_DIR_PP2DIRPOOL_RST 0x0
+union dlb_sys_dir_pp2dirpool {
+	struct {
+		u32 dirpool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP2VF_PF(x) \
+	(0x8000210 + (x) * 0x1000)
+#define DLB_SYS_DIR_PP2VF_PF_RST 0x0
+union dlb_sys_dir_pp2vf_pf {
+	struct {
+		u32 vf : 4;
+		u32 is_pf : 1;
+		u32 is_hw_dsi : 1;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP_ADDR_U(x) \
+	(0x800020c + (x) * 0x1000)
+#define DLB_SYS_DIR_PP_ADDR_U_RST 0x0
+union dlb_sys_dir_pp_addr_u {
+	struct {
+		u32 addr_u : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_PP_ADDR_L(x) \
+	(0x8000208 + (x) * 0x1000)
+#define DLB_SYS_DIR_PP_ADDR_L_RST 0x0
+union dlb_sys_dir_pp_addr_l {
+	struct {
+		u32 rsvd0 : 7;
+		u32 addr_l : 25;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ_ADDR_U(x) \
+	(0x8000204 + (x) * 0x1000)
+#define DLB_SYS_DIR_CQ_ADDR_U_RST 0x0
+union dlb_sys_dir_cq_addr_u {
+	struct {
+		u32 addr_u : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ_ADDR_L(x) \
+	(0x8000200 + (x) * 0x1000)
+#define DLB_SYS_DIR_CQ_ADDR_L_RST 0x0
+union dlb_sys_dir_cq_addr_l {
+	struct {
+		u32 rsvd0 : 6;
+		u32 addr_l : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_INGRESS_ALARM_ENBL 0x300
+#define DLB_SYS_INGRESS_ALARM_ENBL_RST 0x0
+union dlb_sys_ingress_alarm_enbl {
+	struct {
+		u32 illegal_hcw : 1;
+		u32 illegal_pp : 1;
+		u32 disabled_pp : 1;
+		u32 illegal_qid : 1;
+		u32 disabled_qid : 1;
+		u32 illegal_ldb_qid_cfg : 1;
+		u32 illegal_cqid : 1;
+		u32 rsvd0 : 25;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_CQ_MODE 0x30c
+#define DLB_SYS_CQ_MODE_RST 0x0
+union dlb_sys_cq_mode {
+	struct {
+		u32 ldb_cq64 : 1;
+		u32 dir_cq64 : 1;
+		u32 rsvd0 : 30;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_MSIX_ACK 0x400
+#define DLB_SYS_MSIX_ACK_RST 0x0
+union dlb_sys_msix_ack {
+	struct {
+		u32 msix_0_ack : 1;
+		u32 msix_1_ack : 1;
+		u32 msix_2_ack : 1;
+		u32 msix_3_ack : 1;
+		u32 msix_4_ack : 1;
+		u32 msix_5_ack : 1;
+		u32 msix_6_ack : 1;
+		u32 msix_7_ack : 1;
+		u32 msix_8_ack : 1;
+		u32 rsvd0 : 23;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_MSIX_PASSTHRU 0x404
+#define DLB_SYS_MSIX_PASSTHRU_RST 0x0
+union dlb_sys_msix_passthru {
+	struct {
+		u32 msix_0_passthru : 1;
+		u32 msix_1_passthru : 1;
+		u32 msix_2_passthru : 1;
+		u32 msix_3_passthru : 1;
+		u32 msix_4_passthru : 1;
+		u32 msix_5_passthru : 1;
+		u32 msix_6_passthru : 1;
+		u32 msix_7_passthru : 1;
+		u32 msix_8_passthru : 1;
+		u32 rsvd0 : 23;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_MSIX_MODE 0x408
+#define DLB_SYS_MSIX_MODE_RST 0x0
+/* MSI-X Modes */
+#define DLB_MSIX_MODE_PACKED     0
+#define DLB_MSIX_MODE_COMPRESSED 1
+union dlb_sys_msix_mode {
+	struct {
+		u32 mode : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS 0x440
+#define DLB_SYS_DIR_CQ_31_0_OCC_INT_STS_RST 0x0
+union dlb_sys_dir_cq_31_0_occ_int_sts {
+	struct {
+		u32 cq_0_occ_int : 1;
+		u32 cq_1_occ_int : 1;
+		u32 cq_2_occ_int : 1;
+		u32 cq_3_occ_int : 1;
+		u32 cq_4_occ_int : 1;
+		u32 cq_5_occ_int : 1;
+		u32 cq_6_occ_int : 1;
+		u32 cq_7_occ_int : 1;
+		u32 cq_8_occ_int : 1;
+		u32 cq_9_occ_int : 1;
+		u32 cq_10_occ_int : 1;
+		u32 cq_11_occ_int : 1;
+		u32 cq_12_occ_int : 1;
+		u32 cq_13_occ_int : 1;
+		u32 cq_14_occ_int : 1;
+		u32 cq_15_occ_int : 1;
+		u32 cq_16_occ_int : 1;
+		u32 cq_17_occ_int : 1;
+		u32 cq_18_occ_int : 1;
+		u32 cq_19_occ_int : 1;
+		u32 cq_20_occ_int : 1;
+		u32 cq_21_occ_int : 1;
+		u32 cq_22_occ_int : 1;
+		u32 cq_23_occ_int : 1;
+		u32 cq_24_occ_int : 1;
+		u32 cq_25_occ_int : 1;
+		u32 cq_26_occ_int : 1;
+		u32 cq_27_occ_int : 1;
+		u32 cq_28_occ_int : 1;
+		u32 cq_29_occ_int : 1;
+		u32 cq_30_occ_int : 1;
+		u32 cq_31_occ_int : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS 0x444
+#define DLB_SYS_DIR_CQ_63_32_OCC_INT_STS_RST 0x0
+union dlb_sys_dir_cq_63_32_occ_int_sts {
+	struct {
+		u32 cq_32_occ_int : 1;
+		u32 cq_33_occ_int : 1;
+		u32 cq_34_occ_int : 1;
+		u32 cq_35_occ_int : 1;
+		u32 cq_36_occ_int : 1;
+		u32 cq_37_occ_int : 1;
+		u32 cq_38_occ_int : 1;
+		u32 cq_39_occ_int : 1;
+		u32 cq_40_occ_int : 1;
+		u32 cq_41_occ_int : 1;
+		u32 cq_42_occ_int : 1;
+		u32 cq_43_occ_int : 1;
+		u32 cq_44_occ_int : 1;
+		u32 cq_45_occ_int : 1;
+		u32 cq_46_occ_int : 1;
+		u32 cq_47_occ_int : 1;
+		u32 cq_48_occ_int : 1;
+		u32 cq_49_occ_int : 1;
+		u32 cq_50_occ_int : 1;
+		u32 cq_51_occ_int : 1;
+		u32 cq_52_occ_int : 1;
+		u32 cq_53_occ_int : 1;
+		u32 cq_54_occ_int : 1;
+		u32 cq_55_occ_int : 1;
+		u32 cq_56_occ_int : 1;
+		u32 cq_57_occ_int : 1;
+		u32 cq_58_occ_int : 1;
+		u32 cq_59_occ_int : 1;
+		u32 cq_60_occ_int : 1;
+		u32 cq_61_occ_int : 1;
+		u32 cq_62_occ_int : 1;
+		u32 cq_63_occ_int : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS 0x448
+#define DLB_SYS_DIR_CQ_95_64_OCC_INT_STS_RST 0x0
+union dlb_sys_dir_cq_95_64_occ_int_sts {
+	struct {
+		u32 cq_64_occ_int : 1;
+		u32 cq_65_occ_int : 1;
+		u32 cq_66_occ_int : 1;
+		u32 cq_67_occ_int : 1;
+		u32 cq_68_occ_int : 1;
+		u32 cq_69_occ_int : 1;
+		u32 cq_70_occ_int : 1;
+		u32 cq_71_occ_int : 1;
+		u32 cq_72_occ_int : 1;
+		u32 cq_73_occ_int : 1;
+		u32 cq_74_occ_int : 1;
+		u32 cq_75_occ_int : 1;
+		u32 cq_76_occ_int : 1;
+		u32 cq_77_occ_int : 1;
+		u32 cq_78_occ_int : 1;
+		u32 cq_79_occ_int : 1;
+		u32 cq_80_occ_int : 1;
+		u32 cq_81_occ_int : 1;
+		u32 cq_82_occ_int : 1;
+		u32 cq_83_occ_int : 1;
+		u32 cq_84_occ_int : 1;
+		u32 cq_85_occ_int : 1;
+		u32 cq_86_occ_int : 1;
+		u32 cq_87_occ_int : 1;
+		u32 cq_88_occ_int : 1;
+		u32 cq_89_occ_int : 1;
+		u32 cq_90_occ_int : 1;
+		u32 cq_91_occ_int : 1;
+		u32 cq_92_occ_int : 1;
+		u32 cq_93_occ_int : 1;
+		u32 cq_94_occ_int : 1;
+		u32 cq_95_occ_int : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS 0x44c
+#define DLB_SYS_DIR_CQ_127_96_OCC_INT_STS_RST 0x0
+union dlb_sys_dir_cq_127_96_occ_int_sts {
+	struct {
+		u32 cq_96_occ_int : 1;
+		u32 cq_97_occ_int : 1;
+		u32 cq_98_occ_int : 1;
+		u32 cq_99_occ_int : 1;
+		u32 cq_100_occ_int : 1;
+		u32 cq_101_occ_int : 1;
+		u32 cq_102_occ_int : 1;
+		u32 cq_103_occ_int : 1;
+		u32 cq_104_occ_int : 1;
+		u32 cq_105_occ_int : 1;
+		u32 cq_106_occ_int : 1;
+		u32 cq_107_occ_int : 1;
+		u32 cq_108_occ_int : 1;
+		u32 cq_109_occ_int : 1;
+		u32 cq_110_occ_int : 1;
+		u32 cq_111_occ_int : 1;
+		u32 cq_112_occ_int : 1;
+		u32 cq_113_occ_int : 1;
+		u32 cq_114_occ_int : 1;
+		u32 cq_115_occ_int : 1;
+		u32 cq_116_occ_int : 1;
+		u32 cq_117_occ_int : 1;
+		u32 cq_118_occ_int : 1;
+		u32 cq_119_occ_int : 1;
+		u32 cq_120_occ_int : 1;
+		u32 cq_121_occ_int : 1;
+		u32 cq_122_occ_int : 1;
+		u32 cq_123_occ_int : 1;
+		u32 cq_124_occ_int : 1;
+		u32 cq_125_occ_int : 1;
+		u32 cq_126_occ_int : 1;
+		u32 cq_127_occ_int : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS 0x460
+#define DLB_SYS_LDB_CQ_31_0_OCC_INT_STS_RST 0x0
+union dlb_sys_ldb_cq_31_0_occ_int_sts {
+	struct {
+		u32 cq_0_occ_int : 1;
+		u32 cq_1_occ_int : 1;
+		u32 cq_2_occ_int : 1;
+		u32 cq_3_occ_int : 1;
+		u32 cq_4_occ_int : 1;
+		u32 cq_5_occ_int : 1;
+		u32 cq_6_occ_int : 1;
+		u32 cq_7_occ_int : 1;
+		u32 cq_8_occ_int : 1;
+		u32 cq_9_occ_int : 1;
+		u32 cq_10_occ_int : 1;
+		u32 cq_11_occ_int : 1;
+		u32 cq_12_occ_int : 1;
+		u32 cq_13_occ_int : 1;
+		u32 cq_14_occ_int : 1;
+		u32 cq_15_occ_int : 1;
+		u32 cq_16_occ_int : 1;
+		u32 cq_17_occ_int : 1;
+		u32 cq_18_occ_int : 1;
+		u32 cq_19_occ_int : 1;
+		u32 cq_20_occ_int : 1;
+		u32 cq_21_occ_int : 1;
+		u32 cq_22_occ_int : 1;
+		u32 cq_23_occ_int : 1;
+		u32 cq_24_occ_int : 1;
+		u32 cq_25_occ_int : 1;
+		u32 cq_26_occ_int : 1;
+		u32 cq_27_occ_int : 1;
+		u32 cq_28_occ_int : 1;
+		u32 cq_29_occ_int : 1;
+		u32 cq_30_occ_int : 1;
+		u32 cq_31_occ_int : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS 0x464
+#define DLB_SYS_LDB_CQ_63_32_OCC_INT_STS_RST 0x0
+union dlb_sys_ldb_cq_63_32_occ_int_sts {
+	struct {
+		u32 cq_32_occ_int : 1;
+		u32 cq_33_occ_int : 1;
+		u32 cq_34_occ_int : 1;
+		u32 cq_35_occ_int : 1;
+		u32 cq_36_occ_int : 1;
+		u32 cq_37_occ_int : 1;
+		u32 cq_38_occ_int : 1;
+		u32 cq_39_occ_int : 1;
+		u32 cq_40_occ_int : 1;
+		u32 cq_41_occ_int : 1;
+		u32 cq_42_occ_int : 1;
+		u32 cq_43_occ_int : 1;
+		u32 cq_44_occ_int : 1;
+		u32 cq_45_occ_int : 1;
+		u32 cq_46_occ_int : 1;
+		u32 cq_47_occ_int : 1;
+		u32 cq_48_occ_int : 1;
+		u32 cq_49_occ_int : 1;
+		u32 cq_50_occ_int : 1;
+		u32 cq_51_occ_int : 1;
+		u32 cq_52_occ_int : 1;
+		u32 cq_53_occ_int : 1;
+		u32 cq_54_occ_int : 1;
+		u32 cq_55_occ_int : 1;
+		u32 cq_56_occ_int : 1;
+		u32 cq_57_occ_int : 1;
+		u32 cq_58_occ_int : 1;
+		u32 cq_59_occ_int : 1;
+		u32 cq_60_occ_int : 1;
+		u32 cq_61_occ_int : 1;
+		u32 cq_62_occ_int : 1;
+		u32 cq_63_occ_int : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_ALARM_HW_SYND 0x50c
+#define DLB_SYS_ALARM_HW_SYND_RST 0x0
+union dlb_sys_alarm_hw_synd {
+	struct {
+		u32 syndrome : 8;
+		u32 rtype : 2;
+		u32 rsvd0 : 2;
+		u32 from_dmv : 1;
+		u32 is_ldb : 1;
+		u32 cls : 2;
+		u32 aid : 6;
+		u32 unit : 4;
+		u32 source : 4;
+		u32 more : 1;
+		u32 valid : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_SYS_SYS_ALARM_INT_ENABLE 0xc001048
+#define DLB_SYS_SYS_ALARM_INT_ENABLE_RST 0x7fffff
+union dlb_sys_sys_alarm_int_enable {
+	struct {
+		u32 cq_addr_overflow_error : 1;
+		u32 ingress_perr : 1;
+		u32 egress_perr : 1;
+		u32 alarm_perr : 1;
+		u32 vf_to_pf_isr_pend_error : 1;
+		u32 pf_to_vf_isr_pend_error : 1;
+		u32 timeout_error : 1;
+		u32 dmvw_sm_error : 1;
+		u32 pptr_sm_par_error : 1;
+		u32 pptr_sm_len_error : 1;
+		u32 sch_sm_error : 1;
+		u32 wbuf_flag_error : 1;
+		u32 dmvw_cl_error : 1;
+		u32 dmvr_cl_error : 1;
+		u32 cmpl_data_error : 1;
+		u32 cmpl_error : 1;
+		u32 fifo_underflow : 1;
+		u32 fifo_overflow : 1;
+		u32 sb_ep_parity_err : 1;
+		u32 ti_parity_err : 1;
+		u32 ri_parity_err : 1;
+		u32 cfgm_ppw_err : 1;
+		u32 system_csr_perr : 1;
+		u32 rsvd0 : 9;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(x) \
+	(0x20000000 + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST 0x0
+union dlb_lsp_cq_ldb_tot_sch_cnt_ctrl {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_DSBL(x) \
+	(0x20000124 + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_DSBL_RST 0x1
+union dlb_lsp_cq_ldb_dsbl {
+	struct {
+		u32 disabled : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH(x) \
+	(0x20000120 + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_TOT_SCH_CNTH_RST 0x0
+union dlb_lsp_cq_ldb_tot_sch_cnth {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL(x) \
+	(0x2000011c + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_TOT_SCH_CNTL_RST 0x0
+union dlb_lsp_cq_ldb_tot_sch_cntl {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(x) \
+	(0x20000118 + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST 0x0
+union dlb_lsp_cq_ldb_tkn_depth_sel {
+	struct {
+		u32 token_depth_select : 4;
+		u32 ignore_depth : 1;
+		u32 enab_shallow_cq : 1;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_TKN_CNT(x) \
+	(0x20000114 + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_TKN_CNT_RST 0x0
+union dlb_lsp_cq_ldb_tkn_cnt {
+	struct {
+		u32 token_count : 11;
+		u32 rsvd0 : 21;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_INFL_LIM(x) \
+	(0x20000110 + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_INFL_LIM_RST 0x0
+union dlb_lsp_cq_ldb_infl_lim {
+	struct {
+		u32 limit : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_LDB_INFL_CNT(x) \
+	(0x2000010c + (x) * 0x1000)
+#define DLB_LSP_CQ_LDB_INFL_CNT_RST 0x0
+union dlb_lsp_cq_ldb_infl_cnt {
+	struct {
+		u32 count : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ2QID(x, y) \
+	(0x20000104 + (x) * 0x1000 + (y) * 0x4)
+#define DLB_LSP_CQ2QID_RST 0x0
+union dlb_lsp_cq2qid {
+	struct {
+		u32 qid_p0 : 7;
+		u32 rsvd3 : 1;
+		u32 qid_p1 : 7;
+		u32 rsvd2 : 1;
+		u32 qid_p2 : 7;
+		u32 rsvd1 : 1;
+		u32 qid_p3 : 7;
+		u32 rsvd0 : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ2PRIOV(x) \
+	(0x20000100 + (x) * 0x1000)
+#define DLB_LSP_CQ2PRIOV_RST 0x0
+union dlb_lsp_cq2priov {
+	struct {
+		u32 prio : 24;
+		u32 v : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_DIR_DSBL(x) \
+	(0x20000310 + (x) * 0x1000)
+#define DLB_LSP_CQ_DIR_DSBL_RST 0x1
+union dlb_lsp_cq_dir_dsbl {
+	struct {
+		u32 disabled : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(x) \
+	(0x2000030c + (x) * 0x1000)
+#define DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST 0x0
+union dlb_lsp_cq_dir_tkn_depth_sel_dsi {
+	struct {
+		u32 token_depth_select : 4;
+		u32 disable_wb_opt : 1;
+		u32 ignore_depth : 1;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH(x) \
+	(0x20000308 + (x) * 0x1000)
+#define DLB_LSP_CQ_DIR_TOT_SCH_CNTH_RST 0x0
+union dlb_lsp_cq_dir_tot_sch_cnth {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL(x) \
+	(0x20000304 + (x) * 0x1000)
+#define DLB_LSP_CQ_DIR_TOT_SCH_CNTL_RST 0x0
+union dlb_lsp_cq_dir_tot_sch_cntl {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CQ_DIR_TKN_CNT(x) \
+	(0x20000300 + (x) * 0x1000)
+#define DLB_LSP_CQ_DIR_TKN_CNT_RST 0x0
+union dlb_lsp_cq_dir_tkn_cnt {
+	struct {
+		u32 count : 11;
+		u32 rsvd0 : 21;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_LDB_QID2CQIDX(x, y) \
+	(0x20000400 + (x) * 0x1000 + (y) * 0x4)
+#define DLB_LSP_QID_LDB_QID2CQIDX_RST 0x0
+union dlb_lsp_qid_ldb_qid2cqidx {
+	struct {
+		u32 cq_p0 : 8;
+		u32 cq_p1 : 8;
+		u32 cq_p2 : 8;
+		u32 cq_p3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_LDB_QID2CQIDX2(x, y) \
+	(0x20000500 + (x) * 0x1000 + (y) * 0x4)
+#define DLB_LSP_QID_LDB_QID2CQIDX2_RST 0x0
+union dlb_lsp_qid_ldb_qid2cqidx2 {
+	struct {
+		u32 cq_p0 : 8;
+		u32 cq_p1 : 8;
+		u32 cq_p2 : 8;
+		u32 cq_p3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_ATQ_ENQUEUE_CNT(x) \
+	(0x2000066c + (x) * 0x1000)
+#define DLB_LSP_QID_ATQ_ENQUEUE_CNT_RST 0x0
+union dlb_lsp_qid_atq_enqueue_cnt {
+	struct {
+		u32 count : 15;
+		u32 rsvd0 : 17;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_LDB_INFL_LIM(x) \
+	(0x2000064c + (x) * 0x1000)
+#define DLB_LSP_QID_LDB_INFL_LIM_RST 0x0
+union dlb_lsp_qid_ldb_infl_lim {
+	struct {
+		u32 limit : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_LDB_INFL_CNT(x) \
+	(0x2000062c + (x) * 0x1000)
+#define DLB_LSP_QID_LDB_INFL_CNT_RST 0x0
+union dlb_lsp_qid_ldb_infl_cnt {
+	struct {
+		u32 count : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_AQED_ACTIVE_LIM(x) \
+	(0x20000628 + (x) * 0x1000)
+#define DLB_LSP_QID_AQED_ACTIVE_LIM_RST 0x0
+union dlb_lsp_qid_aqed_active_lim {
+	struct {
+		u32 limit : 12;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_AQED_ACTIVE_CNT(x) \
+	(0x20000624 + (x) * 0x1000)
+#define DLB_LSP_QID_AQED_ACTIVE_CNT_RST 0x0
+union dlb_lsp_qid_aqed_active_cnt {
+	struct {
+		u32 count : 12;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_LDB_ENQUEUE_CNT(x) \
+	(0x20000604 + (x) * 0x1000)
+#define DLB_LSP_QID_LDB_ENQUEUE_CNT_RST 0x0
+union dlb_lsp_qid_ldb_enqueue_cnt {
+	struct {
+		u32 count : 15;
+		u32 rsvd0 : 17;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_LDB_REPLAY_CNT(x) \
+	(0x20000600 + (x) * 0x1000)
+#define DLB_LSP_QID_LDB_REPLAY_CNT_RST 0x0
+union dlb_lsp_qid_ldb_replay_cnt {
+	struct {
+		u32 count : 15;
+		u32 rsvd0 : 17;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_QID_DIR_ENQUEUE_CNT(x) \
+	(0x20000700 + (x) * 0x1000)
+#define DLB_LSP_QID_DIR_ENQUEUE_CNT_RST 0x0
+union dlb_lsp_qid_dir_enqueue_cnt {
+	struct {
+		u32 count : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CTRL_CONFIG_0 0x2800002c
+#define DLB_LSP_CTRL_CONFIG_0_RST 0x12cc
+union dlb_lsp_ctrl_config_0 {
+	struct {
+		u32 atm_cq_qid_priority_prot : 1;
+		u32 ldb_arb_ignore_empty : 1;
+		u32 ldb_arb_mode : 2;
+		u32 ldb_arb_threshold : 18;
+		u32 cfg_cq_sla_upd_always : 1;
+		u32 cfg_cq_wcn_upd_always : 1;
+		u32 spare : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1 0x28000028
+#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_1_RST 0x0
+union dlb_lsp_cfg_arb_weight_atm_nalb_qid_1 {
+	struct {
+		u32 slot4_weight : 8;
+		u32 slot5_weight : 8;
+		u32 slot6_weight : 8;
+		u32 slot7_weight : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0 0x28000024
+#define DLB_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_RST 0x0
+union dlb_lsp_cfg_arb_weight_atm_nalb_qid_0 {
+	struct {
+		u32 slot0_weight : 8;
+		u32 slot1_weight : 8;
+		u32 slot2_weight : 8;
+		u32 slot3_weight : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1 0x28000020
+#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_1_RST 0x0
+union dlb_lsp_cfg_arb_weight_ldb_qid_1 {
+	struct {
+		u32 slot4_weight : 8;
+		u32 slot5_weight : 8;
+		u32 slot6_weight : 8;
+		u32 slot7_weight : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0 0x2800001c
+#define DLB_LSP_CFG_ARB_WEIGHT_LDB_QID_0_RST 0x0
+union dlb_lsp_cfg_arb_weight_ldb_qid_0 {
+	struct {
+		u32 slot0_weight : 8;
+		u32 slot1_weight : 8;
+		u32 slot2_weight : 8;
+		u32 slot3_weight : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_LDB_SCHED_CTRL 0x28100000
+#define DLB_LSP_LDB_SCHED_CTRL_RST 0x0
+union dlb_lsp_ldb_sched_ctrl {
+	struct {
+		u32 cq : 8;
+		u32 qidix : 3;
+		u32 value : 1;
+		u32 nalb_haswork_v : 1;
+		u32 rlist_haswork_v : 1;
+		u32 slist_haswork_v : 1;
+		u32 inflight_ok_v : 1;
+		u32 aqed_nfull_v : 1;
+		u32 spare0 : 15;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_DIR_SCH_CNT_H 0x2820000c
+#define DLB_LSP_DIR_SCH_CNT_H_RST 0x0
+union dlb_lsp_dir_sch_cnt_h {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_DIR_SCH_CNT_L 0x28200008
+#define DLB_LSP_DIR_SCH_CNT_L_RST 0x0
+union dlb_lsp_dir_sch_cnt_l {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_LDB_SCH_CNT_H 0x28200004
+#define DLB_LSP_LDB_SCH_CNT_H_RST 0x0
+union dlb_lsp_ldb_sch_cnt_h {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_LSP_LDB_SCH_CNT_L 0x28200000
+#define DLB_LSP_LDB_SCH_CNT_L_RST 0x0
+union dlb_lsp_ldb_sch_cnt_l {
+	struct {
+		u32 count : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_DP_DIR_CSR_CTRL 0x38000018
+#define DLB_DP_DIR_CSR_CTRL_RST 0xc0000000
+union dlb_dp_dir_csr_ctrl {
+	struct {
+		u32 cfg_int_dis : 1;
+		u32 cfg_int_dis_sbe : 1;
+		u32 cfg_int_dis_mbe : 1;
+		u32 spare0 : 27;
+		u32 cfg_vasr_dis : 1;
+		u32 cfg_int_dis_synd : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1 0x38000014
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_1_RST 0xfffefdfc
+union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_1 {
+	struct {
+		u32 pri4 : 8;
+		u32 pri5 : 8;
+		u32 pri6 : 8;
+		u32 pri7 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0 0x38000010
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_DIR_0_RST 0xfbfaf9f8
+union dlb_dp_cfg_ctrl_arb_weights_tqpri_dir_0 {
+	struct {
+		u32 pri0 : 8;
+		u32 pri1 : 8;
+		u32 pri2 : 8;
+		u32 pri3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x3800000c
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
+union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_1 {
+	struct {
+		u32 pri4 : 8;
+		u32 pri5 : 8;
+		u32 pri6 : 8;
+		u32 pri7 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x38000008
+#define DLB_DP_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
+union dlb_dp_cfg_ctrl_arb_weights_tqpri_replay_0 {
+	struct {
+		u32 pri0 : 8;
+		u32 pri1 : 8;
+		u32 pri2 : 8;
+		u32 pri3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1 0x6800001c
+#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_1_RST 0xfffefdfc
+union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_1 {
+	struct {
+		u32 pri4 : 8;
+		u32 pri5 : 8;
+		u32 pri6 : 8;
+		u32 pri7 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0 0x68000018
+#define DLB_NALB_PIPE_CTRL_ARB_WEIGHTS_TQPRI_NALB_0_RST 0xfbfaf9f8
+union dlb_nalb_pipe_ctrl_arb_weights_tqpri_nalb_0 {
+	struct {
+		u32 pri0 : 8;
+		u32 pri1 : 8;
+		u32 pri2 : 8;
+		u32 pri3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1 0x68000014
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_1_RST 0xfffefdfc
+union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_1 {
+	struct {
+		u32 pri4 : 8;
+		u32 pri5 : 8;
+		u32 pri6 : 8;
+		u32 pri7 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0 0x68000010
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATQ_0_RST 0xfbfaf9f8
+union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_atq_0 {
+	struct {
+		u32 pri0 : 8;
+		u32 pri1 : 8;
+		u32 pri2 : 8;
+		u32 pri3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1 0x6800000c
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_1_RST 0xfffefdfc
+union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_1 {
+	struct {
+		u32 pri4 : 8;
+		u32 pri5 : 8;
+		u32 pri6 : 8;
+		u32 pri7 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0 0x68000008
+#define DLB_NALB_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_REPLAY_0_RST 0xfbfaf9f8
+union dlb_nalb_pipe_cfg_ctrl_arb_weights_tqpri_replay_0 {
+	struct {
+		u32 pri0 : 8;
+		u32 pri1 : 8;
+		u32 pri2 : 8;
+		u32 pri3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX(x, y) \
+	(0x70000000 + (x) * 0x1000 + (y) * 0x4)
+#define DLB_ATM_PIPE_QID_LDB_QID2CQIDX_RST 0x0
+union dlb_atm_pipe_qid_ldb_qid2cqidx {
+	struct {
+		u32 cq_p0 : 8;
+		u32 cq_p1 : 8;
+		u32 cq_p2 : 8;
+		u32 cq_p3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN 0x7800000c
+#define DLB_ATM_PIPE_CFG_CTRL_ARB_WEIGHTS_SCHED_BIN_RST 0xfffefdfc
+union dlb_atm_pipe_cfg_ctrl_arb_weights_sched_bin {
+	struct {
+		u32 bin0 : 8;
+		u32 bin1 : 8;
+		u32 bin2 : 8;
+		u32 bin3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN 0x78000008
+#define DLB_ATM_PIPE_CTRL_ARB_WEIGHTS_RDY_BIN_RST 0xfffefdfc
+union dlb_atm_pipe_ctrl_arb_weights_rdy_bin {
+	struct {
+		u32 bin0 : 8;
+		u32 bin1 : 8;
+		u32 bin2 : 8;
+		u32 bin3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_AQED_PIPE_QID_FID_LIM(x) \
+	(0x80000014 + (x) * 0x1000)
+#define DLB_AQED_PIPE_QID_FID_LIM_RST 0x7ff
+union dlb_aqed_pipe_qid_fid_lim {
+	struct {
+		u32 qid_fid_limit : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_AQED_PIPE_FL_POP_PTR(x) \
+	(0x80000010 + (x) * 0x1000)
+#define DLB_AQED_PIPE_FL_POP_PTR_RST 0x0
+union dlb_aqed_pipe_fl_pop_ptr {
+	struct {
+		u32 pop_ptr : 11;
+		u32 generation : 1;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_AQED_PIPE_FL_PUSH_PTR(x) \
+	(0x8000000c + (x) * 0x1000)
+#define DLB_AQED_PIPE_FL_PUSH_PTR_RST 0x0
+union dlb_aqed_pipe_fl_push_ptr {
+	struct {
+		u32 push_ptr : 11;
+		u32 generation : 1;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_AQED_PIPE_FL_BASE(x) \
+	(0x80000008 + (x) * 0x1000)
+#define DLB_AQED_PIPE_FL_BASE_RST 0x0
+union dlb_aqed_pipe_fl_base {
+	struct {
+		u32 base : 11;
+		u32 rsvd0 : 21;
+	} field;
+	u32 val;
+};
+
+#define DLB_AQED_PIPE_FL_LIM(x) \
+	(0x80000004 + (x) * 0x1000)
+#define DLB_AQED_PIPE_FL_LIM_RST 0x800
+union dlb_aqed_pipe_fl_lim {
+	struct {
+		u32 limit : 11;
+		u32 freelist_disable : 1;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0 0x88000008
+#define DLB_AQED_PIPE_CFG_CTRL_ARB_WEIGHTS_TQPRI_ATM_0_RST 0xfffe
+union dlb_aqed_pipe_cfg_ctrl_arb_weights_tqpri_atm_0 {
+	struct {
+		u32 pri0 : 8;
+		u32 pri1 : 8;
+		u32 pri2 : 8;
+		u32 pri3 : 8;
+	} field;
+	u32 val;
+};
+
+#define DLB_RO_PIPE_QID2GRPSLT(x) \
+	(0x90000000 + (x) * 0x1000)
+#define DLB_RO_PIPE_QID2GRPSLT_RST 0x0
+union dlb_ro_pipe_qid2grpslt {
+	struct {
+		u32 slot : 5;
+		u32 rsvd1 : 3;
+		u32 group : 2;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_RO_PIPE_GRP_SN_MODE 0x98000008
+#define DLB_RO_PIPE_GRP_SN_MODE_RST 0x0
+union dlb_ro_pipe_grp_sn_mode {
+	struct {
+		u32 sn_mode_0 : 3;
+		u32 reserved0 : 5;
+		u32 sn_mode_1 : 3;
+		u32 reserved1 : 5;
+		u32 sn_mode_2 : 3;
+		u32 reserved2 : 5;
+		u32 sn_mode_3 : 3;
+		u32 reserved3 : 5;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN(x) \
+	(0xa000003c + (x) * 0x1000)
+#define DLB_CHP_CFG_DIR_PP_SW_ALARM_EN_RST 0x1
+union dlb_chp_cfg_dir_pp_sw_alarm_en {
+	struct {
+		u32 alarm_enable : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_WD_ENB(x) \
+	(0xa0000038 + (x) * 0x1000)
+#define DLB_CHP_DIR_CQ_WD_ENB_RST 0x0
+union dlb_chp_dir_cq_wd_enb {
+	struct {
+		u32 wd_enable : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_LDB_PP2POOL(x) \
+	(0xa0000034 + (x) * 0x1000)
+#define DLB_CHP_DIR_LDB_PP2POOL_RST 0x0
+union dlb_chp_dir_ldb_pp2pool {
+	struct {
+		u32 pool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_DIR_PP2POOL(x) \
+	(0xa0000030 + (x) * 0x1000)
+#define DLB_CHP_DIR_DIR_PP2POOL_RST 0x0
+union dlb_chp_dir_dir_pp2pool {
+	struct {
+		u32 pool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_LDB_CRD_CNT(x) \
+	(0xa000002c + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_LDB_CRD_CNT_RST 0x0
+union dlb_chp_dir_pp_ldb_crd_cnt {
+	struct {
+		u32 count : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_DIR_CRD_CNT(x) \
+	(0xa0000028 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_DIR_CRD_CNT_RST 0x0
+union dlb_chp_dir_pp_dir_crd_cnt {
+	struct {
+		u32 count : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_TMR_THRESHOLD(x) \
+	(0xa0000024 + (x) * 0x1000)
+#define DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST 0x0
+union dlb_chp_dir_cq_tmr_threshold {
+	struct {
+		u32 timer_thrsh : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_INT_ENB(x) \
+	(0xa0000020 + (x) * 0x1000)
+#define DLB_CHP_DIR_CQ_INT_ENB_RST 0x0
+union dlb_chp_dir_cq_int_enb {
+	struct {
+		u32 en_tim : 1;
+		u32 en_depth : 1;
+		u32 rsvd0 : 30;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(x) \
+	(0xa000001c + (x) * 0x1000)
+#define DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST 0x0
+union dlb_chp_dir_cq_int_depth_thrsh {
+	struct {
+		u32 depth_threshold : 12;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(x) \
+	(0xa0000018 + (x) * 0x1000)
+#define DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST 0x0
+union dlb_chp_dir_cq_tkn_depth_sel {
+	struct {
+		u32 token_depth_select : 4;
+		u32 rsvd0 : 28;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(x) \
+	(0xa0000014 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST 0x1
+union dlb_chp_dir_pp_ldb_min_crd_qnt {
+	struct {
+		u32 quanta : 10;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(x) \
+	(0xa0000010 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST 0x1
+union dlb_chp_dir_pp_dir_min_crd_qnt {
+	struct {
+		u32 quanta : 10;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_LDB_CRD_LWM(x) \
+	(0xa000000c + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_LDB_CRD_LWM_RST 0x0
+union dlb_chp_dir_pp_ldb_crd_lwm {
+	struct {
+		u32 lwm : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_LDB_CRD_HWM(x) \
+	(0xa0000008 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_LDB_CRD_HWM_RST 0x0
+union dlb_chp_dir_pp_ldb_crd_hwm {
+	struct {
+		u32 hwm : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_DIR_CRD_LWM(x) \
+	(0xa0000004 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_DIR_CRD_LWM_RST 0x0
+union dlb_chp_dir_pp_dir_crd_lwm {
+	struct {
+		u32 lwm : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_DIR_CRD_HWM(x) \
+	(0xa0000000 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_DIR_CRD_HWM_RST 0x0
+union dlb_chp_dir_pp_dir_crd_hwm {
+	struct {
+		u32 hwm : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN(x) \
+	(0xa0000148 + (x) * 0x1000)
+#define DLB_CHP_CFG_LDB_PP_SW_ALARM_EN_RST 0x1
+union dlb_chp_cfg_ldb_pp_sw_alarm_en {
+	struct {
+		u32 alarm_enable : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_WD_ENB(x) \
+	(0xa0000144 + (x) * 0x1000)
+#define DLB_CHP_LDB_CQ_WD_ENB_RST 0x0
+union dlb_chp_ldb_cq_wd_enb {
+	struct {
+		u32 wd_enable : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_SN_CHK_ENBL(x) \
+	(0xa0000140 + (x) * 0x1000)
+#define DLB_CHP_SN_CHK_ENBL_RST 0x0
+union dlb_chp_sn_chk_enbl {
+	struct {
+		u32 en : 1;
+		u32 rsvd0 : 31;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_HIST_LIST_BASE(x) \
+	(0xa000013c + (x) * 0x1000)
+#define DLB_CHP_HIST_LIST_BASE_RST 0x0
+union dlb_chp_hist_list_base {
+	struct {
+		u32 base : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_HIST_LIST_LIM(x) \
+	(0xa0000138 + (x) * 0x1000)
+#define DLB_CHP_HIST_LIST_LIM_RST 0x0
+union dlb_chp_hist_list_lim {
+	struct {
+		u32 limit : 13;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_LDB_PP2POOL(x) \
+	(0xa0000134 + (x) * 0x1000)
+#define DLB_CHP_LDB_LDB_PP2POOL_RST 0x0
+union dlb_chp_ldb_ldb_pp2pool {
+	struct {
+		u32 pool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_DIR_PP2POOL(x) \
+	(0xa0000130 + (x) * 0x1000)
+#define DLB_CHP_LDB_DIR_PP2POOL_RST 0x0
+union dlb_chp_ldb_dir_pp2pool {
+	struct {
+		u32 pool : 6;
+		u32 rsvd0 : 26;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_LDB_CRD_CNT(x) \
+	(0xa000012c + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_LDB_CRD_CNT_RST 0x0
+union dlb_chp_ldb_pp_ldb_crd_cnt {
+	struct {
+		u32 count : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_DIR_CRD_CNT(x) \
+	(0xa0000128 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_DIR_CRD_CNT_RST 0x0
+union dlb_chp_ldb_pp_dir_crd_cnt {
+	struct {
+		u32 count : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_TMR_THRESHOLD(x) \
+	(0xa0000124 + (x) * 0x1000)
+#define DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST 0x0
+union dlb_chp_ldb_cq_tmr_threshold {
+	struct {
+		u32 thrsh : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_INT_ENB(x) \
+	(0xa0000120 + (x) * 0x1000)
+#define DLB_CHP_LDB_CQ_INT_ENB_RST 0x0
+union dlb_chp_ldb_cq_int_enb {
+	struct {
+		u32 en_tim : 1;
+		u32 en_depth : 1;
+		u32 rsvd0 : 30;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(x) \
+	(0xa000011c + (x) * 0x1000)
+#define DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST 0x0
+union dlb_chp_ldb_cq_int_depth_thrsh {
+	struct {
+		u32 depth_threshold : 12;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(x) \
+	(0xa0000118 + (x) * 0x1000)
+#define DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST 0x0
+union dlb_chp_ldb_cq_tkn_depth_sel {
+	struct {
+		u32 token_depth_select : 4;
+		u32 rsvd0 : 28;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(x) \
+	(0xa0000114 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST 0x1
+union dlb_chp_ldb_pp_ldb_min_crd_qnt {
+	struct {
+		u32 quanta : 10;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(x) \
+	(0xa0000110 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST 0x1
+union dlb_chp_ldb_pp_dir_min_crd_qnt {
+	struct {
+		u32 quanta : 10;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_LDB_CRD_LWM(x) \
+	(0xa000010c + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_LDB_CRD_LWM_RST 0x0
+union dlb_chp_ldb_pp_ldb_crd_lwm {
+	struct {
+		u32 lwm : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_LDB_CRD_HWM(x) \
+	(0xa0000108 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_LDB_CRD_HWM_RST 0x0
+union dlb_chp_ldb_pp_ldb_crd_hwm {
+	struct {
+		u32 hwm : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_DIR_CRD_LWM(x) \
+	(0xa0000104 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_DIR_CRD_LWM_RST 0x0
+union dlb_chp_ldb_pp_dir_crd_lwm {
+	struct {
+		u32 lwm : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_DIR_CRD_HWM(x) \
+	(0xa0000100 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_DIR_CRD_HWM_RST 0x0
+union dlb_chp_ldb_pp_dir_crd_hwm {
+	struct {
+		u32 hwm : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_DEPTH(x) \
+	(0xa0000218 + (x) * 0x1000)
+#define DLB_CHP_DIR_CQ_DEPTH_RST 0x0
+union dlb_chp_dir_cq_depth {
+	struct {
+		u32 cq_depth : 11;
+		u32 rsvd0 : 21;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_WPTR(x) \
+	(0xa0000214 + (x) * 0x1000)
+#define DLB_CHP_DIR_CQ_WPTR_RST 0x0
+union dlb_chp_dir_cq_wptr {
+	struct {
+		u32 write_pointer : 10;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_LDB_PUSH_PTR(x) \
+	(0xa0000210 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST 0x0
+union dlb_chp_dir_pp_ldb_push_ptr {
+	struct {
+		u32 push_pointer : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_DIR_PUSH_PTR(x) \
+	(0xa000020c + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST 0x0
+union dlb_chp_dir_pp_dir_push_ptr {
+	struct {
+		u32 push_pointer : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_STATE_RESET(x) \
+	(0xa0000204 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_STATE_RESET_RST 0x0
+union dlb_chp_dir_pp_state_reset {
+	struct {
+		u32 rsvd1 : 7;
+		u32 dir_type : 1;
+		u32 rsvd0 : 23;
+		u32 reset_pp_state : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_PP_CRD_REQ_STATE(x) \
+	(0xa0000200 + (x) * 0x1000)
+#define DLB_CHP_DIR_PP_CRD_REQ_STATE_RST 0x0
+union dlb_chp_dir_pp_crd_req_state {
+	struct {
+		u32 dir_crd_req_active_valid : 1;
+		u32 dir_crd_req_active_check : 1;
+		u32 dir_crd_req_active_busy : 1;
+		u32 rsvd1 : 1;
+		u32 ldb_crd_req_active_valid : 1;
+		u32 ldb_crd_req_active_check : 1;
+		u32 ldb_crd_req_active_busy : 1;
+		u32 rsvd0 : 1;
+		u32 no_pp_credit_update : 1;
+		u32 crd_req_state : 23;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_DEPTH(x) \
+	(0xa0000320 + (x) * 0x1000)
+#define DLB_CHP_LDB_CQ_DEPTH_RST 0x0
+union dlb_chp_ldb_cq_depth {
+	struct {
+		u32 depth : 11;
+		u32 reserved : 2;
+		u32 rsvd0 : 19;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_WPTR(x) \
+	(0xa000031c + (x) * 0x1000)
+#define DLB_CHP_LDB_CQ_WPTR_RST 0x0
+union dlb_chp_ldb_cq_wptr {
+	struct {
+		u32 write_pointer : 10;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_LDB_PUSH_PTR(x) \
+	(0xa0000318 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST 0x0
+union dlb_chp_ldb_pp_ldb_push_ptr {
+	struct {
+		u32 push_pointer : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_DIR_PUSH_PTR(x) \
+	(0xa0000314 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST 0x0
+union dlb_chp_ldb_pp_dir_push_ptr {
+	struct {
+		u32 push_pointer : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_HIST_LIST_POP_PTR(x) \
+	(0xa000030c + (x) * 0x1000)
+#define DLB_CHP_HIST_LIST_POP_PTR_RST 0x0
+union dlb_chp_hist_list_pop_ptr {
+	struct {
+		u32 pop_ptr : 13;
+		u32 generation : 1;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_HIST_LIST_PUSH_PTR(x) \
+	(0xa0000308 + (x) * 0x1000)
+#define DLB_CHP_HIST_LIST_PUSH_PTR_RST 0x0
+union dlb_chp_hist_list_push_ptr {
+	struct {
+		u32 push_ptr : 13;
+		u32 generation : 1;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_STATE_RESET(x) \
+	(0xa0000304 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_STATE_RESET_RST 0x0
+union dlb_chp_ldb_pp_state_reset {
+	struct {
+		u32 rsvd1 : 7;
+		u32 dir_type : 1;
+		u32 rsvd0 : 23;
+		u32 reset_pp_state : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_PP_CRD_REQ_STATE(x) \
+	(0xa0000300 + (x) * 0x1000)
+#define DLB_CHP_LDB_PP_CRD_REQ_STATE_RST 0x0
+union dlb_chp_ldb_pp_crd_req_state {
+	struct {
+		u32 dir_crd_req_active_valid : 1;
+		u32 dir_crd_req_active_check : 1;
+		u32 dir_crd_req_active_busy : 1;
+		u32 rsvd1 : 1;
+		u32 ldb_crd_req_active_valid : 1;
+		u32 ldb_crd_req_active_check : 1;
+		u32 ldb_crd_req_active_busy : 1;
+		u32 rsvd0 : 1;
+		u32 no_pp_credit_update : 1;
+		u32 crd_req_state : 23;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_ORD_QID_SN(x) \
+	(0xa0000408 + (x) * 0x1000)
+#define DLB_CHP_ORD_QID_SN_RST 0x0
+union dlb_chp_ord_qid_sn {
+	struct {
+		u32 sn : 12;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_ORD_QID_SN_MAP(x) \
+	(0xa0000404 + (x) * 0x1000)
+#define DLB_CHP_ORD_QID_SN_MAP_RST 0x0
+union dlb_chp_ord_qid_sn_map {
+	struct {
+		u32 mode : 3;
+		u32 slot : 5;
+		u32 grp : 2;
+		u32 rsvd0 : 22;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_POOL_CRD_CNT(x) \
+	(0xa000050c + (x) * 0x1000)
+#define DLB_CHP_LDB_POOL_CRD_CNT_RST 0x0
+union dlb_chp_ldb_pool_crd_cnt {
+	struct {
+		u32 count : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_QED_FL_BASE(x) \
+	(0xa0000508 + (x) * 0x1000)
+#define DLB_CHP_QED_FL_BASE_RST 0x0
+union dlb_chp_qed_fl_base {
+	struct {
+		u32 base : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_QED_FL_LIM(x) \
+	(0xa0000504 + (x) * 0x1000)
+#define DLB_CHP_QED_FL_LIM_RST 0x8000
+union dlb_chp_qed_fl_lim {
+	struct {
+		u32 limit : 14;
+		u32 rsvd1 : 1;
+		u32 freelist_disable : 1;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_POOL_CRD_LIM(x) \
+	(0xa0000500 + (x) * 0x1000)
+#define DLB_CHP_LDB_POOL_CRD_LIM_RST 0x0
+union dlb_chp_ldb_pool_crd_lim {
+	struct {
+		u32 limit : 16;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_QED_FL_POP_PTR(x) \
+	(0xa0000604 + (x) * 0x1000)
+#define DLB_CHP_QED_FL_POP_PTR_RST 0x0
+union dlb_chp_qed_fl_pop_ptr {
+	struct {
+		u32 pop_ptr : 14;
+		u32 reserved0 : 1;
+		u32 generation : 1;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_QED_FL_PUSH_PTR(x) \
+	(0xa0000600 + (x) * 0x1000)
+#define DLB_CHP_QED_FL_PUSH_PTR_RST 0x0
+union dlb_chp_qed_fl_push_ptr {
+	struct {
+		u32 push_ptr : 14;
+		u32 reserved0 : 1;
+		u32 generation : 1;
+		u32 rsvd0 : 16;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_POOL_CRD_CNT(x) \
+	(0xa000070c + (x) * 0x1000)
+#define DLB_CHP_DIR_POOL_CRD_CNT_RST 0x0
+union dlb_chp_dir_pool_crd_cnt {
+	struct {
+		u32 count : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DQED_FL_BASE(x) \
+	(0xa0000708 + (x) * 0x1000)
+#define DLB_CHP_DQED_FL_BASE_RST 0x0
+union dlb_chp_dqed_fl_base {
+	struct {
+		u32 base : 12;
+		u32 rsvd0 : 20;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DQED_FL_LIM(x) \
+	(0xa0000704 + (x) * 0x1000)
+#define DLB_CHP_DQED_FL_LIM_RST 0x2000
+union dlb_chp_dqed_fl_lim {
+	struct {
+		u32 limit : 12;
+		u32 rsvd1 : 1;
+		u32 freelist_disable : 1;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_POOL_CRD_LIM(x) \
+	(0xa0000700 + (x) * 0x1000)
+#define DLB_CHP_DIR_POOL_CRD_LIM_RST 0x0
+union dlb_chp_dir_pool_crd_lim {
+	struct {
+		u32 limit : 14;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DQED_FL_POP_PTR(x) \
+	(0xa0000804 + (x) * 0x1000)
+#define DLB_CHP_DQED_FL_POP_PTR_RST 0x0
+union dlb_chp_dqed_fl_pop_ptr {
+	struct {
+		u32 pop_ptr : 12;
+		u32 reserved0 : 1;
+		u32 generation : 1;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DQED_FL_PUSH_PTR(x) \
+	(0xa0000800 + (x) * 0x1000)
+#define DLB_CHP_DQED_FL_PUSH_PTR_RST 0x0
+union dlb_chp_dqed_fl_push_ptr {
+	struct {
+		u32 push_ptr : 12;
+		u32 reserved0 : 1;
+		u32 generation : 1;
+		u32 rsvd0 : 18;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_CTRL_DIAG_02 0xa8000154
+#define DLB_CHP_CTRL_DIAG_02_RST 0x0
+union dlb_chp_ctrl_diag_02 {
+	struct {
+		u32 control : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_CFG_CHP_CSR_CTRL 0xa8000130
+#define DLB_CHP_CFG_CHP_CSR_CTRL_RST 0xc0003fff
+#define DLB_CHP_CFG_EXCESS_TOKENS_SHIFT 12
+union dlb_chp_cfg_chp_csr_ctrl {
+	struct {
+		u32 int_inf_alarm_enable_0 : 1;
+		u32 int_inf_alarm_enable_1 : 1;
+		u32 int_inf_alarm_enable_2 : 1;
+		u32 int_inf_alarm_enable_3 : 1;
+		u32 int_inf_alarm_enable_4 : 1;
+		u32 int_inf_alarm_enable_5 : 1;
+		u32 int_inf_alarm_enable_6 : 1;
+		u32 int_inf_alarm_enable_7 : 1;
+		u32 int_inf_alarm_enable_8 : 1;
+		u32 int_inf_alarm_enable_9 : 1;
+		u32 int_inf_alarm_enable_10 : 1;
+		u32 int_inf_alarm_enable_11 : 1;
+		u32 int_inf_alarm_enable_12 : 1;
+		u32 int_cor_alarm_enable : 1;
+		u32 csr_control_spare : 14;
+		u32 cfg_vasr_dis : 1;
+		u32 counter_clear : 1;
+		u32 blk_cor_report : 1;
+		u32 blk_cor_synd : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_INTR_ARMED1 0xa8000068
+#define DLB_CHP_LDB_CQ_INTR_ARMED1_RST 0x0
+union dlb_chp_ldb_cq_intr_armed1 {
+	struct {
+		u32 armed : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_LDB_CQ_INTR_ARMED0 0xa8000064
+#define DLB_CHP_LDB_CQ_INTR_ARMED0_RST 0x0
+union dlb_chp_ldb_cq_intr_armed0 {
+	struct {
+		u32 armed : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_INTR_ARMED3 0xa8000024
+#define DLB_CHP_DIR_CQ_INTR_ARMED3_RST 0x0
+union dlb_chp_dir_cq_intr_armed3 {
+	struct {
+		u32 armed : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_INTR_ARMED2 0xa8000020
+#define DLB_CHP_DIR_CQ_INTR_ARMED2_RST 0x0
+union dlb_chp_dir_cq_intr_armed2 {
+	struct {
+		u32 armed : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_INTR_ARMED1 0xa800001c
+#define DLB_CHP_DIR_CQ_INTR_ARMED1_RST 0x0
+union dlb_chp_dir_cq_intr_armed1 {
+	struct {
+		u32 armed : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_CHP_DIR_CQ_INTR_ARMED0 0xa8000018
+#define DLB_CHP_DIR_CQ_INTR_ARMED0_RST 0x0
+union dlb_chp_dir_cq_intr_armed0 {
+	struct {
+		u32 armed : 32;
+	} field;
+	u32 val;
+};
+
+#define DLB_CFG_MSTR_DIAG_RESET_STS 0xb8000004
+#define DLB_CFG_MSTR_DIAG_RESET_STS_RST 0x1ff
+union dlb_cfg_mstr_diag_reset_sts {
+	struct {
+		u32 chp_pf_reset_done : 1;
+		u32 rop_pf_reset_done : 1;
+		u32 lsp_pf_reset_done : 1;
+		u32 nalb_pf_reset_done : 1;
+		u32 ap_pf_reset_done : 1;
+		u32 dp_pf_reset_done : 1;
+		u32 qed_pf_reset_done : 1;
+		u32 dqed_pf_reset_done : 1;
+		u32 aqed_pf_reset_done : 1;
+		u32 rsvd1 : 6;
+		u32 pf_reset_active : 1;
+		u32 chp_vf_reset_done : 1;
+		u32 rop_vf_reset_done : 1;
+		u32 lsp_vf_reset_done : 1;
+		u32 nalb_vf_reset_done : 1;
+		u32 ap_vf_reset_done : 1;
+		u32 dp_vf_reset_done : 1;
+		u32 qed_vf_reset_done : 1;
+		u32 dqed_vf_reset_done : 1;
+		u32 aqed_vf_reset_done : 1;
+		u32 rsvd0 : 6;
+		u32 vf_reset_active : 1;
+	} field;
+	u32 val;
+};
+
+#define DLB_CFG_MSTR_BCAST_RESET_VF_START 0xc8100000
+#define DLB_CFG_MSTR_BCAST_RESET_VF_START_RST 0x0
+/* HW Reset Types */
+#define VF_RST_TYPE_CQ_LDB   0
+#define VF_RST_TYPE_QID_LDB  1
+#define VF_RST_TYPE_POOL_LDB 2
+#define VF_RST_TYPE_CQ_DIR   8
+#define VF_RST_TYPE_QID_DIR  9
+#define VF_RST_TYPE_POOL_DIR 10
+union dlb_cfg_mstr_bcast_reset_vf_start {
+	struct {
+		u32 vf_reset_start : 1;
+		u32 reserved : 3;
+		u32 vf_reset_type : 4;
+		u32 vf_reset_id : 24;
+	} field;
+	u32 val;
+};
+
+#endif /* __DLB_REGS_H */
diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
new file mode 100644
index 0000000..9c4267b
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_resource.c
@@ -0,0 +1,302 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include "dlb_hw_types.h"
+#include "../../dlb_user.h"
+#include "dlb_resource.h"
+#include "dlb_osdep.h"
+#include "dlb_osdep_bitmap.h"
+#include "dlb_osdep_types.h"
+#include "dlb_regs.h"
+
+/* Set the cfg_vasr_dis bit in the DP directed CSR control register
+ * (read-modify-write). Presumably "vasr" is a VAS-reordering feature --
+ * TODO confirm against the hardware spec.
+ */
+void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)
+{
+	union dlb_dp_dir_csr_ctrl r0;
+
+	r0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);
+
+	r0.field.cfg_vasr_dis = 1;
+
+	DLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);
+}
+
+/* Enable the excess-token-pops alarm by setting its enable bit in the
+ * CHP CSR control register (read-modify-write).
+ */
+void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)
+{
+	union dlb_chp_cfg_chp_csr_ctrl r0;
+
+	r0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);
+
+	r0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;
+
+	DLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);
+}
+
+/* Enable sparse CQ mode for all load-balanced CQs by setting the
+ * ldb_cq64 bit in the system CQ mode register. NOTE(review): the field
+ * name suggests one QE per 64B cache line -- confirm with the datasheet.
+ */
+void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)
+{
+	union dlb_sys_cq_mode r0;
+
+	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
+
+	r0.field.ldb_cq64 = 1;
+
+	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
+}
+
+/* Enable sparse CQ mode for all directed CQs by setting the dir_cq64
+ * bit in the system CQ mode register. Mirrors
+ * dlb_hw_enable_sparse_ldb_cq_mode() for the directed-port case.
+ */
+void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)
+{
+	union dlb_sys_cq_mode r0;
+
+	r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
+
+	r0.field.dir_cq64 = 1;
+
+	DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
+}
+
+/* Mask the PF->VF ISR-pending error alarm by clearing its enable bit in
+ * the system alarm interrupt enable register.
+ */
+void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)
+{
+	union dlb_sys_sys_alarm_int_enable r0;
+
+	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
+
+	r0.field.pf_to_vf_isr_pend_error = 0;
+
+	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
+}
+
+/* Fill *arg with the PF's currently-available resource counts. Simple
+ * counters come straight from the dlb_function_resources totals; the
+ * bitmap-tracked resources (atomic inflights, history-list entries,
+ * LDB/DIR credits) also report the longest contiguous free range, since
+ * those must be allocated contiguously.
+ */
+void dlb_hw_get_num_resources(struct dlb_hw *hw,
+			      struct dlb_get_num_resources_args *arg)
+{
+	struct dlb_function_resources *rsrcs;
+	struct dlb_bitmap *map;
+
+	rsrcs = &hw->pf;
+
+	arg->num_sched_domains = rsrcs->num_avail_domains;
+
+	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
+
+	arg->num_ldb_ports = rsrcs->num_avail_ldb_ports;
+
+	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
+
+	map = rsrcs->avail_aqed_freelist_entries;
+
+	arg->num_atomic_inflights = dlb_bitmap_count(map);
+
+	arg->max_contiguous_atomic_inflights =
+		dlb_bitmap_longest_set_range(map);
+
+	map = rsrcs->avail_hist_list_entries;
+
+	arg->num_hist_list_entries = dlb_bitmap_count(map);
+
+	arg->max_contiguous_hist_list_entries =
+		dlb_bitmap_longest_set_range(map);
+
+	map = rsrcs->avail_qed_freelist_entries;
+
+	arg->num_ldb_credits = dlb_bitmap_count(map);
+
+	arg->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);
+
+	map = rsrcs->avail_dqed_freelist_entries;
+
+	arg->num_dir_credits = dlb_bitmap_count(map);
+
+	arg->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);
+
+	arg->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;
+
+	arg->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;
+}
+
+/* Initialize all per-function (PF) resource list heads to empty. */
+static void dlb_init_fn_rsrc_lists(struct dlb_function_resources *rsrc)
+{
+	dlb_list_init_head(&rsrc->avail_domains);
+	dlb_list_init_head(&rsrc->used_domains);
+	dlb_list_init_head(&rsrc->avail_ldb_queues);
+	dlb_list_init_head(&rsrc->avail_ldb_ports);
+	dlb_list_init_head(&rsrc->avail_dir_pq_pairs);
+	dlb_list_init_head(&rsrc->avail_ldb_credit_pools);
+	dlb_list_init_head(&rsrc->avail_dir_credit_pools);
+}
+
+/* Initialize all per-domain resource list heads (used and available) to
+ * empty.
+ */
+static void dlb_init_domain_rsrc_lists(struct dlb_domain *domain)
+{
+	dlb_list_init_head(&domain->used_ldb_queues);
+	dlb_list_init_head(&domain->used_ldb_ports);
+	dlb_list_init_head(&domain->used_dir_pq_pairs);
+	dlb_list_init_head(&domain->used_ldb_credit_pools);
+	dlb_list_init_head(&domain->used_dir_credit_pools);
+	dlb_list_init_head(&domain->avail_ldb_queues);
+	dlb_list_init_head(&domain->avail_ldb_ports);
+	dlb_list_init_head(&domain->avail_dir_pq_pairs);
+	dlb_list_init_head(&domain->avail_ldb_credit_pools);
+	dlb_list_init_head(&domain->avail_dir_credit_pools);
+}
+
+/* Initialize the device's software resource-tracking state: zero the
+ * tracking structures, seed the PF's available-resource lists with every
+ * hardware resource, allocate and fill the contiguous-allocation bitmaps,
+ * and assign each resource its hardware ID.
+ *
+ * Returns 0 on success, -1 if a bitmap allocation fails.
+ *
+ * NOTE(review): on a mid-function bitmap allocation failure, bitmaps
+ * allocated earlier in this function are not freed here -- presumably
+ * the caller is expected to invoke dlb_resource_free() on error; confirm.
+ */
+int dlb_resource_init(struct dlb_hw *hw)
+{
+	struct dlb_list_entry *list;
+	unsigned int i;
+
+	/* For optimal load-balancing, ports that map to one or more QIDs in
+	 * common should not be in numerical sequence. This is application
+	 * dependent, but the driver interleaves port IDs as much as possible
+	 * to reduce the likelihood of this. This initial allocation maximizes
+	 * the average distance between an ID and its immediate neighbors (i.e.
+	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
+	 * 3, etc.).
+	 */
+	u32 init_ldb_port_allocation[DLB_MAX_NUM_LDB_PORTS] = {
+		0,  31, 62, 29, 60, 27, 58, 25, 56, 23, 54, 21, 52, 19, 50, 17,
+		48, 15, 46, 13, 44, 11, 42,  9, 40,  7, 38,  5, 36,  3, 34, 1,
+		32, 63, 30, 61, 28, 59, 26, 57, 24, 55, 22, 53, 20, 51, 18, 49,
+		16, 47, 14, 45, 12, 43, 10, 41,  8, 39,  6, 37,  4, 35,  2, 33
+	};
+
+	/* Zero-out resource tracking data structures */
+	memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
+	memset(&hw->pf, 0, sizeof(hw->pf));
+
+	dlb_init_fn_rsrc_lists(&hw->pf);
+
+	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
+		memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
+		dlb_init_domain_rsrc_lists(&hw->domains[i]);
+		hw->domains[i].parent_func = &hw->pf;
+	}
+
+	/* Give all resources to the PF driver */
+	hw->pf.num_avail_domains = DLB_MAX_NUM_DOMAINS;
+	for (i = 0; i < hw->pf.num_avail_domains; i++) {
+		list = &hw->domains[i].func_list;
+
+		dlb_list_add(&hw->pf.avail_domains, list);
+	}
+
+	hw->pf.num_avail_ldb_queues = DLB_MAX_NUM_LDB_QUEUES;
+	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
+		list = &hw->rsrcs.ldb_queues[i].func_list;
+
+		dlb_list_add(&hw->pf.avail_ldb_queues, list);
+	}
+
+	/* LDB ports are inserted in the interleaved order described above,
+	 * not in numerical order.
+	 */
+	hw->pf.num_avail_ldb_ports = DLB_MAX_NUM_LDB_PORTS;
+	for (i = 0; i < hw->pf.num_avail_ldb_ports; i++) {
+		struct dlb_ldb_port *port;
+
+		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
+
+		dlb_list_add(&hw->pf.avail_ldb_ports, &port->func_list);
+	}
+
+	hw->pf.num_avail_dir_pq_pairs = DLB_MAX_NUM_DIR_PORTS;
+	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
+		list = &hw->rsrcs.dir_pq_pairs[i].func_list;
+
+		dlb_list_add(&hw->pf.avail_dir_pq_pairs, list);
+	}
+
+	hw->pf.num_avail_ldb_credit_pools = DLB_MAX_NUM_LDB_CREDIT_POOLS;
+	for (i = 0; i < hw->pf.num_avail_ldb_credit_pools; i++) {
+		list = &hw->rsrcs.ldb_credit_pools[i].func_list;
+
+		dlb_list_add(&hw->pf.avail_ldb_credit_pools, list);
+	}
+
+	hw->pf.num_avail_dir_credit_pools = DLB_MAX_NUM_DIR_CREDIT_POOLS;
+	for (i = 0; i < hw->pf.num_avail_dir_credit_pools; i++) {
+		list = &hw->rsrcs.dir_credit_pools[i].func_list;
+
+		dlb_list_add(&hw->pf.avail_dir_credit_pools, list);
+	}
+
+	/* There are 5120 history list entries, which allows us to overprovision
+	 * the inflight limit (4096) by 1k.
+	 */
+	if (dlb_bitmap_alloc(hw,
+			     &hw->pf.avail_hist_list_entries,
+			     DLB_MAX_NUM_HIST_LIST_ENTRIES))
+		return -1;
+
+	if (dlb_bitmap_fill(hw->pf.avail_hist_list_entries))
+		return -1;
+
+	if (dlb_bitmap_alloc(hw,
+			     &hw->pf.avail_qed_freelist_entries,
+			     DLB_MAX_NUM_LDB_CREDITS))
+		return -1;
+
+	if (dlb_bitmap_fill(hw->pf.avail_qed_freelist_entries))
+		return -1;
+
+	if (dlb_bitmap_alloc(hw,
+			     &hw->pf.avail_dqed_freelist_entries,
+			     DLB_MAX_NUM_DIR_CREDITS))
+		return -1;
+
+	if (dlb_bitmap_fill(hw->pf.avail_dqed_freelist_entries))
+		return -1;
+
+	if (dlb_bitmap_alloc(hw,
+			     &hw->pf.avail_aqed_freelist_entries,
+			     DLB_MAX_NUM_AQOS_ENTRIES))
+		return -1;
+
+	if (dlb_bitmap_fill(hw->pf.avail_aqed_freelist_entries))
+		return -1;
+
+	/* Initialize the hardware resource IDs */
+	for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++)
+		hw->domains[i].id = i;
+
+	for (i = 0; i < DLB_MAX_NUM_LDB_QUEUES; i++)
+		hw->rsrcs.ldb_queues[i].id = i;
+
+	for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
+		hw->rsrcs.ldb_ports[i].id = i;
+
+	for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
+		hw->rsrcs.dir_pq_pairs[i].id = i;
+
+	for (i = 0; i < DLB_MAX_NUM_LDB_CREDIT_POOLS; i++)
+		hw->rsrcs.ldb_credit_pools[i].id = i;
+
+	for (i = 0; i < DLB_MAX_NUM_DIR_CREDIT_POOLS; i++)
+		hw->rsrcs.dir_credit_pools[i].id = i;
+
+	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
+		hw->rsrcs.sn_groups[i].id = i;
+		/* Default mode (0) is 32 sequence numbers per queue */
+		hw->rsrcs.sn_groups[i].mode = 0;
+		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 32;
+		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
+	}
+
+	return 0;
+}
+
+/* Free the four bitmaps allocated by dlb_resource_init().
+ * NOTE(review): assumes dlb_bitmap_free() tolerates a NULL/unallocated
+ * bitmap if init failed partway -- confirm in dlb_osdep_bitmap.h.
+ */
+void dlb_resource_free(struct dlb_hw *hw)
+{
+	dlb_bitmap_free(hw->pf.avail_hist_list_entries);
+
+	dlb_bitmap_free(hw->pf.avail_qed_freelist_entries);
+
+	dlb_bitmap_free(hw->pf.avail_dqed_freelist_entries);
+
+	dlb_bitmap_free(hw->pf.avail_aqed_freelist_entries);
+}
+
+/* Mask the VF->PF ISR-pending error alarm by clearing its enable bit in
+ * the system alarm interrupt enable register. Mirrors
+ * dlb_hw_disable_pf_to_vf_isr_pend_err() for the opposite direction.
+ */
+void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)
+{
+	union dlb_sys_sys_alarm_int_enable r0;
+
+	r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
+
+	r0.field.vf_to_pf_isr_pend_error = 0;
+
+	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
+}
diff --git a/drivers/event/dlb/pf/base/dlb_resource.h b/drivers/event/dlb/pf/base/dlb_resource.h
new file mode 100644
index 0000000..4f48b73
--- /dev/null
+++ b/drivers/event/dlb/pf/base/dlb_resource.h
@@ -0,0 +1,876 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_RESOURCE_H
+#define __DLB_RESOURCE_H
+
+#include "dlb_hw_types.h"
+#include "dlb_osdep_types.h"
+
+/**
+ * dlb_resource_init() - initialize the device
+ * @hw: pointer to struct dlb_hw.
+ *
+ * This function initializes the device's software state (pointed to by the hw
+ * argument) and programs global scheduling QoS registers. This function should
+ * be called during driver initialization.
+ *
+ * The dlb_hw struct must be unique per DLB device and persist until the device
+ * is reset.
+ *
+ * Return:
+ * Returns 0 upon success, -1 otherwise.
+ */
+int dlb_resource_init(struct dlb_hw *hw);
+
+/**
+ * dlb_resource_free() - free device state memory
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function frees software state pointed to by dlb_hw. This function
+ * should be called when resetting the device or unloading the driver.
+ */
+void dlb_resource_free(struct dlb_hw *hw);
+
+/**
+ * dlb_resource_reset() - reset in-use resources to their initial state
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function resets in-use resources, and makes them available for use.
+ */
+void dlb_resource_reset(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_create_sched_domain() - create a scheduling domain
+ * @hw: dlb_hw handle for a particular device.
+ * @args: scheduling domain creation arguments.
+ * @resp: response structure.
+ *
+ * This function creates a scheduling domain containing the resources specified
+ * in args. The individual resources (queues, ports, credit pools) can be
+ * configured after creating a scheduling domain.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the domain ID.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, or the requested domain name
+ *	    is already in use.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_create_sched_domain(struct dlb_hw *hw,
+			       struct dlb_create_sched_domain_args *args,
+			       struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_create_ldb_pool() - create a load-balanced credit pool
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: credit pool creation arguments.
+ * @resp: response structure.
+ *
+ * This function creates a load-balanced credit pool containing the number of
+ * requested credits.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the pool ID.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, the domain is not configured,
+ *	    or the domain has already been started.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_create_ldb_pool_args *args,
+			   struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_create_dir_pool() - create a directed credit pool
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: credit pool creation arguments.
+ * @resp: response structure.
+ *
+ * This function creates a directed credit pool containing the number of
+ * requested credits.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the pool ID.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, the domain is not configured,
+ *	    or the domain has already been started.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_create_dir_pool(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_create_dir_pool_args *args,
+			   struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_create_ldb_queue() - create a load-balanced queue
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: queue creation arguments.
+ * @resp: response structure.
+ *
+ * This function creates a load-balanced queue.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the queue ID.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, the domain is not configured,
+ *	    the domain has already been started, or the requested queue name is
+ *	    already in use.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
+			    u32 domain_id,
+			    struct dlb_create_ldb_queue_args *args,
+			    struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_create_dir_queue() - create a directed queue
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: queue creation arguments.
+ * @resp: response structure.
+ *
+ * This function creates a directed queue.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the queue ID.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, the domain is not configured,
+ *	    or the domain has already been started.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_create_dir_queue(struct dlb_hw *hw,
+			    u32 domain_id,
+			    struct dlb_create_dir_queue_args *args,
+			    struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_create_dir_port() - create a directed port
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port creation arguments.
+ * @pop_count_dma_base: base address of the pop count memory. This can be
+ *			a PA or an IOVA.
+ * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
+ * @resp: response structure.
+ *
+ * This function creates a directed port.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the port ID.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
+ *	    pool ID is invalid, a pointer address is not properly aligned, the
+ *	    domain is not configured, or the domain has already been started.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_create_dir_port(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_create_dir_port_args *args,
+			   u64 pop_count_dma_base,
+			   u64 cq_dma_base,
+			   struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_create_ldb_port() - create a load-balanced port
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port creation arguments.
+ * @pop_count_dma_base: base address of the pop count memory. This can be
+ *			 a PA or an IOVA.
+ * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
+ * @resp: response structure.
+ *
+ * This function creates a load-balanced port.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the port ID.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
+ *	    pool ID is invalid, a pointer address is not properly aligned, the
+ *	    domain is not configured, or the domain has already been started.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_create_ldb_port(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_create_ldb_port_args *args,
+			   u64 pop_count_dma_base,
+			   u64 cq_dma_base,
+			   struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_start_domain() - start a scheduling domain
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: start domain arguments.
+ * @resp: response structure.
+ *
+ * This function starts a scheduling domain, which allows applications to send
+ * traffic through it. Once a domain is started, its resources can no longer be
+ * configured (besides QID remapping and port enable/disable).
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error.
+ *
+ * Errors:
+ * EINVAL - the domain is not configured, or the domain is already started.
+ */
+int dlb_hw_start_domain(struct dlb_hw *hw,
+			u32 domain_id,
+			struct dlb_start_domain_args *args,
+			struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_map_qid() - map a load-balanced queue to a load-balanced port
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: map QID arguments.
+ * @resp: response structure.
+ *
+ * This function configures the DLB to schedule QEs from the specified queue to
+ * the specified port. Each load-balanced port can be mapped to up to 8 queues;
+ * each load-balanced queue can potentially map to all the load-balanced ports.
+ *
+ * A successful return does not necessarily mean the mapping was configured. If
+ * this function is unable to immediately map the queue to the port, it will
+ * add the requested operation to a per-port list of pending map/unmap
+ * operations, and (if it's not already running) launch a kernel thread that
+ * periodically attempts to process all pending operations. In a sense, this is
+ * an asynchronous function.
+ *
+ * This asynchronicity creates two views of the state of hardware: the actual
+ * hardware state and the requested state (as if every request completed
+ * immediately). If there are any pending map/unmap operations, the requested
+ * state will differ from the actual state. All validation is performed with
+ * respect to the pending state; for instance, if there are 8 pending map
+ * operations for port X, a request for a 9th will fail because a load-balanced
+ * port can only map up to 8 queues.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
+ *	    the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_map_qid(struct dlb_hw *hw,
+		   u32 domain_id,
+		   struct dlb_map_qid_args *args,
+		   struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: unmap QID arguments.
+ * @resp: response structure.
+ *
+ * This function configures the DLB to stop scheduling QEs from the specified
+ * queue to the specified port.
+ *
+ * A successful return does not necessarily mean the mapping was removed. If
+ * this function is unable to immediately unmap the queue from the port, it
+ * will add the requested operation to a per-port list of pending map/unmap
+ * operations, and (if it's not already running) launch a kernel thread that
+ * periodically attempts to process all pending operations. See
+ * dlb_hw_map_qid() for more details.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
+ *	    the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_unmap_qid(struct dlb_hw *hw,
+		     u32 domain_id,
+		     struct dlb_unmap_qid_args *args,
+		     struct dlb_cmd_response *resp);
+
+/**
+ * dlb_finish_unmap_qid_procedures() - finish any pending unmap procedures
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function attempts to finish any outstanding unmap procedures.
+ * This function should be called by the kernel thread responsible for
+ * finishing map/unmap procedures.
+ *
+ * Return:
+ * Returns the number of procedures that weren't completed.
+ */
+unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw);
+
+/**
+ * dlb_finish_map_qid_procedures() - finish any pending map procedures
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function attempts to finish any outstanding map procedures.
+ * This function should be called by the kernel thread responsible for
+ * finishing map/unmap procedures.
+ *
+ * Return:
+ * Returns the number of procedures that weren't completed.
+ */
+unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_enable_ldb_port() - enable a load-balanced port for scheduling
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port enable arguments.
+ * @resp: response structure.
+ *
+ * This function configures the DLB to schedule QEs to a load-balanced port.
+ * Ports are enabled by default.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid or the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_enable_ldb_port(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_enable_ldb_port_args *args,
+			   struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_disable_ldb_port() - disable a load-balanced port for scheduling
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port disable arguments.
+ * @resp: response structure.
+ *
+ * This function configures the DLB to stop scheduling QEs to a load-balanced
+ * port. Ports are enabled by default.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid or the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_disable_ldb_port(struct dlb_hw *hw,
+			    u32 domain_id,
+			    struct dlb_disable_ldb_port_args *args,
+			    struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_enable_dir_port() - enable a directed port for scheduling
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port enable arguments.
+ * @resp: response structure.
+ *
+ * This function configures the DLB to schedule QEs to a directed port.
+ * Ports are enabled by default.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid or the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_enable_dir_port(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_enable_dir_port_args *args,
+			   struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_disable_dir_port() - disable a directed port for scheduling
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port disable arguments.
+ * @resp: response structure.
+ *
+ * This function configures the DLB to stop scheduling QEs to a directed port.
+ * Ports are enabled by default.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid or the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb_hw_disable_dir_port(struct dlb_hw *hw,
+			    u32 domain_id,
+			    struct dlb_disable_dir_port_args *args,
+			    struct dlb_cmd_response *resp);
+
+/**
+ * dlb_configure_ldb_cq_interrupt() - configure load-balanced CQ for interrupts
+ * @hw: dlb_hw handle for a particular device.
+ * @port_id: load-balanced port ID.
+ * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
+ *	    else a value up to 64.
+ * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
+ * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
+ *	greater than 0.
+ *
+ * This function configures the DLB registers for load-balanced CQ's interrupts.
+ * This doesn't enable the CQ's interrupt; that can be done with
+ * dlb_arm_cq_interrupt() or through an interrupt arm QE.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid.
+ */
+int dlb_configure_ldb_cq_interrupt(struct dlb_hw *hw,
+				   int port_id,
+				   int vector,
+				   int mode,
+				   u16 threshold);
+
+/**
+ * dlb_configure_dir_cq_interrupt() - configure directed CQ for interrupts
+ * @hw: dlb_hw handle for a particular device.
+ * @port_id: directed port ID.
+ * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
+ *	    else a value up to 64.
+ * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
+ * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
+ *	greater than 0.
+ *
+ * This function configures the DLB registers for directed CQ's interrupts.
+ * This doesn't enable the CQ's interrupt; that can be done with
+ * dlb_arm_cq_interrupt() or through an interrupt arm QE.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid.
+ */
+int dlb_configure_dir_cq_interrupt(struct dlb_hw *hw,
+				   int port_id,
+				   int vector,
+				   int mode,
+				   u16 threshold);
+
+/**
+ * dlb_enable_alarm_interrupts() - enable certain hardware alarm interrupts
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function configures the ingress error alarm. (Other alarms are enabled
+ * by default.)
+ */
+void dlb_enable_alarm_interrupts(struct dlb_hw *hw);
+
+/**
+ * dlb_disable_alarm_interrupts() - disable certain hardware alarm interrupts
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function configures the ingress error alarm. (Other alarms are disabled
+ * by default.)
+ */
+void dlb_disable_alarm_interrupts(struct dlb_hw *hw);
+
+/**
+ * dlb_set_msix_mode() - configure the hardware MSI-X interrupt mode
+ * @hw: dlb_hw handle for a particular device.
+ * @mode: MSI-X mode (DLB_MSIX_MODE_PACKED or DLB_MSIX_MODE_COMPRESSED)
+ *
+ * This function configures the hardware to use either packed or compressed
+ * mode. This function should not be called if using MSI interrupts.
+ */
+void dlb_set_msix_mode(struct dlb_hw *hw, int mode);
+
+/**
+ * dlb_arm_cq_interrupt() - arm a CQ's interrupt
+ * @hw: dlb_hw handle for a particular device.
+ * @port_id: port ID
+ * @is_ldb: true for load-balanced port, false for a directed port
+ *
+ * This function arms the CQ's interrupt. The CQ must be configured prior to
+ * calling this function.
+ *
+ * The function does no parameter validation; that is the caller's
+ * responsibility.
+ *
+ * Return: returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - Invalid port ID.
+ */
+int dlb_arm_cq_interrupt(struct dlb_hw *hw, int port_id, bool is_ldb);
+
+/**
+ * dlb_read_compressed_cq_intr_status() - read compressed CQ interrupt status
+ * @hw: dlb_hw handle for a particular device.
+ * @ldb_interrupts: 2-entry array of u32 bitmaps
+ * @dir_interrupts: 4-entry array of u32 bitmaps
+ *
+ * This function can be called from a compressed CQ interrupt handler to
+ * determine which CQ interrupts have fired. The caller should take appropriate
+ * (such as waking threads blocked on a CQ's interrupt) then ack the interrupts
+ * with dlb_ack_compressed_cq_intr().
+ */
+void dlb_read_compressed_cq_intr_status(struct dlb_hw *hw,
+					u32 *ldb_interrupts,
+					u32 *dir_interrupts);
+
+/**
+ * dlb_ack_compressed_cq_intr() - ack compressed CQ interrupts
+ * @hw: dlb_hw handle for a particular device.
+ * @ldb_interrupts: 2-entry array of u32 bitmaps
+ * @dir_interrupts: 4-entry array of u32 bitmaps
+ *
+ * This function ACKs compressed CQ interrupts. Its arguments should be the
+ * same ones passed to dlb_read_compressed_cq_intr_status().
+ */
+void dlb_ack_compressed_cq_intr(struct dlb_hw *hw,
+				u32 *ldb_interrupts,
+				u32 *dir_interrupts);
+
+/**
+ * dlb_process_alarm_interrupt() - process an alarm interrupt
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function reads the alarm syndrome, logs it, and acks the interrupt.
+ * This function should be called from the alarm interrupt handler when
+ * interrupt vector DLB_INT_ALARM fires.
+ */
+void dlb_process_alarm_interrupt(struct dlb_hw *hw);
+
+/**
+ * dlb_process_ingress_error_interrupt() - process ingress error interrupts
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function reads the alarm syndrome, logs it, notifies user-space, and
+ * acks the interrupt. This function should be called from the alarm interrupt
+ * handler when interrupt vector DLB_INT_INGRESS_ERROR fires.
+ */
+void dlb_process_ingress_error_interrupt(struct dlb_hw *hw);
+
+/**
+ * dlb_get_group_sequence_numbers() - return a group's number of SNs per queue
+ * @hw: dlb_hw handle for a particular device.
+ * @group_id: sequence number group ID.
+ *
+ * This function returns the configured number of sequence numbers per queue
+ * for the specified group.
+ *
+ * Return:
+ * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
+ */
+int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id);
+
+/**
+ * dlb_get_group_sequence_number_occupancy() - return a group's in-use slots
+ * @hw: dlb_hw handle for a particular device.
+ * @group_id: sequence number group ID.
+ *
+ * This function returns the group's number of in-use slots (i.e. load-balanced
+ * queues using the specified group).
+ *
+ * Return:
+ * Returns -EINVAL if group_id is invalid, else the group's occupancy.
+ */
+int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
+					    unsigned int group_id);
+
+/**
+ * dlb_set_group_sequence_numbers() - assign a group's number of SNs per queue
+ * @hw: dlb_hw handle for a particular device.
+ * @group_id: sequence number group ID.
+ * @val: requested amount of sequence numbers per queue.
+ *
+ * This function configures the group's number of sequence numbers per queue.
+ * val can be a power-of-two between 32 and 1024, inclusive. This setting can
+ * be configured until the first ordered load-balanced queue is configured, at
+ * which point the configuration is locked.
+ *
+ * Return:
+ * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
+ * ordered queue is configured.
+ */
+int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
+				   unsigned int group_id,
+				   unsigned long val);
+
+/**
+ * dlb_reset_domain() - reset a scheduling domain
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ *
+ * This function resets and frees a DLB scheduling domain and its associated
+ * resources.
+ *
+ * Pre-condition: the driver must ensure software has stopped sending QEs
+ * through this domain's producer ports before invoking this function, or
+ * undefined behavior will result.
+ *
+ * Return:
+ * Returns 0 upon success, -1 otherwise.
+ *
+ * EINVAL - Invalid domain ID, or the domain is not configured.
+ * EFAULT - Internal error. (Possibly caused if the pre-condition above
+ *	    is not met.)
+ * ETIMEDOUT - Hardware component didn't reset in the expected time.
+ */
+int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id);
+
+/**
+ * dlb_ldb_port_owned_by_domain() - query whether a port is owned by a domain
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @port_id: port ID.
+ *
+ * This function returns whether a load-balanced port is owned by a specified
+ * domain.
+ *
+ * Return:
+ * Returns 0 if false, 1 if true, <0 otherwise.
+ *
+ * EINVAL - Invalid domain or port ID, or the domain is not configured.
+ */
+int dlb_ldb_port_owned_by_domain(struct dlb_hw *hw,
+				 u32 domain_id,
+				 u32 port_id);
+
+/**
+ * dlb_dir_port_owned_by_domain() - query whether a port is owned by a domain
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @port_id: port ID.
+ *
+ * This function returns whether a directed port is owned by a specified
+ * domain.
+ *
+ * Return:
+ * Returns 0 if false, 1 if true, <0 otherwise.
+ *
+ * EINVAL - Invalid domain or port ID, or the domain is not configured.
+ */
+int dlb_dir_port_owned_by_domain(struct dlb_hw *hw,
+				 u32 domain_id,
+				 u32 port_id);
+
+/**
+ * dlb_hw_get_num_resources() - query the PCI function's available resources
+ * @arg: pointer to resource counts.
+ *
+ * This function returns the number of available resources for the PF.
+ */
+void dlb_hw_get_num_resources(struct dlb_hw *hw,
+			      struct dlb_get_num_resources_args *arg);
+
+/**
+ * dlb_hw_get_num_used_resources() - query the PCI function's used resources
+ * @arg: pointer to resource counts.
+ *
+ * This function returns the number of resources in use by the PF. It fills in
+ * the fields that args points to, except the following:
+ * - max_contiguous_atomic_inflights
+ * - max_contiguous_hist_list_entries
+ * - max_contiguous_ldb_credits
+ * - max_contiguous_dir_credits
+ */
+void dlb_hw_get_num_used_resources(struct dlb_hw *hw,
+				   struct dlb_get_num_resources_args *arg);
+
+/**
+ * dlb_disable_dp_vasr_feature() - disable directed pipe VAS reset hardware
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function disables certain hardware in the directed pipe,
+ * necessary to work around a DLB VAS reset issue.
+ */
+void dlb_disable_dp_vasr_feature(struct dlb_hw *hw);
+
+/**
+ * dlb_enable_excess_tokens_alarm() - enable interrupts for the excess token
+ * pop alarm
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function enables the PF ingress error alarm interrupt to fire when an
+ * excess token pop occurs.
+ */
+void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw);
+
+/**
+ * dlb_disable_excess_tokens_alarm() - disable interrupts for the excess token
+ * pop alarm
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function disables the PF ingress error alarm interrupt that fires when
+ * an excess token pop occurs.
+ */
+void dlb_disable_excess_tokens_alarm(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: queue depth args
+ *
+ * This function returns the depth of a load-balanced queue.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the depth.
+ *
+ * Errors:
+ * EINVAL - Invalid domain ID or queue ID.
+ */
+int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
+			       u32 domain_id,
+			       struct dlb_get_ldb_queue_depth_args *args,
+			       struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_get_dir_queue_depth() - returns the depth of a directed queue
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: queue depth args
+ *
+ * This function returns the depth of a directed queue.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the depth.
+ *
+ * Errors:
+ * EINVAL - Invalid domain ID or queue ID.
+ */
+int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
+			       u32 domain_id,
+			       struct dlb_get_dir_queue_depth_args *args,
+			       struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_pending_port_unmaps() - returns the number of unmap operations in
+ *	progress for a load-balanced port.
+ * @hw: dlb_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: number of unmaps in progress args
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb_error. If successful, resp->id
+ * contains the number of unmaps in progress.
+ *
+ * Errors:
+ * EINVAL - Invalid port ID.
+ */
+int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
+			       u32 domain_id,
+			       struct dlb_pending_port_unmaps_args *args,
+			       struct dlb_cmd_response *resp);
+
+/**
+ * dlb_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
+ *	ports.
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function must be called prior to configuring scheduling domains.
+ */
+void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports
+ * @hw: dlb_hw handle for a particular device.
+ *
+ * This function must be called prior to configuring scheduling domains.
+ */
+void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_set_qe_arbiter_weights() - program QE arbiter weights
+ * @hw: dlb_hw handle for a particular device.
+ * @weight: 8-entry array of arbiter weights.
+ *
+ * weight[N] programs priority N's weight. In cases where the 8 priorities are
+ * reduced to 4 bins, the mapping is:
+ * - weight[1] programs bin 0
+ * - weight[3] programs bin 1
+ * - weight[5] programs bin 2
+ * - weight[7] programs bin 3
+ */
+void dlb_hw_set_qe_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
+
+/**
+ * dlb_hw_set_qid_arbiter_weights() - program QID arbiter weights
+ * @hw: dlb_hw handle for a particular device.
+ * @weight: 8-entry array of arbiter weights.
+ *
+ * weight[N] programs priority N's weight. In cases where the 8 priorities are
+ * reduced to 4 bins, the mapping is:
+ * - weight[1] programs bin 0
+ * - weight[3] programs bin 1
+ * - weight[5] programs bin 2
+ * - weight[7] programs bin 3
+ */
+void dlb_hw_set_qid_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
+
+/**
+ * dlb_hw_enable_pp_sw_alarms() - enable out-of-credit alarm for all producer
+ * ports
+ * @hw: dlb_hw handle for a particular device.
+ */
+void dlb_hw_enable_pp_sw_alarms(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_disable_pp_sw_alarms() - disable out-of-credit alarm for all producer
+ * ports
+ * @hw: dlb_hw handle for a particular device.
+ */
+void dlb_hw_disable_pp_sw_alarms(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_disable_pf_to_vf_isr_pend_err() - disable alarm triggered by PF
+ *	access to VF's ISR pending register
+ * @hw: dlb_hw handle for a particular device.
+ */
+void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw);
+
+/**
+ * dlb_hw_disable_vf_to_pf_isr_pend_err() - disable alarm triggered by VF
+ *	access to PF's ISR pending register
+ * @hw: dlb_hw handle for a particular device.
+ */
+void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw);
+
+#endif /* __DLB_RESOURCE_H */
diff --git a/drivers/event/dlb/pf/dlb_main.c b/drivers/event/dlb/pf/dlb_main.c
new file mode 100644
index 0000000..714ae5a
--- /dev/null
+++ b/drivers/event/dlb/pf/dlb_main.c
@@ -0,0 +1,591 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "base/dlb_resource.h"
+#include "base/dlb_osdep.h"
+#include "base/dlb_regs.h"
+#include "../dlb_priv.h"
+#include "../dlb_inline_fns.h"
+#include "../dlb_user.h"
+#include "dlb_main.h"
+
+unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
+
+#define DLB_PCI_CFG_SPACE_SIZE 256
+#define DLB_PCI_CAP_POINTER 0x34
+#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
+#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
+#define DLB_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)
+#define DLB_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)
+#define DLB_PCI_EXT_CAP_ID_ERR 1
+#define DLB_PCI_ERR_UNCOR_MASK 8
+#define DLB_PCI_ERR_UNC_UNSUP  0x00100000
+
+#define DLB_PCI_EXP_DEVCTL 8
+#define DLB_PCI_LNKCTL 16
+#define DLB_PCI_SLTCTL 24
+#define DLB_PCI_RTCTL 28
+#define DLB_PCI_EXP_DEVCTL2 40
+#define DLB_PCI_LNKCTL2 48
+#define DLB_PCI_SLTCTL2 56
+#define DLB_PCI_CMD 4
+#define DLB_PCI_X_CMD 2
+#define DLB_PCI_EXP_DEVSTA 10
+#define DLB_PCI_EXP_DEVSTA_TRPND 0x20
+#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000
+#define DLB_PCI_PASID_CTRL 6
+#define DLB_PCI_PASID_CAP 4
+
+#define DLB_PCI_CAP_ID_EXP       0x10
+#define DLB_PCI_CAP_ID_MSIX      0x11
+#define DLB_PCI_EXT_CAP_ID_PAS   0x1B
+#define DLB_PCI_EXT_CAP_ID_PRI   0x13
+#define DLB_PCI_EXT_CAP_ID_ACS   0xD
+
+#define DLB_PCI_PASID_CAP_EXEC          0x2
+#define DLB_PCI_PASID_CAP_PRIV          0x4
+#define DLB_PCI_PASID_CTRL_ENABLE       0x1
+#define DLB_PCI_PRI_CTRL_ENABLE         0x1
+#define DLB_PCI_PRI_ALLOC_REQ           0xC
+#define DLB_PCI_PRI_CTRL                0x4
+#define DLB_PCI_MSIX_FLAGS              0x2
+#define DLB_PCI_MSIX_FLAGS_ENABLE       0x8000
+#define DLB_PCI_MSIX_FLAGS_MASKALL      0x4000
+#define DLB_PCI_ERR_ROOT_STATUS         0x30
+#define DLB_PCI_ERR_COR_STATUS          0x10
+#define DLB_PCI_ERR_UNCOR_STATUS        0x4
+#define DLB_PCI_COMMAND_INTX_DISABLE    0x400
+#define DLB_PCI_ACS_CAP                 0x4
+#define DLB_PCI_ACS_CTRL                0x6
+#define DLB_PCI_ACS_SV                  0x1
+#define DLB_PCI_ACS_RR                  0x4
+#define DLB_PCI_ACS_CR                  0x8
+#define DLB_PCI_ACS_UF                  0x10
+#define DLB_PCI_ACS_EC                  0x20
+
+/* Walk the PCIe extended capability list looking for capability 'id'.
+ * Returns the config-space offset of the capability, or -1 if it is not
+ * found or a config read fails.
+ */
+static int dlb_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
+{
+	uint32_t hdr;
+	size_t sz;
+	int pos;
+	int ttl;
+
+	pos = DLB_PCI_CFG_SPACE_SIZE;
+	sz = sizeof(hdr);
+
+	/* Bound the walk so a malformed (cyclic) capability list cannot spin
+	 * forever: extended config space holds at most one 8-byte-aligned
+	 * header per 8 bytes above the standard 256-byte region.
+	 */
+	ttl = (4096 - DLB_PCI_CFG_SPACE_SIZE) / 8;
+
+	while (pos > 0xFF && ttl-- > 0) {
+		if (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)
+			return -1;
+
+		if (DLB_PCI_EXT_CAP_ID(hdr) == id)
+			return pos;
+
+		pos = DLB_PCI_EXT_CAP_NEXT(hdr);
+	}
+
+	return -1;
+}
+
+/* Walk the classic PCI capability list looking for capability 'id'.
+ * Returns the config-space offset of the capability, or -1 if it is not
+ * found or a config read fails.
+ */
+static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
+{
+	uint8_t pos;
+	int ret;
+	uint16_t hdr;
+
+	ret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);
+	if (ret != 1)
+		return -1;
+
+	/* Mask only after the read is known to have succeeded; the original
+	 * masked a potentially uninitialized value first. The low two bits of
+	 * the capability pointer are reserved.
+	 */
+	pos &= 0xFC;
+
+	while (pos > 0x3F) {
+		ret = rte_pci_read_config(pdev, &hdr, 2, pos);
+		if (ret != 2)
+			return -1;
+
+		if (DLB_PCI_CAP_ID(hdr) == id)
+			return pos;
+
+		/* 0xFF means the list is terminated/invalid */
+		if (DLB_PCI_CAP_ID(hdr) == 0xFF)
+			return -1;
+
+		pos = DLB_PCI_CAP_NEXT(hdr);
+	}
+
+	return -1;
+}
+
+/* Set the "Unsupported Request" bit in the AER uncorrectable error mask
+ * register, so UR errors are not reported/escalated (the device sends
+ * spurious URs; see the comment at the call site in dlb_probe()).
+ * Returns 0 on success, a negative value on failure.
+ */
+static int dlb_mask_ur_err(struct rte_pci_device *pdev)
+{
+	uint32_t mask;
+	size_t sz = sizeof(mask);
+	/* Locate the AER extended capability; negative means not found */
+	int pos = dlb_pci_find_ext_capability(pdev, DLB_PCI_EXT_CAP_ID_ERR);
+
+	if (pos < 0) {
+		printf("[%s()] failed to find the aer capability\n",
+		       __func__);
+		return pos;
+	}
+
+	pos += DLB_PCI_ERR_UNCOR_MASK;
+
+	/* Read-modify-write the uncorrectable error mask register */
+	if (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {
+		printf("[%s()] Failed to read uncorrectable error mask reg\n",
+		       __func__);
+		return -1;
+	}
+
+	/* Mask Unsupported Request errors */
+	mask |= DLB_PCI_ERR_UNC_UNSUP;
+
+	if (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {
+		printf("[%s()] Failed to write uncorrectable error mask reg at offset %d\n",
+		       __func__, pos);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* dlb_probe() - allocate and initialize a DLB PF device.
+ *
+ * Saves the BAR mappings already set up by the PCI bus driver, resets the
+ * function (FLR), masks spurious UR errors, and initializes driver,
+ * resource, and hardware state.
+ *
+ * Return: the new device on success; NULL on failure with rte_errno set.
+ */
+struct dlb_dev *
+dlb_probe(struct rte_pci_device *pdev)
+{
+	struct dlb_dev *dlb_dev;
+	int ret = 0;
+
+	dlb_dev = rte_malloc("DLB_PF", sizeof(struct dlb_dev),
+			     RTE_CACHE_LINE_SIZE);
+
+	if (dlb_dev == NULL) {
+		ret = -ENOMEM;
+		goto dlb_dev_malloc_fail;
+	}
+
+	/* Log only after the allocation: the original passed the
+	 * still-uninitialized dlb_dev pointer to DLB_INFO.
+	 */
+	DLB_INFO(dlb_dev, "probe\n");
+
+	/* PCI Bus driver has already mapped bar space into process.
+	 * Save off our IO register and FUNC addresses.
+	 */
+
+	/* BAR 0 holds the FUNC registers (the original error message
+	 * labeled it csr_kva, contradicting the assignment below).
+	 */
+	if (pdev->mem_resource[0].addr == NULL) {
+		DLB_ERR(dlb_dev, "probe: BAR 0 addr (func_kva) is NULL\n");
+		ret = -EINVAL;
+		goto pci_mmap_bad_addr;
+	}
+	dlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
+	dlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
+
+	DLB_INFO(dlb_dev, "DLB FUNC VA=%p, PA=%p, len=%"PRIu64"\n",
+		 (void *)dlb_dev->hw.func_kva,
+		 (void *)dlb_dev->hw.func_phys_addr,
+		 pdev->mem_resource[0].len);
+
+	/* BAR 2 holds the CSR registers */
+	if (pdev->mem_resource[2].addr == NULL) {
+		DLB_ERR(dlb_dev, "probe: BAR 2 addr (csr_kva) is NULL\n");
+		ret = -EINVAL;
+		goto pci_mmap_bad_addr;
+	}
+	dlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
+	dlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
+
+	DLB_INFO(dlb_dev, "DLB CSR VA=%p, PA=%p, len=%"PRIu64"\n",
+		 (void *)dlb_dev->hw.csr_kva,
+		 (void *)dlb_dev->hw.csr_phys_addr,
+		 pdev->mem_resource[2].len);
+
+	dlb_dev->pdev = pdev;
+
+	ret = dlb_pf_reset(dlb_dev);
+	if (ret)
+		goto dlb_reset_fail;
+
+	/* DLB incorrectly sends URs in response to certain messages. Mask UR
+	 * errors to prevent these from being propagated to the MCA.
+	 */
+	ret = dlb_mask_ur_err(pdev);
+	if (ret)
+		goto mask_ur_err_fail;
+
+	ret = dlb_pf_init_driver_state(dlb_dev);
+	if (ret)
+		goto init_driver_state_fail;
+
+	ret = dlb_resource_init(&dlb_dev->hw);
+	if (ret)
+		goto resource_init_fail;
+
+	dlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);
+
+	dlb_pf_init_hardware(dlb_dev);
+
+	return dlb_dev;
+
+resource_init_fail:
+	/* NOTE(review): assumes dlb_resource_free() tolerates a failed
+	 * dlb_resource_init() — confirm in base/dlb_resource.c.
+	 */
+	dlb_resource_free(&dlb_dev->hw);
+init_driver_state_fail:
+mask_ur_err_fail:
+dlb_reset_fail:
+pci_mmap_bad_addr:
+	rte_free(dlb_dev);
+dlb_dev_malloc_fail:
+	rte_errno = ret;
+	return NULL;
+}
+
+/* dlb_pf_reset() - issue an FLR (function-level reset) of the DLB PF.
+ *
+ * Saves the PCI config header and the PCIe/PRI capability control
+ * registers, waits for pending transactions to drain, issues the FLR,
+ * then restores the saved state, clears stale AER status, re-enables
+ * MSI-X, and re-programs ACS.
+ *
+ * Return: 0 on success, a negative value otherwise.
+ */
+int
+dlb_pf_reset(struct dlb_dev *dlb_dev)
+{
+	int msix_cap_offset, err_cap_offset, acs_cap_offset, wait_count;
+	uint16_t dev_ctl_word, dev_ctl2_word, lnk_word, lnk_word2;
+	uint16_t rt_ctl_word, pri_ctrl_word;
+	struct rte_pci_device *pdev = dlb_dev->pdev;
+	uint16_t devsta_busy_word, devctl_word;
+	int pcie_cap_offset, pri_cap_offset;
+	uint16_t slt_word, slt_word2, cmd;
+	/* The PRI allocation request register is 32 bits wide; the original
+	 * declared this uint16_t, so the 4-byte config read below overran it.
+	 */
+	uint32_t pri_reqs_dword;
+	int ret = 0, i = 0;
+	uint32_t dword[16];
+	off_t off;
+
+	/* Save the PCI config header (first 64 bytes, one dword at a time) */
+
+	for (i = 0; i < 16; i++) {
+		if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4) {
+			printf("[%s()] failed to read the pci config space\n",
+			       __func__);
+			/* The original returned 'ret' here, which is still 0
+			 * at this point and would be mistaken for success.
+			 */
+			return -1;
+		}
+	}
+
+	pcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);
+
+	if (pcie_cap_offset < 0) {
+		printf("[%s()] failed to find the pcie capability\n",
+		       __func__);
+		return pcie_cap_offset;
+	}
+
+	/* Save the PCIe capability control registers; a failed read falls
+	 * back to restoring zero.
+	 */
+	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
+		dev_ctl_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_LNKCTL;
+	if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
+		lnk_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_SLTCTL;
+	if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
+		slt_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_RTCTL;
+	if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
+		rt_ctl_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
+	if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
+		dev_ctl2_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_LNKCTL2;
+	if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
+		lnk_word2 = 0;
+
+	off = pcie_cap_offset + DLB_PCI_SLTCTL2;
+	if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
+		slt_word2 = 0;
+
+	/* Save the PRI allocation request register, if PRI is supported */
+	pri_cap_offset = dlb_pci_find_ext_capability(pdev,
+						     DLB_PCI_EXT_CAP_ID_PRI);
+	if (pri_cap_offset >= 0) {
+		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
+		if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
+			pri_reqs_dword = 0;
+	}
+
+	/* clear the PCI command register before issuing the FLR */
+
+	off = DLB_PCI_CMD;
+	cmd = 0;
+	if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+		printf("[%s()] failed to write pci config space at offset %d\n",
+		       __func__, (int)off);
+		return -1;
+	}
+
+	/* Wait for pending transactions to drain (exponential backoff),
+	 * then issue the FLR.
+	 */
+	for (wait_count = 0; wait_count < 4; wait_count++) {
+		int sleep_time;
+
+		off = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;
+		ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
+		if (ret != 2) {
+			printf("[%s()] failed to read the pci device status\n",
+			       __func__);
+			/* Normalize to an error code; the original returned
+			 * 'ret', a possibly non-negative short byte count.
+			 */
+			return -1;
+		}
+
+		if (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))
+			break;
+
+		sleep_time = (1 << (wait_count)) * 100;
+		rte_delay_ms(sleep_time);
+	}
+
+	if (wait_count == 4) {
+		printf("[%s()] wait for pci pending transactions timed out\n",
+		       __func__);
+		return -1;
+	}
+
+	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
+	if (ret != 2) {
+		printf("[%s()] failed to read the pcie device control\n",
+		       __func__);
+		/* Normalize to an error code (see above) */
+		return -1;
+	}
+
+	devctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;
+
+	if (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {
+		printf("[%s()] failed to write the pcie device control at offset %d\n",
+		       __func__, (int)off);
+		return -1;
+	}
+
+	/* Give the FLR time to complete */
+	rte_delay_ms(100);
+
+	/* Restore PCI config state */
+
+	if (pcie_cap_offset >= 0) {
+		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+		if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
+			printf("[%s()] failed to write the pcie device control at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_LNKCTL;
+		if (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_SLTCTL;
+		if (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_RTCTL;
+		if (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
+		if (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_LNKCTL2;
+		if (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_SLTCTL2;
+		if (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	/* Restore the PRI allocation request register and re-enable PRI */
+	if (pri_cap_offset >= 0) {
+		pri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;
+
+		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
+		if (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pri_cap_offset + DLB_PCI_PRI_CTRL;
+		if (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	/* Clear stale AER status (write-1-to-clear registers: writing back
+	 * the value just read clears the set bits)
+	 */
+	err_cap_offset = dlb_pci_find_ext_capability(pdev,
+						     DLB_PCI_EXT_CAP_ID_ERR);
+	if (err_cap_offset >= 0) {
+		uint32_t tmp;
+
+		off = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;
+		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+			tmp = 0;
+
+		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = err_cap_offset + DLB_PCI_ERR_COR_STATUS;
+		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+			tmp = 0;
+
+		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;
+		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+			tmp = 0;
+
+		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	/* Restore the saved config header, highest dword first */
+	for (i = 16; i > 0; i--) {
+		off = (i - 1) * 4;
+		if (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	/* Re-enable INTx */
+	off = DLB_PCI_CMD;
+	if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+		cmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;
+		if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space\n",
+			       __func__);
+			return -1;
+		}
+	}
+
+	/* Re-enable MSI-X: enable with all vectors masked, then unmask */
+	msix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);
+	if (msix_cap_offset >= 0) {
+		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
+		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+			cmd |= DLB_PCI_MSIX_FLAGS_ENABLE;
+			cmd |= DLB_PCI_MSIX_FLAGS_MASKALL;
+			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+				printf("[%s()] failed to write msix flags\n",
+				       __func__);
+				return -1;
+			}
+		}
+
+		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
+		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+			cmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;
+			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+				printf("[%s()] failed to write msix flags\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
+	/* Re-program ACS: enable the supported SV/RR/CR/UF features, then
+	 * clear RR/CR/EC
+	 */
+	acs_cap_offset = dlb_pci_find_ext_capability(pdev,
+						     DLB_PCI_EXT_CAP_ID_ACS);
+	if (acs_cap_offset >= 0) {
+		uint16_t acs_cap, acs_ctrl, acs_mask;
+		off = acs_cap_offset + DLB_PCI_ACS_CAP;
+		if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
+			acs_cap = 0;
+
+		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
+			acs_ctrl = 0;
+
+		acs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;
+		acs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);
+		acs_ctrl |= (acs_cap & acs_mask);
+
+		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
+			acs_ctrl = 0;
+
+		acs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;
+		acs_ctrl &= ~acs_mask;
+
+		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*******************************/
+/****** Driver management ******/
+/*******************************/
+
+/* Initialize PF driver software state: select the four-HCW enqueue
+ * routine and initialize the locks. Always returns 0.
+ */
+int
+dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)
+{
+	/* Use movdir64b for the cache-line enqueue when the CPU supports it,
+	 * otherwise fall back to movntdq.
+	 */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MOVDIR64B))
+		dlb_dev->enqueue_four = dlb_movdir64b;
+	else
+		dlb_dev->enqueue_four = dlb_movntdq;
+
+	/* Initialize software state */
+	rte_spinlock_init(&dlb_dev->resource_mutex);
+	rte_spinlock_init(&dlb_dev->measurement_lock);
+
+	return 0;
+}
+
+/* One-time hardware initialization performed at the end of dlb_probe() */
+void
+dlb_pf_init_hardware(struct dlb_dev *dlb_dev)
+{
+	/* Work around a DLB VAS reset issue in the directed pipe */
+	dlb_disable_dp_vasr_feature(&dlb_dev->hw);
+
+	/* Fire the ingress error alarm on excess token pops */
+	dlb_enable_excess_tokens_alarm(&dlb_dev->hw);
+
+	/* B0 and later: enable sparse CQ mode and disable the PF<->VF ISR
+	 * pending-register access alarms. (The original had two back-to-back
+	 * identical revision checks; they are merged here.)
+	 */
+	if (dlb_dev->revision >= DLB_REV_B0) {
+		dlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);
+		dlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);
+		dlb_hw_disable_pf_to_vf_isr_pend_err(&dlb_dev->hw);
+		dlb_hw_disable_vf_to_pf_isr_pend_err(&dlb_dev->hw);
+	}
+}
diff --git a/drivers/event/dlb/pf/dlb_main.h b/drivers/event/dlb/pf/dlb_main.h
new file mode 100644
index 0000000..abe9c02
--- /dev/null
+++ b/drivers/event/dlb/pf/dlb_main.h
@@ -0,0 +1,52 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_MAIN_H
+#define __DLB_MAIN_H
+
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_spinlock.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#endif
+
+#include "base/dlb_hw_types.h"
+#include "../dlb_user.h"
+
+#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5
+
+/* Per-PF device state created by dlb_probe() */
+struct dlb_dev {
+	struct rte_pci_device *pdev;	/* backing PCI device */
+	struct dlb_hw hw;	/* state shared with the pf/base layer */
+	/* struct list_head list; */
+	struct device *dlb_device;
+	/* The enqueue_four function enqueues four HCWs (one cache-line worth)
+	 * to the DLB, using whichever mechanism is supported by the platform
+	 * on which this driver is running.
+	 */
+	void (*enqueue_four)(void *qe4, void *pp_addr);
+	/* NOTE(review): not written anywhere in this patch; presumably
+	 * latched when a domain reset fails — confirm in later patches.
+	 */
+	bool domain_reset_failed;
+	/* The resource mutex serializes access to driver data structures and
+	 * hardware registers.
+	 */
+	rte_spinlock_t resource_mutex;
+	rte_spinlock_t measurement_lock;
+	bool worker_launched;
+	/* Device stepping; set from os_get_dev_revision() in dlb_probe() */
+	u8 revision;
+};
+
+struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
+void dlb_reset_done(struct dlb_dev *dlb_dev);
+
+/* pf_ops */
+int dlb_pf_init_driver_state(struct dlb_dev *dev);
+void dlb_pf_free_driver_state(struct dlb_dev *dev);
+void dlb_pf_init_hardware(struct dlb_dev *dev);
+int dlb_pf_reset(struct dlb_dev *dlb_dev);
+
+#endif /* __DLB_MAIN_H */
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
new file mode 100644
index 0000000..7fc85e9
--- /dev/null
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -0,0 +1,232 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/fcntl.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_dev.h>
+#include <rte_devargs.h>
+#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_memory.h>
+#include <rte_string_fns.h>
+
+#include "../dlb_priv.h"
+#include "../dlb_iface.h"
+#include "../dlb_inline_fns.h"
+#include "dlb_main.h"
+#include "base/dlb_hw_types.h"
+#include "base/dlb_osdep.h"
+#include "base/dlb_resource.h"
+
+/* Reset the per-port low-level I/O addresses to their unmapped state;
+ * the real addresses are filled in at port-create time.
+ * NOTE(review): dlb_port appears to be a global table declared outside
+ * this file — confirm its definition site.
+ */
+static void
+dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
+{
+	int i;
+
+	/* Addresses will be initialized at port create */
+	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
+		/* First directed ports */
+
+		/* producer port */
+		dlb_port[i][DLB_DIR].pp_addr = NULL;
+
+		/* popcount */
+		dlb_port[i][DLB_DIR].ldb_popcount = NULL;
+		dlb_port[i][DLB_DIR].dir_popcount = NULL;
+
+		/* consumer queue */
+		dlb_port[i][DLB_DIR].cq_base = NULL;
+		dlb_port[i][DLB_DIR].mmaped = true;
+
+		/* Now load balanced ports */
+
+		/* producer port */
+		dlb_port[i][DLB_LDB].pp_addr = NULL;
+
+		/* popcount */
+		dlb_port[i][DLB_LDB].ldb_popcount = NULL;
+		dlb_port[i][DLB_LDB].dir_popcount = NULL;
+
+		/* consumer queue */
+		dlb_port[i][DLB_LDB].cq_base = NULL;
+		dlb_port[i][DLB_LDB].mmaped = true;
+	}
+}
+
+/* dlb_iface open callback: the PF PMD needs no per-handle open step,
+ * so this is a no-op that reports success.
+ */
+static int
+dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
+{
+	RTE_SET_USED(handle);
+	RTE_SET_USED(name);
+
+	return 0;
+}
+
+/* Report the device revision cached in dlb_dev at probe time */
+static int
+dlb_pf_get_device_version(struct dlb_hw_dev *handle,
+			  uint8_t *revision)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+	*revision = dlb_dev->revision;
+
+	return 0;
+}
+
+/* Query the PF's available resource counts from the hardware layer */
+static int
+dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
+			 struct dlb_get_num_resources_args *rsrcs)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+	dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);
+
+	return 0;
+}
+
+/* Select the CQ poll mode: B0 and later steppings support sparse mode
+ * (enabled in dlb_pf_init_hardware()); older steppings use standard mode.
+ */
+static int
+dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
+			enum dlb_cq_poll_modes *mode)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+	if (dlb_dev->revision >= DLB_REV_B0)
+		*mode = DLB_CQ_POLL_MODE_SPARSE;
+	else
+		*mode = DLB_CQ_POLL_MODE_STD;
+
+	return 0;
+}
+
+/* Plug the PF implementations into the dlb_iface function-pointer
+ * interface shared with the (future) bifurcated PMD.
+ */
+static void
+dlb_pf_iface_fn_ptrs_init(void)
+{
+	dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
+	dlb_iface_open = dlb_pf_open;
+	dlb_iface_get_device_version = dlb_pf_get_device_version;
+	dlb_iface_get_num_resources = dlb_pf_get_num_resources;
+	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
+}
+
+/* PCI DEV HOOKS */
+
+/* Eventdev init callback invoked from the PCI probe path. In the primary
+ * process this probes the PF layer, parses any devargs, and runs the
+ * primary eventdev probe; secondary processes attach to existing state.
+ * Returns 0 on success, a negative value otherwise.
+ */
+static int
+dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
+{
+	int ret = 0;
+	struct rte_pci_device *pci_dev;
+	/* Defaults used when no devargs override them */
+	struct dlb_devargs dlb_args = {
+		.socket_id = rte_socket_id(),
+		.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
+		.num_dir_credits_override = -1,
+		.defer_sched = 0,
+		.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
+	};
+	struct dlb_eventdev *dlb;
+
+	DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
+		    eventdev->data->dev_id, eventdev->data->socket_id);
+
+	dlb_entry_points_init(eventdev);
+
+	dlb_pf_iface_fn_ptrs_init();
+
+	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
+
+		/* Probe the DLB PF layer */
+		dlb->qm_instance.pf_dev = dlb_probe(pci_dev);
+
+		if (dlb->qm_instance.pf_dev == NULL) {
+			DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
+				    rte_errno);
+			ret = -rte_errno;
+			goto dlb_probe_failed;
+		}
+
+		/* Were we invoked with runtime parameters? */
+		if (pci_dev->device.devargs) {
+			ret = dlb_parse_params(pci_dev->device.devargs->args,
+					       pci_dev->device.devargs->name,
+					       &dlb_args);
+			if (ret) {
+				DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
+					    ret, rte_errno);
+				goto dlb_probe_failed;
+			}
+		}
+
+		ret = dlb_primary_eventdev_probe(eventdev,
+						 EVDEV_DLB_NAME_PMD_STR,
+						 &dlb_args);
+	} else {
+		ret = dlb_secondary_eventdev_probe(eventdev,
+						   EVDEV_DLB_NAME_PMD_STR);
+	}
+	if (ret)
+		goto dlb_probe_failed;
+
+	DLB_LOG_INFO("DLB PF Probe success\n");
+
+	return 0;
+
+dlb_probe_failed:
+
+	/* NOTE(review): pf_dev allocated by a successful dlb_probe() is not
+	 * released when a later step fails — confirm whether eventdev
+	 * teardown covers it.
+	 */
+	DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);
+
+	return ret;
+}
+
+#define EVENTDEV_INTEL_VENDOR_ID 0x8086
+
+/* PCI IDs served by this driver; the zeroed entry terminates the table */
+static const struct rte_pci_id pci_id_dlb_map[] = {
+	{
+		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
+			       DLB_PF_DEV_ID)
+	},
+	{
+		.vendor_id = 0,
+	},
+};
+
+/* PCI bus probe hook: create a named eventdev whose private data is a
+ * struct dlb_eventdev, initialized by dlb_eventdev_pci_init().
+ */
+static int
+event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
+		sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
+		EVDEV_DLB_NAME_PMD_STR);
+}
+
+/* PCI bus remove hook: tear down the eventdev (no extra uninit callback) */
+static int
+event_dlb_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_event_pmd_pci_remove(pci_dev, NULL);
+}
+
+/* PCI driver definition and registration with the PCI bus */
+static struct rte_pci_driver pci_eventdev_dlb_pmd = {
+	.id_table = pci_id_dlb_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = event_dlb_pci_probe,
+	.remove = event_dlb_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);