get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (a full update of the patch's writable fields).
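
The snippet below is a minimal sketch of driving this endpoint from Python with the third-party "requests" package. The token value is a placeholder (Patchwork uses DRF-style "Authorization: Token <key>" authentication, which the write operations require), and the "accepted" state is purely illustrative.

import requests

BASE = "https://patches.dpdk.org/api"

# GET: read access needs no authentication on public projects.
patch = requests.get(f"{BASE}/patches/83052/", timeout=30).json()
print(patch["name"], patch["state"])   # -> "[v11,10/23] ..." "superseded"

# PATCH: partial update -- send only the fields to change.
# Requires a maintainer's API token (placeholder below).
resp = requests.patch(
    f"{BASE}/patches/83052/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "accepted"},
    timeout=30,
)
resp.raise_for_status()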

GET /api/patches/83052/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83052,
    "url": "https://patches.dpdk.org/api/patches/83052/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1604101295-15970-11-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1604101295-15970-11-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1604101295-15970-11-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-10-30T23:41:22",
    "name": "[v11,10/23] event/dlb: add infos get and configure",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "2b89e64572374e1cdfc0331696469b68273aa697",
    "submitter": {
        "id": 826,
        "url": "https://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1604101295-15970-11-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 13512,
            "url": "https://patches.dpdk.org/api/series/13512/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13512",
            "date": "2020-10-30T23:41:12",
            "name": "Add DLB PMD",
            "version": 11,
            "mbox": "https://patches.dpdk.org/series/13512/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/83052/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/83052/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 649A5A04E6;\n\tSat, 31 Oct 2020 00:43:18 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id D0E66C822;\n\tSat, 31 Oct 2020 00:40:16 +0100 (CET)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n by dpdk.org (Postfix) with ESMTP id 1BC44BE57\n for <dev@dpdk.org>; Sat, 31 Oct 2020 00:39:57 +0100 (CET)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 30 Oct 2020 16:39:57 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by fmsmga004.fm.intel.com with ESMTP; 30 Oct 2020 16:39:56 -0700"
        ],
        "IronPort-SDR": [
            "\n NUKPuPQcpzu9wAfoVpZfu6tsEqgtjaEXe4jE1h5Zc+dr4m8tLzqH0t/w4j8B40mn2OXT56bi72\n XscdPYfI6BCg==",
            "\n pjWOm0NFyMLVTUmr9DtjnT/ORhkzqRDiFeD3umwAvNfgi6Awl11sUCkASEj73e7q8gXhqbRcTL\n 67Wjdu4C3Qbg=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9790\"; a=\"166094675\"",
            "E=Sophos;i=\"5.77,435,1596524400\"; d=\"scan'208\";a=\"166094675\"",
            "E=Sophos;i=\"5.77,435,1596524400\"; d=\"scan'208\";a=\"352025688\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,\n harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net",
        "Date": "Fri, 30 Oct 2020 18:41:22 -0500",
        "Message-Id": "<1604101295-15970-11-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1604101295-15970-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<20200612212434.6852-2-timothy.mcdaniel@intel.com>\n <1604101295-15970-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v11 10/23] event/dlb: add infos get and configure",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for configuring the DLB hardware.\nIn particular, this patch configures the DLB\nhardware's scheduling domain, such that it is provisioned with\nthe requested number of ports and queues, provided sufficient\nresources are available. Individual queues and ports are\nconfigured later in port setup and eventdev start.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\nReviewed-by: Gage Eads <gage.eads@intel.com>\n---\n doc/guides/eventdevs/dlb.rst             |   48 +\n drivers/event/dlb/dlb.c                  |  397 +++\n drivers/event/dlb/dlb_iface.c            |   11 +\n drivers/event/dlb/dlb_iface.h            |   11 +\n drivers/event/dlb/pf/base/dlb_resource.c | 4100 +++++++++++++++++++++++++++++-\n drivers/event/dlb/pf/dlb_pf.c            |   88 +\n 6 files changed, 4562 insertions(+), 93 deletions(-)",
    "diff": "diff --git a/doc/guides/eventdevs/dlb.rst b/doc/guides/eventdevs/dlb.rst\nindex 92341c0..2d7999b 100644\n--- a/doc/guides/eventdevs/dlb.rst\n+++ b/doc/guides/eventdevs/dlb.rst\n@@ -34,3 +34,51 @@ detailed understanding of the hardware, but these details are important when\n writing high-performance code. This section describes the places where the\n eventdev API and DLB misalign.\n \n+Scheduling Domain Configuration\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+There are 32 scheduling domainis the DLB.\n+When one is configured, it allocates load-balanced and\n+directed queues, ports, credits, and other hardware resources. Some\n+resource allocations are user-controlled -- the number of queues, for example\n+-- and others, like credit pools (one directed and one load-balanced pool per\n+scheduling domain), are not.\n+\n+The DLB is a closed system eventdev, and as such the ``nb_events_limit`` device\n+setup argument and the per-port ``new_event_threshold`` argument apply as\n+defined in the eventdev header file. The limit is applied to all enqueues,\n+regardless of whether it will consume a directed or load-balanced credit.\n+\n+Reconfiguration\n+~~~~~~~~~~~~~~~\n+\n+The Eventdev API allows one to reconfigure a device, its ports, and its queues\n+by first stopping the device, calling the configuration function(s), then\n+restarting the device. The DLB does not support configuring an individual queue\n+or port without first reconfiguring the entire device, however, so there are\n+certain reconfiguration sequences that are valid in the eventdev API but not\n+supported by the PMD.\n+\n+Specifically, the PMD supports the following configuration sequence:\n+1. Configure and start the device\n+2. Stop the device\n+3. (Optional) Reconfigure the device\n+4. (Optional) If step 3 is run:\n+\n+   a. Setup queue(s). The reconfigured queue(s) lose their previous port links.\n+   b. The reconfigured port(s) lose their previous queue links.\n+\n+5. (Optional, only if steps 4a and 4b are run) Link port(s) to queue(s)\n+6. Restart the device. If the device is reconfigured in step 3 but one or more\n+   of its ports or queues are not, the PMD will apply their previous\n+   configuration (including port->queue links) at this time.\n+\n+The PMD does not support the following configuration sequences:\n+1. Configure and start the device\n+2. Stop the device\n+3. Setup queue or setup port\n+4. Start the device\n+\n+This sequence is not supported because the event device must be reconfigured\n+before its ports or queues can be.\n+\ndiff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c\nindex 62b9695..c038794 100644\n--- a/drivers/event/dlb/dlb.c\n+++ b/drivers/event/dlb/dlb.c\n@@ -139,6 +139,19 @@ dlb_hw_query_resources(struct dlb_eventdev *dlb)\n \treturn 0;\n }\n \n+static void\n+dlb_free_qe_mem(struct dlb_port *qm_port)\n+{\n+\tif (qm_port == NULL)\n+\t\treturn;\n+\n+\trte_free(qm_port->qe4);\n+\tqm_port->qe4 = NULL;\n+\n+\trte_free(qm_port->consume_qe);\n+\tqm_port->consume_qe = NULL;\n+}\n+\n /* Wrapper for string to int conversion. Substituted for atoi(...), which is\n  * unsafe.\n  */\n@@ -231,6 +244,388 @@ set_num_dir_credits(const char *key __rte_unused,\n \t\t\t    DLB_MAX_NUM_DIR_CREDITS);\n \t\treturn -EINVAL;\n \t}\n+\treturn 0;\n+}\n+\n+/* VDEV-only notes:\n+ * This function first unmaps all memory mappings and closes the\n+ * domain's file descriptor, which causes the driver to reset the\n+ * scheduling domain. 
Once that completes (when close() returns), we\n+ * can safely free the dynamically allocated memory used by the\n+ * scheduling domain.\n+ *\n+ * PF-only notes:\n+ * We will maintain a use count and use that to determine when\n+ * a reset is required.  In PF mode, we never mmap, or munmap\n+ * device memory,  and we own the entire physical PCI device.\n+ */\n+\n+static void\n+dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)\n+{\n+\tstruct dlb_eventdev *dlb = dlb_pmd_priv(dev);\n+\tenum dlb_configuration_state config_state;\n+\tint i, j;\n+\n+\t/* Close and reset the domain */\n+\tdlb_iface_domain_close(dlb);\n+\n+\t/* Free all dynamically allocated port memory */\n+\tfor (i = 0; i < dlb->num_ports; i++)\n+\t\tdlb_free_qe_mem(&dlb->ev_ports[i].qm_port);\n+\n+\t/* If reconfiguring, mark the device's queues and ports as \"previously\n+\t * configured.\" If the user does not reconfigure them, the PMD will\n+\t * reapply their previous configuration when the device is started.\n+\t */\n+\tconfig_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;\n+\n+\tfor (i = 0; i < dlb->num_ports; i++) {\n+\t\tdlb->ev_ports[i].qm_port.config_state = config_state;\n+\t\t/* Reset setup_done so ports can be reconfigured */\n+\t\tdlb->ev_ports[i].setup_done = false;\n+\t\tfor (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)\n+\t\t\tdlb->ev_ports[i].link[j].mapped = false;\n+\t}\n+\n+\tfor (i = 0; i < dlb->num_queues; i++)\n+\t\tdlb->ev_queues[i].qm_queue.config_state = config_state;\n+\n+\tfor (i = 0; i < DLB_MAX_NUM_QUEUES; i++)\n+\t\tdlb->ev_queues[i].setup_done = false;\n+\n+\tdlb->num_ports = 0;\n+\tdlb->num_ldb_ports = 0;\n+\tdlb->num_dir_ports = 0;\n+\tdlb->num_queues = 0;\n+\tdlb->num_ldb_queues = 0;\n+\tdlb->num_dir_queues = 0;\n+\tdlb->configured = false;\n+}\n+\n+static int\n+dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)\n+{\n+\tstruct dlb_create_ldb_pool_args cfg;\n+\tstruct dlb_cmd_response response;\n+\tint ret;\n+\n+\tif (handle == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (!handle->cfg.resources.num_ldb_credits) {\n+\t\thandle->cfg.ldb_credit_pool_id = 0;\n+\t\thandle->cfg.num_ldb_credits = 0;\n+\t\treturn 0;\n+\t}\n+\n+\tcfg.response = (uintptr_t)&response;\n+\tcfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;\n+\n+\tret = dlb_iface_ldb_credit_pool_create(handle,\n+\t\t\t\t\t       &cfg);\n+\tif (ret < 0) {\n+\t\tDLB_LOG_ERR(\"dlb: ldb_credit_pool_create ret=%d (driver status: %s)\\n\",\n+\t\t\t    ret, dlb_error_strings[response.status]);\n+\t}\n+\n+\thandle->cfg.ldb_credit_pool_id = response.id;\n+\thandle->cfg.num_ldb_credits = cfg.num_ldb_credits;\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)\n+{\n+\tstruct dlb_create_dir_pool_args cfg;\n+\tstruct dlb_cmd_response response;\n+\tint ret;\n+\n+\tif (handle == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (!handle->cfg.resources.num_dir_credits) {\n+\t\thandle->cfg.dir_credit_pool_id = 0;\n+\t\thandle->cfg.num_dir_credits = 0;\n+\t\treturn 0;\n+\t}\n+\n+\tcfg.response = (uintptr_t)&response;\n+\tcfg.num_dir_credits = handle->cfg.resources.num_dir_credits;\n+\n+\tret = dlb_iface_dir_credit_pool_create(handle, &cfg);\n+\tif (ret < 0)\n+\t\tDLB_LOG_ERR(\"dlb: dir_credit_pool_create ret=%d (driver status: %s)\\n\",\n+\t\t\t    ret, dlb_error_strings[response.status]);\n+\n+\thandle->cfg.dir_credit_pool_id = response.id;\n+\thandle->cfg.num_dir_credits = cfg.num_dir_credits;\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_hw_create_sched_domain(struct dlb_hw_dev 
*handle,\n+\t\t\t   struct dlb_eventdev *dlb,\n+\t\t\t   const struct dlb_hw_rsrcs *resources_asked)\n+{\n+\tint ret = 0;\n+\tstruct dlb_create_sched_domain_args *config_params;\n+\tstruct dlb_cmd_response response;\n+\n+\tif (resources_asked == NULL) {\n+\t\tDLB_LOG_ERR(\"dlb: dlb_create NULL parameter\\n\");\n+\t\tret = EINVAL;\n+\t\tgoto error_exit;\n+\t}\n+\n+\t/* Map generic qm resources to dlb resources */\n+\tconfig_params = &handle->cfg.resources;\n+\n+\tconfig_params->response = (uintptr_t)&response;\n+\n+\t/* DIR ports and queues */\n+\n+\tconfig_params->num_dir_ports =\n+\t\tresources_asked->num_dir_ports;\n+\n+\tconfig_params->num_dir_credits =\n+\t\tresources_asked->num_dir_credits;\n+\n+\t/* LDB ports and queues */\n+\n+\tconfig_params->num_ldb_queues =\n+\t\tresources_asked->num_ldb_queues;\n+\n+\tconfig_params->num_ldb_ports =\n+\t\tresources_asked->num_ldb_ports;\n+\n+\tconfig_params->num_ldb_credits =\n+\t\tresources_asked->num_ldb_credits;\n+\n+\tconfig_params->num_atomic_inflights =\n+\t\tdlb->num_atm_inflights_per_queue *\n+\t\tconfig_params->num_ldb_queues;\n+\n+\tconfig_params->num_hist_list_entries = config_params->num_ldb_ports *\n+\t\tDLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;\n+\n+\t/* dlb limited to 1 credit pool per queue type */\n+\tconfig_params->num_ldb_credit_pools = 1;\n+\tconfig_params->num_dir_credit_pools = 1;\n+\n+\tDLB_LOG_DBG(\"sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir-credit_pools=%d\\n\",\n+\t\t    config_params->num_ldb_queues,\n+\t\t    config_params->num_ldb_ports,\n+\t\t    config_params->num_dir_ports,\n+\t\t    config_params->num_atomic_inflights,\n+\t\t    config_params->num_hist_list_entries,\n+\t\t    config_params->num_ldb_credits,\n+\t\t    config_params->num_dir_credits,\n+\t\t    config_params->num_ldb_credit_pools,\n+\t\t    config_params->num_dir_credit_pools);\n+\n+\t/* Configure the QM */\n+\n+\tret = dlb_iface_sched_domain_create(handle, config_params);\n+\tif (ret < 0) {\n+\t\tDLB_LOG_ERR(\"dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\\n\",\n+\t\t\t    handle->device_id,\n+\t\t\t    ret,\n+\t\t\t    dlb_error_strings[response.status]);\n+\t\tgoto error_exit;\n+\t}\n+\n+\thandle->domain_id = response.id;\n+\thandle->domain_id_valid = 1;\n+\n+\tconfig_params->response = 0;\n+\n+\tret = dlb_ldb_credit_pool_create(handle);\n+\tif (ret < 0) {\n+\t\tDLB_LOG_ERR(\"dlb: create ldb credit pool failed\\n\");\n+\t\tgoto error_exit2;\n+\t}\n+\n+\tret = dlb_dir_credit_pool_create(handle);\n+\tif (ret < 0) {\n+\t\tDLB_LOG_ERR(\"dlb: create dir credit pool failed\\n\");\n+\t\tgoto error_exit2;\n+\t}\n+\n+\thandle->cfg.configured = true;\n+\n+\treturn 0;\n+\n+error_exit2:\n+\tdlb_iface_domain_close(dlb);\n+\n+error_exit:\n+\treturn ret;\n+}\n+\n+/* End HW specific */\n+static void\n+dlb_eventdev_info_get(struct rte_eventdev *dev,\n+\t\t      struct rte_event_dev_info *dev_info)\n+{\n+\tstruct dlb_eventdev *dlb = dlb_pmd_priv(dev);\n+\tint ret;\n+\n+\tret = dlb_hw_query_resources(dlb);\n+\tif (ret) {\n+\t\tconst struct rte_eventdev_data *data = dev->data;\n+\n+\t\tDLB_LOG_ERR(\"get resources err=%d, devid=%d\\n\",\n+\t\t\t    ret, data->dev_id);\n+\t\t/* fn is void, so fall through and return values set up in\n+\t\t * probe\n+\t\t */\n+\t}\n+\n+\t/* Add num resources currently owned by this domain.\n+\t * These would become available if the scheduling domain were reset due\n+\t * to the application 
recalling eventdev_configure to *reconfigure* the\n+\t * domain.\n+\t */\n+\tevdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;\n+\tevdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;\n+\tevdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;\n+\n+\t/* In DLB A-stepping hardware, applications are limited to 128\n+\t * configured ports (load-balanced or directed). The reported number of\n+\t * available ports must reflect this.\n+\t */\n+\tif (dlb->revision < DLB_REV_B0) {\n+\t\tint used_ports;\n+\n+\t\tused_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -\n+\t\t\tdlb->hw_rsrc_query_results.num_ldb_ports -\n+\t\t\tdlb->hw_rsrc_query_results.num_dir_ports;\n+\n+\t\tevdev_dlb_default_info.max_event_ports =\n+\t\t\tRTE_MIN(evdev_dlb_default_info.max_event_ports,\n+\t\t\t\t128 - used_ports);\n+\t}\n+\n+\tevdev_dlb_default_info.max_event_queues =\n+\t\tRTE_MIN(evdev_dlb_default_info.max_event_queues,\n+\t\t\tRTE_EVENT_MAX_QUEUES_PER_DEV);\n+\n+\tevdev_dlb_default_info.max_num_events =\n+\t\tRTE_MIN(evdev_dlb_default_info.max_num_events,\n+\t\t\tdlb->max_num_events_override);\n+\n+\t*dev_info = evdev_dlb_default_info;\n+}\n+\n+/* Note: 1 QM instance per QM device, QM instance/device == event device */\n+static int\n+dlb_eventdev_configure(const struct rte_eventdev *dev)\n+{\n+\tstruct dlb_eventdev *dlb = dlb_pmd_priv(dev);\n+\tstruct dlb_hw_dev *handle = &dlb->qm_instance;\n+\tstruct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;\n+\tconst struct rte_eventdev_data *data = dev->data;\n+\tconst struct rte_event_dev_config *config = &data->dev_conf;\n+\tint ret;\n+\n+\t/* If this eventdev is already configured, we must release the current\n+\t * scheduling domain before attempting to configure a new one.\n+\t */\n+\tif (dlb->configured) {\n+\t\tdlb_hw_reset_sched_domain(dev, true);\n+\n+\t\tret = dlb_hw_query_resources(dlb);\n+\t\tif (ret) {\n+\t\t\tDLB_LOG_ERR(\"get resources err=%d, devid=%d\\n\",\n+\t\t\t\t    ret, data->dev_id);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tif (config->nb_event_queues > rsrcs->num_queues) {\n+\t\tDLB_LOG_ERR(\"nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\\n\",\n+\t\t\t    config->nb_event_queues,\n+\t\t\t    rsrcs->num_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (config->nb_event_ports > (rsrcs->num_ldb_ports\n+\t\t\t+ rsrcs->num_dir_ports)) {\n+\t\tDLB_LOG_ERR(\"nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\\n\",\n+\t\t\t    config->nb_event_ports,\n+\t\t\t    (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));\n+\t\treturn -EINVAL;\n+\t}\n+\tif (config->nb_events_limit > rsrcs->nb_events_limit) {\n+\t\tDLB_LOG_ERR(\"nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\\n\",\n+\t\t\t    config->nb_events_limit,\n+\t\t\t    rsrcs->nb_events_limit);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)\n+\t\tdlb->global_dequeue_wait = false;\n+\telse {\n+\t\tuint32_t timeout32;\n+\n+\t\tdlb->global_dequeue_wait = true;\n+\n+\t\ttimeout32 = config->dequeue_timeout_ns;\n+\n+\t\tdlb->global_dequeue_wait_ticks =\n+\t\t\ttimeout32 * (rte_get_timer_hz() / 1E9);\n+\t}\n+\n+\t/* Does this platform support umonitor/umwait? 
*/\n+\tif (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {\n+\t\tif (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&\n+\t\t    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {\n+\t\t\tDLB_LOG_ERR(\"invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE must be 0 or 1.\\n\",\n+\t\t\t\t    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tdlb->umwait_allowed = true;\n+\t}\n+\n+\trsrcs->num_dir_ports = config->nb_single_link_event_port_queues;\n+\trsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;\n+\t/* 1 dir queue per dir port */\n+\trsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;\n+\n+\t/* Scale down nb_events_limit by 4 for directed credits, since there\n+\t * are 4x as many load-balanced credits.\n+\t */\n+\trsrcs->num_ldb_credits = 0;\n+\trsrcs->num_dir_credits = 0;\n+\n+\tif (rsrcs->num_ldb_queues)\n+\t\trsrcs->num_ldb_credits = config->nb_events_limit;\n+\tif (rsrcs->num_dir_ports)\n+\t\trsrcs->num_dir_credits = config->nb_events_limit / 4;\n+\tif (dlb->num_dir_credits_override != -1)\n+\t\trsrcs->num_dir_credits = dlb->num_dir_credits_override;\n+\n+\tif (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {\n+\t\tDLB_LOG_ERR(\"dlb_hw_create_sched_domain failed\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tdlb->new_event_limit = config->nb_events_limit;\n+\t__atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);\n+\n+\t/* Save number of ports/queues for this event dev */\n+\tdlb->num_ports = config->nb_event_ports;\n+\tdlb->num_queues = config->nb_event_queues;\n+\tdlb->num_dir_ports = rsrcs->num_dir_ports;\n+\tdlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;\n+\tdlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;\n+\tdlb->num_dir_queues = dlb->num_dir_ports;\n+\tdlb->num_ldb_credits = rsrcs->num_ldb_credits;\n+\tdlb->num_dir_credits = rsrcs->num_dir_credits;\n+\n+\tdlb->configured = true;\n \n \treturn 0;\n }\n@@ -309,6 +704,8 @@ void\n dlb_entry_points_init(struct rte_eventdev *dev)\n {\n \tstatic struct rte_eventdev_ops dlb_eventdev_entry_ops = {\n+\t\t.dev_infos_get    = dlb_eventdev_info_get,\n+\t\t.dev_configure    = dlb_eventdev_configure,\n \t\t.dump             = dlb_eventdev_dump,\n \t\t.xstats_get       = dlb_eventdev_xstats_get,\n \t\t.xstats_get_names = dlb_eventdev_xstats_get_names,\ndiff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c\nindex dd72120..f3e82f2 100644\n--- a/drivers/event/dlb/dlb_iface.c\n+++ b/drivers/event/dlb/dlb_iface.c\n@@ -16,12 +16,23 @@ void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);\n \n int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);\n \n+void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);\n+\n int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,\n \t\t\t\t    uint8_t *revision);\n \n int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,\n \t\t\t\t   struct dlb_get_num_resources_args *rsrcs);\n \n+int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,\n+\t\t\t\t     struct dlb_create_sched_domain_args *args);\n+\n+int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,\n+\t\t\t\t\tstruct dlb_create_ldb_pool_args *cfg);\n+\n+int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,\n+\t\t\t\t\tstruct dlb_create_dir_pool_args *cfg);\n+\n int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,\n \t\t\t\t  enum dlb_cq_poll_modes *mode);\n \ndiff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h\nindex 416d1b3..d576232 100644\n--- 
a/drivers/event/dlb/dlb_iface.h\n+++ b/drivers/event/dlb/dlb_iface.h\n@@ -15,12 +15,23 @@ extern void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);\n \n extern int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);\n \n+extern void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);\n+\n extern int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,\n \t\t\t\t\t   uint8_t *revision);\n \n extern int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,\n \t\t\t\t   struct dlb_get_num_resources_args *rsrcs);\n \n+extern int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,\n+\t\t\t\t     struct dlb_create_sched_domain_args *args);\n+\n+extern int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,\n+\t\t\t\t\tstruct dlb_create_ldb_pool_args *cfg);\n+\n+extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,\n+\t\t\t\t\tstruct dlb_create_dir_pool_args *cfg);\n+\n extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,\n \t\t\t\t\t enum dlb_cq_poll_modes *mode);\n \ndiff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c\nindex 9c4267b..2f8ffec 100644\n--- a/drivers/event/dlb/pf/base/dlb_resource.c\n+++ b/drivers/event/dlb/pf/base/dlb_resource.c\n@@ -9,107 +9,30 @@\n #include \"dlb_osdep_bitmap.h\"\n #include \"dlb_osdep_types.h\"\n #include \"dlb_regs.h\"\n+#include \"../../dlb_priv.h\"\n+#include \"../../dlb_inline_fns.h\"\n \n-void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)\n-{\n-\tunion dlb_dp_dir_csr_ctrl r0;\n-\n-\tr0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);\n-\n-\tr0.field.cfg_vasr_dis = 1;\n-\n-\tDLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);\n-}\n-\n-void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)\n-{\n-\tunion dlb_chp_cfg_chp_csr_ctrl r0;\n-\n-\tr0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);\n-\n-\tr0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;\n-\n-\tDLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);\n-}\n-\n-void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)\n-{\n-\tunion dlb_sys_cq_mode r0;\n-\n-\tr0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);\n-\n-\tr0.field.ldb_cq64 = 1;\n-\n-\tDLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);\n-}\n+#define DLB_DOM_LIST_HEAD(head, type) \\\n+\tDLB_LIST_HEAD((head), type, domain_list)\n \n-void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)\n-{\n-\tunion dlb_sys_cq_mode r0;\n-\n-\tr0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);\n-\n-\tr0.field.dir_cq64 = 1;\n-\n-\tDLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);\n-}\n+#define DLB_FUNC_LIST_HEAD(head, type) \\\n+\tDLB_LIST_HEAD((head), type, func_list)\n \n-void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)\n-{\n-\tunion dlb_sys_sys_alarm_int_enable r0;\n+#define DLB_DOM_LIST_FOR(head, ptr, iter) \\\n+\tDLB_LIST_FOR_EACH(head, ptr, domain_list, iter)\n \n-\tr0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);\n+#define DLB_FUNC_LIST_FOR(head, ptr, iter) \\\n+\tDLB_LIST_FOR_EACH(head, ptr, func_list, iter)\n \n-\tr0.field.pf_to_vf_isr_pend_error = 0;\n+#define DLB_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \\\n+\tDLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)\n \n-\tDLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);\n-}\n+#define DLB_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \\\n+\tDLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)\n \n-void dlb_hw_get_num_resources(struct dlb_hw *hw,\n-\t\t\t      struct dlb_get_num_resources_args *arg)\n+static inline void dlb_flush_csr(struct dlb_hw *hw)\n 
{\n-\tstruct dlb_function_resources *rsrcs;\n-\tstruct dlb_bitmap *map;\n-\n-\trsrcs = &hw->pf;\n-\n-\targ->num_sched_domains = rsrcs->num_avail_domains;\n-\n-\targ->num_ldb_queues = rsrcs->num_avail_ldb_queues;\n-\n-\targ->num_ldb_ports = rsrcs->num_avail_ldb_ports;\n-\n-\targ->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;\n-\n-\tmap = rsrcs->avail_aqed_freelist_entries;\n-\n-\targ->num_atomic_inflights = dlb_bitmap_count(map);\n-\n-\targ->max_contiguous_atomic_inflights =\n-\t\tdlb_bitmap_longest_set_range(map);\n-\n-\tmap = rsrcs->avail_hist_list_entries;\n-\n-\targ->num_hist_list_entries = dlb_bitmap_count(map);\n-\n-\targ->max_contiguous_hist_list_entries =\n-\t\tdlb_bitmap_longest_set_range(map);\n-\n-\tmap = rsrcs->avail_qed_freelist_entries;\n-\n-\targ->num_ldb_credits = dlb_bitmap_count(map);\n-\n-\targ->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);\n-\n-\tmap = rsrcs->avail_dqed_freelist_entries;\n-\n-\targ->num_dir_credits = dlb_bitmap_count(map);\n-\n-\targ->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);\n-\n-\targ->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;\n-\n-\targ->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;\n+\tDLB_CSR_RD(hw, DLB_SYS_TOTAL_VAS);\n }\n \n static void dlb_init_fn_rsrc_lists(struct dlb_function_resources *rsrc)\n@@ -290,6 +213,3997 @@ void dlb_resource_free(struct dlb_hw *hw)\n \tdlb_bitmap_free(hw->pf.avail_aqed_freelist_entries);\n }\n \n+static struct dlb_domain *dlb_get_domain_from_id(struct dlb_hw *hw, u32 id)\n+{\n+\tif (id >= DLB_MAX_NUM_DOMAINS)\n+\t\treturn NULL;\n+\n+\treturn &hw->domains[id];\n+}\n+\n+static int dlb_attach_ldb_queues(struct dlb_hw *hw,\n+\t\t\t\t struct dlb_function_resources *rsrcs,\n+\t\t\t\t struct dlb_domain *domain,\n+\t\t\t\t u32 num_queues,\n+\t\t\t\t struct dlb_cmd_response *resp)\n+{\n+\tunsigned int i, j;\n+\n+\tif (rsrcs->num_avail_ldb_queues < num_queues) {\n+\t\tresp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\tstruct dlb_ldb_queue *queue;\n+\n+\t\tqueue = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,\n+\t\t\t\t\t   typeof(*queue));\n+\t\tif (queue == NULL) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: domain validation failed\\n\",\n+\t\t\t\t   __func__);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tdlb_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);\n+\n+\t\tqueue->domain_id = domain->id;\n+\t\tqueue->owned = true;\n+\n+\t\tdlb_list_add(&domain->avail_ldb_queues, &queue->domain_list);\n+\t}\n+\n+\trsrcs->num_avail_ldb_queues -= num_queues;\n+\n+\treturn 0;\n+\n+cleanup:\n+\n+\t/* Return the assigned queues */\n+\tfor (j = 0; j < i; j++) {\n+\t\tstruct dlb_ldb_queue *queue;\n+\n+\t\tqueue = DLB_FUNC_LIST_HEAD(domain->avail_ldb_queues,\n+\t\t\t\t\t   typeof(*queue));\n+\t\t/* Unrecoverable internal error */\n+\t\tif (queue == NULL)\n+\t\t\tbreak;\n+\n+\t\tqueue->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);\n+\n+\t\tdlb_list_add(&rsrcs->avail_ldb_queues, &queue->func_list);\n+\t}\n+\n+\treturn -EFAULT;\n+}\n+\n+static struct dlb_ldb_port *\n+dlb_get_next_ldb_port(struct dlb_hw *hw,\n+\t\t      struct dlb_function_resources *rsrcs,\n+\t\t      u32 domain_id)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\n+\t/* To reduce the odds of consecutive load-balanced ports mapping to the\n+\t * same queue(s), the driver attempts to allocate ports whose neighbors\n+\t * are owned by a different 
domain.\n+\t */\n+\tDLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {\n+\t\tu32 next, prev;\n+\t\tu32 phys_id;\n+\n+\t\tphys_id = port->id;\n+\t\tnext = phys_id + 1;\n+\t\tprev = phys_id - 1;\n+\n+\t\tif (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)\n+\t\t\tnext = 0;\n+\t\tif (phys_id == 0)\n+\t\t\tprev = DLB_MAX_NUM_LDB_PORTS - 1;\n+\n+\t\tif (!hw->rsrcs.ldb_ports[next].owned ||\n+\t\t    hw->rsrcs.ldb_ports[next].domain_id == domain_id)\n+\t\t\tcontinue;\n+\n+\t\tif (!hw->rsrcs.ldb_ports[prev].owned ||\n+\t\t    hw->rsrcs.ldb_ports[prev].domain_id == domain_id)\n+\t\t\tcontinue;\n+\n+\t\treturn port;\n+\t}\n+\n+\t/* Failing that, the driver looks for a port with one neighbor owned by\n+\t * a different domain and the other unallocated.\n+\t */\n+\tDLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {\n+\t\tu32 next, prev;\n+\t\tu32 phys_id;\n+\n+\t\tphys_id = port->id;\n+\t\tnext = phys_id + 1;\n+\t\tprev = phys_id - 1;\n+\n+\t\tif (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)\n+\t\t\tnext = 0;\n+\t\tif (phys_id == 0)\n+\t\t\tprev = DLB_MAX_NUM_LDB_PORTS - 1;\n+\n+\t\tif (!hw->rsrcs.ldb_ports[prev].owned &&\n+\t\t    hw->rsrcs.ldb_ports[next].owned &&\n+\t\t    hw->rsrcs.ldb_ports[next].domain_id != domain_id)\n+\t\t\treturn port;\n+\n+\t\tif (!hw->rsrcs.ldb_ports[next].owned &&\n+\t\t    hw->rsrcs.ldb_ports[prev].owned &&\n+\t\t    hw->rsrcs.ldb_ports[prev].domain_id != domain_id)\n+\t\t\treturn port;\n+\t}\n+\n+\t/* Failing that, the driver looks for a port with both neighbors\n+\t * unallocated.\n+\t */\n+\tDLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {\n+\t\tu32 next, prev;\n+\t\tu32 phys_id;\n+\n+\t\tphys_id = port->id;\n+\t\tnext = phys_id + 1;\n+\t\tprev = phys_id - 1;\n+\n+\t\tif (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)\n+\t\t\tnext = 0;\n+\t\tif (phys_id == 0)\n+\t\t\tprev = DLB_MAX_NUM_LDB_PORTS - 1;\n+\n+\t\tif (!hw->rsrcs.ldb_ports[prev].owned &&\n+\t\t    !hw->rsrcs.ldb_ports[next].owned)\n+\t\t\treturn port;\n+\t}\n+\n+\t/* If all else fails, the driver returns the next available port. 
*/\n+\treturn DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports, typeof(*port));\n+}\n+\n+static int dlb_attach_ldb_ports(struct dlb_hw *hw,\n+\t\t\t\tstruct dlb_function_resources *rsrcs,\n+\t\t\t\tstruct dlb_domain *domain,\n+\t\t\t\tu32 num_ports,\n+\t\t\t\tstruct dlb_cmd_response *resp)\n+{\n+\tunsigned int i, j;\n+\n+\tif (rsrcs->num_avail_ldb_ports < num_ports) {\n+\t\tresp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < num_ports; i++) {\n+\t\tstruct dlb_ldb_port *port;\n+\n+\t\tport = dlb_get_next_ldb_port(hw, rsrcs, domain->id);\n+\n+\t\tif (port == NULL) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: domain validation failed\\n\",\n+\t\t\t\t   __func__);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tdlb_list_del(&rsrcs->avail_ldb_ports, &port->func_list);\n+\n+\t\tport->domain_id = domain->id;\n+\t\tport->owned = true;\n+\n+\t\tdlb_list_add(&domain->avail_ldb_ports, &port->domain_list);\n+\t}\n+\n+\trsrcs->num_avail_ldb_ports -= num_ports;\n+\n+\treturn 0;\n+\n+cleanup:\n+\n+\t/* Return the assigned ports */\n+\tfor (j = 0; j < i; j++) {\n+\t\tstruct dlb_ldb_port *port;\n+\n+\t\tport = DLB_FUNC_LIST_HEAD(domain->avail_ldb_ports,\n+\t\t\t\t\t  typeof(*port));\n+\t\t/* Unrecoverable internal error */\n+\t\tif (port == NULL)\n+\t\t\tbreak;\n+\n+\t\tport->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_ldb_ports, &port->domain_list);\n+\n+\t\tdlb_list_add(&rsrcs->avail_ldb_ports, &port->func_list);\n+\t}\n+\n+\treturn -EFAULT;\n+}\n+\n+static int dlb_attach_dir_ports(struct dlb_hw *hw,\n+\t\t\t\tstruct dlb_function_resources *rsrcs,\n+\t\t\t\tstruct dlb_domain *domain,\n+\t\t\t\tu32 num_ports,\n+\t\t\t\tstruct dlb_cmd_response *resp)\n+{\n+\tunsigned int i, j;\n+\n+\tif (rsrcs->num_avail_dir_pq_pairs < num_ports) {\n+\t\tresp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < num_ports; i++) {\n+\t\tstruct dlb_dir_pq_pair *port;\n+\n+\t\tport = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,\n+\t\t\t\t\t  typeof(*port));\n+\t\tif (port == NULL) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: domain validation failed\\n\",\n+\t\t\t\t   __func__);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tdlb_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);\n+\n+\t\tport->domain_id = domain->id;\n+\t\tport->owned = true;\n+\n+\t\tdlb_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);\n+\t}\n+\n+\trsrcs->num_avail_dir_pq_pairs -= num_ports;\n+\n+\treturn 0;\n+\n+cleanup:\n+\n+\t/* Return the assigned ports */\n+\tfor (j = 0; j < i; j++) {\n+\t\tstruct dlb_dir_pq_pair *port;\n+\n+\t\tport = DLB_FUNC_LIST_HEAD(domain->avail_dir_pq_pairs,\n+\t\t\t\t\t  typeof(*port));\n+\t\t/* Unrecoverable internal error */\n+\t\tif (port == NULL)\n+\t\t\tbreak;\n+\n+\t\tport->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);\n+\n+\t\tdlb_list_add(&rsrcs->avail_dir_pq_pairs, &port->func_list);\n+\t}\n+\n+\treturn -EFAULT;\n+}\n+\n+static int dlb_attach_ldb_credits(struct dlb_function_resources *rsrcs,\n+\t\t\t\t  struct dlb_domain *domain,\n+\t\t\t\t  u32 num_credits,\n+\t\t\t\t  struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_bitmap *bitmap = rsrcs->avail_qed_freelist_entries;\n+\n+\tif (dlb_bitmap_count(bitmap) < (int)num_credits) {\n+\t\tresp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tif (num_credits) {\n+\t\tint base;\n+\n+\t\tbase = dlb_bitmap_find_set_bit_range(bitmap, num_credits);\n+\t\tif (base < 0)\n+\t\t\tgoto 
error;\n+\n+\t\tdomain->qed_freelist.base = base;\n+\t\tdomain->qed_freelist.bound = base + num_credits;\n+\t\tdomain->qed_freelist.offset = 0;\n+\n+\t\tdlb_bitmap_clear_range(bitmap, base, num_credits);\n+\t}\n+\n+\treturn 0;\n+\n+error:\n+\tresp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;\n+\treturn -1;\n+}\n+\n+static int dlb_attach_dir_credits(struct dlb_function_resources *rsrcs,\n+\t\t\t\t  struct dlb_domain *domain,\n+\t\t\t\t  u32 num_credits,\n+\t\t\t\t  struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_bitmap *bitmap = rsrcs->avail_dqed_freelist_entries;\n+\n+\tif (dlb_bitmap_count(bitmap) < (int)num_credits) {\n+\t\tresp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tif (num_credits) {\n+\t\tint base;\n+\n+\t\tbase = dlb_bitmap_find_set_bit_range(bitmap, num_credits);\n+\t\tif (base < 0)\n+\t\t\tgoto error;\n+\n+\t\tdomain->dqed_freelist.base = base;\n+\t\tdomain->dqed_freelist.bound = base + num_credits;\n+\t\tdomain->dqed_freelist.offset = 0;\n+\n+\t\tdlb_bitmap_clear_range(bitmap, base, num_credits);\n+\t}\n+\n+\treturn 0;\n+\n+error:\n+\tresp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;\n+\treturn -1;\n+}\n+\n+static int dlb_attach_ldb_credit_pools(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_function_resources *rsrcs,\n+\t\t\t\t       struct dlb_domain *domain,\n+\t\t\t\t       u32 num_credit_pools,\n+\t\t\t\t       struct dlb_cmd_response *resp)\n+{\n+\tunsigned int i, j;\n+\n+\tif (rsrcs->num_avail_ldb_credit_pools < num_credit_pools) {\n+\t\tresp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < num_credit_pools; i++) {\n+\t\tstruct dlb_credit_pool *pool;\n+\n+\t\tpool = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_credit_pools,\n+\t\t\t\t\t  typeof(*pool));\n+\t\tif (pool == NULL) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: domain validation failed\\n\",\n+\t\t\t\t   __func__);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tdlb_list_del(&rsrcs->avail_ldb_credit_pools,\n+\t\t\t     &pool->func_list);\n+\n+\t\tpool->domain_id = domain->id;\n+\t\tpool->owned = true;\n+\n+\t\tdlb_list_add(&domain->avail_ldb_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\t}\n+\n+\trsrcs->num_avail_ldb_credit_pools -= num_credit_pools;\n+\n+\treturn 0;\n+\n+cleanup:\n+\n+\t/* Return the assigned credit pools */\n+\tfor (j = 0; j < i; j++) {\n+\t\tstruct dlb_credit_pool *pool;\n+\n+\t\tpool = DLB_FUNC_LIST_HEAD(domain->avail_ldb_credit_pools,\n+\t\t\t\t\t  typeof(*pool));\n+\t\t/* Unrecoverable internal error */\n+\t\tif (pool == NULL)\n+\t\t\tbreak;\n+\n+\t\tpool->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_ldb_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\n+\t\tdlb_list_add(&rsrcs->avail_ldb_credit_pools,\n+\t\t\t     &pool->func_list);\n+\t}\n+\n+\treturn -EFAULT;\n+}\n+\n+static int dlb_attach_dir_credit_pools(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_function_resources *rsrcs,\n+\t\t\t\t       struct dlb_domain *domain,\n+\t\t\t\t       u32 num_credit_pools,\n+\t\t\t\t       struct dlb_cmd_response *resp)\n+{\n+\tunsigned int i, j;\n+\n+\tif (rsrcs->num_avail_dir_credit_pools < num_credit_pools) {\n+\t\tresp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < num_credit_pools; i++) {\n+\t\tstruct dlb_credit_pool *pool;\n+\n+\t\tpool = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_credit_pools,\n+\t\t\t\t\t  typeof(*pool));\n+\t\tif (pool == NULL) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: domain validation failed\\n\",\n+\t\t\t\t   
__func__);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tdlb_list_del(&rsrcs->avail_dir_credit_pools,\n+\t\t\t     &pool->func_list);\n+\n+\t\tpool->domain_id = domain->id;\n+\t\tpool->owned = true;\n+\n+\t\tdlb_list_add(&domain->avail_dir_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\t}\n+\n+\trsrcs->num_avail_dir_credit_pools -= num_credit_pools;\n+\n+\treturn 0;\n+\n+cleanup:\n+\n+\t/* Return the assigned credit pools */\n+\tfor (j = 0; j < i; j++) {\n+\t\tstruct dlb_credit_pool *pool;\n+\n+\t\tpool = DLB_FUNC_LIST_HEAD(domain->avail_dir_credit_pools,\n+\t\t\t\t\t  typeof(*pool));\n+\t\t/* Unrecoverable internal error */\n+\t\tif (pool == NULL)\n+\t\t\tbreak;\n+\n+\t\tpool->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_dir_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\n+\t\tdlb_list_add(&rsrcs->avail_dir_credit_pools,\n+\t\t\t     &pool->func_list);\n+\t}\n+\n+\treturn -EFAULT;\n+}\n+\n+static int\n+dlb_attach_domain_hist_list_entries(struct dlb_function_resources *rsrcs,\n+\t\t\t\t    struct dlb_domain *domain,\n+\t\t\t\t    u32 num_hist_list_entries,\n+\t\t\t\t    struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_bitmap *bitmap;\n+\tint base;\n+\n+\tif (num_hist_list_entries) {\n+\t\tbitmap = rsrcs->avail_hist_list_entries;\n+\n+\t\tbase = dlb_bitmap_find_set_bit_range(bitmap,\n+\t\t\t\t\t\t     num_hist_list_entries);\n+\t\tif (base < 0)\n+\t\t\tgoto error;\n+\n+\t\tdomain->total_hist_list_entries = num_hist_list_entries;\n+\t\tdomain->avail_hist_list_entries = num_hist_list_entries;\n+\t\tdomain->hist_list_entry_base = base;\n+\t\tdomain->hist_list_entry_offset = 0;\n+\n+\t\tdlb_bitmap_clear_range(bitmap, base, num_hist_list_entries);\n+\t}\n+\treturn 0;\n+\n+error:\n+\tresp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;\n+\treturn -1;\n+}\n+\n+static int dlb_attach_atomic_inflights(struct dlb_function_resources *rsrcs,\n+\t\t\t\t       struct dlb_domain *domain,\n+\t\t\t\t       u32 num_atomic_inflights,\n+\t\t\t\t       struct dlb_cmd_response *resp)\n+{\n+\tif (num_atomic_inflights) {\n+\t\tstruct dlb_bitmap *bitmap =\n+\t\t\trsrcs->avail_aqed_freelist_entries;\n+\t\tint base;\n+\n+\t\tbase = dlb_bitmap_find_set_bit_range(bitmap,\n+\t\t\t\t\t\t     num_atomic_inflights);\n+\t\tif (base < 0)\n+\t\t\tgoto error;\n+\n+\t\tdomain->aqed_freelist.base = base;\n+\t\tdomain->aqed_freelist.bound = base + num_atomic_inflights;\n+\t\tdomain->aqed_freelist.offset = 0;\n+\n+\t\tdlb_bitmap_clear_range(bitmap, base, num_atomic_inflights);\n+\t}\n+\n+\treturn 0;\n+\n+error:\n+\tresp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;\n+\treturn -1;\n+}\n+\n+\n+static int\n+dlb_domain_attach_resources(struct dlb_hw *hw,\n+\t\t\t    struct dlb_function_resources *rsrcs,\n+\t\t\t    struct dlb_domain *domain,\n+\t\t\t    struct dlb_create_sched_domain_args *args,\n+\t\t\t    struct dlb_cmd_response *resp)\n+{\n+\tint ret;\n+\n+\tret = dlb_attach_ldb_queues(hw,\n+\t\t\t\t    rsrcs,\n+\t\t\t\t    domain,\n+\t\t\t\t    args->num_ldb_queues,\n+\t\t\t\t    resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_attach_ldb_ports(hw,\n+\t\t\t\t   rsrcs,\n+\t\t\t\t   domain,\n+\t\t\t\t   args->num_ldb_ports,\n+\t\t\t\t   resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_attach_dir_ports(hw,\n+\t\t\t\t   rsrcs,\n+\t\t\t\t   domain,\n+\t\t\t\t   args->num_dir_ports,\n+\t\t\t\t   resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_attach_ldb_credits(rsrcs,\n+\t\t\t\t     domain,\n+\t\t\t\t     args->num_ldb_credits,\n+\t\t\t\t     resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = 
dlb_attach_dir_credits(rsrcs,\n+\t\t\t\t     domain,\n+\t\t\t\t     args->num_dir_credits,\n+\t\t\t\t     resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_attach_ldb_credit_pools(hw,\n+\t\t\t\t\t  rsrcs,\n+\t\t\t\t\t  domain,\n+\t\t\t\t\t  args->num_ldb_credit_pools,\n+\t\t\t\t\t  resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_attach_dir_credit_pools(hw,\n+\t\t\t\t\t  rsrcs,\n+\t\t\t\t\t  domain,\n+\t\t\t\t\t  args->num_dir_credit_pools,\n+\t\t\t\t\t  resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_attach_domain_hist_list_entries(rsrcs,\n+\t\t\t\t\t\t  domain,\n+\t\t\t\t\t\t  args->num_hist_list_entries,\n+\t\t\t\t\t\t  resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_attach_atomic_inflights(rsrcs,\n+\t\t\t\t\t  domain,\n+\t\t\t\t\t  args->num_atomic_inflights,\n+\t\t\t\t\t  resp);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tdomain->configured = true;\n+\n+\tdomain->started = false;\n+\n+\trsrcs->num_avail_domains--;\n+\n+\treturn 0;\n+}\n+\n+static void dlb_ldb_port_cq_enable(struct dlb_hw *hw,\n+\t\t\t\t   struct dlb_ldb_port *port)\n+{\n+\tunion dlb_lsp_cq_ldb_dsbl reg;\n+\n+\t/* Don't re-enable the port if a removal is pending. The caller should\n+\t * mark this port as enabled (if it isn't already), and when the\n+\t * removal completes the port will be enabled.\n+\t */\n+\tif (port->num_pending_removals)\n+\t\treturn;\n+\n+\treg.field.disabled = 0;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);\n+\n+\tdlb_flush_csr(hw);\n+}\n+\n+static void dlb_dir_port_cq_enable(struct dlb_hw *hw,\n+\t\t\t\t   struct dlb_dir_pq_pair *port)\n+{\n+\tunion dlb_lsp_cq_dir_dsbl reg;\n+\n+\treg.field.disabled = 0;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);\n+\n+\tdlb_flush_csr(hw);\n+}\n+\n+\n+static void dlb_ldb_port_cq_disable(struct dlb_hw *hw,\n+\t\t\t\t    struct dlb_ldb_port *port)\n+{\n+\tunion dlb_lsp_cq_ldb_dsbl reg;\n+\n+\treg.field.disabled = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);\n+\n+\tdlb_flush_csr(hw);\n+}\n+\n+static void dlb_dir_port_cq_disable(struct dlb_hw *hw,\n+\t\t\t\t    struct dlb_dir_pq_pair *port)\n+{\n+\tunion dlb_lsp_cq_dir_dsbl reg;\n+\n+\treg.field.disabled = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);\n+\n+\tdlb_flush_csr(hw);\n+}\n+\n+\n+\n+void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)\n+{\n+\tunion dlb_dp_dir_csr_ctrl r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);\n+\n+\tr0.field.cfg_vasr_dis = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);\n+}\n+\n+void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)\n+{\n+\tunion dlb_chp_cfg_chp_csr_ctrl r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);\n+\n+\tr0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);\n+}\n+\n+void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)\n+{\n+\tunion dlb_sys_cq_mode r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);\n+\n+\tr0.field.ldb_cq64 = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);\n+}\n+\n+void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)\n+{\n+\tunion dlb_sys_cq_mode r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);\n+\n+\tr0.field.dir_cq64 = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);\n+}\n+\n+void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)\n+{\n+\tunion dlb_sys_sys_alarm_int_enable r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);\n+\n+\tr0.field.pf_to_vf_isr_pend_error = 0;\n+\n+\tDLB_CSR_WR(hw, 
DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);\n+}\n+\n+static unsigned int\n+dlb_get_num_ports_in_use(struct dlb_hw *hw)\n+{\n+\tunsigned int i, n = 0;\n+\n+\tfor (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)\n+\t\tif (hw->rsrcs.ldb_ports[i].owned)\n+\t\t\tn++;\n+\n+\tfor (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)\n+\t\tif (hw->rsrcs.dir_pq_pairs[i].owned)\n+\t\t\tn++;\n+\n+\treturn n;\n+}\n+\n+static bool dlb_port_find_slot(struct dlb_ldb_port *port,\n+\t\t\t       enum dlb_qid_map_state state,\n+\t\t\t       int *slot)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tif (port->qid_map[i].state == state)\n+\t\t\tbreak;\n+\t}\n+\n+\t*slot = i;\n+\n+\treturn (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);\n+}\n+\n+static bool dlb_port_find_slot_queue(struct dlb_ldb_port *port,\n+\t\t\t\t     enum dlb_qid_map_state state,\n+\t\t\t\t     struct dlb_ldb_queue *queue,\n+\t\t\t\t     int *slot)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tif (port->qid_map[i].state == state &&\n+\t\t    port->qid_map[i].qid == queue->id)\n+\t\t\tbreak;\n+\t}\n+\n+\t*slot = i;\n+\n+\treturn (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);\n+}\n+\n+static int dlb_port_slot_state_transition(struct dlb_hw *hw,\n+\t\t\t\t\t  struct dlb_ldb_port *port,\n+\t\t\t\t\t  struct dlb_ldb_queue *queue,\n+\t\t\t\t\t  int slot,\n+\t\t\t\t\t  enum dlb_qid_map_state new_state)\n+{\n+\tenum dlb_qid_map_state curr_state = port->qid_map[slot].state;\n+\tstruct dlb_domain *domain;\n+\n+\tdomain = dlb_get_domain_from_id(hw, port->domain_id);\n+\tif (domain == NULL) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: unable to find domain %d\\n\",\n+\t\t\t   __func__, port->domain_id);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tswitch (curr_state) {\n+\tcase DLB_QUEUE_UNMAPPED:\n+\t\tswitch (new_state) {\n+\t\tcase DLB_QUEUE_MAPPED:\n+\t\t\tqueue->num_mappings++;\n+\t\t\tport->num_mappings++;\n+\t\t\tbreak;\n+\t\tcase DLB_QUEUE_MAP_IN_PROGRESS:\n+\t\t\tqueue->num_pending_additions++;\n+\t\t\tdomain->num_pending_additions++;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase DLB_QUEUE_MAPPED:\n+\t\tswitch (new_state) {\n+\t\tcase DLB_QUEUE_UNMAPPED:\n+\t\t\tqueue->num_mappings--;\n+\t\t\tport->num_mappings--;\n+\t\t\tbreak;\n+\t\tcase DLB_QUEUE_UNMAP_IN_PROGRESS:\n+\t\t\tport->num_pending_removals++;\n+\t\t\tdomain->num_pending_removals++;\n+\t\t\tbreak;\n+\t\tcase DLB_QUEUE_MAPPED:\n+\t\t\t/* Priority change, nothing to update */\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase DLB_QUEUE_MAP_IN_PROGRESS:\n+\t\tswitch (new_state) {\n+\t\tcase DLB_QUEUE_UNMAPPED:\n+\t\t\tqueue->num_pending_additions--;\n+\t\t\tdomain->num_pending_additions--;\n+\t\t\tbreak;\n+\t\tcase DLB_QUEUE_MAPPED:\n+\t\t\tqueue->num_mappings++;\n+\t\t\tport->num_mappings++;\n+\t\t\tqueue->num_pending_additions--;\n+\t\t\tdomain->num_pending_additions--;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase DLB_QUEUE_UNMAP_IN_PROGRESS:\n+\t\tswitch (new_state) {\n+\t\tcase DLB_QUEUE_UNMAPPED:\n+\t\t\tport->num_pending_removals--;\n+\t\t\tdomain->num_pending_removals--;\n+\t\t\tqueue->num_mappings--;\n+\t\t\tport->num_mappings--;\n+\t\t\tbreak;\n+\t\tcase DLB_QUEUE_MAPPED:\n+\t\t\tport->num_pending_removals--;\n+\t\t\tdomain->num_pending_removals--;\n+\t\t\tbreak;\n+\t\tcase DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:\n+\t\t\t/* Nothing to update */\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase 
DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:\n+\t\tswitch (new_state) {\n+\t\tcase DLB_QUEUE_UNMAP_IN_PROGRESS:\n+\t\t\t/* Nothing to update */\n+\t\t\tbreak;\n+\t\tcase DLB_QUEUE_UNMAPPED:\n+\t\t\t/* An UNMAP_IN_PROGRESS_PENDING_MAP slot briefly\n+\t\t\t * becomes UNMAPPED before it transitions to\n+\t\t\t * MAP_IN_PROGRESS.\n+\t\t\t */\n+\t\t\tqueue->num_mappings--;\n+\t\t\tport->num_mappings--;\n+\t\t\tport->num_pending_removals--;\n+\t\t\tdomain->num_pending_removals--;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tgoto error;\n+\t}\n+\n+\tport->qid_map[slot].state = new_state;\n+\n+\tDLB_HW_INFO(hw,\n+\t\t    \"[%s()] queue %d -> port %d state transition (%d -> %d)\\n\",\n+\t\t    __func__, queue->id, port->id, curr_state,\n+\t\t    new_state);\n+\treturn 0;\n+\n+error:\n+\tDLB_HW_ERR(hw,\n+\t\t   \"[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\\n\",\n+\t\t   __func__, queue->id, port->id, curr_state,\n+\t\t   new_state);\n+\treturn -EFAULT;\n+}\n+\n+/* dlb_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their\n+ * function names imply, and should only be called by the dynamic CQ mapping\n+ * code.\n+ */\n+static void dlb_ldb_queue_disable_mapped_cqs(struct dlb_hw *hw,\n+\t\t\t\t\t     struct dlb_domain *domain,\n+\t\t\t\t\t     struct dlb_ldb_queue *queue)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\tint slot;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tenum dlb_qid_map_state state = DLB_QUEUE_MAPPED;\n+\n+\t\tif (!dlb_port_find_slot_queue(port, state, queue, &slot))\n+\t\t\tcontinue;\n+\n+\t\tif (port->enabled)\n+\t\t\tdlb_ldb_port_cq_disable(hw, port);\n+\t}\n+}\n+\n+static void dlb_ldb_queue_enable_mapped_cqs(struct dlb_hw *hw,\n+\t\t\t\t\t    struct dlb_domain *domain,\n+\t\t\t\t\t    struct dlb_ldb_queue *queue)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\tint slot;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tenum dlb_qid_map_state state = DLB_QUEUE_MAPPED;\n+\n+\t\tif (!dlb_port_find_slot_queue(port, state, queue, &slot))\n+\t\t\tcontinue;\n+\n+\t\tif (port->enabled)\n+\t\t\tdlb_ldb_port_cq_enable(hw, port);\n+\t}\n+}\n+\n+static int dlb_ldb_port_map_qid_static(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_ldb_port *p,\n+\t\t\t\t       struct dlb_ldb_queue *q,\n+\t\t\t\t       u8 priority)\n+{\n+\tunion dlb_lsp_cq2priov r0;\n+\tunion dlb_lsp_cq2qid r1;\n+\tunion dlb_atm_pipe_qid_ldb_qid2cqidx r2;\n+\tunion dlb_lsp_qid_ldb_qid2cqidx r3;\n+\tunion dlb_lsp_qid_ldb_qid2cqidx2 r4;\n+\tenum dlb_qid_map_state state;\n+\tint i;\n+\n+\t/* Look for a pending or already mapped slot, else an unused slot */\n+\tif (!dlb_port_find_slot_queue(p, DLB_QUEUE_MAP_IN_PROGRESS, q, &i) &&\n+\t    !dlb_port_find_slot_queue(p, DLB_QUEUE_MAPPED, q, &i) &&\n+\t    !dlb_port_find_slot(p, DLB_QUEUE_UNMAPPED, &i)) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: CQ has no available QID mapping slots\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tif (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: port slot tracking failed\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* Read-modify-write the priority and valid bit register */\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(p->id));\n+\n+\tr0.field.v |= 1 << i;\n+\tr0.field.prio |= (priority & 0x7) << i * 
3;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(p->id), r0.val);\n+\n+\t/* Read-modify-write the QID map register */\n+\tr1.val = DLB_CSR_RD(hw, DLB_LSP_CQ2QID(p->id, i / 4));\n+\n+\tif (i == 0 || i == 4)\n+\t\tr1.field.qid_p0 = q->id;\n+\tif (i == 1 || i == 5)\n+\t\tr1.field.qid_p1 = q->id;\n+\tif (i == 2 || i == 6)\n+\t\tr1.field.qid_p2 = q->id;\n+\tif (i == 3 || i == 7)\n+\t\tr1.field.qid_p3 = q->id;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_CQ2QID(p->id, i / 4), r1.val);\n+\n+\tr2.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,\n+\t\t\t\t\t\t\t   p->id / 4));\n+\n+\tr3.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_QID2CQIDX(q->id,\n+\t\t\t\t\t\t      p->id / 4));\n+\n+\tr4.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_QID2CQIDX2(q->id,\n+\t\t\t\t\t\t       p->id / 4));\n+\n+\tswitch (p->id % 4) {\n+\tcase 0:\n+\t\tr2.field.cq_p0 |= 1 << i;\n+\t\tr3.field.cq_p0 |= 1 << i;\n+\t\tr4.field.cq_p0 |= 1 << i;\n+\t\tbreak;\n+\n+\tcase 1:\n+\t\tr2.field.cq_p1 |= 1 << i;\n+\t\tr3.field.cq_p1 |= 1 << i;\n+\t\tr4.field.cq_p1 |= 1 << i;\n+\t\tbreak;\n+\n+\tcase 2:\n+\t\tr2.field.cq_p2 |= 1 << i;\n+\t\tr3.field.cq_p2 |= 1 << i;\n+\t\tr4.field.cq_p2 |= 1 << i;\n+\t\tbreak;\n+\n+\tcase 3:\n+\t\tr2.field.cq_p3 |= 1 << i;\n+\t\tr3.field.cq_p3 |= 1 << i;\n+\t\tr4.field.cq_p3 |= 1 << i;\n+\t\tbreak;\n+\t}\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,\n+\t\t\t\t\t\t  p->id / 4),\n+\t\t   r2.val);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_QID_LDB_QID2CQIDX(q->id,\n+\t\t\t\t\t     p->id / 4),\n+\t\t   r3.val);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_QID_LDB_QID2CQIDX2(q->id,\n+\t\t\t\t\t      p->id / 4),\n+\t\t   r4.val);\n+\n+\tdlb_flush_csr(hw);\n+\n+\tp->qid_map[i].qid = q->id;\n+\tp->qid_map[i].priority = priority;\n+\n+\tstate = DLB_QUEUE_MAPPED;\n+\n+\treturn dlb_port_slot_state_transition(hw, p, q, i, state);\n+}\n+\n+static int dlb_ldb_port_set_has_work_bits(struct dlb_hw *hw,\n+\t\t\t\t\t  struct dlb_ldb_port *port,\n+\t\t\t\t\t  struct dlb_ldb_queue *queue,\n+\t\t\t\t\t  int slot)\n+{\n+\tunion dlb_lsp_qid_aqed_active_cnt r0;\n+\tunion dlb_lsp_qid_ldb_enqueue_cnt r1;\n+\tunion dlb_lsp_ldb_sched_ctrl r2 = { {0} };\n+\n+\t/* Set the atomic scheduling haswork bit */\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));\n+\n+\tr2.field.cq = port->id;\n+\tr2.field.qidix = slot;\n+\tr2.field.value = 1;\n+\tr2.field.rlist_haswork_v = r0.field.count > 0;\n+\n+\t/* Set the non-atomic scheduling haswork bit */\n+\tDLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);\n+\n+\tr1.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));\n+\n+\tmemset(&r2, 0, sizeof(r2));\n+\n+\tr2.field.cq = port->id;\n+\tr2.field.qidix = slot;\n+\tr2.field.value = 1;\n+\tr2.field.nalb_haswork_v = (r1.field.count > 0);\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);\n+\n+\tdlb_flush_csr(hw);\n+\n+\treturn 0;\n+}\n+\n+static void dlb_ldb_port_clear_queue_if_status(struct dlb_hw *hw,\n+\t\t\t\t\t       struct dlb_ldb_port *port,\n+\t\t\t\t\t       int slot)\n+{\n+\tunion dlb_lsp_ldb_sched_ctrl r0 = { {0} };\n+\n+\tr0.field.cq = port->id;\n+\tr0.field.qidix = slot;\n+\tr0.field.value = 0;\n+\tr0.field.inflight_ok_v = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);\n+\n+\tdlb_flush_csr(hw);\n+}\n+\n+static void dlb_ldb_port_set_queue_if_status(struct dlb_hw *hw,\n+\t\t\t\t\t     struct dlb_ldb_port *port,\n+\t\t\t\t\t     int slot)\n+{\n+\tunion dlb_lsp_ldb_sched_ctrl r0 = { {0} };\n+\n+\tr0.field.cq = port->id;\n+\tr0.field.qidix = slot;\n+\tr0.field.value = 
1;\n+\tr0.field.inflight_ok_v = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);\n+\n+\tdlb_flush_csr(hw);\n+}\n+\n+static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,\n+\t\t\t\t\t     struct dlb_ldb_queue *queue)\n+{\n+\tunion dlb_lsp_qid_ldb_infl_lim r0 = { {0} };\n+\n+\tr0.field.limit = queue->num_qid_inflights;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r0.val);\n+}\n+\n+static void dlb_ldb_queue_clear_inflight_limit(struct dlb_hw *hw,\n+\t\t\t\t\t       struct dlb_ldb_queue *queue)\n+{\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_QID_LDB_INFL_LIM(queue->id),\n+\t\t   DLB_LSP_QID_LDB_INFL_LIM_RST);\n+}\n+\n+static int dlb_ldb_port_finish_map_qid_dynamic(struct dlb_hw *hw,\n+\t\t\t\t\t       struct dlb_domain *domain,\n+\t\t\t\t\t       struct dlb_ldb_port *port,\n+\t\t\t\t\t       struct dlb_ldb_queue *queue)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_lsp_qid_ldb_infl_cnt r0;\n+\tenum dlb_qid_map_state state;\n+\tint slot, ret;\n+\tu8 prio;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));\n+\n+\tif (r0.field.count) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: non-zero QID inflight count\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* Perform the static mapping for this port's pending slot, and set\n+\t * the corresponding has_work bits.\n+\t */\n+\tstate = DLB_QUEUE_MAP_IN_PROGRESS;\n+\tif (!dlb_port_find_slot_queue(port, state, queue, &slot))\n+\t\treturn -EINVAL;\n+\n+\tif (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: port slot tracking failed\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tprio = port->qid_map[slot].priority;\n+\n+\t/* Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and\n+\t * the port's qid_map state.\n+\t */\n+\tret = dlb_ldb_port_map_qid_static(hw, port, queue, prio);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = dlb_ldb_port_set_has_work_bits(hw, port, queue, slot);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Ensure IF_status(cq,qid) is 0 before enabling the port to\n+\t * prevent spurious schedules from causing the queue's inflight\n+\t * count to increase.\n+\t */\n+\tdlb_ldb_port_clear_queue_if_status(hw, port, slot);\n+\n+\t/* Reset the queue's inflight status */\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tstate = DLB_QUEUE_MAPPED;\n+\t\tif (!dlb_port_find_slot_queue(port, state, queue, &slot))\n+\t\t\tcontinue;\n+\n+\t\tdlb_ldb_port_set_queue_if_status(hw, port, slot);\n+\t}\n+\n+\tdlb_ldb_queue_set_inflight_limit(hw, queue);\n+\n+\t/* Re-enable CQs mapped to this queue */\n+\tdlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);\n+\n+\t/* If this queue has other mappings pending, clear its inflight limit */\n+\tif (queue->num_pending_additions > 0)\n+\t\tdlb_ldb_queue_clear_inflight_limit(hw, queue);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * dlb_ldb_port_map_qid_dynamic() - perform a \"dynamic\" QID->CQ mapping\n+ * @hw: dlb_hw handle for a particular device.\n+ * @port: load-balanced port\n+ * @queue: load-balanced queue\n+ * @priority: queue servicing priority\n+ *\n+ * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur\n+ * at a later point, and <0 if an error occurred.\n+ */\n+static int dlb_ldb_port_map_qid_dynamic(struct dlb_hw *hw,\n+\t\t\t\t\tstruct dlb_ldb_port *port,\n+\t\t\t\t\tstruct dlb_ldb_queue *queue,\n+\t\t\t\t\tu8 priority)\n+{\n+\tunion dlb_lsp_qid_ldb_infl_cnt r0 = { {0} };\n+\tenum 
dlb_qid_map_state state;\n+\tstruct dlb_domain *domain;\n+\tint slot, ret;\n+\n+\tdomain = dlb_get_domain_from_id(hw, port->domain_id);\n+\tif (domain == NULL) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: unable to find domain %d\\n\",\n+\t\t\t   __func__, port->domain_id);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* Set the QID inflight limit to 0 to prevent further scheduling of the\n+\t * queue.\n+\t */\n+\tDLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), 0);\n+\n+\tif (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &slot)) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"Internal error: No available unmapped slots\\n\");\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tif (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: port slot tracking failed\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tport->qid_map[slot].qid = queue->id;\n+\tport->qid_map[slot].priority = priority;\n+\n+\tstate = DLB_QUEUE_MAP_IN_PROGRESS;\n+\tret = dlb_port_slot_state_transition(hw, port, queue, slot, state);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));\n+\n+\tif (r0.field.count) {\n+\t\t/* The queue is owed completions so it's not safe to map it\n+\t\t * yet. Schedule a kernel thread to complete the mapping later,\n+\t\t * once software has completed all the queue's inflight events.\n+\t\t */\n+\t\tif (!os_worker_active(hw))\n+\t\t\tos_schedule_work(hw);\n+\n+\t\treturn 1;\n+\t}\n+\n+\t/* Disable the affected CQ, and the CQs already mapped to the QID,\n+\t * before reading the QID's inflight count a second time. There is an\n+\t * unlikely race in which the QID may schedule one more QE after we\n+\t * read an inflight count of 0, and disabling the CQs guarantees that\n+\t * the race will not occur after a re-read of the inflight count\n+\t * register.\n+\t */\n+\tif (port->enabled)\n+\t\tdlb_ldb_port_cq_disable(hw, port);\n+\n+\tdlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));\n+\n+\tif (r0.field.count) {\n+\t\tif (port->enabled)\n+\t\t\tdlb_ldb_port_cq_enable(hw, port);\n+\n+\t\tdlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);\n+\n+\t\t/* The queue is owed completions so it's not safe to map it\n+\t\t * yet. 
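The hardware requires the QID's\n+\t\t * inflight count to reach zero before the map can complete. 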
Schedule a kernel thread to complete the mapping later,\n+\t\t * once software has completed all the queue's inflight events.\n+\t\t */\n+\t\tif (!os_worker_active(hw))\n+\t\t\tos_schedule_work(hw);\n+\n+\t\treturn 1;\n+\t}\n+\n+\treturn dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);\n+}\n+\n+\n+static int dlb_ldb_port_map_qid(struct dlb_hw *hw,\n+\t\t\t\tstruct dlb_domain *domain,\n+\t\t\t\tstruct dlb_ldb_port *port,\n+\t\t\t\tstruct dlb_ldb_queue *queue,\n+\t\t\t\tu8 prio)\n+{\n+\tif (domain->started)\n+\t\treturn dlb_ldb_port_map_qid_dynamic(hw, port, queue, prio);\n+\telse\n+\t\treturn dlb_ldb_port_map_qid_static(hw, port, queue, prio);\n+}\n+\n+static int dlb_ldb_port_unmap_qid(struct dlb_hw *hw,\n+\t\t\t\t  struct dlb_ldb_port *port,\n+\t\t\t\t  struct dlb_ldb_queue *queue)\n+{\n+\tenum dlb_qid_map_state mapped, in_progress, pending_map, unmapped;\n+\tunion dlb_lsp_cq2priov r0;\n+\tunion dlb_atm_pipe_qid_ldb_qid2cqidx r1;\n+\tunion dlb_lsp_qid_ldb_qid2cqidx r2;\n+\tunion dlb_lsp_qid_ldb_qid2cqidx2 r3;\n+\tu32 queue_id;\n+\tu32 port_id;\n+\tint i;\n+\n+\t/* Find the queue's slot */\n+\tmapped = DLB_QUEUE_MAPPED;\n+\tin_progress = DLB_QUEUE_UNMAP_IN_PROGRESS;\n+\tpending_map = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;\n+\n+\tif (!dlb_port_find_slot_queue(port, mapped, queue, &i) &&\n+\t    !dlb_port_find_slot_queue(port, in_progress, queue, &i) &&\n+\t    !dlb_port_find_slot_queue(port, pending_map, queue, &i)) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: QID %d isn't mapped\\n\",\n+\t\t\t   __func__, __LINE__, queue->id);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tif (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: port slot tracking failed\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tport_id = port->id;\n+\tqueue_id = queue->id;\n+\n+\t/* Read-modify-write the priority and valid bit register */\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port_id));\n+\n+\tr0.field.v &= ~(1 << i);\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port_id), r0.val);\n+\n+\tr1.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id,\n+\t\t\t\t\t\t\t   port_id / 4));\n+\n+\tr2.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_QID2CQIDX(queue_id,\n+\t\t\t\t\t\t      port_id / 4));\n+\n+\tr3.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_QID2CQIDX2(queue_id,\n+\t\t\t\t\t\t       port_id / 4));\n+\n+\tswitch (port_id % 4) {\n+\tcase 0:\n+\t\tr1.field.cq_p0 &= ~(1 << i);\n+\t\tr2.field.cq_p0 &= ~(1 << i);\n+\t\tr3.field.cq_p0 &= ~(1 << i);\n+\t\tbreak;\n+\n+\tcase 1:\n+\t\tr1.field.cq_p1 &= ~(1 << i);\n+\t\tr2.field.cq_p1 &= ~(1 << i);\n+\t\tr3.field.cq_p1 &= ~(1 << i);\n+\t\tbreak;\n+\n+\tcase 2:\n+\t\tr1.field.cq_p2 &= ~(1 << i);\n+\t\tr2.field.cq_p2 &= ~(1 << i);\n+\t\tr3.field.cq_p2 &= ~(1 << i);\n+\t\tbreak;\n+\n+\tcase 3:\n+\t\tr1.field.cq_p3 &= ~(1 << i);\n+\t\tr2.field.cq_p3 &= ~(1 << i);\n+\t\tr3.field.cq_p3 &= ~(1 << i);\n+\t\tbreak;\n+\t}\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id, port_id / 4),\n+\t\t   r1.val);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_QID_LDB_QID2CQIDX(queue_id, port_id / 4),\n+\t\t   r2.val);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_QID_LDB_QID2CQIDX2(queue_id, port_id / 4),\n+\t\t   r3.val);\n+\n+\tdlb_flush_csr(hw);\n+\n+\tunmapped = DLB_QUEUE_UNMAPPED;\n+\n+\treturn dlb_port_slot_state_transition(hw, port, queue, i, unmapped);\n+}\n+\n+static int\n+dlb_verify_create_sched_domain_args(struct dlb_hw *hw,\n+\t\t\t\t    struct dlb_function_resources 
*rsrcs,\n+\t\t\t\t    struct dlb_create_sched_domain_args *args,\n+\t\t\t\t    struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_bitmap *ldb_credit_freelist;\n+\tstruct dlb_bitmap *dir_credit_freelist;\n+\tunsigned int ldb_credit_freelist_count;\n+\tunsigned int dir_credit_freelist_count;\n+\tunsigned int max_contig_aqed_entries;\n+\tunsigned int max_contig_dqed_entries;\n+\tunsigned int max_contig_qed_entries;\n+\tunsigned int max_contig_hl_entries;\n+\tstruct dlb_bitmap *aqed_freelist;\n+\tenum dlb_dev_revision revision;\n+\n+\tldb_credit_freelist = rsrcs->avail_qed_freelist_entries;\n+\tdir_credit_freelist = rsrcs->avail_dqed_freelist_entries;\n+\taqed_freelist = rsrcs->avail_aqed_freelist_entries;\n+\n+\tldb_credit_freelist_count = dlb_bitmap_count(ldb_credit_freelist);\n+\tdir_credit_freelist_count = dlb_bitmap_count(dir_credit_freelist);\n+\n+\tmax_contig_hl_entries =\n+\t\tdlb_bitmap_longest_set_range(rsrcs->avail_hist_list_entries);\n+\tmax_contig_aqed_entries =\n+\t\tdlb_bitmap_longest_set_range(aqed_freelist);\n+\tmax_contig_qed_entries =\n+\t\tdlb_bitmap_longest_set_range(ldb_credit_freelist);\n+\tmax_contig_dqed_entries =\n+\t\tdlb_bitmap_longest_set_range(dir_credit_freelist);\n+\n+\tif (rsrcs->num_avail_domains < 1)\n+\t\tresp->status = DLB_ST_DOMAIN_UNAVAILABLE;\n+\telse if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues)\n+\t\tresp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;\n+\telse if (rsrcs->num_avail_ldb_ports < args->num_ldb_ports)\n+\t\tresp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;\n+\telse if (args->num_ldb_queues > 0 && args->num_ldb_ports == 0)\n+\t\tresp->status = DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;\n+\telse if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports)\n+\t\tresp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;\n+\telse if (ldb_credit_freelist_count < args->num_ldb_credits)\n+\t\tresp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;\n+\telse if (dir_credit_freelist_count < args->num_dir_credits)\n+\t\tresp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;\n+\telse if (rsrcs->num_avail_ldb_credit_pools < args->num_ldb_credit_pools)\n+\t\tresp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;\n+\telse if (rsrcs->num_avail_dir_credit_pools < args->num_dir_credit_pools)\n+\t\tresp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;\n+\telse if (max_contig_hl_entries < args->num_hist_list_entries)\n+\t\tresp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;\n+\telse if (max_contig_aqed_entries < args->num_atomic_inflights)\n+\t\tresp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;\n+\telse if (max_contig_qed_entries < args->num_ldb_credits)\n+\t\tresp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;\n+\telse if (max_contig_dqed_entries < args->num_dir_credits)\n+\t\tresp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;\n+\n+\t/* DLB A-stepping workaround for hardware write buffer lock up issue:\n+\t * limit the maximum configured ports to less than 128 and disable CQ\n+\t * occupancy interrupts.\n+\t */\n+\trevision = os_get_dev_revision(hw);\n+\n+\tif (revision < DLB_B0) {\n+\t\tu32 n = dlb_get_num_ports_in_use(hw);\n+\n+\t\tn += args->num_ldb_ports + args->num_dir_ports;\n+\n+\t\tif (n >= DLB_A_STEP_MAX_PORTS)\n+\t\t\tresp->status = args->num_ldb_ports ?\n+\t\t\t\tDLB_ST_LDB_PORTS_UNAVAILABLE :\n+\t\t\t\tDLB_ST_DIR_PORTS_UNAVAILABLE;\n+\t}\n+\n+\tif (resp->status)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+\n+static void\n+dlb_log_create_sched_domain_args(struct dlb_hw *hw,\n+\t\t\t\t struct dlb_create_sched_domain_args 
*args)\n+{\n+\tDLB_HW_INFO(hw, \"DLB create sched domain arguments:\\n\");\n+\tDLB_HW_INFO(hw, \"\\tNumber of LDB queues:        %d\\n\",\n+\t\t    args->num_ldb_queues);\n+\tDLB_HW_INFO(hw, \"\\tNumber of LDB ports:         %d\\n\",\n+\t\t    args->num_ldb_ports);\n+\tDLB_HW_INFO(hw, \"\\tNumber of DIR ports:         %d\\n\",\n+\t\t    args->num_dir_ports);\n+\tDLB_HW_INFO(hw, \"\\tNumber of ATM inflights:     %d\\n\",\n+\t\t    args->num_atomic_inflights);\n+\tDLB_HW_INFO(hw, \"\\tNumber of hist list entries: %d\\n\",\n+\t\t    args->num_hist_list_entries);\n+\tDLB_HW_INFO(hw, \"\\tNumber of LDB credits:       %d\\n\",\n+\t\t    args->num_ldb_credits);\n+\tDLB_HW_INFO(hw, \"\\tNumber of DIR credits:       %d\\n\",\n+\t\t    args->num_dir_credits);\n+\tDLB_HW_INFO(hw, \"\\tNumber of LDB credit pools:  %d\\n\",\n+\t\t    args->num_ldb_credit_pools);\n+\tDLB_HW_INFO(hw, \"\\tNumber of DIR credit pools:  %d\\n\",\n+\t\t    args->num_dir_credit_pools);\n+}\n+\n+/**\n+ * dlb_hw_create_sched_domain() - Allocate and initialize a DLB scheduling\n+ *\tdomain and its resources.\n+ * @hw:\t  Contains the current state of the DLB hardware.\n+ * @args: User-provided arguments.\n+ * @resp: Response to user.\n+ *\n+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to\n+ * satisfy a request, resp->status will be set accordingly.\n+ */\n+int dlb_hw_create_sched_domain(struct dlb_hw *hw,\n+\t\t\t       struct dlb_create_sched_domain_args *args,\n+\t\t\t       struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_domain *domain;\n+\tstruct dlb_function_resources *rsrcs;\n+\tint ret;\n+\n+\trsrcs = &hw->pf;\n+\n+\tdlb_log_create_sched_domain_args(hw, args);\n+\n+\t/* Verify that hardware resources are available before attempting to\n+\t * satisfy the request. This simplifies the error unwinding code.\n+\t */\n+\tif (dlb_verify_create_sched_domain_args(hw, rsrcs, args, resp))\n+\t\treturn -EINVAL;\n+\n+\tdomain = DLB_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));\n+\n+\t/* Verification should catch this. */\n+\tif (domain == NULL) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: no available domains\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tif (domain->configured) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: avail_domains contains configured domains.\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdlb_init_domain_rsrc_lists(domain);\n+\n+\t/* Verification should catch this too. 
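If attaching fails even though\n+\t * the argument checks above passed, the resource accounting is\n+\t * inconsistent. 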
*/\n+\tret = dlb_domain_attach_resources(hw, rsrcs, domain, args, resp);\n+\tif (ret < 0) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: failed to verify args.\\n\",\n+\t\t\t   __func__);\n+\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdlb_list_del(&rsrcs->avail_domains, &domain->func_list);\n+\n+\tdlb_list_add(&rsrcs->used_domains, &domain->func_list);\n+\n+\tresp->id = domain->id;\n+\tresp->status = 0;\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb_configure_ldb_credit_pool(struct dlb_hw *hw,\n+\t\t\t      struct dlb_domain *domain,\n+\t\t\t      struct dlb_create_ldb_pool_args *args,\n+\t\t\t      struct dlb_credit_pool *pool)\n+{\n+\tunion dlb_sys_ldb_pool_enbld r0 = { {0} };\n+\tunion dlb_chp_ldb_pool_crd_lim r1 = { {0} };\n+\tunion dlb_chp_ldb_pool_crd_cnt r2 = { {0} };\n+\tunion dlb_chp_qed_fl_base  r3 = { {0} };\n+\tunion dlb_chp_qed_fl_lim r4 = { {0} };\n+\tunion dlb_chp_qed_fl_push_ptr r5 = { {0} };\n+\tunion dlb_chp_qed_fl_pop_ptr  r6 = { {0} };\n+\n+\tr1.field.limit = args->num_ldb_credits;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_LIM(pool->id), r1.val);\n+\n+\tr2.field.count = args->num_ldb_credits;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_CNT(pool->id), r2.val);\n+\n+\tr3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_QED_FL_BASE(pool->id), r3.val);\n+\n+\tr4.field.freelist_disable = 0;\n+\tr4.field.limit = r3.field.base + args->num_ldb_credits - 1;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_QED_FL_LIM(pool->id), r4.val);\n+\n+\tr5.field.push_ptr = r3.field.base;\n+\tr5.field.generation = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_QED_FL_PUSH_PTR(pool->id), r5.val);\n+\n+\tr6.field.pop_ptr = r3.field.base;\n+\tr6.field.generation = 0;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_QED_FL_POP_PTR(pool->id), r6.val);\n+\n+\tr0.field.pool_enabled = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_SYS_LDB_POOL_ENBLD(pool->id), r0.val);\n+\n+\tpool->avail_credits = args->num_ldb_credits;\n+\tpool->total_credits = args->num_ldb_credits;\n+\tdomain->qed_freelist.offset += args->num_ldb_credits;\n+\n+\tpool->configured = true;\n+}\n+\n+static int\n+dlb_verify_create_ldb_pool_args(struct dlb_hw *hw,\n+\t\t\t\tu32 domain_id,\n+\t\t\t\tstruct dlb_create_ldb_pool_args *args,\n+\t\t\t\tstruct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_freelist *qed_freelist;\n+\tstruct dlb_domain *domain;\n+\n+\tdomain = dlb_get_domain_from_id(hw, domain_id);\n+\n+\tif (domain == NULL) {\n+\t\tresp->status = DLB_ST_INVALID_DOMAIN_ID;\n+\t\treturn -1;\n+\t}\n+\n+\tif (!domain->configured) {\n+\t\tresp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;\n+\t\treturn -1;\n+\t}\n+\n+\tqed_freelist = &domain->qed_freelist;\n+\n+\tif (dlb_freelist_count(qed_freelist) < args->num_ldb_credits) {\n+\t\tresp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tif (dlb_list_empty(&domain->avail_ldb_credit_pools)) {\n+\t\tresp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tif (domain->started) {\n+\t\tresp->status = DLB_ST_DOMAIN_STARTED;\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb_log_create_ldb_pool_args(struct dlb_hw *hw,\n+\t\t\t     u32 domain_id,\n+\t\t\t     struct dlb_create_ldb_pool_args *args)\n+{\n+\tDLB_HW_INFO(hw, \"DLB create load-balanced credit pool arguments:\\n\");\n+\tDLB_HW_INFO(hw, \"\\tDomain ID:             %d\\n\", domain_id);\n+\tDLB_HW_INFO(hw, \"\\tNumber of LDB credits: %d\\n\",\n+\t\t    args->num_ldb_credits);\n+}\n+\n+/**\n+ * dlb_hw_create_ldb_pool() - Allocate and initialize a DLB credit pool.\n+ * @hw:\t  Contains the current 
state of the DLB hardware.\n+ * @args: User-provided arguments.\n+ * @resp: Response to user.\n+ *\n+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to\n+ * satisfy a request, resp->status will be set accordingly.\n+ */\n+int dlb_hw_create_ldb_pool(struct dlb_hw *hw,\n+\t\t\t   u32 domain_id,\n+\t\t\t   struct dlb_create_ldb_pool_args *args,\n+\t\t\t   struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_credit_pool *pool;\n+\tstruct dlb_domain *domain;\n+\n+\tdlb_log_create_ldb_pool_args(hw, domain_id, args);\n+\n+\t/* Verify that hardware resources are available before attempting to\n+\t * satisfy the request. This simplifies the error unwinding code.\n+\t */\n+\tif (dlb_verify_create_ldb_pool_args(hw, domain_id, args, resp))\n+\t\treturn -EINVAL;\n+\n+\tdomain = dlb_get_domain_from_id(hw, domain_id);\n+\tif (domain == NULL) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: domain not found\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tpool = DLB_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool));\n+\n+\t/* Verification should catch this. */\n+\tif (pool == NULL) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: no available ldb credit pools\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdlb_configure_ldb_credit_pool(hw, domain, args, pool);\n+\n+\t/* Configuration succeeded, so move the resource from the 'avail' to\n+\t * the 'used' list.\n+\t */\n+\tdlb_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list);\n+\n+\tdlb_list_add(&domain->used_ldb_credit_pools, &pool->domain_list);\n+\n+\tresp->status = 0;\n+\tresp->id = pool->id;\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb_configure_dir_credit_pool(struct dlb_hw *hw,\n+\t\t\t      struct dlb_domain *domain,\n+\t\t\t      struct dlb_create_dir_pool_args *args,\n+\t\t\t      struct dlb_credit_pool *pool)\n+{\n+\tunion dlb_sys_dir_pool_enbld r0 = { {0} };\n+\tunion dlb_chp_dir_pool_crd_lim r1 = { {0} };\n+\tunion dlb_chp_dir_pool_crd_cnt r2 = { {0} };\n+\tunion dlb_chp_dqed_fl_base  r3 = { {0} };\n+\tunion dlb_chp_dqed_fl_lim r4 = { {0} };\n+\tunion dlb_chp_dqed_fl_push_ptr r5 = { {0} };\n+\tunion dlb_chp_dqed_fl_pop_ptr  r6 = { {0} };\n+\n+\tr1.field.limit = args->num_dir_credits;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_LIM(pool->id), r1.val);\n+\n+\tr2.field.count = args->num_dir_credits;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_CNT(pool->id), r2.val);\n+\n+\tr3.field.base = domain->dqed_freelist.base +\n+\t\t\tdomain->dqed_freelist.offset;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_DQED_FL_BASE(pool->id), r3.val);\n+\n+\tr4.field.freelist_disable = 0;\n+\tr4.field.limit = r3.field.base + args->num_dir_credits - 1;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_DQED_FL_LIM(pool->id), r4.val);\n+\n+\tr5.field.push_ptr = r3.field.base;\n+\tr5.field.generation = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_DQED_FL_PUSH_PTR(pool->id), r5.val);\n+\n+\tr6.field.pop_ptr = r3.field.base;\n+\tr6.field.generation = 0;\n+\n+\tDLB_CSR_WR(hw, DLB_CHP_DQED_FL_POP_PTR(pool->id), r6.val);\n+\n+\tr0.field.pool_enabled = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_SYS_DIR_POOL_ENBLD(pool->id), r0.val);\n+\n+\tpool->avail_credits = args->num_dir_credits;\n+\tpool->total_credits = args->num_dir_credits;\n+\tdomain->dqed_freelist.offset += args->num_dir_credits;\n+\n+\tpool->configured = true;\n+}\n+\n+static int\n+dlb_verify_create_dir_pool_args(struct dlb_hw *hw,\n+\t\t\t\tu32 domain_id,\n+\t\t\t\tstruct dlb_create_dir_pool_args *args,\n+\t\t\t\tstruct dlb_cmd_response *resp)\n+{\n+\tstruct 
dlb_freelist *dqed_freelist;\n+\tstruct dlb_domain *domain;\n+\n+\tdomain = dlb_get_domain_from_id(hw, domain_id);\n+\n+\tif (domain == NULL) {\n+\t\tresp->status = DLB_ST_INVALID_DOMAIN_ID;\n+\t\treturn -1;\n+\t}\n+\n+\tif (!domain->configured) {\n+\t\tresp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;\n+\t\treturn -1;\n+\t}\n+\n+\tdqed_freelist = &domain->dqed_freelist;\n+\n+\tif (dlb_freelist_count(dqed_freelist) < args->num_dir_credits) {\n+\t\tresp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tif (dlb_list_empty(&domain->avail_dir_credit_pools)) {\n+\t\tresp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;\n+\t\treturn -1;\n+\t}\n+\n+\tif (domain->started) {\n+\t\tresp->status = DLB_ST_DOMAIN_STARTED;\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb_log_create_dir_pool_args(struct dlb_hw *hw,\n+\t\t\t     u32 domain_id,\n+\t\t\t     struct dlb_create_dir_pool_args *args)\n+{\n+\tDLB_HW_INFO(hw, \"DLB create directed credit pool arguments:\\n\");\n+\tDLB_HW_INFO(hw, \"\\tDomain ID:             %d\\n\", domain_id);\n+\tDLB_HW_INFO(hw, \"\\tNumber of DIR credits: %d\\n\",\n+\t\t    args->num_dir_credits);\n+}\n+\n+/**\n+ * dlb_hw_create_dir_pool() - Allocate and initialize a DLB credit pool.\n+ * @hw:\t  Contains the current state of the DLB hardware.\n+ * @args: User-provided arguments.\n+ * @resp: Response to user.\n+ *\n+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to\n+ * satisfy a request, resp->status will be set accordingly.\n+ */\n+int dlb_hw_create_dir_pool(struct dlb_hw *hw,\n+\t\t\t   u32 domain_id,\n+\t\t\t   struct dlb_create_dir_pool_args *args,\n+\t\t\t   struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_credit_pool *pool;\n+\tstruct dlb_domain *domain;\n+\n+\tdlb_log_create_dir_pool_args(hw, domain_id, args);\n+\n+\t/* Verify that hardware resources are available before attempting to\n+\t * satisfy the request. This simplifies the error unwinding code.\n+\t */\n+\t/* At least one available pool */\n+\tif (dlb_verify_create_dir_pool_args(hw, domain_id, args, resp))\n+\t\treturn -EINVAL;\n+\n+\tdomain = dlb_get_domain_from_id(hw, domain_id);\n+\tif (domain == NULL) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: domain not found\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tpool = DLB_DOM_LIST_HEAD(domain->avail_dir_credit_pools, typeof(*pool));\n+\n+\t/* Verification should catch this. 
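The verify function above\n+\t * checked that avail_dir_credit_pools is non-empty. 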
*/\n+\tif (pool == NULL) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s():%d] Internal error: no available dir credit pools\\n\",\n+\t\t\t   __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdlb_configure_dir_credit_pool(hw, domain, args, pool);\n+\n+\t/* Configuration succeeded, so move the resource from the 'avail' to\n+\t * the 'used' list.\n+\t */\n+\tdlb_list_del(&domain->avail_dir_credit_pools, &pool->domain_list);\n+\n+\tdlb_list_add(&domain->used_dir_credit_pools, &pool->domain_list);\n+\n+\tresp->status = 0;\n+\tresp->id = pool->id;\n+\n+\treturn 0;\n+}\n+\n+static u32 dlb_ldb_cq_inflight_count(struct dlb_hw *hw,\n+\t\t\t\t     struct dlb_ldb_port *port)\n+{\n+\tunion dlb_lsp_cq_ldb_infl_cnt r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));\n+\n+\treturn r0.field.count;\n+}\n+\n+static u32 dlb_ldb_cq_token_count(struct dlb_hw *hw,\n+\t\t\t\t  struct dlb_ldb_port *port)\n+{\n+\tunion dlb_lsp_cq_ldb_tkn_cnt r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_TKN_CNT(port->id));\n+\n+\treturn r0.field.token_count;\n+}\n+\n+static int dlb_drain_ldb_cq(struct dlb_hw *hw, struct dlb_ldb_port *port)\n+{\n+\tu32 infl_cnt, tkn_cnt;\n+\tunsigned int i;\n+\n+\tinfl_cnt = dlb_ldb_cq_inflight_count(hw, port);\n+\n+\t/* Account for the initial token count, which is used in order to\n+\t * provide a CQ with depth less than 8.\n+\t */\n+\ttkn_cnt = dlb_ldb_cq_token_count(hw, port) - port->init_tkn_cnt;\n+\n+\tif (infl_cnt || tkn_cnt) {\n+\t\tstruct dlb_hcw hcw_mem[8], *hcw;\n+\t\tvoid  *pp_addr;\n+\n+\t\tpp_addr = os_map_producer_port(hw, port->id, true);\n+\n+\t\t/* Point hcw to a 64B-aligned location */\n+\t\thcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);\n+\n+\t\t/* Program the first HCW for a completion and token return and\n+\t\t * the other HCWs as NOOPS\n+\t\t */\n+\n+\t\tmemset(hcw, 0, 4 * sizeof(*hcw));\n+\t\thcw->qe_comp = (infl_cnt > 0);\n+\t\thcw->cq_token = (tkn_cnt > 0);\n+\t\thcw->lock_id = tkn_cnt - 1;\n+\n+\t\t/* Return tokens in the first HCW */\n+\t\tdlb_movdir64b(pp_addr, hcw);\n+\n+\t\thcw->cq_token = 0;\n+\n+\t\t/* Issue remaining completions (if any) */\n+\t\tfor (i = 1; i < infl_cnt; i++)\n+\t\t\tdlb_movdir64b(pp_addr, hcw);\n+\n+\t\tos_fence_hcw(hw, pp_addr);\n+\n+\t\tos_unmap_producer_port(hw, pp_addr);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int dlb_domain_drain_ldb_cqs(struct dlb_hw *hw,\n+\t\t\t\t    struct dlb_domain *domain,\n+\t\t\t\t    bool toggle_port)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\tint ret;\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tif (toggle_port)\n+\t\t\tdlb_ldb_port_cq_disable(hw, port);\n+\n+\t\tret = dlb_drain_ldb_cq(hw, port);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\n+\t\tif (toggle_port)\n+\t\t\tdlb_ldb_port_cq_enable(hw, port);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void dlb_domain_disable_ldb_queue_write_perms(struct dlb_hw *hw,\n+\t\t\t\t\t\t     struct dlb_domain *domain)\n+{\n+\tint domain_offset = domain->id * DLB_MAX_NUM_LDB_QUEUES;\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_sys_ldb_vasqid_v r0;\n+\tstruct dlb_ldb_queue *queue;\n+\n+\tr0.field.vasqid_v = 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tint idx = domain_offset + queue->id;\n+\n+\t\tDLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(idx), r0.val);\n+\t}\n+}\n+\n+static void 
dlb_domain_disable_ldb_seq_checks(struct dlb_hw *hw,\n+\t\t\t\t\t      struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_chp_sn_chk_enbl r1;\n+\tstruct dlb_ldb_port *port;\n+\n+\tr1.field.en = 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_SN_CHK_ENBL(port->id),\n+\t\t\t   r1.val);\n+}\n+\n+static void dlb_domain_disable_ldb_port_crd_updates(struct dlb_hw *hw,\n+\t\t\t\t\t\t    struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_chp_ldb_pp_crd_req_state r0;\n+\tstruct dlb_ldb_port *port;\n+\n+\tr0.field.no_pp_credit_update = 1;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),\n+\t\t\t   r0.val);\n+}\n+\n+static void dlb_domain_disable_ldb_port_interrupts(struct dlb_hw *hw,\n+\t\t\t\t\t\t   struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_chp_ldb_cq_int_enb r0 = { {0} };\n+\tunion dlb_chp_ldb_cq_wd_enb r1 = { {0} };\n+\tstruct dlb_ldb_port *port;\n+\n+\tr0.field.en_tim = 0;\n+\tr0.field.en_depth = 0;\n+\n+\tr1.field.wd_enable = 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_LDB_CQ_INT_ENB(port->id),\n+\t\t\t   r0.val);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_LDB_CQ_WD_ENB(port->id),\n+\t\t\t   r1.val);\n+\t}\n+}\n+\n+static void dlb_domain_disable_dir_queue_write_perms(struct dlb_hw *hw,\n+\t\t\t\t\t\t     struct dlb_domain *domain)\n+{\n+\tint domain_offset = domain->id * DLB_MAX_NUM_DIR_PORTS;\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_sys_dir_vasqid_v r0;\n+\tstruct dlb_dir_pq_pair *port;\n+\n+\tr0.field.vasqid_v = 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\tint idx = domain_offset + port->id;\n+\n+\t\tDLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(idx), r0.val);\n+\t}\n+}\n+\n+static void dlb_domain_disable_dir_port_interrupts(struct dlb_hw *hw,\n+\t\t\t\t\t\t   struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_chp_dir_cq_int_enb r0 = { {0} };\n+\tunion dlb_chp_dir_cq_wd_enb r1 = { {0} };\n+\tstruct dlb_dir_pq_pair *port;\n+\n+\tr0.field.en_tim = 0;\n+\tr0.field.en_depth = 0;\n+\n+\tr1.field.wd_enable = 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DIR_CQ_INT_ENB(port->id),\n+\t\t\t   r0.val);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DIR_CQ_WD_ENB(port->id),\n+\t\t\t   r1.val);\n+\t}\n+}\n+\n+static void dlb_domain_disable_dir_port_crd_updates(struct dlb_hw *hw,\n+\t\t\t\t\t\t    struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_chp_dir_pp_crd_req_state r0;\n+\tstruct dlb_dir_pq_pair *port;\n+\n+\tr0.field.no_pp_credit_update = 1;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),\n+\t\t\t   r0.val);\n+}\n+\n+static void dlb_domain_disable_dir_cqs(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *port;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\tport->enabled = false;\n+\n+\t\tdlb_dir_port_cq_disable(hw, port);\n+\t}\n+}\n+\n+static void dlb_domain_disable_ldb_cqs(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_domain 
*domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tport->enabled = false;\n+\n+\t\tdlb_ldb_port_cq_disable(hw, port);\n+\t}\n+}\n+\n+static void dlb_domain_enable_ldb_cqs(struct dlb_hw *hw,\n+\t\t\t\t      struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tport->enabled = true;\n+\n+\t\tdlb_ldb_port_cq_enable(hw, port);\n+\t}\n+}\n+\n+static struct dlb_ldb_queue *dlb_get_ldb_queue_from_id(struct dlb_hw *hw,\n+\t\t\t\t\t\t       u32 id)\n+{\n+\tif (id >= DLB_MAX_NUM_LDB_QUEUES)\n+\t\treturn NULL;\n+\n+\treturn &hw->rsrcs.ldb_queues[id];\n+}\n+\n+static void dlb_ldb_port_clear_has_work_bits(struct dlb_hw *hw,\n+\t\t\t\t\t     struct dlb_ldb_port *port,\n+\t\t\t\t\t     u8 slot)\n+{\n+\tunion dlb_lsp_ldb_sched_ctrl r2 = { {0} };\n+\n+\tr2.field.cq = port->id;\n+\tr2.field.qidix = slot;\n+\tr2.field.value = 0;\n+\tr2.field.rlist_haswork_v = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);\n+\n+\tmemset(&r2, 0, sizeof(r2));\n+\n+\tr2.field.cq = port->id;\n+\tr2.field.qidix = slot;\n+\tr2.field.value = 0;\n+\tr2.field.nalb_haswork_v = 1;\n+\n+\tDLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);\n+\n+\tdlb_flush_csr(hw);\n+}\n+\n+static void dlb_domain_finish_map_port(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_domain *domain,\n+\t\t\t\t       struct dlb_ldb_port *port)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tunion dlb_lsp_qid_ldb_infl_cnt r0;\n+\t\tstruct dlb_ldb_queue *queue;\n+\t\tint qid;\n+\n+\t\tif (port->qid_map[i].state != DLB_QUEUE_MAP_IN_PROGRESS)\n+\t\t\tcontinue;\n+\n+\t\tqid = port->qid_map[i].qid;\n+\n+\t\tqueue = dlb_get_ldb_queue_from_id(hw, qid);\n+\n+\t\tif (queue == NULL) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: unable to find queue %d\\n\",\n+\t\t\t\t   __func__, qid);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tr0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));\n+\n+\t\tif (r0.field.count)\n+\t\t\tcontinue;\n+\n+\t\t/* Disable the affected CQ, and the CQs already mapped to the\n+\t\t * QID, before reading the QID's inflight count a second time.\n+\t\t * There is an unlikely race in which the QID may schedule one\n+\t\t * more QE after we read an inflight count of 0, and disabling\n+\t\t * the CQs guarantees that the race will not occur after a\n+\t\t * re-read of the inflight count register.\n+\t\t */\n+\t\tif (port->enabled)\n+\t\t\tdlb_ldb_port_cq_disable(hw, port);\n+\n+\t\tdlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);\n+\n+\t\tr0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));\n+\n+\t\tif (r0.field.count) {\n+\t\t\tif (port->enabled)\n+\t\t\t\tdlb_ldb_port_cq_enable(hw, port);\n+\n+\t\t\tdlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);\n+\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tdlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);\n+\t}\n+}\n+\n+static unsigned int\n+dlb_domain_finish_map_qid_procedures(struct dlb_hw *hw,\n+\t\t\t\t     struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\n+\tif (!domain->configured || domain->num_pending_additions == 0)\n+\t\treturn 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)\n+\t\tdlb_domain_finish_map_port(hw, domain, port);\n+\n+\treturn domain->num_pending_additions;\n+}\n+\n+unsigned int 
dlb_finish_map_qid_procedures(struct dlb_hw *hw)\n+{\n+\tint i, num = 0;\n+\n+\t/* Finish queue map jobs for any domain that needs it */\n+\tfor (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {\n+\t\tstruct dlb_domain *domain = &hw->domains[i];\n+\n+\t\tnum += dlb_domain_finish_map_qid_procedures(hw, domain);\n+\t}\n+\n+\treturn num;\n+}\n+\n+\n+static int dlb_domain_wait_for_ldb_cqs_to_empty(struct dlb_hw *hw,\n+\t\t\t\t\t\tstruct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tint i;\n+\n+\t\tfor (i = 0; i < DLB_MAX_CQ_COMP_CHECK_LOOPS; i++) {\n+\t\t\tif (dlb_ldb_cq_inflight_count(hw, port) == 0)\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (i == DLB_MAX_CQ_COMP_CHECK_LOOPS) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: failed to flush load-balanced port %d's completions.\\n\",\n+\t\t\t\t   __func__, port->id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+\n+static void dlb_domain_finish_unmap_port_slot(struct dlb_hw *hw,\n+\t\t\t\t\t      struct dlb_domain *domain,\n+\t\t\t\t\t      struct dlb_ldb_port *port,\n+\t\t\t\t\t      int slot)\n+{\n+\tenum dlb_qid_map_state state;\n+\tstruct dlb_ldb_queue *queue;\n+\n+\tqueue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];\n+\n+\tstate = port->qid_map[slot].state;\n+\n+\t/* Update the QID2CQIDX and CQ2QID vectors */\n+\tdlb_ldb_port_unmap_qid(hw, port, queue);\n+\n+\t/* Ensure the QID will not be serviced by this {CQ, slot} by clearing\n+\t * the has_work bits\n+\t */\n+\tdlb_ldb_port_clear_has_work_bits(hw, port, slot);\n+\n+\t/* Reset the {CQ, slot} to its default state */\n+\tdlb_ldb_port_set_queue_if_status(hw, port, slot);\n+\n+\t/* Re-enable the CQ if it was not manually disabled by the user */\n+\tif (port->enabled)\n+\t\tdlb_ldb_port_cq_enable(hw, port);\n+\n+\t/* If there is a mapping that is pending this slot's removal, perform\n+\t * the mapping now.\n+\t */\n+\tif (state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP) {\n+\t\tstruct dlb_ldb_port_qid_map *map;\n+\t\tstruct dlb_ldb_queue *map_queue;\n+\t\tu8 prio;\n+\n+\t\tmap = &port->qid_map[slot];\n+\n+\t\tmap->qid = map->pending_qid;\n+\t\tmap->priority = map->pending_priority;\n+\n+\t\tmap_queue = &hw->rsrcs.ldb_queues[map->qid];\n+\t\tprio = map->priority;\n+\n+\t\tdlb_ldb_port_map_qid(hw, domain, port, map_queue, prio);\n+\t}\n+}\n+\n+static bool dlb_domain_finish_unmap_port(struct dlb_hw *hw,\n+\t\t\t\t\t struct dlb_domain *domain,\n+\t\t\t\t\t struct dlb_ldb_port *port)\n+{\n+\tunion dlb_lsp_cq_ldb_infl_cnt r0;\n+\tint i;\n+\n+\tif (port->num_pending_removals == 0)\n+\t\treturn false;\n+\n+\t/* The unmap requires all the CQ's outstanding inflights to be\n+\t * completed.\n+\t */\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));\n+\tif (r0.field.count > 0)\n+\t\treturn false;\n+\n+\tfor (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tstruct dlb_ldb_port_qid_map *map;\n+\n+\t\tmap = &port->qid_map[i];\n+\n+\t\tif (map->state != DLB_QUEUE_UNMAP_IN_PROGRESS &&\n+\t\t    map->state != DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP)\n+\t\t\tcontinue;\n+\n+\t\tdlb_domain_finish_unmap_port_slot(hw, domain, port, i);\n+\t}\n+\n+\treturn true;\n+}\n+\n+static unsigned int\n+dlb_domain_finish_unmap_qid_procedures(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\n+\tif (!domain->configured || domain->num_pending_removals 
== 0)\n+\t\treturn 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)\n+\t\tdlb_domain_finish_unmap_port(hw, domain, port);\n+\n+\treturn domain->num_pending_removals;\n+}\n+\n+unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw)\n+{\n+\tint i, num = 0;\n+\n+\t/* Finish queue unmap jobs for any domain that needs it */\n+\tfor (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {\n+\t\tstruct dlb_domain *domain = &hw->domains[i];\n+\n+\t\tnum += dlb_domain_finish_unmap_qid_procedures(hw, domain);\n+\t}\n+\n+\treturn num;\n+}\n+\n+/* Returns whether the queue is empty, including its inflight and replay\n+ * counts.\n+ */\n+static bool dlb_ldb_queue_is_empty(struct dlb_hw *hw,\n+\t\t\t\t   struct dlb_ldb_queue *queue)\n+{\n+\tunion dlb_lsp_qid_ldb_replay_cnt r0;\n+\tunion dlb_lsp_qid_aqed_active_cnt r1;\n+\tunion dlb_lsp_qid_atq_enqueue_cnt r2;\n+\tunion dlb_lsp_qid_ldb_enqueue_cnt r3;\n+\tunion dlb_lsp_qid_ldb_infl_cnt r4;\n+\n+\tr0.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_REPLAY_CNT(queue->id));\n+\tif (r0.val)\n+\t\treturn false;\n+\n+\tr1.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));\n+\tif (r1.val)\n+\t\treturn false;\n+\n+\tr2.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));\n+\tif (r2.val)\n+\t\treturn false;\n+\n+\tr3.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));\n+\tif (r3.val)\n+\t\treturn false;\n+\n+\tr4.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_INFL_CNT(queue->id));\n+\tif (r4.val)\n+\t\treturn false;\n+\n+\treturn true;\n+}\n+\n+static bool dlb_domain_mapped_queues_empty(struct dlb_hw *hw,\n+\t\t\t\t\t   struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_queue *queue;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tif (queue->num_mappings == 0)\n+\t\t\tcontinue;\n+\n+\t\tif (!dlb_ldb_queue_is_empty(hw, queue))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static int dlb_domain_drain_mapped_queues(struct dlb_hw *hw,\n+\t\t\t\t\t  struct dlb_domain *domain)\n+{\n+\tint i, ret;\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn 0;\n+\n+\tif (domain->num_pending_removals > 0) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: failed to unmap domain queues\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tfor (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n+\t\tret = dlb_domain_drain_ldb_cqs(hw, domain, true);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\n+\t\tif (dlb_domain_mapped_queues_empty(hw, domain))\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: failed to empty queues\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* Drain the CQs one more time. 
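Without this, the CQs could be\n+\t * left holding QEs that were scheduled during the final empty check. 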
For the queues to have gone empty,\n+\t * they must have scheduled one or more QEs.\n+\t */\n+\tret = dlb_domain_drain_ldb_cqs(hw, domain, true);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\treturn 0;\n+}\n+\n+static int dlb_domain_drain_unmapped_queue(struct dlb_hw *hw,\n+\t\t\t\t\t   struct dlb_domain *domain,\n+\t\t\t\t\t   struct dlb_ldb_queue *queue)\n+{\n+\tstruct dlb_ldb_port *port;\n+\tint ret;\n+\n+\t/* If a domain has LDB queues, it must have LDB ports */\n+\tif (dlb_list_empty(&domain->used_ldb_ports)) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: No configured LDB ports\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tport = DLB_DOM_LIST_HEAD(domain->used_ldb_ports, typeof(*port));\n+\n+\t/* If necessary, free up a QID slot in this CQ */\n+\tif (port->num_mappings == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {\n+\t\tstruct dlb_ldb_queue *mapped_queue;\n+\n+\t\tmapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];\n+\n+\t\tret = dlb_ldb_port_unmap_qid(hw, port, mapped_queue);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tret = dlb_ldb_port_map_qid_dynamic(hw, port, queue, 0);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\treturn dlb_domain_drain_mapped_queues(hw, domain);\n+}\n+\n+static int dlb_domain_drain_unmapped_queues(struct dlb_hw *hw,\n+\t\t\t\t\t    struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_queue *queue;\n+\tint ret;\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tif (queue->num_mappings != 0 ||\n+\t\t    dlb_ldb_queue_is_empty(hw, queue))\n+\t\t\tcontinue;\n+\n+\t\tret = dlb_domain_drain_unmapped_queue(hw, domain, queue);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int dlb_domain_wait_for_ldb_pool_refill(struct dlb_hw *hw,\n+\t\t\t\t\t       struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_credit_pool *pool;\n+\n+\t/* Confirm that all credits are returned to the domain's credit pools */\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {\n+\t\tunion dlb_chp_qed_fl_push_ptr r0;\n+\t\tunion dlb_chp_qed_fl_pop_ptr r1;\n+\t\tunsigned long pop_offs, push_offs;\n+\t\tint i;\n+\n+\t\tpush_offs = DLB_CHP_QED_FL_PUSH_PTR(pool->id);\n+\t\tpop_offs = DLB_CHP_QED_FL_POP_PTR(pool->id);\n+\n+\t\tfor (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n+\t\t\tr0.val = DLB_CSR_RD(hw, push_offs);\n+\n+\t\t\tr1.val = DLB_CSR_RD(hw, pop_offs);\n+\n+\t\t\t/* Break early if the freelist is replenished */\n+\t\t\tif (r1.field.pop_ptr == r0.field.push_ptr &&\n+\t\t\t    r1.field.generation != r0.field.generation) {\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Error if the freelist is not full */\n+\t\tif (r1.field.pop_ptr != r0.field.push_ptr ||\n+\t\t    r1.field.generation == r0.field.generation) {\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int dlb_domain_wait_for_dir_pool_refill(struct dlb_hw *hw,\n+\t\t\t\t\t       struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_credit_pool *pool;\n+\n+\t/* Confirm that all credits are returned to the domain's credit pools */\n+\tDLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {\n+\t\tunion dlb_chp_dqed_fl_push_ptr r0;\n+\t\tunion dlb_chp_dqed_fl_pop_ptr r1;\n+\t\tunsigned long pop_offs, push_offs;\n+\t\tint i;\n+\n+\t\tpush_offs = 
DLB_CHP_DQED_FL_PUSH_PTR(pool->id);\n+\t\tpop_offs = DLB_CHP_DQED_FL_POP_PTR(pool->id);\n+\n+\t\tfor (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n+\t\t\tr0.val = DLB_CSR_RD(hw, push_offs);\n+\n+\t\t\tr1.val = DLB_CSR_RD(hw, pop_offs);\n+\n+\t\t\t/* Break early if the freelist is replenished */\n+\t\t\tif (r1.field.pop_ptr == r0.field.push_ptr &&\n+\t\t\t    r1.field.generation != r0.field.generation) {\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Error if the freelist is not full */\n+\t\tif (r1.field.pop_ptr != r0.field.push_ptr ||\n+\t\t    r1.field.generation == r0.field.generation) {\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static u32 dlb_dir_queue_depth(struct dlb_hw *hw,\n+\t\t\t       struct dlb_dir_pq_pair *queue)\n+{\n+\tunion dlb_lsp_qid_dir_enqueue_cnt r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_QID_DIR_ENQUEUE_CNT(queue->id));\n+\n+\treturn r0.field.count;\n+}\n+\n+static bool dlb_dir_queue_is_empty(struct dlb_hw *hw,\n+\t\t\t\t   struct dlb_dir_pq_pair *queue)\n+{\n+\treturn dlb_dir_queue_depth(hw, queue) == 0;\n+}\n+\n+static bool dlb_domain_dir_queues_empty(struct dlb_hw *hw,\n+\t\t\t\t\tstruct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *queue;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n+\t\tif (!dlb_dir_queue_is_empty(hw, queue))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static u32 dlb_dir_cq_token_count(struct dlb_hw *hw,\n+\t\t\t\t  struct dlb_dir_pq_pair *port)\n+{\n+\tunion dlb_lsp_cq_dir_tkn_cnt r0;\n+\n+\tr0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_DIR_TKN_CNT(port->id));\n+\n+\treturn r0.field.count;\n+}\n+\n+static void dlb_drain_dir_cq(struct dlb_hw *hw, struct dlb_dir_pq_pair *port)\n+{\n+\tunsigned int port_id = port->id;\n+\tu32 cnt;\n+\n+\t/* Return any outstanding tokens */\n+\tcnt = dlb_dir_cq_token_count(hw, port);\n+\n+\tif (cnt != 0) {\n+\t\tstruct dlb_hcw hcw_mem[8], *hcw;\n+\t\tvoid  *pp_addr;\n+\n+\t\tpp_addr = os_map_producer_port(hw, port_id, false);\n+\n+\t\t/* Point hcw to a 64B-aligned location */\n+\t\thcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);\n+\n+\t\t/* Program the first HCW for a batch token return and\n+\t\t * the rest as NOOPS\n+\t\t */\n+\t\tmemset(hcw, 0, 4 * sizeof(*hcw));\n+\t\thcw->cq_token = 1;\n+\t\thcw->lock_id = cnt - 1;\n+\n+\t\tdlb_movdir64b(pp_addr, hcw);\n+\n+\t\tos_fence_hcw(hw, pp_addr);\n+\n+\t\tos_unmap_producer_port(hw, pp_addr);\n+\t}\n+}\n+\n+static int dlb_domain_drain_dir_cqs(struct dlb_hw *hw,\n+\t\t\t\t    struct dlb_domain *domain,\n+\t\t\t\t    bool toggle_port)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *port;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\t/* Can't drain a port if it's not configured, and there's\n+\t\t * nothing to drain if its queue is unconfigured.\n+\t\t */\n+\t\tif (!port->port_configured || !port->queue_configured)\n+\t\t\tcontinue;\n+\n+\t\tif (toggle_port)\n+\t\t\tdlb_dir_port_cq_disable(hw, port);\n+\n+\t\tdlb_drain_dir_cq(hw, port);\n+\n+\t\tif (toggle_port)\n+\t\t\tdlb_dir_port_cq_enable(hw, port);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int dlb_domain_drain_dir_queues(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_domain *domain)\n+{\n+\tint i;\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n+\t\tdlb_domain_drain_dir_cqs(hw, domain, true);\n+\n+\t\tif 
(dlb_domain_dir_queues_empty(hw, domain))\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: failed to empty queues\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* Drain the CQs one more time. For the queues to have gone empty, they\n+\t * must have scheduled one or more QEs.\n+\t */\n+\tdlb_domain_drain_dir_cqs(hw, domain, true);\n+\n+\treturn 0;\n+}\n+\n+static void dlb_domain_disable_dir_producer_ports(struct dlb_hw *hw,\n+\t\t\t\t\t\t  struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *port;\n+\tunion dlb_sys_dir_pp_v r1;\n+\n+\tr1.field.pp_v = 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_SYS_DIR_PP_V(port->id),\n+\t\t\t   r1.val);\n+}\n+\n+static void dlb_domain_disable_ldb_producer_ports(struct dlb_hw *hw,\n+\t\t\t\t\t\t  struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_sys_ldb_pp_v r1;\n+\tstruct dlb_ldb_port *port;\n+\n+\tr1.field.pp_v = 0;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_SYS_LDB_PP_V(port->id),\n+\t\t\t   r1.val);\n+\n+\t\thw->pf.num_enabled_ldb_ports--;\n+\t}\n+}\n+\n+static void dlb_domain_disable_dir_pools(struct dlb_hw *hw,\n+\t\t\t\t\t struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_sys_dir_pool_enbld r0 = { {0} };\n+\tstruct dlb_credit_pool *pool;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_SYS_DIR_POOL_ENBLD(pool->id),\n+\t\t\t   r0.val);\n+}\n+\n+static void dlb_domain_disable_ldb_pools(struct dlb_hw *hw,\n+\t\t\t\t\t struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tunion dlb_sys_ldb_pool_enbld r0 = { {0} };\n+\tstruct dlb_credit_pool *pool;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_SYS_LDB_POOL_ENBLD(pool->id),\n+\t\t\t   r0.val);\n+}\n+\n+static int dlb_reset_hw_resource(struct dlb_hw *hw, int type, int id)\n+{\n+\tunion dlb_cfg_mstr_diag_reset_sts r0 = { {0} };\n+\tunion dlb_cfg_mstr_bcast_reset_vf_start r1 = { {0} };\n+\tint i;\n+\n+\tr1.field.vf_reset_start = 1;\n+\n+\tr1.field.vf_reset_type = type;\n+\tr1.field.vf_reset_id = id;\n+\n+\tDLB_CSR_WR(hw, DLB_CFG_MSTR_BCAST_RESET_VF_START, r1.val);\n+\n+\t/* Wait for hardware to complete. 
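Each unit sets its\n+\t * *_vf_reset_done flag in DLB_CFG_MSTR_DIAG_RESET_STS when finished. 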
This is a finite time operation,\n+\t * but set a loop bound just in case.\n+\t */\n+\tfor (i = 0; i < 1024 * 1024; i++) {\n+\t\tr0.val = DLB_CSR_RD(hw, DLB_CFG_MSTR_DIAG_RESET_STS);\n+\n+\t\tif (r0.field.chp_vf_reset_done &&\n+\t\t    r0.field.rop_vf_reset_done &&\n+\t\t    r0.field.lsp_vf_reset_done &&\n+\t\t    r0.field.nalb_vf_reset_done &&\n+\t\t    r0.field.ap_vf_reset_done &&\n+\t\t    r0.field.dp_vf_reset_done &&\n+\t\t    r0.field.qed_vf_reset_done &&\n+\t\t    r0.field.dqed_vf_reset_done &&\n+\t\t    r0.field.aqed_vf_reset_done)\n+\t\t\treturn 0;\n+\n+\t\tos_udelay(1);\n+\t}\n+\n+\treturn -ETIMEDOUT;\n+}\n+\n+static int dlb_domain_reset_hw_resources(struct dlb_hw *hw,\n+\t\t\t\t\t struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *dir_port;\n+\tstruct dlb_ldb_queue *ldb_queue;\n+\tstruct dlb_ldb_port *ldb_port;\n+\tstruct dlb_credit_pool *pool;\n+\tint ret;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {\n+\t\tret = dlb_reset_hw_resource(hw,\n+\t\t\t\t\t    VF_RST_TYPE_POOL_LDB,\n+\t\t\t\t\t    pool->id);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {\n+\t\tret = dlb_reset_hw_resource(hw,\n+\t\t\t\t\t    VF_RST_TYPE_POOL_DIR,\n+\t\t\t\t\t    pool->id);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {\n+\t\tret = dlb_reset_hw_resource(hw,\n+\t\t\t\t\t    VF_RST_TYPE_QID_LDB,\n+\t\t\t\t\t    ldb_queue->id);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {\n+\t\tret = dlb_reset_hw_resource(hw,\n+\t\t\t\t\t    VF_RST_TYPE_QID_DIR,\n+\t\t\t\t\t    dir_port->id);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {\n+\t\tret = dlb_reset_hw_resource(hw,\n+\t\t\t\t\t    VF_RST_TYPE_CQ_LDB,\n+\t\t\t\t\t    ldb_port->id);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {\n+\t\tret = dlb_reset_hw_resource(hw,\n+\t\t\t\t\t    VF_RST_TYPE_CQ_DIR,\n+\t\t\t\t\t    dir_port->id);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int dlb_domain_verify_reset_success(struct dlb_hw *hw,\n+\t\t\t\t\t   struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *dir_port;\n+\tstruct dlb_ldb_port *ldb_port;\n+\tstruct dlb_credit_pool *pool;\n+\tstruct dlb_ldb_queue *queue;\n+\n+\t/* Confirm that all credits are returned to the domain's credit pools */\n+\tDLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {\n+\t\tunion dlb_chp_dqed_fl_pop_ptr r0;\n+\t\tunion dlb_chp_dqed_fl_push_ptr r1;\n+\n+\t\tr0.val = DLB_CSR_RD(hw,\n+\t\t\t\t    DLB_CHP_DQED_FL_POP_PTR(pool->id));\n+\n+\t\tr1.val = DLB_CSR_RD(hw,\n+\t\t\t\t    DLB_CHP_DQED_FL_PUSH_PTR(pool->id));\n+\n+\t\tif (r0.field.pop_ptr != r1.field.push_ptr ||\n+\t\t    r0.field.generation == r1.field.generation) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: failed to refill directed pool %d's credits.\\n\",\n+\t\t\t\t   __func__, pool->id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\t/* Confirm that all the domain's queues' inflight counts and AQED\n+\t * active counts are 0.\n+\t */\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tif (!dlb_ldb_queue_is_empty(hw, queue)) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: failed to empty ldb 
queue %d\\n\",\n+\t\t\t\t   __func__, queue->id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\t/* Confirm that all the domain's CQs inflight and token counts are 0. */\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {\n+\t\tif (dlb_ldb_cq_inflight_count(hw, ldb_port) ||\n+\t\t    dlb_ldb_cq_token_count(hw, ldb_port)) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: failed to empty ldb port %d\\n\",\n+\t\t\t\t   __func__, ldb_port->id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {\n+\t\tif (!dlb_dir_queue_is_empty(hw, dir_port)) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: failed to empty dir queue %d\\n\",\n+\t\t\t\t   __func__, dir_port->id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\n+\t\tif (dlb_dir_cq_token_count(hw, dir_port)) {\n+\t\t\tDLB_HW_ERR(hw,\n+\t\t\t\t   \"[%s()] Internal error: failed to empty dir port %d\\n\",\n+\t\t\t\t   __func__, dir_port->id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void __dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\t  struct dlb_ldb_port *port)\n+{\n+\tunion dlb_chp_ldb_pp_state_reset r0 = { {0} };\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),\n+\t\t   DLB_CHP_LDB_PP_CRD_REQ_STATE_RST);\n+\n+\t/* Reset the port's load-balanced and directed credit state */\n+\tr0.field.dir_type = 0;\n+\tr0.field.reset_pp_state = 1;\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_STATE_RESET(port->id),\n+\t\t   r0.val);\n+\n+\tr0.field.dir_type = 1;\n+\tr0.field.reset_pp_state = 1;\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_STATE_RESET(port->id),\n+\t\t   r0.val);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id),\n+\t\t   DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id),\n+\t\t   DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),\n+\t\t   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id),\n+\t\t   DLB_CHP_LDB_PP_LDB_CRD_LWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id),\n+\t\t   DLB_CHP_LDB_PP_LDB_CRD_HWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_LDB_PP2POOL(port->id),\n+\t\t   DLB_CHP_LDB_LDB_PP2POOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),\n+\t\t   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id),\n+\t\t   DLB_CHP_LDB_PP_DIR_CRD_LWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id),\n+\t\t   DLB_CHP_LDB_PP_DIR_CRD_HWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_DIR_PP2POOL(port->id),\n+\t\t   DLB_CHP_LDB_DIR_PP2POOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_PP2LDBPOOL(port->id),\n+\t\t   DLB_SYS_LDB_PP2LDBPOOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_PP2DIRPOOL(port->id),\n+\t\t   DLB_SYS_LDB_PP2DIRPOOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_HIST_LIST_LIM(port->id),\n+\t\t   DLB_CHP_HIST_LIST_LIM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_HIST_LIST_BASE(port->id),\n+\t\t   DLB_CHP_HIST_LIST_BASE_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_HIST_LIST_POP_PTR(port->id),\n+\t\t   DLB_CHP_HIST_LIST_POP_PTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_HIST_LIST_PUSH_PTR(port->id),\n+\t\t   DLB_CHP_HIST_LIST_PUSH_PTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_CQ_WPTR(port->id),\n+\t\t   
DLB_CHP_LDB_CQ_WPTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id),\n+\t\t   DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_CQ_TMR_THRESHOLD(port->id),\n+\t\t   DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_CQ_INT_ENB(port->id),\n+\t\t   DLB_CHP_LDB_CQ_INT_ENB_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_CQ_LDB_INFL_LIM(port->id),\n+\t\t   DLB_LSP_CQ_LDB_INFL_LIM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_CQ2PRIOV(port->id),\n+\t\t   DLB_LSP_CQ2PRIOV_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(port->id),\n+\t\t   DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),\n+\t\t   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),\n+\t\t   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_CQ_LDB_DSBL(port->id),\n+\t\t   DLB_LSP_CQ_LDB_DSBL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_CQ2VF_PF(port->id),\n+\t\t   DLB_SYS_LDB_CQ2VF_PF_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_PP2VF_PF(port->id),\n+\t\t   DLB_SYS_LDB_PP2VF_PF_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_CQ_ADDR_L(port->id),\n+\t\t   DLB_SYS_LDB_CQ_ADDR_L_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_CQ_ADDR_U(port->id),\n+\t\t   DLB_SYS_LDB_CQ_ADDR_U_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_PP_ADDR_L(port->id),\n+\t\t   DLB_SYS_LDB_PP_ADDR_L_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_PP_ADDR_U(port->id),\n+\t\t   DLB_SYS_LDB_PP_ADDR_U_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_PP_V(port->id),\n+\t\t   DLB_SYS_LDB_PP_V_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_PP2VAS(port->id),\n+\t\t   DLB_SYS_LDB_PP2VAS_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_LDB_CQ_ISR(port->id),\n+\t\t   DLB_SYS_LDB_CQ_ISR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_WBUF_LDB_FLAGS(port->id),\n+\t\t   DLB_SYS_WBUF_LDB_FLAGS_RST);\n+}\n+\n+static void __dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\t  struct dlb_dir_pq_pair *port)\n+{\n+\tunion dlb_chp_dir_pp_state_reset r0 = { {0} };\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),\n+\t\t   DLB_CHP_DIR_PP_CRD_REQ_STATE_RST);\n+\n+\t/* Reset the port's load-balanced and directed credit state */\n+\tr0.field.dir_type = 0;\n+\tr0.field.reset_pp_state = 1;\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_STATE_RESET(port->id),\n+\t\t   r0.val);\n+\n+\tr0.field.dir_type = 1;\n+\tr0.field.reset_pp_state = 1;\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_STATE_RESET(port->id),\n+\t\t   r0.val);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),\n+\t\t   DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),\n+\t\t   DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),\n+\t\t   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),\n+\t\t   DLB_CHP_DIR_PP_LDB_CRD_LWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),\n+\t\t   DLB_CHP_DIR_PP_LDB_CRD_HWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_LDB_PP2POOL(port->id),\n+\t\t   DLB_CHP_DIR_LDB_PP2POOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),\n+\t\t   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),\n+\t\t   
DLB_CHP_DIR_PP_DIR_CRD_LWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),\n+\t\t   DLB_CHP_DIR_PP_DIR_CRD_HWM_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_DIR_PP2POOL(port->id),\n+\t\t   DLB_CHP_DIR_DIR_PP2POOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_PP2LDBPOOL(port->id),\n+\t\t   DLB_SYS_DIR_PP2LDBPOOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_PP2DIRPOOL(port->id),\n+\t\t   DLB_SYS_DIR_PP2DIRPOOL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_CQ_WPTR(port->id),\n+\t\t   DLB_CHP_DIR_CQ_WPTR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),\n+\t\t   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),\n+\t\t   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_LSP_CQ_DIR_DSBL(port->id),\n+\t\t   DLB_LSP_CQ_DIR_DSBL_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id),\n+\t\t   DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_CQ_TMR_THRESHOLD(port->id),\n+\t\t   DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_CHP_DIR_CQ_INT_ENB(port->id),\n+\t\t   DLB_CHP_DIR_CQ_INT_ENB_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_CQ2VF_PF(port->id),\n+\t\t   DLB_SYS_DIR_CQ2VF_PF_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_PP2VF_PF(port->id),\n+\t\t   DLB_SYS_DIR_PP2VF_PF_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_CQ_ADDR_L(port->id),\n+\t\t   DLB_SYS_DIR_CQ_ADDR_L_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_CQ_ADDR_U(port->id),\n+\t\t   DLB_SYS_DIR_CQ_ADDR_U_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_PP_ADDR_L(port->id),\n+\t\t   DLB_SYS_DIR_PP_ADDR_L_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_PP_ADDR_U(port->id),\n+\t\t   DLB_SYS_DIR_PP_ADDR_U_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_PP_V(port->id),\n+\t\t   DLB_SYS_DIR_PP_V_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_PP2VAS(port->id),\n+\t\t   DLB_SYS_DIR_PP2VAS_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_DIR_CQ_ISR(port->id),\n+\t\t   DLB_SYS_DIR_CQ_ISR_RST);\n+\n+\tDLB_CSR_WR(hw,\n+\t\t   DLB_SYS_WBUF_DIR_FLAGS(port->id),\n+\t\t   DLB_SYS_WBUF_DIR_FLAGS_RST);\n+}\n+\n+static void dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\tstruct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *port;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)\n+\t\t__dlb_domain_reset_dir_port_registers(hw, port);\n+}\n+\n+static void dlb_domain_reset_ldb_queue_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\t struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_queue *queue;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_AQED_PIPE_FL_LIM(queue->id),\n+\t\t\t   DLB_AQED_PIPE_FL_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_AQED_PIPE_FL_BASE(queue->id),\n+\t\t\t   DLB_AQED_PIPE_FL_BASE_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_AQED_PIPE_FL_POP_PTR(queue->id),\n+\t\t\t   DLB_AQED_PIPE_FL_POP_PTR_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_AQED_PIPE_FL_PUSH_PTR(queue->id),\n+\t\t\t   DLB_AQED_PIPE_FL_PUSH_PTR_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_AQED_PIPE_QID_FID_LIM(queue->id),\n+\t\t\t   DLB_AQED_PIPE_QID_FID_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id),\n+\t\t\t 
  DLB_LSP_QID_AQED_ACTIVE_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_LSP_QID_LDB_INFL_LIM(queue->id),\n+\t\t\t   DLB_LSP_QID_LDB_INFL_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_SYS_LDB_QID_V(queue->id),\n+\t\t\t   DLB_SYS_LDB_QID_V_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_ORD_QID_SN(queue->id),\n+\t\t\t   DLB_CHP_ORD_QID_SN_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_ORD_QID_SN_MAP(queue->id),\n+\t\t\t   DLB_CHP_ORD_QID_SN_MAP_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_RO_PIPE_QID2GRPSLT(queue->id),\n+\t\t\t   DLB_RO_PIPE_QID2GRPSLT_RST);\n+\t}\n+}\n+\n+static void dlb_domain_reset_dir_queue_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\t struct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_dir_pq_pair *queue;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_SYS_DIR_QID_V(queue->id),\n+\t\t\t   DLB_SYS_DIR_QID_V_RST);\n+\t}\n+}\n+\n+static void dlb_domain_reset_ldb_pool_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\tstruct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_credit_pool *pool;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_LDB_POOL_CRD_LIM(pool->id),\n+\t\t\t   DLB_CHP_LDB_POOL_CRD_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),\n+\t\t\t   DLB_CHP_LDB_POOL_CRD_CNT_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_QED_FL_BASE(pool->id),\n+\t\t\t   DLB_CHP_QED_FL_BASE_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_QED_FL_LIM(pool->id),\n+\t\t\t   DLB_CHP_QED_FL_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_QED_FL_PUSH_PTR(pool->id),\n+\t\t\t   DLB_CHP_QED_FL_PUSH_PTR_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_QED_FL_POP_PTR(pool->id),\n+\t\t\t   DLB_CHP_QED_FL_POP_PTR_RST);\n+\t}\n+}\n+\n+static void dlb_domain_reset_dir_pool_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\tstruct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_credit_pool *pool;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DIR_POOL_CRD_LIM(pool->id),\n+\t\t\t   DLB_CHP_DIR_POOL_CRD_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),\n+\t\t\t   DLB_CHP_DIR_POOL_CRD_CNT_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DQED_FL_BASE(pool->id),\n+\t\t\t   DLB_CHP_DQED_FL_BASE_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DQED_FL_LIM(pool->id),\n+\t\t\t   DLB_CHP_DQED_FL_LIM_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DQED_FL_PUSH_PTR(pool->id),\n+\t\t\t   DLB_CHP_DQED_FL_PUSH_PTR_RST);\n+\n+\t\tDLB_CSR_WR(hw,\n+\t\t\t   DLB_CHP_DQED_FL_POP_PTR(pool->id),\n+\t\t\t   DLB_CHP_DQED_FL_POP_PTR_RST);\n+\t}\n+}\n+\n+static void dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,\n+\t\t\t\t\t\tstruct dlb_domain *domain)\n+{\n+\tstruct dlb_list_entry *iter;\n+\tRTE_SET_USED(iter);\n+\tstruct dlb_ldb_port *port;\n+\n+\tDLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)\n+\t\t__dlb_domain_reset_ldb_port_registers(hw, port);\n+}\n+\n+static void dlb_domain_reset_registers(struct dlb_hw *hw,\n+\t\t\t\t       struct dlb_domain *domain)\n+{\n+\tdlb_domain_reset_ldb_port_registers(hw, domain);\n+\n+\tdlb_domain_reset_dir_port_registers(hw, domain);\n+\n+\tdlb_domain_reset_ldb_queue_registers(hw, 
domain);\n+\n+\tdlb_domain_reset_dir_queue_registers(hw, domain);\n+\n+\tdlb_domain_reset_ldb_pool_registers(hw, domain);\n+\n+\tdlb_domain_reset_dir_pool_registers(hw, domain);\n+}\n+\n+static int dlb_domain_reset_software_state(struct dlb_hw *hw,\n+\t\t\t\t\t   struct dlb_domain *domain)\n+{\n+\tstruct dlb_ldb_queue *tmp_ldb_queue;\n+\tRTE_SET_USED(tmp_ldb_queue);\n+\tstruct dlb_dir_pq_pair *tmp_dir_port;\n+\tRTE_SET_USED(tmp_dir_port);\n+\tstruct dlb_ldb_port *tmp_ldb_port;\n+\tRTE_SET_USED(tmp_ldb_port);\n+\tstruct dlb_credit_pool *tmp_pool;\n+\tRTE_SET_USED(tmp_pool);\n+\tstruct dlb_list_entry *iter1;\n+\tRTE_SET_USED(iter1);\n+\tstruct dlb_list_entry *iter2;\n+\tRTE_SET_USED(iter2);\n+\tstruct dlb_ldb_queue *ldb_queue;\n+\tstruct dlb_dir_pq_pair *dir_port;\n+\tstruct dlb_ldb_port *ldb_port;\n+\tstruct dlb_credit_pool *pool;\n+\n+\tstruct dlb_function_resources *rsrcs;\n+\tstruct dlb_list_head *list;\n+\tint ret;\n+\n+\trsrcs = domain->parent_func;\n+\n+\t/* Move the domain's ldb queues to the function's avail list */\n+\tlist = &domain->used_ldb_queues;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {\n+\t\tif (ldb_queue->sn_cfg_valid) {\n+\t\t\tstruct dlb_sn_group *grp;\n+\n+\t\t\tgrp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];\n+\n+\t\t\tdlb_sn_group_free_slot(grp, ldb_queue->sn_slot);\n+\t\t\tldb_queue->sn_cfg_valid = false;\n+\t\t}\n+\n+\t\tldb_queue->owned = false;\n+\t\tldb_queue->num_mappings = 0;\n+\t\tldb_queue->num_pending_additions = 0;\n+\n+\t\tdlb_list_del(&domain->used_ldb_queues, &ldb_queue->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_ldb_queues, &ldb_queue->func_list);\n+\t\trsrcs->num_avail_ldb_queues++;\n+\t}\n+\n+\tlist = &domain->avail_ldb_queues;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {\n+\t\tldb_queue->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_ldb_queues,\n+\t\t\t     &ldb_queue->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_ldb_queues,\n+\t\t\t     &ldb_queue->func_list);\n+\t\trsrcs->num_avail_ldb_queues++;\n+\t}\n+\n+\t/* Move the domain's ldb ports to the function's avail list */\n+\tlist = &domain->used_ldb_ports;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {\n+\t\tint i;\n+\n+\t\tldb_port->owned = false;\n+\t\tldb_port->configured = false;\n+\t\tldb_port->num_pending_removals = 0;\n+\t\tldb_port->num_mappings = 0;\n+\t\tfor (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)\n+\t\t\tldb_port->qid_map[i].state = DLB_QUEUE_UNMAPPED;\n+\n+\t\tdlb_list_del(&domain->used_ldb_ports, &ldb_port->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);\n+\t\trsrcs->num_avail_ldb_ports++;\n+\t}\n+\n+\tlist = &domain->avail_ldb_ports;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {\n+\t\tldb_port->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_ldb_ports, &ldb_port->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);\n+\t\trsrcs->num_avail_ldb_ports++;\n+\t}\n+\n+\t/* Move the domain's dir ports to the function's avail list */\n+\tlist = &domain->used_dir_pq_pairs;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {\n+\t\tdir_port->owned = false;\n+\t\tdir_port->port_configured = false;\n+\n+\t\tdlb_list_del(&domain->used_dir_pq_pairs,\n+\t\t\t     &dir_port->domain_list);\n+\n+\t\tdlb_list_add(&rsrcs->avail_dir_pq_pairs,\n+\t\t\t     &dir_port->func_list);\n+\t\trsrcs->num_avail_dir_pq_pairs++;\n+\t}\n+\n+\tlist = 
&domain->avail_dir_pq_pairs;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {\n+\t\tdir_port->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_dir_pq_pairs,\n+\t\t\t     &dir_port->domain_list);\n+\n+\t\tdlb_list_add(&rsrcs->avail_dir_pq_pairs,\n+\t\t\t     &dir_port->func_list);\n+\t\trsrcs->num_avail_dir_pq_pairs++;\n+\t}\n+\n+\t/* Return hist list entries to the function */\n+\tret = dlb_bitmap_set_range(rsrcs->avail_hist_list_entries,\n+\t\t\t\t   domain->hist_list_entry_base,\n+\t\t\t\t   domain->total_hist_list_entries);\n+\tif (ret) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: domain hist list base does not match the function's bitmap.\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdomain->total_hist_list_entries = 0;\n+\tdomain->avail_hist_list_entries = 0;\n+\tdomain->hist_list_entry_base = 0;\n+\tdomain->hist_list_entry_offset = 0;\n+\n+\t/* Return QED entries to the function */\n+\tret = dlb_bitmap_set_range(rsrcs->avail_qed_freelist_entries,\n+\t\t\t\t   domain->qed_freelist.base,\n+\t\t\t\t   (domain->qed_freelist.bound -\n+\t\t\t\t\tdomain->qed_freelist.base));\n+\tif (ret) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: domain QED base does not match the function's bitmap.\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdomain->qed_freelist.base = 0;\n+\tdomain->qed_freelist.bound = 0;\n+\tdomain->qed_freelist.offset = 0;\n+\n+\t/* Return DQED entries back to the function */\n+\tret = dlb_bitmap_set_range(rsrcs->avail_dqed_freelist_entries,\n+\t\t\t\t   domain->dqed_freelist.base,\n+\t\t\t\t   (domain->dqed_freelist.bound -\n+\t\t\t\t\tdomain->dqed_freelist.base));\n+\tif (ret) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: domain DQED base does not match the function's bitmap.\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdomain->dqed_freelist.base = 0;\n+\tdomain->dqed_freelist.bound = 0;\n+\tdomain->dqed_freelist.offset = 0;\n+\n+\t/* Return AQED entries back to the function */\n+\tret = dlb_bitmap_set_range(rsrcs->avail_aqed_freelist_entries,\n+\t\t\t\t   domain->aqed_freelist.base,\n+\t\t\t\t   (domain->aqed_freelist.bound -\n+\t\t\t\t\tdomain->aqed_freelist.base));\n+\tif (ret) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: domain AQED base does not match the function's bitmap.\\n\",\n+\t\t\t   __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tdomain->aqed_freelist.base = 0;\n+\tdomain->aqed_freelist.bound = 0;\n+\tdomain->aqed_freelist.offset = 0;\n+\n+\t/* Return ldb credit pools back to the function's avail list */\n+\tlist = &domain->used_ldb_credit_pools;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {\n+\t\tpool->owned = false;\n+\t\tpool->configured = false;\n+\n+\t\tdlb_list_del(&domain->used_ldb_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_ldb_credit_pools,\n+\t\t\t     &pool->func_list);\n+\t\trsrcs->num_avail_ldb_credit_pools++;\n+\t}\n+\n+\tlist = &domain->avail_ldb_credit_pools;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {\n+\t\tpool->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_ldb_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_ldb_credit_pools,\n+\t\t\t     &pool->func_list);\n+\t\trsrcs->num_avail_ldb_credit_pools++;\n+\t}\n+\n+\t/* Move dir credit pools back to the function */\n+\tlist = &domain->used_dir_credit_pools;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {\n+\t\tpool->owned = 
false;\n+\t\tpool->configured = false;\n+\n+\t\tdlb_list_del(&domain->used_dir_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_dir_credit_pools,\n+\t\t\t     &pool->func_list);\n+\t\trsrcs->num_avail_dir_credit_pools++;\n+\t}\n+\n+\tlist = &domain->avail_dir_credit_pools;\n+\tDLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {\n+\t\tpool->owned = false;\n+\n+\t\tdlb_list_del(&domain->avail_dir_credit_pools,\n+\t\t\t     &pool->domain_list);\n+\t\tdlb_list_add(&rsrcs->avail_dir_credit_pools,\n+\t\t\t     &pool->func_list);\n+\t\trsrcs->num_avail_dir_credit_pools++;\n+\t}\n+\n+\tdomain->num_pending_removals = 0;\n+\tdomain->num_pending_additions = 0;\n+\tdomain->configured = false;\n+\tdomain->started = false;\n+\n+\t/* Move the domain out of the used_domains list and back to the\n+\t * function's avail_domains list.\n+\t */\n+\tdlb_list_del(&rsrcs->used_domains, &domain->func_list);\n+\tdlb_list_add(&rsrcs->avail_domains, &domain->func_list);\n+\trsrcs->num_avail_domains++;\n+\n+\treturn 0;\n+}\n+\n+static void dlb_log_reset_domain(struct dlb_hw *hw, u32 domain_id)\n+{\n+\tDLB_HW_INFO(hw, \"DLB reset domain:\\n\");\n+\tDLB_HW_INFO(hw, \"\\tDomain ID: %d\\n\", domain_id);\n+}\n+\n+/**\n+ * dlb_reset_domain() - Reset a DLB scheduling domain and its associated\n+ *\thardware resources.\n+ * @hw:\t  Contains the current state of the DLB hardware.\n+ * @domain_id: ID of the scheduling domain to reset.\n+ *\n+ * Note: User software *must* stop sending to this domain's producer ports\n+ * before invoking this function, otherwise undefined behavior will result.\n+ *\n+ * Return: returns < 0 on error, 0 otherwise.\n+ */\n+int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id)\n+{\n+\tstruct dlb_domain *domain;\n+\tint ret;\n+\n+\tdlb_log_reset_domain(hw, domain_id);\n+\n+\tdomain = dlb_get_domain_from_id(hw, domain_id);\n+\n+\tif (domain == NULL || !domain->configured)\n+\t\treturn -EINVAL;\n+\n+\t/* For each queue owned by this domain, disable its write permissions to\n+\t * cause any traffic sent to it to be dropped. Well-behaved software\n+\t * should not be sending QEs at this point.\n+\t */\n+\tdlb_domain_disable_dir_queue_write_perms(hw, domain);\n+\n+\tdlb_domain_disable_ldb_queue_write_perms(hw, domain);\n+\n+\t/* Disable credit updates and turn off completion tracking on all the\n+\t * domain's PPs.\n+\t */\n+\tdlb_domain_disable_dir_port_crd_updates(hw, domain);\n+\n+\tdlb_domain_disable_ldb_port_crd_updates(hw, domain);\n+\n+\tdlb_domain_disable_dir_port_interrupts(hw, domain);\n+\n+\tdlb_domain_disable_ldb_port_interrupts(hw, domain);\n+\n+\tdlb_domain_disable_ldb_seq_checks(hw, domain);\n+\n+\t/* Disable the LDB CQs and drain them in order to complete the map and\n+\t * unmap procedures, which require zero CQ inflights and zero QID\n+\t * inflights respectively.\n+\t */\n+\tdlb_domain_disable_ldb_cqs(hw, domain);\n+\n+\tret = dlb_domain_drain_ldb_cqs(hw, domain, false);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_domain_wait_for_ldb_cqs_to_empty(hw, domain);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_domain_finish_unmap_qid_procedures(hw, domain);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_domain_finish_map_qid_procedures(hw, domain);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\t/* Re-enable the CQs in order to drain the mapped queues. 
*/\n+\tdlb_domain_enable_ldb_cqs(hw, domain);\n+\n+\tret = dlb_domain_drain_mapped_queues(hw, domain);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_domain_drain_unmapped_queues(hw, domain);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tret = dlb_domain_wait_for_ldb_pool_refill(hw, domain);\n+\tif (ret) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: LDB credits failed to refill\\n\",\n+\t\t\t   __func__);\n+\t\treturn ret;\n+\t}\n+\n+\t/* Done draining LDB QEs, so disable the CQs. */\n+\tdlb_domain_disable_ldb_cqs(hw, domain);\n+\n+\t/* Directed queues are reset in dlb_domain_reset_hw_resources(), but\n+\t * that process does not decrement the directed queue size counters used\n+\t * by SMON for its average DQED depth measurement. So, we manually drain\n+\t * the directed queues here.\n+\t */\n+\tdlb_domain_drain_dir_queues(hw, domain);\n+\n+\tret = dlb_domain_wait_for_dir_pool_refill(hw, domain);\n+\tif (ret) {\n+\t\tDLB_HW_ERR(hw,\n+\t\t\t   \"[%s()] Internal error: DIR credits failed to refill\\n\",\n+\t\t\t   __func__);\n+\t\treturn ret;\n+\t}\n+\n+\t/* Done draining DIR QEs, so disable the CQs. */\n+\tdlb_domain_disable_dir_cqs(hw, domain);\n+\n+\tdlb_domain_disable_dir_producer_ports(hw, domain);\n+\n+\tdlb_domain_disable_ldb_producer_ports(hw, domain);\n+\n+\tdlb_domain_disable_dir_pools(hw, domain);\n+\n+\tdlb_domain_disable_ldb_pools(hw, domain);\n+\n+\t/* Reset the QID, credit pool, and CQ hardware.\n+\t *\n+\t * Note: DLB 1.0 A0 h/w does not disarm CQ interrupts during sched\n+\t * domain reset.\n+\t * A spurious interrupt can occur on subsequent use of a reset CQ.\n+\t */\n+\tret = dlb_domain_reset_hw_resources(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = dlb_domain_verify_reset_success(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tdlb_domain_reset_registers(hw, domain);\n+\n+\t/* Hardware reset complete. 
Reset the domain's software state */\n+\tret = dlb_domain_reset_software_state(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\treturn 0;\n+}\n+\n+void dlb_hw_get_num_resources(struct dlb_hw *hw,\n+\t\t\t      struct dlb_get_num_resources_args *arg)\n+{\n+\tstruct dlb_function_resources *rsrcs;\n+\tstruct dlb_bitmap *map;\n+\n+\trsrcs = &hw->pf;\n+\n+\targ->num_sched_domains = rsrcs->num_avail_domains;\n+\n+\targ->num_ldb_queues = rsrcs->num_avail_ldb_queues;\n+\n+\targ->num_ldb_ports = rsrcs->num_avail_ldb_ports;\n+\n+\targ->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;\n+\n+\tmap = rsrcs->avail_aqed_freelist_entries;\n+\n+\targ->num_atomic_inflights = dlb_bitmap_count(map);\n+\n+\targ->max_contiguous_atomic_inflights =\n+\t\tdlb_bitmap_longest_set_range(map);\n+\n+\tmap = rsrcs->avail_hist_list_entries;\n+\n+\targ->num_hist_list_entries = dlb_bitmap_count(map);\n+\n+\targ->max_contiguous_hist_list_entries =\n+\t\tdlb_bitmap_longest_set_range(map);\n+\n+\tmap = rsrcs->avail_qed_freelist_entries;\n+\n+\targ->num_ldb_credits = dlb_bitmap_count(map);\n+\n+\targ->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);\n+\n+\tmap = rsrcs->avail_dqed_freelist_entries;\n+\n+\targ->num_dir_credits = dlb_bitmap_count(map);\n+\n+\targ->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);\n+\n+\targ->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;\n+\n+\targ->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;\n+}\n+\n void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)\n {\n \tunion dlb_sys_sys_alarm_int_enable r0;\ndiff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c\nindex 7fc85e9..57a150c 100644\n--- a/drivers/event/dlb/pf/dlb_pf.c\n+++ b/drivers/event/dlb/pf/dlb_pf.c\n@@ -78,6 +78,17 @@ dlb_pf_open(struct dlb_hw_dev *handle, const char *name)\n \treturn 0;\n }\n \n+static void\n+dlb_pf_domain_close(struct dlb_eventdev *dlb)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;\n+\tint ret;\n+\n+\tret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);\n+\tif (ret)\n+\t\tDLB_LOG_ERR(\"dlb_pf_reset_domain err %d\", ret);\n+}\n+\n static int\n dlb_pf_get_device_version(struct dlb_hw_dev *handle,\n \t\t\t  uint8_t *revision)\n@@ -101,6 +112,79 @@ dlb_pf_get_num_resources(struct dlb_hw_dev *handle,\n }\n \n static int\n+dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,\n+\t\t\t   struct dlb_create_sched_domain_args *arg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tif (dlb_dev->domain_reset_failed) {\n+\t\tresponse.status = DLB_ST_DOMAIN_RESET_FAILED;\n+\t\tret = -EINVAL;\n+\t\tgoto done;\n+\t}\n+\n+\tret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);\n+\tif (ret)\n+\t\tgoto done;\n+\n+done:\n+\n+\t*(struct dlb_cmd_response *)arg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,\n+\t\t\t      struct dlb_create_ldb_pool_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_create_ldb_pool(&dlb_dev->hw,\n+\t\t\t\t     handle->domain_id,\n+\t\t\t\t     cfg,\n+\t\t\t\t     &response);\n+\n+\t*(struct dlb_cmd_response 
*)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,\n+\t\t\t      struct dlb_create_dir_pool_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_create_dir_pool(&dlb_dev->hw,\n+\t\t\t\t     handle->domain_id,\n+\t\t\t\t     cfg,\n+\t\t\t\t     &response);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,\n \t\t\tenum dlb_cq_poll_modes *mode)\n {\n@@ -119,8 +203,12 @@ dlb_pf_iface_fn_ptrs_init(void)\n {\n \tdlb_iface_low_level_io_init = dlb_pf_low_level_io_init;\n \tdlb_iface_open = dlb_pf_open;\n+\tdlb_iface_domain_close = dlb_pf_domain_close;\n \tdlb_iface_get_device_version = dlb_pf_get_device_version;\n \tdlb_iface_get_num_resources = dlb_pf_get_num_resources;\n+\tdlb_iface_sched_domain_create = dlb_pf_sched_domain_create;\n+\tdlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;\n+\tdlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;\n \tdlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;\n }\n \n",
    "prefixes": [
        "v11",
        "10/23"
    ]
}
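
A note on the wait loops in the patch above: dlb_reset_hw_resource() and the VF reset-status poll both read a status register in a loop with a one-microsecond delay and a fixed iteration cap, so a wedged device produces -ETIMEDOUT rather than hanging the caller. Below is a minimal, self-contained sketch of that bounded-poll idiom. The wait_for_done() helper, its poll_status callback, and the use of usleep() in place of os_udelay() are illustrative assumptions for a POSIX host, not part of the DLB PMD; the 1024 * 1024 cap mirrors the loop bound used in the patch.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Poll until poll_status() reports completion. The operation is expected
 * to finish in finite time, but the loop bound guards against a hung
 * device: ~2^20 polls at ~1 us apiece is on the order of one second.
 */
static int
wait_for_done(bool (*poll_status)(void *ctx), void *ctx)
{
	int i;

	for (i = 0; i < 1024 * 1024; i++) {
		if (poll_status(ctx))
			return 0;

		usleep(1); /* stands in for os_udelay(1) in the patch */
	}

	return -ETIMEDOUT;
}

/* Trivial usage example: a countdown standing in for a status register. */
static bool
fake_status(void *ctx)
{
	int *remaining = ctx;

	return --(*remaining) <= 0;
}

int
main(void)
{
	int countdown = 5;

	printf("wait_for_done returned %d\n",
	       wait_for_done(fake_status, &countdown));
	return 0;
}

The same shape appears twice in the patch because the two hardware conditions differ (per-resource reset acknowledgment versus the aggregate per-unit VF reset-done flags) while the timeout policy is identical.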