get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.
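
For reference, a minimal sketch of exercising these methods from Python, assuming the third-party requests library. The URL and field names ("name", "state", "mbox") are taken from the response shown below; API_TOKEN is a hypothetical placeholder, since PUT/PATCH require a real Patchwork API token with maintainer rights on the project.

    import requests

    url = "https://patches.dpdk.org/api/patches/91492/"

    # get: show a patch (no authentication needed for public projects).
    patch = requests.get(url).json()
    print(patch["name"], "->", patch["state"])

    # Download the patch mbox so it can be applied locally.
    mbox = requests.get(patch["mbox"])
    with open("patch.mbox", "wb") as f:
        f.write(mbox.content)

    # patch: partially update a patch, e.g. change its state.  Patchwork
    # accepts token auth ("Authorization: Token <key>"); the value below
    # is a placeholder, not a real credential.
    API_TOKEN = "0000000000000000000000000000000000000000"
    resp = requests.patch(
        url,
        headers={"Authorization": f"Token {API_TOKEN}"},
        json={"state": "superseded"},
    )
    resp.raise_for_status()

The saved patch.mbox can then be applied to a checkout with git am patch.mbox.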

GET /api/patches/91492/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 91492,
    "url": "https://patches.dpdk.org/api/patches/91492/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1618451359-20693-8-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1618451359-20693-8-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1618451359-20693-8-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2021-04-15T01:48:59",
    "name": "[v4,07/27] event/dlb2: add v2.5 domain reset",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "aa23978d4accd8ddca057fa8159915b030e2dfdb",
    "submitter": {
        "id": 826,
        "url": "https://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1618451359-20693-8-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 16383,
            "url": "https://patches.dpdk.org/api/series/16383/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=16383",
            "date": "2021-04-15T01:48:52",
            "name": "Add DLB v2.5",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/16383/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/91492/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/91492/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B4EB2A0A02;\n\tThu, 15 Apr 2021 03:51:38 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A5233161E43;\n\tThu, 15 Apr 2021 03:50:44 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n by mails.dpdk.org (Postfix) with ESMTP id 0FFE8161E0B\n for <dev@dpdk.org>; Thu, 15 Apr 2021 03:50:35 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 14 Apr 2021 18:50:34 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by orsmga003.jf.intel.com with ESMTP; 14 Apr 2021 18:50:34 -0700"
        ],
        "IronPort-SDR": [
            "\n SHDBDwC1edk0ia2PfOQwm3Ah9W/vi+XyyuJlBrCHPDZlM/TT0DcBYb+MIKwiJZrWvo7JwxGqV1\n aRG23sl3HZxg==",
            "\n AdWiwcr0xU72khzq1qFU+66AZYqRlACuz8UZHwbVddtYdI0x+pXxI6Vy4MUXLmXR+0/noseziL\n cAhZUi0RLbfQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,9954\"; a=\"215272800\"",
            "E=Sophos;i=\"5.82,223,1613462400\"; d=\"scan'208\";a=\"215272800\"",
            "E=Sophos;i=\"5.82,223,1613462400\"; d=\"scan'208\";a=\"382569832\""
        ],
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, harry.van.haaren@intel.com,\n jerinj@marvell.com, thomas@monjalon.net",
        "Date": "Wed, 14 Apr 2021 20:48:59 -0500",
        "Message-Id": "<1618451359-20693-8-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1618451359-20693-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<20210316221857.2254-2-timothy.mcdaniel@intel.com>\n <1618451359-20693-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v4 07/27] event/dlb2: add v2.5 domain reset",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Reset hardware registers, consumer queues, ports,\ninterrupts and software. Queues must also be drained\nas part of the reset process.\n\nThe logic is very similar to what was done for v2.0,\nbut the new combined register map for v2.0 and v2.5\nuses new register names and bit names.  Additionally,\nnew register access macros are used so that the code\ncan perform the correct action, based on the hardware\nversion, v2.0 or v2.5.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\n---\n .../event/dlb2/pf/base/dlb2_hw_types_new.h    |    1 +\n drivers/event/dlb2/pf/base/dlb2_resource.c    | 1494 ----------\n .../event/dlb2/pf/base/dlb2_resource_new.c    | 2562 +++++++++++++++++\n 3 files changed, 2563 insertions(+), 1494 deletions(-)",
    "diff": "diff --git a/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h b/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h\nindex 4a4185acd..4a6037775 100644\n--- a/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h\n+++ b/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h\n@@ -181,6 +181,7 @@ struct dlb2_ldb_port {\n \tu32 hist_list_entry_base;\n \tu32 hist_list_entry_limit;\n \tu32 ref_cnt;\n+\tu8 cq_depth;\n \tu8 init_tkn_cnt;\n \tu8 num_pending_removals;\n \tu8 num_mappings;\ndiff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c\nindex 99c3d031d..041aeaeee 100644\n--- a/drivers/event/dlb2/pf/base/dlb2_resource.c\n+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c\n@@ -65,69 +65,6 @@ static inline void dlb2_flush_csr(struct dlb2_hw *hw)\n \tDLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);\n }\n \n-static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,\n-\t\t\t\t     struct dlb2_dir_pq_pair *port)\n-{\n-\tunion dlb2_lsp_cq_dir_dsbl reg;\n-\n-\treg.field.disabled = 1;\n-\n-\tDLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);\n-\n-\tdlb2_flush_csr(hw);\n-}\n-\n-static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,\n-\t\t\t\t   struct dlb2_dir_pq_pair *port)\n-{\n-\tunion dlb2_lsp_cq_dir_tkn_cnt r0;\n-\n-\tr0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));\n-\n-\t/*\n-\t * Account for the initial token count, which is used in order to\n-\t * provide a CQ with depth less than 8.\n-\t */\n-\n-\treturn r0.field.count - port->init_tkn_cnt;\n-}\n-\n-static int dlb2_drain_dir_cq(struct dlb2_hw *hw,\n-\t\t\t     struct dlb2_dir_pq_pair *port)\n-{\n-\tunsigned int port_id = port->id.phys_id;\n-\tu32 cnt;\n-\n-\t/* Return any outstanding tokens */\n-\tcnt = dlb2_dir_cq_token_count(hw, port);\n-\n-\tif (cnt != 0) {\n-\t\tstruct dlb2_hcw hcw_mem[8], *hcw;\n-\t\tvoid  *pp_addr;\n-\n-\t\tpp_addr = os_map_producer_port(hw, port_id, false);\n-\n-\t\t/* Point hcw to a 64B-aligned location */\n-\t\thcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);\n-\n-\t\t/*\n-\t\t * Program the first HCW for a batch token return and\n-\t\t * the rest as NOOPS\n-\t\t */\n-\t\tmemset(hcw, 0, 4 * sizeof(*hcw));\n-\t\thcw->cq_token = 1;\n-\t\thcw->lock_id = cnt - 1;\n-\n-\t\tdlb2_movdir64b(pp_addr, hcw);\n-\n-\t\tos_fence_hcw(hw, pp_addr);\n-\n-\t\tos_unmap_producer_port(hw, pp_addr);\n-\t}\n-\n-\treturn 0;\n-}\n-\n static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,\n \t\t\t\t    struct dlb2_dir_pq_pair *port)\n {\n@@ -140,37 +77,6 @@ static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,\n \tdlb2_flush_csr(hw);\n }\n \n-static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,\n-\t\t\t\t     struct dlb2_hw_domain *domain,\n-\t\t\t\t     bool toggle_port)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *port;\n-\tint ret;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n-\t\t/*\n-\t\t * Can't drain a port if it's not configured, and there's\n-\t\t * nothing to drain if its queue is unconfigured.\n-\t\t */\n-\t\tif (!port->port_configured || !port->queue_configured)\n-\t\t\tcontinue;\n-\n-\t\tif (toggle_port)\n-\t\t\tdlb2_dir_port_cq_disable(hw, port);\n-\n-\t\tret = dlb2_drain_dir_cq(hw, port);\n-\t\tif (ret < 0)\n-\t\t\treturn ret;\n-\n-\t\tif (toggle_port)\n-\t\t\tdlb2_dir_port_cq_enable(hw, port);\n-\t}\n-\n-\treturn 0;\n-}\n-\n static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,\n \t\t\t\tstruct dlb2_dir_pq_pair *queue)\n {\n@@ -182,63 +88,6 @@ static u32 dlb2_dir_queue_depth(struct dlb2_hw 
*hw,\n \treturn r0.field.count;\n }\n \n-static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,\n-\t\t\t\t    struct dlb2_dir_pq_pair *queue)\n-{\n-\treturn dlb2_dir_queue_depth(hw, queue) == 0;\n-}\n-\n-static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,\n-\t\t\t\t\t struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *queue;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n-\t\tif (!dlb2_dir_queue_is_empty(hw, queue))\n-\t\t\treturn false;\n-\t}\n-\n-\treturn true;\n-}\n-\n-static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,\n-\t\t\t\t\tstruct dlb2_hw_domain *domain)\n-{\n-\tint i, ret;\n-\n-\t/* If the domain hasn't been started, there's no traffic to drain */\n-\tif (!domain->started)\n-\t\treturn 0;\n-\n-\tfor (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n-\t\tret = dlb2_domain_drain_dir_cqs(hw, domain, true);\n-\t\tif (ret < 0)\n-\t\t\treturn ret;\n-\n-\t\tif (dlb2_domain_dir_queues_empty(hw, domain))\n-\t\t\tbreak;\n-\t}\n-\n-\tif (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {\n-\t\tDLB2_HW_ERR(hw,\n-\t\t\t    \"[%s()] Internal error: failed to empty queues\\n\",\n-\t\t\t    __func__);\n-\t\treturn -EFAULT;\n-\t}\n-\n-\t/*\n-\t * Drain the CQs one more time. For the queues to go empty, they would\n-\t * have scheduled one or more QEs.\n-\t */\n-\tret = dlb2_domain_drain_dir_cqs(hw, domain, true);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\treturn 0;\n-}\n-\n static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,\n \t\t\t\t    struct dlb2_ldb_port *port)\n {\n@@ -271,105 +120,6 @@ static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,\n \tdlb2_flush_csr(hw);\n }\n \n-static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,\n-\t\t\t\t      struct dlb2_ldb_port *port)\n-{\n-\tunion dlb2_lsp_cq_ldb_infl_cnt r0;\n-\n-\tr0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));\n-\n-\treturn r0.field.count;\n-}\n-\n-static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,\n-\t\t\t\t   struct dlb2_ldb_port *port)\n-{\n-\tunion dlb2_lsp_cq_ldb_tkn_cnt r0;\n-\n-\tr0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id));\n-\n-\t/*\n-\t * Account for the initial token count, which is used in order to\n-\t * provide a CQ with depth less than 8.\n-\t */\n-\n-\treturn r0.field.token_count - port->init_tkn_cnt;\n-}\n-\n-static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)\n-{\n-\tu32 infl_cnt, tkn_cnt;\n-\tunsigned int i;\n-\n-\tinfl_cnt = dlb2_ldb_cq_inflight_count(hw, port);\n-\ttkn_cnt = dlb2_ldb_cq_token_count(hw, port);\n-\n-\tif (infl_cnt || tkn_cnt) {\n-\t\tstruct dlb2_hcw hcw_mem[8], *hcw;\n-\t\tvoid  *pp_addr;\n-\n-\t\tpp_addr = os_map_producer_port(hw, port->id.phys_id, true);\n-\n-\t\t/* Point hcw to a 64B-aligned location */\n-\t\thcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);\n-\n-\t\t/*\n-\t\t * Program the first HCW for a completion and token return and\n-\t\t * the other HCWs as NOOPS\n-\t\t */\n-\n-\t\tmemset(hcw, 0, 4 * sizeof(*hcw));\n-\t\thcw->qe_comp = (infl_cnt > 0);\n-\t\thcw->cq_token = (tkn_cnt > 0);\n-\t\thcw->lock_id = tkn_cnt - 1;\n-\n-\t\t/* Return tokens in the first HCW */\n-\t\tdlb2_movdir64b(pp_addr, hcw);\n-\n-\t\thcw->cq_token = 0;\n-\n-\t\t/* Issue remaining completions (if any) */\n-\t\tfor (i = 1; i < infl_cnt; i++)\n-\t\t\tdlb2_movdir64b(pp_addr, hcw);\n-\n-\t\tos_fence_hcw(hw, pp_addr);\n-\n-\t\tos_unmap_producer_port(hw, pp_addr);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int 
dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,\n-\t\t\t\t     struct dlb2_hw_domain *domain,\n-\t\t\t\t     bool toggle_port)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_port *port;\n-\tint ret, i;\n-\tRTE_SET_USED(iter);\n-\n-\t/* If the domain hasn't been started, there's no traffic to drain */\n-\tif (!domain->started)\n-\t\treturn 0;\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n-\t\t\tif (toggle_port)\n-\t\t\t\tdlb2_ldb_port_cq_disable(hw, port);\n-\n-\t\t\tret = dlb2_drain_ldb_cq(hw, port);\n-\t\t\tif (ret < 0)\n-\t\t\t\treturn ret;\n-\n-\t\t\tif (toggle_port)\n-\t\t\t\tdlb2_ldb_port_cq_enable(hw, port);\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,\n \t\t\t\tstruct dlb2_ldb_queue *queue)\n {\n@@ -388,90 +138,6 @@ static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,\n \treturn r0.field.count + r1.field.count + r2.field.count;\n }\n \n-static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,\n-\t\t\t\t    struct dlb2_ldb_queue *queue)\n-{\n-\treturn dlb2_ldb_queue_depth(hw, queue) == 0;\n-}\n-\n-static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,\n-\t\t\t\t\t    struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_queue *queue;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n-\t\tif (queue->num_mappings == 0)\n-\t\t\tcontinue;\n-\n-\t\tif (!dlb2_ldb_queue_is_empty(hw, queue))\n-\t\t\treturn false;\n-\t}\n-\n-\treturn true;\n-}\n-\n-static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,\n-\t\t\t\t\t   struct dlb2_hw_domain *domain)\n-{\n-\tint i, ret;\n-\n-\t/* If the domain hasn't been started, there's no traffic to drain */\n-\tif (!domain->started)\n-\t\treturn 0;\n-\n-\tif (domain->num_pending_removals > 0) {\n-\t\tDLB2_HW_ERR(hw,\n-\t\t\t    \"[%s()] Internal error: failed to unmap domain queues\\n\",\n-\t\t\t    __func__);\n-\t\treturn -EFAULT;\n-\t}\n-\n-\tfor (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n-\t\tret = dlb2_domain_drain_ldb_cqs(hw, domain, true);\n-\t\tif (ret < 0)\n-\t\t\treturn ret;\n-\n-\t\tif (dlb2_domain_mapped_queues_empty(hw, domain))\n-\t\t\tbreak;\n-\t}\n-\n-\tif (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {\n-\t\tDLB2_HW_ERR(hw,\n-\t\t\t    \"[%s()] Internal error: failed to empty queues\\n\",\n-\t\t\t    __func__);\n-\t\treturn -EFAULT;\n-\t}\n-\n-\t/*\n-\t * Drain the CQs one more time. 
For the queues to go empty, they would\n-\t * have scheduled one or more QEs.\n-\t */\n-\tret = dlb2_domain_drain_ldb_cqs(hw, domain, true);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\treturn 0;\n-}\n-\n-static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,\n-\t\t\t\t       struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n-\t\t\tport->enabled = true;\n-\n-\t\t\tdlb2_ldb_port_cq_enable(hw, port);\n-\t\t}\n-\t}\n-}\n-\n static struct dlb2_ldb_queue *\n dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,\n \t\t\t   u32 id,\n@@ -1455,1166 +1121,6 @@ dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,\n \treturn domain->num_pending_removals;\n }\n \n-static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,\n-\t\t\t\t\tstruct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n-\t\t\tport->enabled = false;\n-\n-\t\t\tdlb2_ldb_port_cq_disable(hw, port);\n-\t\t}\n-\t}\n-}\n-\n-static void dlb2_log_reset_domain(struct dlb2_hw *hw,\n-\t\t\t\t  u32 domain_id,\n-\t\t\t\t  bool vdev_req,\n-\t\t\t\t  unsigned int vdev_id)\n-{\n-\tDLB2_HW_DBG(hw, \"DLB2 reset domain:\\n\");\n-\tif (vdev_req)\n-\t\tDLB2_HW_DBG(hw, \"(Request from vdev %d)\\n\", vdev_id);\n-\tDLB2_HW_DBG(hw, \"\\tDomain ID: %d\\n\", domain_id);\n-}\n-\n-static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,\n-\t\t\t\t\t struct dlb2_hw_domain *domain,\n-\t\t\t\t\t unsigned int vdev_id)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tunion dlb2_sys_vf_dir_vpp_v r1;\n-\tstruct dlb2_dir_pq_pair *port;\n-\tRTE_SET_USED(iter);\n-\n-\tr1.field.vpp_v = 0;\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n-\t\tunsigned int offs;\n-\t\tu32 virt_id;\n-\n-\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n-\t\t\tvirt_id = port->id.virt_id;\n-\t\telse\n-\t\t\tvirt_id = port->id.phys_id;\n-\n-\t\toffs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;\n-\n-\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);\n-\t}\n-}\n-\n-static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,\n-\t\t\t\t\t struct dlb2_hw_domain *domain,\n-\t\t\t\t\t unsigned int vdev_id)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tunion dlb2_sys_vf_ldb_vpp_v r1;\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tr1.field.vpp_v = 0;\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n-\t\t\tunsigned int offs;\n-\t\t\tu32 virt_id;\n-\n-\t\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n-\t\t\t\tvirt_id = port->id.virt_id;\n-\t\t\telse\n-\t\t\t\tvirt_id = port->id.phys_id;\n-\n-\t\t\toffs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;\n-\n-\t\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r1.val);\n-\t\t}\n-\t}\n-}\n-\n-static void\n-dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,\n-\t\t\t\t\tstruct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tunion dlb2_chp_ldb_cq_int_enb r0 = { {0} };\n-\tunion dlb2_chp_ldb_cq_wd_enb r1 = { {0} };\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tr0.field.en_tim = 0;\n-\tr0.field.en_depth = 0;\n-\n-\tr1.field.wd_enable = 0;\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) 
{\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),\n-\t\t\t\t    r0.val);\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_CHP_LDB_CQ_WD_ENB(port->id.phys_id),\n-\t\t\t\t    r1.val);\n-\t\t}\n-\t}\n-}\n-\n-static void\n-dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,\n-\t\t\t\t\tstruct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tunion dlb2_chp_dir_cq_int_enb r0 = { {0} };\n-\tunion dlb2_chp_dir_cq_wd_enb r1 = { {0} };\n-\tstruct dlb2_dir_pq_pair *port;\n-\tRTE_SET_USED(iter);\n-\n-\tr0.field.en_tim = 0;\n-\tr0.field.en_depth = 0;\n-\n-\tr1.field.wd_enable = 0;\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),\n-\t\t\t    r0.val);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_CHP_DIR_CQ_WD_ENB(port->id.phys_id),\n-\t\t\t    r1.val);\n-\t}\n-}\n-\n-static void\n-dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,\n-\t\t\t\t\t  struct dlb2_hw_domain *domain)\n-{\n-\tint domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_queue *queue;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n-\t\tunion dlb2_sys_ldb_vasqid_v r0 = { {0} };\n-\t\tunion dlb2_sys_ldb_qid2vqid r1 = { {0} };\n-\t\tunion dlb2_sys_vf_ldb_vqid_v r2 = { {0} };\n-\t\tunion dlb2_sys_vf_ldb_vqid2qid r3 = { {0} };\n-\t\tint idx;\n-\n-\t\tidx = domain_offset + queue->id.phys_id;\n-\n-\t\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), r0.val);\n-\n-\t\tif (queue->id.vdev_owned) {\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),\n-\t\t\t\t    r1.val);\n-\n-\t\t\tidx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +\n-\t\t\t\tqueue->id.virt_id;\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_SYS_VF_LDB_VQID_V(idx),\n-\t\t\t\t    r2.val);\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_SYS_VF_LDB_VQID2QID(idx),\n-\t\t\t\t    r3.val);\n-\t\t}\n-\t}\n-}\n-\n-static void\n-dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,\n-\t\t\t\t\t  struct dlb2_hw_domain *domain)\n-{\n-\tint domain_offset = domain->id.phys_id *\n-\t\tDLB2_MAX_NUM_DIR_PORTS(hw->ver);\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *queue;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n-\t\tunion dlb2_sys_dir_vasqid_v r0 = { {0} };\n-\t\tunion dlb2_sys_vf_dir_vqid_v r1 = { {0} };\n-\t\tunion dlb2_sys_vf_dir_vqid2qid r2 = { {0} };\n-\t\tint idx;\n-\n-\t\tidx = domain_offset + queue->id.phys_id;\n-\n-\t\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);\n-\n-\t\tif (queue->id.vdev_owned) {\n-\t\t\tidx = queue->id.vdev_id *\n-\t\t\t\tDLB2_MAX_NUM_DIR_PORTS(hw->ver) +\n-\t\t\t\tqueue->id.virt_id;\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_SYS_VF_DIR_VQID_V(idx),\n-\t\t\t\t    r1.val);\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_SYS_VF_DIR_VQID2QID(idx),\n-\t\t\t\t    r2.val);\n-\t\t}\n-\t}\n-}\n-\n-static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,\n-\t\t\t\t\t       struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tunion dlb2_chp_sn_chk_enbl r1;\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tr1.field.en = 0;\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    
DLB2_CHP_SN_CHK_ENBL(port->id.phys_id),\n-\t\t\t\t    r1.val);\n-\t}\n-}\n-\n-static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,\n-\t\t\t\t\t\t struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n-\t\t\tint i;\n-\n-\t\t\tfor (i = 0; i < DLB2_MAX_CQ_COMP_CHECK_LOOPS; i++) {\n-\t\t\t\tif (dlb2_ldb_cq_inflight_count(hw, port) == 0)\n-\t\t\t\t\tbreak;\n-\t\t\t}\n-\n-\t\t\tif (i == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {\n-\t\t\t\tDLB2_HW_ERR(hw,\n-\t\t\t\t\t    \"[%s()] Internal error: failed to flush load-balanced port %d's completions.\\n\",\n-\t\t\t\t\t    __func__, port->id.phys_id);\n-\t\t\t\treturn -EFAULT;\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,\n-\t\t\t\t\tstruct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *port;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n-\t\tport->enabled = false;\n-\n-\t\tdlb2_dir_port_cq_disable(hw, port);\n-\t}\n-}\n-\n-static void\n-dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,\n-\t\t\t\t       struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *port;\n-\tunion dlb2_sys_dir_pp_v r1;\n-\tRTE_SET_USED(iter);\n-\n-\tr1.field.pp_v = 0;\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_DIR_PP_V(port->id.phys_id),\n-\t\t\t    r1.val);\n-}\n-\n-static void\n-dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,\n-\t\t\t\t       struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tunion dlb2_sys_ldb_pp_v r1;\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tr1.field.pp_v = 0;\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_SYS_LDB_PP_V(port->id.phys_id),\n-\t\t\t\t    r1.val);\n-\t}\n-}\n-\n-static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,\n-\t\t\t\t\t    struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *dir_port;\n-\tstruct dlb2_ldb_port *ldb_port;\n-\tstruct dlb2_ldb_queue *queue;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\t/*\n-\t * Confirm that all the domain's queue's inflight counts and AQED\n-\t * active counts are 0.\n-\t */\n-\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n-\t\tif (!dlb2_ldb_queue_is_empty(hw, queue)) {\n-\t\t\tDLB2_HW_ERR(hw,\n-\t\t\t\t    \"[%s()] Internal error: failed to empty ldb queue %d\\n\",\n-\t\t\t\t    __func__, queue->id.phys_id);\n-\t\t\treturn -EFAULT;\n-\t\t}\n-\t}\n-\n-\t/* Confirm that all the domain's CQs inflight and token counts are 0. 
*/\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {\n-\t\t\tif (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||\n-\t\t\t    dlb2_ldb_cq_token_count(hw, ldb_port)) {\n-\t\t\t\tDLB2_HW_ERR(hw,\n-\t\t\t\t\t    \"[%s()] Internal error: failed to empty ldb port %d\\n\",\n-\t\t\t\t\t    __func__, ldb_port->id.phys_id);\n-\t\t\t\treturn -EFAULT;\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {\n-\t\tif (!dlb2_dir_queue_is_empty(hw, dir_port)) {\n-\t\t\tDLB2_HW_ERR(hw,\n-\t\t\t\t    \"[%s()] Internal error: failed to empty dir queue %d\\n\",\n-\t\t\t\t    __func__, dir_port->id.phys_id);\n-\t\t\treturn -EFAULT;\n-\t\t}\n-\n-\t\tif (dlb2_dir_cq_token_count(hw, dir_port)) {\n-\t\t\tDLB2_HW_ERR(hw,\n-\t\t\t\t    \"[%s()] Internal error: failed to empty dir port %d\\n\",\n-\t\t\t\t    __func__, dir_port->id.phys_id);\n-\t\t\treturn -EFAULT;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,\n-\t\t\t\t\t\t   struct dlb2_ldb_port *port)\n-{\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_PP2VAS(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_PP2VAS_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_LDB_CQ2VAS(port->id.phys_id),\n-\t\t    DLB2_CHP_LDB_CQ2VAS_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_PP2VDEV_RST);\n-\n-\tif (port->id.vdev_owned) {\n-\t\tunsigned int offs;\n-\t\tu32 virt_id;\n-\n-\t\t/*\n-\t\t * DLB uses producer port address bits 17:12 to determine the\n-\t\t * producer port ID. In Scalable IOV mode, PP accesses come\n-\t\t * through the PF MMIO window for the physical producer port,\n-\t\t * so for translation purposes the virtual and physical port\n-\t\t * IDs are equal.\n-\t\t */\n-\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n-\t\t\tvirt_id = port->id.virt_id;\n-\t\telse\n-\t\t\tvirt_id = port->id.phys_id;\n-\n-\t\toffs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_VF_LDB_VPP2PP(offs),\n-\t\t\t    DLB2_SYS_VF_LDB_VPP2PP_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_VF_LDB_VPP_V(offs),\n-\t\t\t    DLB2_SYS_VF_LDB_VPP_V_RST);\n-\t}\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_PP_V(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_PP_V_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_LDB_DSBL_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_LDB_CQ_DEPTH(port->id.phys_id),\n-\t\t    DLB2_CHP_LDB_CQ_DEPTH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_LDB_INFL_LIM_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_HIST_LIST_LIM(port->id.phys_id),\n-\t\t    DLB2_CHP_HIST_LIST_LIM_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_HIST_LIST_BASE(port->id.phys_id),\n-\t\t    DLB2_CHP_HIST_LIST_BASE_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id),\n-\t\t    DLB2_CHP_HIST_LIST_POP_PTR_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),\n-\t\t    DLB2_CHP_HIST_LIST_PUSH_PTR_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id),\n-\t\t    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_LDB_CQ_TMR_THRSH(port->id.phys_id),\n-\t\t    DLB2_CHP_LDB_CQ_TMR_THRSH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),\n-\t\t    
DLB2_CHP_LDB_CQ_INT_ENB_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_CQ_ISR_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),\n-\t\t    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),\n-\t\t    DLB2_CHP_LDB_CQ_WPTR_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_LDB_TKN_CNT_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_CQ_ADDR_L_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_CQ_ADDR_U_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_CQ_AT(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_CQ_AT_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_CQ_PASID_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),\n-\t\t    DLB2_SYS_LDB_CQ2VF_PF_RO_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ2QID0(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ2QID0_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ2QID1(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ2QID1_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ2PRIOV(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ2PRIOV_RST);\n-}\n-\n-static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,\n-\t\t\t\t\t\t struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_port *port;\n-\tint i;\n-\tRTE_SET_USED(iter);\n-\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)\n-\t\t\t__dlb2_domain_reset_ldb_port_registers(hw, port);\n-\t}\n-}\n-\n-static void\n-__dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,\n-\t\t\t\t       struct dlb2_dir_pq_pair *port)\n-{\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),\n-\t\t    DLB2_CHP_DIR_CQ2VAS_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_DIR_DSBL_RST);\n-\n-\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ_DEPTH(port->id.phys_id),\n-\t\t    DLB2_CHP_DIR_CQ_DEPTH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id),\n-\t\t    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ_TMR_THRSH(port->id.phys_id),\n-\t\t    DLB2_CHP_DIR_CQ_TMR_THRSH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),\n-\t\t    DLB2_CHP_DIR_CQ_INT_ENB_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_CQ_ISR_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),\n-\t\t    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),\n-\t\t    
DLB2_CHP_DIR_CQ_WPTR_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_DIR_TKN_CNT_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_CQ_ADDR_L_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_CQ_ADDR_U_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_CQ_AT_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_CQ_PASID_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_CQ_FMT_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_CQ2VF_PF_RO_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(port->id.phys_id),\n-\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_PP2VAS(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_PP2VAS_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),\n-\t\t    DLB2_CHP_DIR_CQ2VAS_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_PP2VDEV_RST);\n-\n-\tif (port->id.vdev_owned) {\n-\t\tunsigned int offs;\n-\t\tu32 virt_id;\n-\n-\t\t/*\n-\t\t * DLB uses producer port address bits 17:12 to determine the\n-\t\t * producer port ID. In Scalable IOV mode, PP accesses come\n-\t\t * through the PF MMIO window for the physical producer port,\n-\t\t * so for translation purposes the virtual and physical port\n-\t\t * IDs are equal.\n-\t\t */\n-\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n-\t\t\tvirt_id = port->id.virt_id;\n-\t\telse\n-\t\t\tvirt_id = port->id.phys_id;\n-\n-\t\toffs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver)\n-\t\t\t+ virt_id;\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_VF_DIR_VPP2PP(offs),\n-\t\t\t    DLB2_SYS_VF_DIR_VPP2PP_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_VF_DIR_VPP_V(offs),\n-\t\t\t    DLB2_SYS_VF_DIR_VPP_V_RST);\n-\t}\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_SYS_DIR_PP_V(port->id.phys_id),\n-\t\t    DLB2_SYS_DIR_PP_V_RST);\n-}\n-\n-static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,\n-\t\t\t\t\t\t struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *port;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)\n-\t\t__dlb2_domain_reset_dir_port_registers(hw, port);\n-}\n-\n-static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,\n-\t\t\t\t\t\t  struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_queue *queue;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n-\t\tunsigned int queue_id = queue->id.phys_id;\n-\t\tint i;\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(queue_id),\n-\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(queue_id),\n-\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(queue_id),\n-\t\t\t    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(queue_id),\n-\t\t\t    
DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_NALDB_MAX_DEPTH(queue_id),\n-\t\t\t    DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_LDB_INFL_LIM(queue_id),\n-\t\t\t    DLB2_LSP_QID_LDB_INFL_LIM_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_AQED_ACTIVE_LIM(queue_id),\n-\t\t\t    DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_ATM_DEPTH_THRSH(queue_id),\n-\t\t\t    DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue_id),\n-\t\t\t    DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_LDB_QID_ITS(queue_id),\n-\t\t\t    DLB2_SYS_LDB_QID_ITS_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_CHP_ORD_QID_SN(queue_id),\n-\t\t\t    DLB2_CHP_ORD_QID_SN_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_CHP_ORD_QID_SN_MAP(queue_id),\n-\t\t\t    DLB2_CHP_ORD_QID_SN_MAP_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_LDB_QID_V(queue_id),\n-\t\t\t    DLB2_SYS_LDB_QID_V_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_LDB_QID_CFG_V(queue_id),\n-\t\t\t    DLB2_SYS_LDB_QID_CFG_V_RST);\n-\n-\t\tif (queue->sn_cfg_valid) {\n-\t\t\tu32 offs[2];\n-\n-\t\t\toffs[0] = DLB2_RO_PIPE_GRP_0_SLT_SHFT(queue->sn_slot);\n-\t\t\toffs[1] = DLB2_RO_PIPE_GRP_1_SLT_SHFT(queue->sn_slot);\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    offs[queue->sn_group],\n-\t\t\t\t    DLB2_RO_PIPE_GRP_0_SLT_SHFT_RST);\n-\t\t}\n-\n-\t\tfor (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_LSP_QID2CQIDIX(queue_id, i),\n-\t\t\t\t    DLB2_LSP_QID2CQIDIX_00_RST);\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_LSP_QID2CQIDIX2(queue_id, i),\n-\t\t\t\t    DLB2_LSP_QID2CQIDIX2_00_RST);\n-\n-\t\t\tDLB2_CSR_WR(hw,\n-\t\t\t\t    DLB2_ATM_QID2CQIDIX(queue_id, i),\n-\t\t\t\t    DLB2_ATM_QID2CQIDIX_00_RST);\n-\t\t}\n-\t}\n-}\n-\n-static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,\n-\t\t\t\t\t\t  struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_dir_pq_pair *queue;\n-\tRTE_SET_USED(iter);\n-\n-\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_DIR_MAX_DEPTH(queue->id.phys_id),\n-\t\t\t    DLB2_LSP_QID_DIR_MAX_DEPTH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(queue->id.phys_id),\n-\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(queue->id.phys_id),\n-\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),\n-\t\t\t    DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),\n-\t\t\t    DLB2_SYS_DIR_QID_ITS_RST);\n-\n-\t\tDLB2_CSR_WR(hw,\n-\t\t\t    DLB2_SYS_DIR_QID_V(queue->id.phys_id),\n-\t\t\t    DLB2_SYS_DIR_QID_V_RST);\n-\t}\n-}\n-\n-static void dlb2_domain_reset_registers(struct dlb2_hw *hw,\n-\t\t\t\t\tstruct dlb2_hw_domain *domain)\n-{\n-\tdlb2_domain_reset_ldb_port_registers(hw, domain);\n-\n-\tdlb2_domain_reset_dir_port_registers(hw, domain);\n-\n-\tdlb2_domain_reset_ldb_queue_registers(hw, domain);\n-\n-\tdlb2_domain_reset_dir_queue_registers(hw, domain);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),\n-\t\t    DLB2_CHP_CFG_LDB_VAS_CRD_RST);\n-\n-\tDLB2_CSR_WR(hw,\n-\t\t    DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),\n-\t\t    
DLB2_CHP_CFG_DIR_VAS_CRD_RST);\n-}\n-\n-static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,\n-\t\t\t\t\t    struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_dir_pq_pair *tmp_dir_port;\n-\tstruct dlb2_ldb_queue *tmp_ldb_queue;\n-\tstruct dlb2_ldb_port *tmp_ldb_port;\n-\tstruct dlb2_list_entry *iter1;\n-\tstruct dlb2_list_entry *iter2;\n-\tstruct dlb2_function_resources *rsrcs;\n-\tstruct dlb2_dir_pq_pair *dir_port;\n-\tstruct dlb2_ldb_queue *ldb_queue;\n-\tstruct dlb2_ldb_port *ldb_port;\n-\tstruct dlb2_list_head *list;\n-\tint ret, i;\n-\tRTE_SET_USED(tmp_dir_port);\n-\tRTE_SET_USED(tmp_ldb_queue);\n-\tRTE_SET_USED(tmp_ldb_port);\n-\tRTE_SET_USED(iter1);\n-\tRTE_SET_USED(iter2);\n-\n-\trsrcs = domain->parent_func;\n-\n-\t/* Move the domain's ldb queues to the function's avail list */\n-\tlist = &domain->used_ldb_queues;\n-\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {\n-\t\tif (ldb_queue->sn_cfg_valid) {\n-\t\t\tstruct dlb2_sn_group *grp;\n-\n-\t\t\tgrp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];\n-\n-\t\t\tdlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);\n-\t\t\tldb_queue->sn_cfg_valid = false;\n-\t\t}\n-\n-\t\tldb_queue->owned = false;\n-\t\tldb_queue->num_mappings = 0;\n-\t\tldb_queue->num_pending_additions = 0;\n-\n-\t\tdlb2_list_del(&domain->used_ldb_queues,\n-\t\t\t      &ldb_queue->domain_list);\n-\t\tdlb2_list_add(&rsrcs->avail_ldb_queues,\n-\t\t\t      &ldb_queue->func_list);\n-\t\trsrcs->num_avail_ldb_queues++;\n-\t}\n-\n-\tlist = &domain->avail_ldb_queues;\n-\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {\n-\t\tldb_queue->owned = false;\n-\n-\t\tdlb2_list_del(&domain->avail_ldb_queues,\n-\t\t\t      &ldb_queue->domain_list);\n-\t\tdlb2_list_add(&rsrcs->avail_ldb_queues,\n-\t\t\t      &ldb_queue->func_list);\n-\t\trsrcs->num_avail_ldb_queues++;\n-\t}\n-\n-\t/* Move the domain's ldb ports to the function's avail list */\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tlist = &domain->used_ldb_ports[i];\n-\t\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,\n-\t\t\t\t       iter1, iter2) {\n-\t\t\tint j;\n-\n-\t\t\tldb_port->owned = false;\n-\t\t\tldb_port->configured = false;\n-\t\t\tldb_port->num_pending_removals = 0;\n-\t\t\tldb_port->num_mappings = 0;\n-\t\t\tldb_port->init_tkn_cnt = 0;\n-\t\t\tfor (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)\n-\t\t\t\tldb_port->qid_map[j].state =\n-\t\t\t\t\tDLB2_QUEUE_UNMAPPED;\n-\n-\t\t\tdlb2_list_del(&domain->used_ldb_ports[i],\n-\t\t\t\t      &ldb_port->domain_list);\n-\t\t\tdlb2_list_add(&rsrcs->avail_ldb_ports[i],\n-\t\t\t\t      &ldb_port->func_list);\n-\t\t\trsrcs->num_avail_ldb_ports[i]++;\n-\t\t}\n-\n-\t\tlist = &domain->avail_ldb_ports[i];\n-\t\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,\n-\t\t\t\t       iter1, iter2) {\n-\t\t\tldb_port->owned = false;\n-\n-\t\t\tdlb2_list_del(&domain->avail_ldb_ports[i],\n-\t\t\t\t      &ldb_port->domain_list);\n-\t\t\tdlb2_list_add(&rsrcs->avail_ldb_ports[i],\n-\t\t\t\t      &ldb_port->func_list);\n-\t\t\trsrcs->num_avail_ldb_ports[i]++;\n-\t\t}\n-\t}\n-\n-\t/* Move the domain's dir ports to the function's avail list */\n-\tlist = &domain->used_dir_pq_pairs;\n-\tDLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {\n-\t\tdir_port->owned = false;\n-\t\tdir_port->port_configured = false;\n-\t\tdir_port->init_tkn_cnt = 0;\n-\n-\t\tdlb2_list_del(&domain->used_dir_pq_pairs,\n-\t\t\t      &dir_port->domain_list);\n-\n-\t\tdlb2_list_add(&rsrcs->avail_dir_pq_pairs,\n-\t\t\t      
&dir_port->func_list);\n-\t\trsrcs->num_avail_dir_pq_pairs++;\n-\t}\n-\n-\tlist = &domain->avail_dir_pq_pairs;\n-\tDLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {\n-\t\tdir_port->owned = false;\n-\n-\t\tdlb2_list_del(&domain->avail_dir_pq_pairs,\n-\t\t\t      &dir_port->domain_list);\n-\n-\t\tdlb2_list_add(&rsrcs->avail_dir_pq_pairs,\n-\t\t\t      &dir_port->func_list);\n-\t\trsrcs->num_avail_dir_pq_pairs++;\n-\t}\n-\n-\t/* Return hist list entries to the function */\n-\tret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,\n-\t\t\t\t    domain->hist_list_entry_base,\n-\t\t\t\t    domain->total_hist_list_entries);\n-\tif (ret) {\n-\t\tDLB2_HW_ERR(hw,\n-\t\t\t    \"[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\\n\",\n-\t\t\t    __func__);\n-\t\treturn ret;\n-\t}\n-\n-\tdomain->total_hist_list_entries = 0;\n-\tdomain->avail_hist_list_entries = 0;\n-\tdomain->hist_list_entry_base = 0;\n-\tdomain->hist_list_entry_offset = 0;\n-\n-\trsrcs->num_avail_qed_entries += domain->num_ldb_credits;\n-\tdomain->num_ldb_credits = 0;\n-\n-\trsrcs->num_avail_dqed_entries += domain->num_dir_credits;\n-\tdomain->num_dir_credits = 0;\n-\n-\trsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;\n-\trsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;\n-\tdomain->num_avail_aqed_entries = 0;\n-\tdomain->num_used_aqed_entries = 0;\n-\n-\tdomain->num_pending_removals = 0;\n-\tdomain->num_pending_additions = 0;\n-\tdomain->configured = false;\n-\tdomain->started = false;\n-\n-\t/*\n-\t * Move the domain out of the used_domains list and back to the\n-\t * function's avail_domains list.\n-\t */\n-\tdlb2_list_del(&rsrcs->used_domains, &domain->func_list);\n-\tdlb2_list_add(&rsrcs->avail_domains, &domain->func_list);\n-\trsrcs->num_avail_domains++;\n-\n-\treturn 0;\n-}\n-\n-static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,\n-\t\t\t\t\t    struct dlb2_hw_domain *domain,\n-\t\t\t\t\t    struct dlb2_ldb_queue *queue)\n-{\n-\tstruct dlb2_ldb_port *port;\n-\tint ret, i;\n-\n-\t/* If a domain has LDB queues, it must have LDB ports */\n-\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n-\t\tif (!dlb2_list_empty(&domain->used_ldb_ports[i]))\n-\t\t\tbreak;\n-\t}\n-\n-\tif (i == DLB2_NUM_COS_DOMAINS) {\n-\t\tDLB2_HW_ERR(hw,\n-\t\t\t    \"[%s()] Internal error: No configured LDB ports\\n\",\n-\t\t\t    __func__);\n-\t\treturn -EFAULT;\n-\t}\n-\n-\tport = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i], typeof(*port));\n-\n-\t/* If necessary, free up a QID slot in this CQ */\n-\tif (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {\n-\t\tstruct dlb2_ldb_queue *mapped_queue;\n-\n-\t\tmapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];\n-\n-\t\tret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);\n-\t\tif (ret)\n-\t\t\treturn ret;\n-\t}\n-\n-\tret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);\n-\tif (ret)\n-\t\treturn ret;\n-\n-\treturn dlb2_domain_drain_mapped_queues(hw, domain);\n-}\n-\n-static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,\n-\t\t\t\t\t     struct dlb2_hw_domain *domain)\n-{\n-\tstruct dlb2_list_entry *iter;\n-\tstruct dlb2_ldb_queue *queue;\n-\tint ret;\n-\tRTE_SET_USED(iter);\n-\n-\t/* If the domain hasn't been started, there's no traffic to drain */\n-\tif (!domain->started)\n-\t\treturn 0;\n-\n-\t/*\n-\t * Pre-condition: the unattached queue must not have any outstanding\n-\t * completions. 
This is ensured by calling dlb2_domain_drain_ldb_cqs()\n-\t * prior to this in dlb2_domain_drain_mapped_queues().\n-\t */\n-\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n-\t\tif (queue->num_mappings != 0 ||\n-\t\t    dlb2_ldb_queue_is_empty(hw, queue))\n-\t\t\tcontinue;\n-\n-\t\tret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);\n-\t\tif (ret)\n-\t\t\treturn ret;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * dlb2_reset_domain() - Reset a DLB scheduling domain and its associated\n- *\thardware resources.\n- * @hw:\tContains the current state of the DLB2 hardware.\n- * @domain_id: Domain ID\n- * @vdev_req: Request came from a virtual device.\n- * @vdev_id: If vdev_req is true, this contains the virtual device's ID.\n- *\n- * Note: User software *must* stop sending to this domain's producer ports\n- * before invoking this function, otherwise undefined behavior will result.\n- *\n- * Return: returns < 0 on error, 0 otherwise.\n- */\n-int dlb2_reset_domain(struct dlb2_hw *hw,\n-\t\t      u32 domain_id,\n-\t\t      bool vdev_req,\n-\t\t      unsigned int vdev_id)\n-{\n-\tstruct dlb2_hw_domain *domain;\n-\tint ret;\n-\n-\tdlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);\n-\n-\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n-\n-\tif (domain  == NULL || !domain->configured)\n-\t\treturn -EINVAL;\n-\n-\t/* Disable VPPs */\n-\tif (vdev_req) {\n-\t\tdlb2_domain_disable_dir_vpps(hw, domain, vdev_id);\n-\n-\t\tdlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);\n-\t}\n-\n-\t/* Disable CQ interrupts */\n-\tdlb2_domain_disable_dir_port_interrupts(hw, domain);\n-\n-\tdlb2_domain_disable_ldb_port_interrupts(hw, domain);\n-\n-\t/*\n-\t * For each queue owned by this domain, disable its write permissions to\n-\t * cause any traffic sent to it to be dropped. Well-behaved software\n-\t * should not be sending QEs at this point.\n-\t */\n-\tdlb2_domain_disable_dir_queue_write_perms(hw, domain);\n-\n-\tdlb2_domain_disable_ldb_queue_write_perms(hw, domain);\n-\n-\t/* Turn off completion tracking on all the domain's PPs. */\n-\tdlb2_domain_disable_ldb_seq_checks(hw, domain);\n-\n-\t/*\n-\t * Disable the LDB CQs and drain them in order to complete the map and\n-\t * unmap procedures, which require zero CQ inflights and zero QID\n-\t * inflights respectively.\n-\t */\n-\tdlb2_domain_disable_ldb_cqs(hw, domain);\n-\n-\tret = dlb2_domain_drain_ldb_cqs(hw, domain, false);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\tret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\tret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\tret = dlb2_domain_finish_map_qid_procedures(hw, domain);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\t/* Re-enable the CQs in order to drain the mapped queues. */\n-\tdlb2_domain_enable_ldb_cqs(hw, domain);\n-\n-\tret = dlb2_domain_drain_mapped_queues(hw, domain);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\tret = dlb2_domain_drain_unmapped_queues(hw, domain);\n-\tif (ret < 0)\n-\t\treturn ret;\n-\n-\t/* Done draining LDB QEs, so disable the CQs. */\n-\tdlb2_domain_disable_ldb_cqs(hw, domain);\n-\n-\tdlb2_domain_drain_dir_queues(hw, domain);\n-\n-\t/* Done draining DIR QEs, so disable the CQs. 
*/\n-\tdlb2_domain_disable_dir_cqs(hw, domain);\n-\n-\t/* Disable PPs */\n-\tdlb2_domain_disable_dir_producer_ports(hw, domain);\n-\n-\tdlb2_domain_disable_ldb_producer_ports(hw, domain);\n-\n-\tret = dlb2_domain_verify_reset_success(hw, domain);\n-\tif (ret)\n-\t\treturn ret;\n-\n-\t/* Reset the QID and port state. */\n-\tdlb2_domain_reset_registers(hw, domain);\n-\n-\t/* Hardware reset complete. Reset the domain's software state */\n-\tret = dlb2_domain_reset_software_state(hw, domain);\n-\tif (ret)\n-\t\treturn ret;\n-\n-\treturn 0;\n-}\n-\n unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)\n {\n \tint i, num = 0;\ndiff --git a/drivers/event/dlb2/pf/base/dlb2_resource_new.c b/drivers/event/dlb2/pf/base/dlb2_resource_new.c\nindex 8f97dd865..641812412 100644\n--- a/drivers/event/dlb2/pf/base/dlb2_resource_new.c\n+++ b/drivers/event/dlb2/pf/base/dlb2_resource_new.c\n@@ -34,6 +34,17 @@\n #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \\\n \tDLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)\n \n+/*\n+ * The PF driver cannot assume that a register write will affect subsequent HCW\n+ * writes. To ensure a write completes, the driver must read back a CSR. This\n+ * function only need be called for configuration that can occur after the\n+ * domain has started; prior to starting, applications can't send HCWs.\n+ */\n+static inline void dlb2_flush_csr(struct dlb2_hw *hw)\n+{\n+\tDLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));\n+}\n+\n static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)\n {\n \tint i;\n@@ -1019,3 +1030,2554 @@ int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,\n \n \treturn 0;\n }\n+\n+static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,\n+\t\t\t\t     struct dlb2_dir_pq_pair *port)\n+{\n+\tu32 reg = 0;\n+\n+\tDLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);\n+\n+\tdlb2_flush_csr(hw);\n+}\n+\n+static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,\n+\t\t\t\t   struct dlb2_dir_pq_pair *port)\n+{\n+\tu32 cnt;\n+\n+\tcnt = DLB2_CSR_RD(hw,\n+\t\t\t  DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));\n+\n+\t/*\n+\t * Account for the initial token count, which is used in order to\n+\t * provide a CQ with depth less than 8.\n+\t */\n+\n+\treturn DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -\n+\t       port->init_tkn_cnt;\n+}\n+\n+static void dlb2_drain_dir_cq(struct dlb2_hw *hw,\n+\t\t\t      struct dlb2_dir_pq_pair *port)\n+{\n+\tunsigned int port_id = port->id.phys_id;\n+\tu32 cnt;\n+\n+\t/* Return any outstanding tokens */\n+\tcnt = dlb2_dir_cq_token_count(hw, port);\n+\n+\tif (cnt != 0) {\n+\t\tstruct dlb2_hcw hcw_mem[8], *hcw;\n+\t\tvoid __iomem *pp_addr;\n+\n+\t\tpp_addr = os_map_producer_port(hw, port_id, false);\n+\n+\t\t/* Point hcw to a 64B-aligned location */\n+\t\thcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);\n+\n+\t\t/*\n+\t\t * Program the first HCW for a batch token return and\n+\t\t * the rest as NOOPS\n+\t\t */\n+\t\tmemset(hcw, 0, 4 * sizeof(*hcw));\n+\t\thcw->cq_token = 1;\n+\t\thcw->lock_id = cnt - 1;\n+\n+\t\tdlb2_movdir64b(pp_addr, hcw);\n+\n+\t\tos_fence_hcw(hw, pp_addr);\n+\n+\t\tos_unmap_producer_port(hw, pp_addr);\n+\t}\n+}\n+\n+static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,\n+\t\t\t\t    struct dlb2_dir_pq_pair *port)\n+{\n+\tu32 reg = 0;\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);\n+\n+\tdlb2_flush_csr(hw);\n+}\n+\n+static int 
dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,\n+\t\t\t\t     struct dlb2_hw_domain *domain,\n+\t\t\t\t     bool toggle_port)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *port;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\t/*\n+\t\t * Can't drain a port if it's not configured, and there's\n+\t\t * nothing to drain if its queue is unconfigured.\n+\t\t */\n+\t\tif (!port->port_configured || !port->queue_configured)\n+\t\t\tcontinue;\n+\n+\t\tif (toggle_port)\n+\t\t\tdlb2_dir_port_cq_disable(hw, port);\n+\n+\t\tdlb2_drain_dir_cq(hw, port);\n+\n+\t\tif (toggle_port)\n+\t\t\tdlb2_dir_port_cq_enable(hw, port);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,\n+\t\t\t\tstruct dlb2_dir_pq_pair *queue)\n+{\n+\tu32 cnt;\n+\n+\tcnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,\n+\t\t\t\t\t\t      queue->id.phys_id));\n+\n+\treturn DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);\n+}\n+\n+static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,\n+\t\t\t\t    struct dlb2_dir_pq_pair *queue)\n+{\n+\treturn dlb2_dir_queue_depth(hw, queue) == 0;\n+}\n+\n+static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,\n+\t\t\t\t\t struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *queue;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n+\t\tif (!dlb2_dir_queue_is_empty(hw, queue))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain)\n+{\n+\tint i;\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n+\t\tdlb2_domain_drain_dir_cqs(hw, domain, true);\n+\n+\t\tif (dlb2_domain_dir_queues_empty(hw, domain))\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: failed to empty queues\\n\",\n+\t\t\t    __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/*\n+\t * Drain the CQs one more time. For the queues to go empty, they would\n+\t * have scheduled one or more QEs.\n+\t */\n+\tdlb2_domain_drain_dir_cqs(hw, domain, true);\n+\n+\treturn 0;\n+}\n+\n+static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,\n+\t\t\t\t    struct dlb2_ldb_port *port)\n+{\n+\tu32 reg = 0;\n+\n+\t/*\n+\t * Don't re-enable the port if a removal is pending. 
The caller should\n+\t * mark this port as enabled (if it isn't already), and when the\n+\t * removal completes the port will be enabled.\n+\t */\n+\tif (port->num_pending_removals)\n+\t\treturn;\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);\n+\n+\tdlb2_flush_csr(hw);\n+}\n+\n+static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,\n+\t\t\t\t     struct dlb2_ldb_port *port)\n+{\n+\tu32 reg = 0;\n+\n+\tDLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);\n+\n+\tdlb2_flush_csr(hw);\n+}\n+\n+static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,\n+\t\t\t\t      struct dlb2_ldb_port *port)\n+{\n+\tu32 cnt;\n+\n+\tcnt = DLB2_CSR_RD(hw,\n+\t\t\t  DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));\n+\n+\treturn DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);\n+}\n+\n+static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,\n+\t\t\t\t   struct dlb2_ldb_port *port)\n+{\n+\tu32 cnt;\n+\n+\tcnt = DLB2_CSR_RD(hw,\n+\t\t\t  DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));\n+\n+\t/*\n+\t * Account for the initial token count, which is used in order to\n+\t * provide a CQ with depth less than 8.\n+\t */\n+\n+\treturn DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -\n+\t\tport->init_tkn_cnt;\n+}\n+\n+static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)\n+{\n+\tu32 infl_cnt, tkn_cnt;\n+\tunsigned int i;\n+\n+\tinfl_cnt = dlb2_ldb_cq_inflight_count(hw, port);\n+\ttkn_cnt = dlb2_ldb_cq_token_count(hw, port);\n+\n+\tif (infl_cnt || tkn_cnt) {\n+\t\tstruct dlb2_hcw hcw_mem[8], *hcw;\n+\t\tvoid __iomem *pp_addr;\n+\n+\t\tpp_addr = os_map_producer_port(hw, port->id.phys_id, true);\n+\n+\t\t/* Point hcw to a 64B-aligned location */\n+\t\thcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);\n+\n+\t\t/*\n+\t\t * Program the first HCW for a completion and token return and\n+\t\t * the other HCWs as NOOPS\n+\t\t */\n+\n+\t\tmemset(hcw, 0, 4 * sizeof(*hcw));\n+\t\thcw->qe_comp = (infl_cnt > 0);\n+\t\thcw->cq_token = (tkn_cnt > 0);\n+\t\thcw->lock_id = tkn_cnt - 1;\n+\n+\t\t/* Return tokens in the first HCW */\n+\t\tdlb2_movdir64b(pp_addr, hcw);\n+\n+\t\thcw->cq_token = 0;\n+\n+\t\t/* Issue remaining completions (if any) */\n+\t\tfor (i = 1; i < infl_cnt; i++)\n+\t\t\tdlb2_movdir64b(pp_addr, hcw);\n+\n+\t\tos_fence_hcw(hw, pp_addr);\n+\n+\t\tos_unmap_producer_port(hw, pp_addr);\n+\t}\n+}\n+\n+static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,\n+\t\t\t\t      struct dlb2_hw_domain *domain,\n+\t\t\t\t      bool toggle_port)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn;\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tif (toggle_port)\n+\t\t\t\tdlb2_ldb_port_cq_disable(hw, port);\n+\n+\t\t\tdlb2_drain_ldb_cq(hw, port);\n+\n+\t\t\tif (toggle_port)\n+\t\t\t\tdlb2_ldb_port_cq_enable(hw, port);\n+\t\t}\n+\t}\n+}\n+\n+static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,\n+\t\t\t\tstruct dlb2_ldb_queue *queue)\n+{\n+\tu32 aqed, ldb, atm;\n+\n+\taqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,\n+\t\t\t\t\t\t       queue->id.phys_id));\n+\tldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,\n+\t\t\t\t\t\t      queue->id.phys_id));\n+\tatm = DLB2_CSR_RD(hw,\n+\t\t\t  DLB2_LSP_QID_ATM_ACTIVE(hw->ver, 
queue->id.phys_id));\n+\n+\treturn DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)\n+\t       + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)\n+\t       + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);\n+}\n+\n+static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,\n+\t\t\t\t    struct dlb2_ldb_queue *queue)\n+{\n+\treturn dlb2_ldb_queue_depth(hw, queue) == 0;\n+}\n+\n+static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,\n+\t\t\t\t\t    struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_queue *queue;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tif (queue->num_mappings == 0)\n+\t\t\tcontinue;\n+\n+\t\tif (!dlb2_ldb_queue_is_empty(hw, queue))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,\n+\t\t\t\t\t   struct dlb2_hw_domain *domain)\n+{\n+\tint i;\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn 0;\n+\n+\tif (domain->num_pending_removals > 0) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: failed to unmap domain queues\\n\",\n+\t\t\t    __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tfor (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {\n+\t\tdlb2_domain_drain_ldb_cqs(hw, domain, true);\n+\n+\t\tif (dlb2_domain_mapped_queues_empty(hw, domain))\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: failed to empty queues\\n\",\n+\t\t\t    __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/*\n+\t * Drain the CQs one more time. For the queues to have gone empty,\n+\t * they must have scheduled one or more QEs into the CQs.\n+\t */\n+\tdlb2_domain_drain_ldb_cqs(hw, domain, true);\n+\n+\treturn 0;\n+}\n+\n+static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,\n+\t\t\t\t       struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tport->enabled = true;\n+\n+\t\t\tdlb2_ldb_port_cq_enable(hw, port);\n+\t\t}\n+\t}\n+}\n+\n+static struct dlb2_ldb_queue *\n+dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,\n+\t\t\t   u32 id,\n+\t\t\t   bool vdev_req,\n+\t\t\t   unsigned int vdev_id)\n+{\n+\tstruct dlb2_list_entry *iter1;\n+\tstruct dlb2_list_entry *iter2;\n+\tstruct dlb2_function_resources *rsrcs;\n+\tstruct dlb2_hw_domain *domain;\n+\tstruct dlb2_ldb_queue *queue;\n+\tRTE_SET_USED(iter1);\n+\tRTE_SET_USED(iter2);\n+\n+\tif (id >= DLB2_MAX_NUM_LDB_QUEUES)\n+\t\treturn NULL;\n+\n+\trsrcs = (vdev_req) ? 
&hw->vdev[vdev_id] : &hw->pf;\n+\n+\tif (!vdev_req)\n+\t\treturn &hw->rsrcs.ldb_queues[id];\n+\n+\tDLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {\n+\t\t\tif (queue->id.virt_id == id)\n+\t\t\t\treturn queue;\n+\t\t}\n+\t}\n+\n+\tDLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {\n+\t\tif (queue->id.virt_id == id)\n+\t\t\treturn queue;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,\n+\t\t\t\t\t\t      u32 id,\n+\t\t\t\t\t\t      bool vdev_req,\n+\t\t\t\t\t\t      unsigned int vdev_id)\n+{\n+\tstruct dlb2_list_entry *iteration;\n+\tstruct dlb2_function_resources *rsrcs;\n+\tstruct dlb2_hw_domain *domain;\n+\tRTE_SET_USED(iteration);\n+\n+\tif (id >= DLB2_MAX_NUM_DOMAINS)\n+\t\treturn NULL;\n+\n+\tif (!vdev_req)\n+\t\treturn &hw->domains[id];\n+\n+\trsrcs = &hw->vdev[vdev_id];\n+\n+\tDLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {\n+\t\tif (domain->id.virt_id == id)\n+\t\t\treturn domain;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,\n+\t\t\t\t\t   struct dlb2_ldb_port *port,\n+\t\t\t\t\t   struct dlb2_ldb_queue *queue,\n+\t\t\t\t\t   int slot,\n+\t\t\t\t\t   enum dlb2_qid_map_state new_state)\n+{\n+\tenum dlb2_qid_map_state curr_state = port->qid_map[slot].state;\n+\tstruct dlb2_hw_domain *domain;\n+\tint domain_id;\n+\n+\tdomain_id = port->domain_id.phys_id;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, false, 0);\n+\tif (domain == NULL) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: unable to find domain %d\\n\",\n+\t\t\t    __func__, domain_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tswitch (curr_state) {\n+\tcase DLB2_QUEUE_UNMAPPED:\n+\t\tswitch (new_state) {\n+\t\tcase DLB2_QUEUE_MAPPED:\n+\t\t\tqueue->num_mappings++;\n+\t\t\tport->num_mappings++;\n+\t\t\tbreak;\n+\t\tcase DLB2_QUEUE_MAP_IN_PROG:\n+\t\t\tqueue->num_pending_additions++;\n+\t\t\tdomain->num_pending_additions++;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase DLB2_QUEUE_MAPPED:\n+\t\tswitch (new_state) {\n+\t\tcase DLB2_QUEUE_UNMAPPED:\n+\t\t\tqueue->num_mappings--;\n+\t\t\tport->num_mappings--;\n+\t\t\tbreak;\n+\t\tcase DLB2_QUEUE_UNMAP_IN_PROG:\n+\t\t\tport->num_pending_removals++;\n+\t\t\tdomain->num_pending_removals++;\n+\t\t\tbreak;\n+\t\tcase DLB2_QUEUE_MAPPED:\n+\t\t\t/* Priority change, nothing to update */\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase DLB2_QUEUE_MAP_IN_PROG:\n+\t\tswitch (new_state) {\n+\t\tcase DLB2_QUEUE_UNMAPPED:\n+\t\t\tqueue->num_pending_additions--;\n+\t\t\tdomain->num_pending_additions--;\n+\t\t\tbreak;\n+\t\tcase DLB2_QUEUE_MAPPED:\n+\t\t\tqueue->num_mappings++;\n+\t\t\tport->num_mappings++;\n+\t\t\tqueue->num_pending_additions--;\n+\t\t\tdomain->num_pending_additions--;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase DLB2_QUEUE_UNMAP_IN_PROG:\n+\t\tswitch (new_state) {\n+\t\tcase DLB2_QUEUE_UNMAPPED:\n+\t\t\tport->num_pending_removals--;\n+\t\t\tdomain->num_pending_removals--;\n+\t\t\tqueue->num_mappings--;\n+\t\t\tport->num_mappings--;\n+\t\t\tbreak;\n+\t\tcase DLB2_QUEUE_MAPPED:\n+\t\t\tport->num_pending_removals--;\n+\t\t\tdomain->num_pending_removals--;\n+\t\t\tbreak;\n+\t\tcase DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:\n+\t\t\t/* Nothing to update */\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tcase 
DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:\n+\t\tswitch (new_state) {\n+\t\tcase DLB2_QUEUE_UNMAP_IN_PROG:\n+\t\t\t/* Nothing to update */\n+\t\t\tbreak;\n+\t\tcase DLB2_QUEUE_UNMAPPED:\n+\t\t\t/*\n+\t\t\t * An UNMAP_IN_PROG_PENDING_MAP slot briefly\n+\t\t\t * becomes UNMAPPED before it transitions to\n+\t\t\t * MAP_IN_PROG.\n+\t\t\t */\n+\t\t\tqueue->num_mappings--;\n+\t\t\tport->num_mappings--;\n+\t\t\tport->num_pending_removals--;\n+\t\t\tdomain->num_pending_removals--;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tgoto error;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tgoto error;\n+\t}\n+\n+\tport->qid_map[slot].state = new_state;\n+\n+\tDLB2_HW_DBG(hw,\n+\t\t    \"[%s()] queue %d -> port %d state transition (%d -> %d)\\n\",\n+\t\t    __func__, queue->id.phys_id, port->id.phys_id,\n+\t\t    curr_state, new_state);\n+\treturn 0;\n+\n+error:\n+\tDLB2_HW_ERR(hw,\n+\t\t    \"[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\\n\",\n+\t\t    __func__, queue->id.phys_id, port->id.phys_id,\n+\t\t    curr_state, new_state);\n+\treturn -EFAULT;\n+}\n+\n+static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,\n+\t\t\t\tenum dlb2_qid_map_state state,\n+\t\t\t\tint *slot)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tif (port->qid_map[i].state == state)\n+\t\t\tbreak;\n+\t}\n+\n+\t*slot = i;\n+\n+\treturn (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);\n+}\n+\n+static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,\n+\t\t\t\t      enum dlb2_qid_map_state state,\n+\t\t\t\t      struct dlb2_ldb_queue *queue,\n+\t\t\t\t      int *slot)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tif (port->qid_map[i].state == state &&\n+\t\t    port->qid_map[i].qid == queue->id.phys_id)\n+\t\t\tbreak;\n+\t}\n+\n+\t*slot = i;\n+\n+\treturn (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);\n+}\n+\n+/*\n+ * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as\n+ * their function names imply, and should only be called by the dynamic CQ\n+ * mapping code.\n+ */\n+static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,\n+\t\t\t\t\t      struct dlb2_hw_domain *domain,\n+\t\t\t\t\t      struct dlb2_ldb_queue *queue)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint slot, i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tenum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;\n+\n+\t\t\tif (!dlb2_port_find_slot_queue(port, state,\n+\t\t\t\t\t\t       queue, &slot))\n+\t\t\t\tcontinue;\n+\n+\t\t\tif (port->enabled)\n+\t\t\t\tdlb2_ldb_port_cq_disable(hw, port);\n+\t\t}\n+\t}\n+}\n+\n+static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,\n+\t\t\t\t\t     struct dlb2_hw_domain *domain,\n+\t\t\t\t\t     struct dlb2_ldb_queue *queue)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint slot, i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tenum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;\n+\n+\t\t\tif (!dlb2_port_find_slot_queue(port, state,\n+\t\t\t\t\t\t       queue, &slot))\n+\t\t\t\tcontinue;\n+\n+\t\t\tif (port->enabled)\n+\t\t\t\tdlb2_ldb_port_cq_enable(hw, port);\n+\t\t}\n+\t}\n+}\n+\n+static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,\n+\t\t\t\t\t\tstruct dlb2_ldb_port *port,\n+\t\t\t\t\t\tint slot)\n+{\n+\tu32 ctrl = 0;\n+\n+\tDLB2_BITS_SET(ctrl, 
port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);\n+\tDLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);\n+\tDLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);\n+\n+\tdlb2_flush_csr(hw);\n+}\n+\n+static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,\n+\t\t\t\t\t      struct dlb2_ldb_port *port,\n+\t\t\t\t\t      int slot)\n+{\n+\tu32 ctrl = 0;\n+\n+\tDLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);\n+\tDLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);\n+\tDLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);\n+\tDLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);\n+\n+\tdlb2_flush_csr(hw);\n+}\n+\n+static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_ldb_port *p,\n+\t\t\t\t\tstruct dlb2_ldb_queue *q,\n+\t\t\t\t\tu8 priority)\n+{\n+\tenum dlb2_qid_map_state state;\n+\tu32 lsp_qid2cq2;\n+\tu32 lsp_qid2cq;\n+\tu32 atm_qid2cq;\n+\tu32 cq2priov;\n+\tu32 cq2qid;\n+\tint i;\n+\n+\t/* Look for a pending or already mapped slot, else an unused slot */\n+\tif (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&\n+\t    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&\n+\t    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: CQ has no available QID mapping slots\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* Read-modify-write the priority and valid bit register */\n+\tcq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));\n+\n+\tcq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;\n+\tcq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)\n+\t\t    & DLB2_LSP_CQ2PRIOV_PRIO;\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);\n+\n+\t/* Read-modify-write the QID map register */\n+\tif (i < 4)\n+\t\tcq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,\n+\t\t\t\t\t\t\t  p->id.phys_id));\n+\telse\n+\t\tcq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,\n+\t\t\t\t\t\t\t  p->id.phys_id));\n+\n+\tif (i == 0 || i == 4)\n+\t\tDLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);\n+\tif (i == 1 || i == 5)\n+\t\tDLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);\n+\tif (i == 2 || i == 6)\n+\t\tDLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);\n+\tif (i == 3 || i == 7)\n+\t\tDLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);\n+\n+\tif (i < 4)\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);\n+\telse\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);\n+\n+\tatm_qid2cq = DLB2_CSR_RD(hw,\n+\t\t\t\t DLB2_ATM_QID2CQIDIX(q->id.phys_id,\n+\t\t\t\t\t\tp->id.phys_id / 4));\n+\n+\tlsp_qid2cq = DLB2_CSR_RD(hw,\n+\t\t\t\t DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,\n+\t\t\t\t\t\tp->id.phys_id / 4));\n+\n+\tlsp_qid2cq2 = DLB2_CSR_RD(hw,\n+\t\t\t\t  DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,\n+\t\t\t\t\t\t  p->id.phys_id / 4));\n+\n+\tswitch (p->id.phys_id % 4) {\n+\tcase 0:\n+\t\tDLB2_BIT_SET(atm_qid2cq,\n+\t\t\t     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq2,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));\n+\t\tbreak;\n+\n+\tcase 1:\n+\t\tDLB2_BIT_SET(atm_qid2cq,\n+\t\t\t     1 << 
(i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq2,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));\n+\t\tbreak;\n+\n+\tcase 2:\n+\t\tDLB2_BIT_SET(atm_qid2cq,\n+\t\t\t     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq2,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));\n+\t\tbreak;\n+\n+\tcase 3:\n+\t\tDLB2_BIT_SET(atm_qid2cq,\n+\t\t\t     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));\n+\t\tDLB2_BIT_SET(lsp_qid2cq2,\n+\t\t\t     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));\n+\t\tbreak;\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),\n+\t\t    atm_qid2cq);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_QID2CQIDIX(hw->ver,\n+\t\t\t\t\tq->id.phys_id, p->id.phys_id / 4),\n+\t\t    lsp_qid2cq);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_QID2CQIDIX2(hw->ver,\n+\t\t\t\t\t q->id.phys_id, p->id.phys_id / 4),\n+\t\t    lsp_qid2cq2);\n+\n+\tdlb2_flush_csr(hw);\n+\n+\tp->qid_map[i].qid = q->id.phys_id;\n+\tp->qid_map[i].priority = priority;\n+\n+\tstate = DLB2_QUEUE_MAPPED;\n+\n+\treturn dlb2_port_slot_state_transition(hw, p, q, i, state);\n+}\n+\n+static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,\n+\t\t\t\t\t   struct dlb2_ldb_port *port,\n+\t\t\t\t\t   struct dlb2_ldb_queue *queue,\n+\t\t\t\t\t   int slot)\n+{\n+\tu32 ctrl = 0;\n+\tu32 active;\n+\tu32 enq;\n+\n+\t/* Set the atomic scheduling haswork bit */\n+\tactive = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,\n+\t\t\t\t\t\t\t queue->id.phys_id));\n+\n+\tDLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);\n+\tDLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);\n+\tDLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);\n+\tDLB2_BITS_SET(ctrl,\n+\t\t      DLB2_BITS_GET(active,\n+\t\t\t\t    DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,\n+\t\t\t\t    DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);\n+\n+\t/* Set the non-atomic scheduling haswork bit */\n+\tDLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);\n+\n+\tenq = DLB2_CSR_RD(hw,\n+\t\t\t  DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,\n+\t\t\t\t\t\t       queue->id.phys_id));\n+\n+\tmemset(&ctrl, 0, sizeof(ctrl));\n+\n+\tDLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);\n+\tDLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);\n+\tDLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);\n+\tDLB2_BITS_SET(ctrl,\n+\t\t      DLB2_BITS_GET(enq,\n+\t\t\t\t    DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,\n+\t\t      DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);\n+\n+\tdlb2_flush_csr(hw);\n+\n+\treturn 0;\n+}\n+\n+static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,\n+\t\t\t\t\t      struct dlb2_ldb_port *port,\n+\t\t\t\t\t      u8 slot)\n+{\n+\tu32 ctrl = 0;\n+\n+\tDLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);\n+\tDLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);\n+\tDLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);\n+\n+\tmemset(&ctrl, 0, sizeof(ctrl));\n+\n+\tDLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);\n+\tDLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);\n+\tDLB2_BIT_SET(ctrl, 
DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);\n+\n+\tdlb2_flush_csr(hw);\n+}\n+\n+static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,\n+\t\t\t\t\t      struct dlb2_ldb_queue *queue)\n+{\n+\tu32 infl_lim = 0;\n+\n+\tDLB2_BITS_SET(infl_lim, queue->num_qid_inflights,\n+\t\t DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),\n+\t\t    infl_lim);\n+}\n+\n+static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,\n+\t\t\t\t\t\tstruct dlb2_ldb_queue *queue)\n+{\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),\n+\t\t    DLB2_LSP_QID_LDB_INFL_LIM_RST);\n+}\n+\n+static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,\n+\t\t\t\t\t\tstruct dlb2_hw_domain *domain,\n+\t\t\t\t\t\tstruct dlb2_ldb_port *port,\n+\t\t\t\t\t\tstruct dlb2_ldb_queue *queue)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tenum dlb2_qid_map_state state;\n+\tint slot, ret, i;\n+\tu32 infl_cnt;\n+\tu8 prio;\n+\tRTE_SET_USED(iter);\n+\n+\tinfl_cnt = DLB2_CSR_RD(hw,\n+\t\t\t       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,\n+\t\t\t\t\t\t    queue->id.phys_id));\n+\n+\tif (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: non-zero QID inflight count\\n\",\n+\t\t\t    __func__);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/*\n+\t * Static map the port and set its corresponding has_work bits.\n+\t */\n+\tstate = DLB2_QUEUE_MAP_IN_PROG;\n+\tif (!dlb2_port_find_slot_queue(port, state, queue, &slot))\n+\t\treturn -EINVAL;\n+\n+\tprio = port->qid_map[slot].priority;\n+\n+\t/*\n+\t * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and\n+\t * the port's qid_map state.\n+\t */\n+\tret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/*\n+\t * Ensure IF_status(cq,qid) is 0 before enabling the port to\n+\t * prevent spurious schedules from causing the queue's inflight\n+\t * count to increase.\n+\t */\n+\tdlb2_ldb_port_clear_queue_if_status(hw, port, slot);\n+\n+\t/* Reset the queue's inflight status */\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tstate = DLB2_QUEUE_MAPPED;\n+\t\t\tif (!dlb2_port_find_slot_queue(port, state,\n+\t\t\t\t\t\t       queue, &slot))\n+\t\t\t\tcontinue;\n+\n+\t\t\tdlb2_ldb_port_set_queue_if_status(hw, port, slot);\n+\t\t}\n+\t}\n+\n+\tdlb2_ldb_queue_set_inflight_limit(hw, queue);\n+\n+\t/* Re-enable CQs mapped to this queue */\n+\tdlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);\n+\n+\t/* If this queue has other mappings pending, clear its inflight limit */\n+\tif (queue->num_pending_additions > 0)\n+\t\tdlb2_ldb_queue_clear_inflight_limit(hw, queue);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * dlb2_ldb_port_map_qid_dynamic() - perform a \"dynamic\" QID->CQ mapping\n+ * @hw: dlb2_hw handle for a particular device.\n+ * @port: load-balanced port\n+ * @queue: load-balanced queue\n+ * @priority: queue servicing priority\n+ *\n+ * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur\n+ * at a later point, and <0 if an error occurred.\n+ */\n+static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,\n+\t\t\t\t\t struct dlb2_ldb_port *port,\n+\t\t\t\t\t struct dlb2_ldb_queue *queue,\n+\t\t\t\t\t u8 priority)\n+{\n+\tenum dlb2_qid_map_state 
state;\n+\tstruct dlb2_hw_domain *domain;\n+\tint domain_id, slot, ret;\n+\tu32 infl_cnt;\n+\n+\tdomain_id = port->domain_id.phys_id;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, false, 0);\n+\tif (domain == NULL) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: unable to find domain %d\\n\",\n+\t\t\t    __func__, port->domain_id.phys_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/*\n+\t * Set the QID inflight limit to 0 to prevent further scheduling of the\n+\t * queue.\n+\t */\n+\tDLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,\n+\t\t\t\t\t\t  queue->id.phys_id), 0);\n+\n+\tif (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"Internal error: No available unmapped slots\\n\");\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tport->qid_map[slot].qid = queue->id.phys_id;\n+\tport->qid_map[slot].priority = priority;\n+\n+\tstate = DLB2_QUEUE_MAP_IN_PROG;\n+\tret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tinfl_cnt = DLB2_CSR_RD(hw,\n+\t\t\t       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,\n+\t\t\t\t\t\t    queue->id.phys_id));\n+\n+\tif (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {\n+\t\t/*\n+\t\t * The queue is owed completions so it's not safe to map it\n+\t\t * yet. Schedule a kernel thread to complete the mapping later,\n+\t\t * once software has completed all the queue's inflight events.\n+\t\t */\n+\t\tif (!os_worker_active(hw))\n+\t\t\tos_schedule_work(hw);\n+\n+\t\treturn 1;\n+\t}\n+\n+\t/*\n+\t * Disable the affected CQ, and the CQs already mapped to the QID,\n+\t * before reading the QID's inflight count a second time. There is an\n+\t * unlikely race in which the QID may schedule one more QE after we\n+\t * read an inflight count of 0, and disabling the CQs guarantees that\n+\t * the race will not occur after a re-read of the inflight count\n+\t * register.\n+\t */\n+\tif (port->enabled)\n+\t\tdlb2_ldb_port_cq_disable(hw, port);\n+\n+\tdlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);\n+\n+\tinfl_cnt = DLB2_CSR_RD(hw,\n+\t\t\t       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,\n+\t\t\t\t\t\t    queue->id.phys_id));\n+\n+\tif (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {\n+\t\tif (port->enabled)\n+\t\t\tdlb2_ldb_port_cq_enable(hw, port);\n+\n+\t\tdlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);\n+\n+\t\t/*\n+\t\t * The queue is owed completions so it's not safe to map it\n+\t\t * yet. 
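The disable-and-re-read step that follows is what makes a zero inflight reading trustworthy: a counter that reads 0 can become non-zero immediately afterwards unless scheduling into the CQs is stopped first. A minimal, self-contained sketch of this check/quiesce/re-check idiom; the helpers and the static state below are hypothetical stand-ins for the CSR accesses used in the driver code above:

    #include <stdbool.h>

    static unsigned int inflights;	/* stands in for the INFL_CNT CSR */
    static bool cqs_enabled = true;	/* stands in for the CQ disable CSRs */

    static unsigned int read_inflights(void) { return inflights; }
    static void quiesce_cqs(void) { cqs_enabled = false; }
    static void resume_cqs(void) { cqs_enabled = true; }

    /* Returns true when the mapping can proceed immediately. */
    static bool inflights_quiesced(void)
    {
    	if (read_inflights() != 0)
    		return false;		/* clearly busy: defer the map */

    	/*
    	 * The count read 0, but one more QE could be scheduled right
    	 * after the read. Quiesce the CQs and re-read: once quiesced,
    	 * the counter can no longer grow, so a second 0 is final.
    	 */
    	quiesce_cqs();

    	if (read_inflights() != 0) {
    		resume_cqs();		/* lost the race: defer the map */
    		return false;
    	}

    	return true;
    }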
Schedule a kernel thread to complete the mapping later,\n+\t\t * once software has completed all the queue's inflight events.\n+\t\t */\n+\t\tif (!os_worker_active(hw))\n+\t\t\tos_schedule_work(hw);\n+\n+\t\treturn 1;\n+\t}\n+\n+\treturn dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);\n+}\n+\n+static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain,\n+\t\t\t\t\tstruct dlb2_ldb_port *port)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tu32 infl_cnt;\n+\t\tstruct dlb2_ldb_queue *queue;\n+\t\tint qid;\n+\n+\t\tif (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)\n+\t\t\tcontinue;\n+\n+\t\tqid = port->qid_map[i].qid;\n+\n+\t\tqueue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);\n+\n+\t\tif (queue == NULL) {\n+\t\t\tDLB2_HW_ERR(hw,\n+\t\t\t\t    \"[%s()] Internal error: unable to find queue %d\\n\",\n+\t\t\t\t    __func__, qid);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tinfl_cnt = DLB2_CSR_RD(hw,\n+\t\t\t\t       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));\n+\n+\t\tif (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))\n+\t\t\tcontinue;\n+\n+\t\t/*\n+\t\t * Disable the affected CQ, and the CQs already mapped to the\n+\t\t * QID, before reading the QID's inflight count a second time.\n+\t\t * There is an unlikely race in which the QID may schedule one\n+\t\t * more QE after we read an inflight count of 0, and disabling\n+\t\t * the CQs guarantees that the race will not occur after a\n+\t\t * re-read of the inflight count register.\n+\t\t */\n+\t\tif (port->enabled)\n+\t\t\tdlb2_ldb_port_cq_disable(hw, port);\n+\n+\t\tdlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);\n+\n+\t\tinfl_cnt = DLB2_CSR_RD(hw,\n+\t\t\t\t       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));\n+\n+\t\tif (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {\n+\t\t\tif (port->enabled)\n+\t\t\t\tdlb2_ldb_port_cq_enable(hw, port);\n+\n+\t\t\tdlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);\n+\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tdlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);\n+\t}\n+}\n+\n+static unsigned int\n+dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,\n+\t\t\t\t      struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tif (!domain->configured || domain->num_pending_additions == 0)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)\n+\t\t\tdlb2_domain_finish_map_port(hw, domain, port);\n+\t}\n+\n+\treturn domain->num_pending_additions;\n+}\n+\n+static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,\n+\t\t\t\t   struct dlb2_ldb_port *port,\n+\t\t\t\t   struct dlb2_ldb_queue *queue)\n+{\n+\tenum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;\n+\tu32 lsp_qid2cq2;\n+\tu32 lsp_qid2cq;\n+\tu32 atm_qid2cq;\n+\tu32 cq2priov;\n+\tu32 queue_id;\n+\tu32 port_id;\n+\tint i;\n+\n+\t/* Find the queue's slot */\n+\tmapped = DLB2_QUEUE_MAPPED;\n+\tin_progress = DLB2_QUEUE_UNMAP_IN_PROG;\n+\tpending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;\n+\n+\tif (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&\n+\t    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&\n+\t    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: QID %d isn't mapped\\n\",\n+\t\t\t    __func__, __LINE__, queue->id.phys_id);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tport_id = 
port->id.phys_id;\n+\tqueue_id = queue->id.phys_id;\n+\n+\t/* Read-modify-write the priority and valid bit register */\n+\tcq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));\n+\n+\tcq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);\n+\n+\tatm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,\n+\t\t\t\t\t\t\t port_id / 4));\n+\n+\tlsp_qid2cq = DLB2_CSR_RD(hw,\n+\t\t\t\t DLB2_LSP_QID2CQIDIX(hw->ver,\n+\t\t\t\t\t\tqueue_id, port_id / 4));\n+\n+\tlsp_qid2cq2 = DLB2_CSR_RD(hw,\n+\t\t\t\t  DLB2_LSP_QID2CQIDIX2(hw->ver,\n+\t\t\t\t\t\t  queue_id, port_id / 4));\n+\n+\tswitch (port_id % 4) {\n+\tcase 0:\n+\t\tatm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));\n+\t\tlsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));\n+\t\tlsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));\n+\t\tbreak;\n+\n+\tcase 1:\n+\t\tatm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));\n+\t\tlsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));\n+\t\tlsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));\n+\t\tbreak;\n+\n+\tcase 2:\n+\t\tatm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));\n+\t\tlsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));\n+\t\tlsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));\n+\t\tbreak;\n+\n+\tcase 3:\n+\t\tatm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));\n+\t\tlsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));\n+\t\tlsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));\n+\t\tbreak;\n+\t}\n+\n+\tDLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),\n+\t\t    lsp_qid2cq);\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),\n+\t\t    lsp_qid2cq2);\n+\n+\tdlb2_flush_csr(hw);\n+\n+\tunmapped = DLB2_QUEUE_UNMAPPED;\n+\n+\treturn dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);\n+}\n+\n+static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,\n+\t\t\t\t struct dlb2_hw_domain *domain,\n+\t\t\t\t struct dlb2_ldb_port *port,\n+\t\t\t\t struct dlb2_ldb_queue *queue,\n+\t\t\t\t u8 prio)\n+{\n+\tif (domain->started)\n+\t\treturn dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);\n+\telse\n+\t\treturn dlb2_ldb_port_map_qid_static(hw, port, queue, prio);\n+}\n+\n+static void\n+dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,\n+\t\t\t\t   struct dlb2_hw_domain *domain,\n+\t\t\t\t   struct dlb2_ldb_port *port,\n+\t\t\t\t   int slot)\n+{\n+\tenum dlb2_qid_map_state state;\n+\tstruct dlb2_ldb_queue *queue;\n+\n+\tqueue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];\n+\n+\tstate = port->qid_map[slot].state;\n+\n+\t/* Update the QID2CQIDX and CQ2QID vectors */\n+\tdlb2_ldb_port_unmap_qid(hw, port, queue);\n+\n+\t/*\n+\t * Ensure the QID will not be serviced by this {CQ, slot} by clearing\n+\t * the has_work bits\n+\t */\n+\tdlb2_ldb_port_clear_has_work_bits(hw, port, slot);\n+\n+\t/* Reset the {CQ, slot} to its default state */\n+\tdlb2_ldb_port_set_queue_if_status(hw, port, slot);\n+\n+\t/* Re-enable the CQ if it was not manually disabled by the user */\n+\tif (port->enabled)\n+\t\tdlb2_ldb_port_cq_enable(hw, port);\n+\n+\t/*\n+\t * If there is a mapping that is pending this slot's removal, perform\n+\t * the mapping now.\n+\t */\n+\tif (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {\n+\t\tstruct dlb2_ldb_port_qid_map *map;\n+\t\tstruct dlb2_ldb_queue 
*map_queue;\n+\t\tu8 prio;\n+\n+\t\tmap = &port->qid_map[slot];\n+\n+\t\tmap->qid = map->pending_qid;\n+\t\tmap->priority = map->pending_priority;\n+\n+\t\tmap_queue = &hw->rsrcs.ldb_queues[map->qid];\n+\t\tprio = map->priority;\n+\n+\t\tdlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);\n+\t}\n+}\n+\n+\n+static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,\n+\t\t\t\t\t  struct dlb2_hw_domain *domain,\n+\t\t\t\t\t  struct dlb2_ldb_port *port)\n+{\n+\tu32 infl_cnt;\n+\tint i;\n+\n+\tif (port->num_pending_removals == 0)\n+\t\treturn false;\n+\n+\t/*\n+\t * The unmap requires all the CQ's outstanding inflights to be\n+\t * completed.\n+\t */\n+\tinfl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,\n+\t\t\t\t\t\t       port->id.phys_id));\n+\tif (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)\n+\t\treturn false;\n+\n+\tfor (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {\n+\t\tstruct dlb2_ldb_port_qid_map *map;\n+\n+\t\tmap = &port->qid_map[i];\n+\n+\t\tif (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&\n+\t\t    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)\n+\t\t\tcontinue;\n+\n+\t\tdlb2_domain_finish_unmap_port_slot(hw, domain, port, i);\n+\t}\n+\n+\treturn true;\n+}\n+\n+static unsigned int\n+dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tif (!domain->configured || domain->num_pending_removals == 0)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)\n+\t\t\tdlb2_domain_finish_unmap_port(hw, domain, port);\n+\t}\n+\n+\treturn domain->num_pending_removals;\n+}\n+\n+static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tport->enabled = false;\n+\n+\t\t\tdlb2_ldb_port_cq_disable(hw, port);\n+\t\t}\n+\t}\n+}\n+\n+\n+static void dlb2_log_reset_domain(struct dlb2_hw *hw,\n+\t\t\t\t  u32 domain_id,\n+\t\t\t\t  bool vdev_req,\n+\t\t\t\t  unsigned int vdev_id)\n+{\n+\tDLB2_HW_DBG(hw, \"DLB2 reset domain:\\n\");\n+\tif (vdev_req)\n+\t\tDLB2_HW_DBG(hw, \"(Request from vdev %d)\\n\", vdev_id);\n+\tDLB2_HW_DBG(hw, \"\\tDomain ID: %d\\n\", domain_id);\n+}\n+\n+static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,\n+\t\t\t\t\t struct dlb2_hw_domain *domain,\n+\t\t\t\t\t unsigned int vdev_id)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *port;\n+\tu32 vpp_v = 0;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\tunsigned int offs;\n+\t\tu32 virt_id;\n+\n+\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n+\t\t\tvirt_id = port->id.virt_id;\n+\t\telse\n+\t\t\tvirt_id = port->id.phys_id;\n+\n+\t\toffs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);\n+\t}\n+}\n+\n+static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,\n+\t\t\t\t\t struct dlb2_hw_domain *domain,\n+\t\t\t\t\t unsigned int vdev_id)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tu32 vpp_v = 0;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], 
port, iter) {\n+\t\t\tunsigned int offs;\n+\t\t\tu32 virt_id;\n+\n+\t\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n+\t\t\t\tvirt_id = port->id.virt_id;\n+\t\t\telse\n+\t\t\t\tvirt_id = port->id.phys_id;\n+\n+\t\t\toffs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;\n+\n+\t\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);\n+\t\t}\n+\t}\n+}\n+\n+static void\n+dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tu32 int_en = 0;\n+\tu32 wd_en = 0;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,\n+\t\t\t\t\t\t       port->id.phys_id),\n+\t\t\t\t    int_en);\n+\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,\n+\t\t\t\t\t\t      port->id.phys_id),\n+\t\t\t\t    wd_en);\n+\t\t}\n+\t}\n+}\n+\n+static void\n+dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *port;\n+\tu32 int_en = 0;\n+\tu32 wd_en = 0;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),\n+\t\t\t    int_en);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),\n+\t\t\t    wd_en);\n+\t}\n+}\n+\n+static void\n+dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,\n+\t\t\t\t\t  struct dlb2_hw_domain *domain)\n+{\n+\tint domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_queue *queue;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tint idx = domain_offset + queue->id.phys_id;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);\n+\n+\t\tif (queue->id.vdev_owned) {\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),\n+\t\t\t\t    0);\n+\n+\t\t\tidx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +\n+\t\t\t\tqueue->id.virt_id;\n+\n+\t\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);\n+\n+\t\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);\n+\t\t}\n+\t}\n+}\n+\n+static void\n+dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,\n+\t\t\t\t\t  struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *queue;\n+\tunsigned long max_ports;\n+\tint domain_offset;\n+\tRTE_SET_USED(iter);\n+\n+\tmax_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);\n+\n+\tdomain_offset = domain->id.phys_id * max_ports;\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n+\t\tint idx = domain_offset + queue->id.phys_id;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);\n+\n+\t\tif (queue->id.vdev_owned) {\n+\t\t\tidx = queue->id.vdev_id * max_ports + queue->id.virt_id;\n+\n+\t\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);\n+\n+\t\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);\n+\t\t}\n+\t}\n+}\n+\n+static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,\n+\t\t\t\t\t       struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tu32 chk_en = 0;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, 
iter) {\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_CHP_SN_CHK_ENBL(hw->ver,\n+\t\t\t\t\t\t\t port->id.phys_id),\n+\t\t\t\t    chk_en);\n+\t\t}\n+\t}\n+}\n+\n+static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,\n+\t\t\t\t\t\t struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tint j;\n+\n+\t\t\tfor (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {\n+\t\t\t\tif (dlb2_ldb_cq_inflight_count(hw, port) == 0)\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tif (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {\n+\t\t\t\tDLB2_HW_ERR(hw,\n+\t\t\t\t\t    \"[%s()] Internal error: failed to flush load-balanced port %d's completions.\\n\",\n+\t\t\t\t\t    __func__, port->id.phys_id);\n+\t\t\t\treturn -EFAULT;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *port;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\tport->enabled = false;\n+\n+\t\tdlb2_dir_port_cq_disable(hw, port);\n+\t}\n+}\n+\n+static void\n+dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,\n+\t\t\t\t       struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *port;\n+\tu32 pp_v = 0;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_DIR_PP_V(port->id.phys_id),\n+\t\t\t    pp_v);\n+\t}\n+}\n+\n+static void\n+dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,\n+\t\t\t\t       struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tu32 pp_v = 0;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_SYS_LDB_PP_V(port->id.phys_id),\n+\t\t\t\t    pp_v);\n+\t\t}\n+\t}\n+}\n+\n+static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,\n+\t\t\t\t\t    struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *dir_port;\n+\tstruct dlb2_ldb_port *ldb_port;\n+\tstruct dlb2_ldb_queue *queue;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\t/*\n+\t * Confirm that all the domain's queue's inflight counts and AQED\n+\t * active counts are 0.\n+\t */\n+\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tif (!dlb2_ldb_queue_is_empty(hw, queue)) {\n+\t\t\tDLB2_HW_ERR(hw,\n+\t\t\t\t    \"[%s()] Internal error: failed to empty ldb queue %d\\n\",\n+\t\t\t\t    __func__, queue->id.phys_id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\t/* Confirm that all the domain's CQs inflight and token counts are 0. 
*/\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {\n+\t\t\tif (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||\n+\t\t\t    dlb2_ldb_cq_token_count(hw, ldb_port)) {\n+\t\t\t\tDLB2_HW_ERR(hw,\n+\t\t\t\t\t    \"[%s()] Internal error: failed to empty ldb port %d\\n\",\n+\t\t\t\t\t    __func__, ldb_port->id.phys_id);\n+\t\t\t\treturn -EFAULT;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {\n+\t\tif (!dlb2_dir_queue_is_empty(hw, dir_port)) {\n+\t\t\tDLB2_HW_ERR(hw,\n+\t\t\t\t    \"[%s()] Internal error: failed to empty dir queue %d\\n\",\n+\t\t\t\t    __func__, dir_port->id.phys_id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\n+\t\tif (dlb2_dir_cq_token_count(hw, dir_port)) {\n+\t\t\tDLB2_HW_ERR(hw,\n+\t\t\t\t    \"[%s()] Internal error: failed to empty dir port %d\\n\",\n+\t\t\t\t    __func__, dir_port->id.phys_id);\n+\t\t\treturn -EFAULT;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,\n+\t\t\t\t\t\t   struct dlb2_ldb_port *port)\n+{\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_PP2VAS(port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_PP2VAS_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_LDB_CQ2VAS_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_PP2VDEV_RST);\n+\n+\tif (port->id.vdev_owned) {\n+\t\tunsigned int offs;\n+\t\tu32 virt_id;\n+\n+\t\t/*\n+\t\t * DLB uses producer port address bits 17:12 to determine the\n+\t\t * producer port ID. In Scalable IOV mode, PP accesses come\n+\t\t * through the PF MMIO window for the physical producer port,\n+\t\t * so for translation purposes the virtual and physical port\n+\t\t * IDs are equal.\n+\t\t */\n+\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n+\t\t\tvirt_id = port->id.virt_id;\n+\t\telse\n+\t\t\tvirt_id = port->id.phys_id;\n+\n+\t\toffs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_VF_LDB_VPP2PP(offs),\n+\t\t\t    DLB2_SYS_VF_LDB_VPP2PP_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_VF_LDB_VPP_V(offs),\n+\t\t\t    DLB2_SYS_VF_LDB_VPP_V_RST);\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_PP_V(port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_PP_V_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_LDB_DSBL_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_LDB_CQ_DEPTH_RST);\n+\n+\tif (hw->ver != DLB2_HW_V2)\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),\n+\t\t\t    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_LDB_INFL_LIM_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_HIST_LIST_LIM_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_HIST_LIST_BASE_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_HIST_LIST_POP_PTR_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_HIST_LIST_PUSH_PTR_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),\n+\t\t    
DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_LDB_CQ_TMR_THRSH_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_LDB_CQ_INT_ENB_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_CQ_ISR_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_LDB_CQ_WPTR_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_LDB_TKN_CNT_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_CQ_ADDR_L_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_CQ_ADDR_U_RST);\n+\n+\tif (hw->ver == DLB2_HW_V2)\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_LDB_CQ_AT(port->id.phys_id),\n+\t\t\t    DLB2_SYS_LDB_CQ_AT_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_CQ_PASID_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),\n+\t\t    DLB2_SYS_LDB_CQ2VF_PF_RO_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ2QID0_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ2QID1_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ2PRIOV_RST);\n+}\n+\n+static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,\n+\t\t\t\t\t\t struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_port *port;\n+\tint i;\n+\tRTE_SET_USED(iter);\n+\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tDLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)\n+\t\t\t__dlb2_domain_reset_ldb_port_registers(hw, port);\n+\t}\n+}\n+\n+static void\n+__dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,\n+\t\t\t\t       struct dlb2_dir_pq_pair *port)\n+{\n+\tu32 reg = 0;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ2VAS_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_DIR_DSBL_RST);\n+\n+\tDLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);\n+\n+\tif (hw->ver == DLB2_HW_V2)\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);\n+\telse\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ_DEPTH_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),\n+\t\t    
DLB2_CHP_DIR_CQ_TMR_THRSH_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ_INT_ENB_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_CQ_ISR_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,\n+\t\t\t\t\t\t      port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ_WPTR_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_DIR_TKN_CNT_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_CQ_ADDR_L_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_CQ_ADDR_U_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_CQ_AT_RST);\n+\n+\tif (hw->ver == DLB2_HW_V2)\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),\n+\t\t\t    DLB2_SYS_DIR_CQ_AT_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_CQ_PASID_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_CQ_FMT_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_CQ2VF_PF_RO_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),\n+\t\t    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_PP2VAS(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_PP2VAS_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),\n+\t\t    DLB2_CHP_DIR_CQ2VAS_RST);\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_PP2VDEV_RST);\n+\n+\tif (port->id.vdev_owned) {\n+\t\tunsigned int offs;\n+\t\tu32 virt_id;\n+\n+\t\t/*\n+\t\t * DLB uses producer port address bits 17:12 to determine the\n+\t\t * producer port ID. 
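A small worked example of the addressing rule this comment describes, assuming only what the comment itself states (bits 17:12 of a producer port access select the PP); the constant names are illustrative:

    #include <stdint.h>

    /* 4 KiB producer-port pages: bits 11:0 are the page offset, and
     * bits 17:12 (six bits, per the comment above) carry the PP ID.
     */
    #define PP_ID_SHIFT 12
    #define PP_ID_MASK  0x3fu

    static inline unsigned int pp_id_from_addr(uintptr_t mmio_addr)
    {
    	return (unsigned int)((mmio_addr >> PP_ID_SHIFT) & PP_ID_MASK);
    }

An access at window_base + (9 << 12) therefore decodes to producer port 9, which is why, under Scalable IOV, the virtual and physical PP IDs must be equal for the PF-window translation to land on the right physical port.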
In Scalable IOV mode, PP accesses come\n+\t\t * through the PF MMIO window for the physical producer port,\n+\t\t * so for translation purposes the virtual and physical port\n+\t\t * IDs are equal.\n+\t\t */\n+\t\tif (hw->virt_mode == DLB2_VIRT_SRIOV)\n+\t\t\tvirt_id = port->id.virt_id;\n+\t\telse\n+\t\t\tvirt_id = port->id.phys_id;\n+\n+\t\toffs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +\n+\t\t\tvirt_id;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_VF_DIR_VPP2PP(offs),\n+\t\t\t    DLB2_SYS_VF_DIR_VPP2PP_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_VF_DIR_VPP_V(offs),\n+\t\t\t    DLB2_SYS_VF_DIR_VPP_V_RST);\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_DIR_PP_V(port->id.phys_id),\n+\t\t    DLB2_SYS_DIR_PP_V_RST);\n+}\n+\n+static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,\n+\t\t\t\t\t\t struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *port;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)\n+\t\t__dlb2_domain_reset_dir_port_registers(hw, port);\n+}\n+\n+static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,\n+\t\t\t\t\t\t  struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_queue *queue;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tunsigned int queue_id = queue->id.phys_id;\n+\t\tint i;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_LDB_INFL_LIM_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),\n+\t\t\t    DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_LDB_QID_ITS(queue_id),\n+\t\t\t    DLB2_SYS_LDB_QID_ITS_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),\n+\t\t\t    DLB2_CHP_ORD_QID_SN_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),\n+\t\t\t    DLB2_CHP_ORD_QID_SN_MAP_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_LDB_QID_V(queue_id),\n+\t\t\t    DLB2_SYS_LDB_QID_V_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_LDB_QID_CFG_V(queue_id),\n+\t\t\t    DLB2_SYS_LDB_QID_CFG_V_RST);\n+\n+\t\tif (queue->sn_cfg_valid) {\n+\t\t\tu32 offs[2];\n+\n+\t\t\toffs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,\n+\t\t\t\t\t\t\t queue->sn_slot);\n+\t\t\toffs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,\n+\t\t\t\t\t\t\t queue->sn_slot);\n+\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    offs[queue->sn_group],\n+\t\t\t\t    
DLB2_RO_GRP_0_SLT_SHFT_RST);\n+\t\t}\n+\n+\t\tfor (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),\n+\t\t\t\t    DLB2_LSP_QID2CQIDIX_00_RST);\n+\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),\n+\t\t\t\t    DLB2_LSP_QID2CQIDIX2_00_RST);\n+\n+\t\t\tDLB2_CSR_WR(hw,\n+\t\t\t\t    DLB2_ATM_QID2CQIDIX(queue_id, i),\n+\t\t\t\t    DLB2_ATM_QID2CQIDIX_00_RST);\n+\t\t}\n+\t}\n+}\n+\n+static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,\n+\t\t\t\t\t\t  struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_dir_pq_pair *queue;\n+\tRTE_SET_USED(iter);\n+\n+\tDLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,\n+\t\t\t\t\t\t       queue->id.phys_id),\n+\t\t\t    DLB2_LSP_QID_DIR_MAX_DEPTH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,\n+\t\t\t\t\t\t\t  queue->id.phys_id),\n+\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,\n+\t\t\t\t\t\t\t  queue->id.phys_id),\n+\t\t\t    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,\n+\t\t\t\t\t\t\t queue->id.phys_id),\n+\t\t\t    DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),\n+\t\t\t    DLB2_SYS_DIR_QID_ITS_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_DIR_QID_V(queue->id.phys_id),\n+\t\t\t    DLB2_SYS_DIR_QID_V_RST);\n+\t}\n+}\n+\n+static void dlb2_domain_reset_registers(struct dlb2_hw *hw,\n+\t\t\t\t\tstruct dlb2_hw_domain *domain)\n+{\n+\tdlb2_domain_reset_ldb_port_registers(hw, domain);\n+\n+\tdlb2_domain_reset_dir_port_registers(hw, domain);\n+\n+\tdlb2_domain_reset_ldb_queue_registers(hw, domain);\n+\n+\tdlb2_domain_reset_dir_queue_registers(hw, domain);\n+\n+\tif (hw->ver == DLB2_HW_V2) {\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),\n+\t\t\t    DLB2_CHP_CFG_LDB_VAS_CRD_RST);\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),\n+\t\t\t    DLB2_CHP_CFG_DIR_VAS_CRD_RST);\n+\t} else {\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),\n+\t\t\t    DLB2_CHP_CFG_VAS_CRD_RST);\n+\t}\n+}\n+\n+static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,\n+\t\t\t\t\t    struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_dir_pq_pair *tmp_dir_port;\n+\tstruct dlb2_ldb_queue *tmp_ldb_queue;\n+\tstruct dlb2_ldb_port *tmp_ldb_port;\n+\tstruct dlb2_list_entry *iter1;\n+\tstruct dlb2_list_entry *iter2;\n+\tstruct dlb2_function_resources *rsrcs;\n+\tstruct dlb2_dir_pq_pair *dir_port;\n+\tstruct dlb2_ldb_queue *ldb_queue;\n+\tstruct dlb2_ldb_port *ldb_port;\n+\tstruct dlb2_list_head *list;\n+\tint ret, i;\n+\tRTE_SET_USED(tmp_dir_port);\n+\tRTE_SET_USED(tmp_ldb_queue);\n+\tRTE_SET_USED(tmp_ldb_port);\n+\tRTE_SET_USED(iter1);\n+\tRTE_SET_USED(iter2);\n+\n+\trsrcs = domain->parent_func;\n+\n+\t/* Move the domain's ldb queues to the function's avail list */\n+\tlist = &domain->used_ldb_queues;\n+\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {\n+\t\tif (ldb_queue->sn_cfg_valid) {\n+\t\t\tstruct dlb2_sn_group *grp;\n+\n+\t\t\tgrp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];\n+\n+\t\t\tdlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);\n+\t\t\tldb_queue->sn_cfg_valid = 
false;\n+\t\t}\n+\n+\t\tldb_queue->owned = false;\n+\t\tldb_queue->num_mappings = 0;\n+\t\tldb_queue->num_pending_additions = 0;\n+\n+\t\tdlb2_list_del(&domain->used_ldb_queues,\n+\t\t\t      &ldb_queue->domain_list);\n+\t\tdlb2_list_add(&rsrcs->avail_ldb_queues,\n+\t\t\t      &ldb_queue->func_list);\n+\t\trsrcs->num_avail_ldb_queues++;\n+\t}\n+\n+\tlist = &domain->avail_ldb_queues;\n+\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {\n+\t\tldb_queue->owned = false;\n+\n+\t\tdlb2_list_del(&domain->avail_ldb_queues,\n+\t\t\t      &ldb_queue->domain_list);\n+\t\tdlb2_list_add(&rsrcs->avail_ldb_queues,\n+\t\t\t      &ldb_queue->func_list);\n+\t\trsrcs->num_avail_ldb_queues++;\n+\t}\n+\n+\t/* Move the domain's ldb ports to the function's avail list */\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tlist = &domain->used_ldb_ports[i];\n+\t\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,\n+\t\t\t\t       iter1, iter2) {\n+\t\t\tint j;\n+\n+\t\t\tldb_port->owned = false;\n+\t\t\tldb_port->configured = false;\n+\t\t\tldb_port->num_pending_removals = 0;\n+\t\t\tldb_port->num_mappings = 0;\n+\t\t\tldb_port->init_tkn_cnt = 0;\n+\t\t\tldb_port->cq_depth = 0;\n+\t\t\tfor (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)\n+\t\t\t\tldb_port->qid_map[j].state =\n+\t\t\t\t\tDLB2_QUEUE_UNMAPPED;\n+\n+\t\t\tdlb2_list_del(&domain->used_ldb_ports[i],\n+\t\t\t\t      &ldb_port->domain_list);\n+\t\t\tdlb2_list_add(&rsrcs->avail_ldb_ports[i],\n+\t\t\t\t      &ldb_port->func_list);\n+\t\t\trsrcs->num_avail_ldb_ports[i]++;\n+\t\t}\n+\n+\t\tlist = &domain->avail_ldb_ports[i];\n+\t\tDLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,\n+\t\t\t\t       iter1, iter2) {\n+\t\t\tldb_port->owned = false;\n+\n+\t\t\tdlb2_list_del(&domain->avail_ldb_ports[i],\n+\t\t\t\t      &ldb_port->domain_list);\n+\t\t\tdlb2_list_add(&rsrcs->avail_ldb_ports[i],\n+\t\t\t\t      &ldb_port->func_list);\n+\t\t\trsrcs->num_avail_ldb_ports[i]++;\n+\t\t}\n+\t}\n+\n+\t/* Move the domain's dir ports to the function's avail list */\n+\tlist = &domain->used_dir_pq_pairs;\n+\tDLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {\n+\t\tdir_port->owned = false;\n+\t\tdir_port->port_configured = false;\n+\t\tdir_port->init_tkn_cnt = 0;\n+\n+\t\tdlb2_list_del(&domain->used_dir_pq_pairs,\n+\t\t\t      &dir_port->domain_list);\n+\n+\t\tdlb2_list_add(&rsrcs->avail_dir_pq_pairs,\n+\t\t\t      &dir_port->func_list);\n+\t\trsrcs->num_avail_dir_pq_pairs++;\n+\t}\n+\n+\tlist = &domain->avail_dir_pq_pairs;\n+\tDLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {\n+\t\tdir_port->owned = false;\n+\n+\t\tdlb2_list_del(&domain->avail_dir_pq_pairs,\n+\t\t\t      &dir_port->domain_list);\n+\n+\t\tdlb2_list_add(&rsrcs->avail_dir_pq_pairs,\n+\t\t\t      &dir_port->func_list);\n+\t\trsrcs->num_avail_dir_pq_pairs++;\n+\t}\n+\n+\t/* Return hist list entries to the function */\n+\tret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,\n+\t\t\t\t    domain->hist_list_entry_base,\n+\t\t\t\t    domain->total_hist_list_entries);\n+\tif (ret) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: domain hist list base does not match the function's bitmap.\\n\",\n+\t\t\t    __func__);\n+\t\treturn ret;\n+\t}\n+\n+\tdomain->total_hist_list_entries = 0;\n+\tdomain->avail_hist_list_entries = 0;\n+\tdomain->hist_list_entry_base = 0;\n+\tdomain->hist_list_entry_offset = 0;\n+\n+\tif (hw->ver == DLB2_HW_V2_5) {\n+\t\trsrcs->num_avail_entries += domain->num_credits;\n+\t\tdomain->num_credits = 0;\n+\t} else 
{\n+\t\trsrcs->num_avail_qed_entries += domain->num_ldb_credits;\n+\t\tdomain->num_ldb_credits = 0;\n+\n+\t\trsrcs->num_avail_dqed_entries += domain->num_dir_credits;\n+\t\tdomain->num_dir_credits = 0;\n+\t}\n+\trsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;\n+\trsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;\n+\tdomain->num_avail_aqed_entries = 0;\n+\tdomain->num_used_aqed_entries = 0;\n+\n+\tdomain->num_pending_removals = 0;\n+\tdomain->num_pending_additions = 0;\n+\tdomain->configured = false;\n+\tdomain->started = false;\n+\n+\t/*\n+\t * Move the domain out of the used_domains list and back to the\n+\t * function's avail_domains list.\n+\t */\n+\tdlb2_list_del(&rsrcs->used_domains, &domain->func_list);\n+\tdlb2_list_add(&rsrcs->avail_domains, &domain->func_list);\n+\trsrcs->num_avail_domains++;\n+\n+\treturn 0;\n+}\n+\n+static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,\n+\t\t\t\t\t    struct dlb2_hw_domain *domain,\n+\t\t\t\t\t    struct dlb2_ldb_queue *queue)\n+{\n+\tstruct dlb2_ldb_port *port = NULL;\n+\tint ret, i;\n+\n+\t/* If a domain has LDB queues, it must have LDB ports */\n+\tfor (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {\n+\t\tport = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],\n+\t\t\t\t\t  typeof(*port));\n+\t\tif (port)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (port == NULL) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s()] Internal error: No configured LDB ports\\n\",\n+\t\t\t    __func__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\t/* If necessary, free up a QID slot in this CQ */\n+\tif (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {\n+\t\tstruct dlb2_ldb_queue *mapped_queue;\n+\n+\t\tmapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];\n+\n+\t\tret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\treturn dlb2_domain_drain_mapped_queues(hw, domain);\n+}\n+\n+static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,\n+\t\t\t\t\t     struct dlb2_hw_domain *domain)\n+{\n+\tstruct dlb2_list_entry *iter;\n+\tstruct dlb2_ldb_queue *queue;\n+\tint ret;\n+\tRTE_SET_USED(iter);\n+\n+\t/* If the domain hasn't been started, there's no traffic to drain */\n+\tif (!domain->started)\n+\t\treturn 0;\n+\n+\t/*\n+\t * Pre-condition: the unattached queue must not have any outstanding\n+\t * completions. 
This is ensured by calling dlb2_domain_drain_ldb_cqs()\n+\t * prior to this in dlb2_domain_drain_mapped_queues().\n+\t */\n+\tDLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {\n+\t\tif (queue->num_mappings != 0 ||\n+\t\t    dlb2_ldb_queue_is_empty(hw, queue))\n+\t\t\tcontinue;\n+\n+\t\tret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * dlb2_reset_domain() - reset a scheduling domain\n+ * @hw: dlb2_hw handle for a particular device.\n+ * @domain_id: domain ID.\n+ * @vdev_req: indicates whether this request came from a vdev.\n+ * @vdev_id: If vdev_req is true, this contains the vdev's ID.\n+ *\n+ * This function resets and frees a DLB 2.0 scheduling domain and its associated\n+ * resources.\n+ *\n+ * Pre-condition: the driver must ensure software has stopped sending QEs\n+ * through this domain's producer ports before invoking this function, or\n+ * undefined behavior will result.\n+ *\n+ * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual\n+ * device.\n+ *\n+ * Return:\n+ * Returns 0 upon success, < 0 otherwise.\n+ *\n+ * EINVAL - Invalid domain ID, or the domain is not configured.\n+ * EFAULT - Internal error. (Possibly caused if the pre-condition is not\n+ *\t    met.)\n+ * ETIMEDOUT - Hardware component didn't reset in the expected time.\n+ */\n+int dlb2_reset_domain(struct dlb2_hw *hw,\n+\t\t      u32 domain_id,\n+\t\t      bool vdev_req,\n+\t\t      unsigned int vdev_id)\n+{\n+\tstruct dlb2_hw_domain *domain;\n+\tint ret;\n+\n+\tdlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\n+\tif (domain == NULL || !domain->configured)\n+\t\treturn -EINVAL;\n+\n+\t/* Disable VPPs */\n+\tif (vdev_req) {\n+\t\tdlb2_domain_disable_dir_vpps(hw, domain, vdev_id);\n+\n+\t\tdlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);\n+\t}\n+\n+\t/* Disable CQ interrupts */\n+\tdlb2_domain_disable_dir_port_interrupts(hw, domain);\n+\n+\tdlb2_domain_disable_ldb_port_interrupts(hw, domain);\n+\n+\t/*\n+\t * For each queue owned by this domain, disable its write permissions to\n+\t * cause any traffic sent to it to be dropped. Well-behaved software\n+\t * should not be sending QEs at this point.\n+\t */\n+\tdlb2_domain_disable_dir_queue_write_perms(hw, domain);\n+\n+\tdlb2_domain_disable_ldb_queue_write_perms(hw, domain);\n+\n+\t/* Turn off completion tracking on all the domain's PPs. */\n+\tdlb2_domain_disable_ldb_seq_checks(hw, domain);\n+\n+\t/*\n+\t * Disable the LDB CQs and drain them in order to complete the map and\n+\t * unmap procedures, which require zero CQ inflights and zero QID\n+\t * inflights respectively.\n+\t */\n+\tdlb2_domain_disable_ldb_cqs(hw, domain);\n+\n+\tdlb2_domain_drain_ldb_cqs(hw, domain, false);\n+\n+\tret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = dlb2_domain_finish_map_qid_procedures(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Re-enable the CQs in order to drain the mapped queues. */\n+\tdlb2_domain_enable_ldb_cqs(hw, domain);\n+\n+\tret = dlb2_domain_drain_mapped_queues(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = dlb2_domain_drain_unmapped_queues(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Done draining LDB QEs, so disable the CQs. 
*/\n+\tdlb2_domain_disable_ldb_cqs(hw, domain);\n+\n+\tdlb2_domain_drain_dir_queues(hw, domain);\n+\n+\t/* Done draining DIR QEs, so disable the CQs. */\n+\tdlb2_domain_disable_dir_cqs(hw, domain);\n+\n+\t/* Disable PPs */\n+\tdlb2_domain_disable_dir_producer_ports(hw, domain);\n+\n+\tdlb2_domain_disable_ldb_producer_ports(hw, domain);\n+\n+\tret = dlb2_domain_verify_reset_success(hw, domain);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Reset the QID and port state. */\n+\tdlb2_domain_reset_registers(hw, domain);\n+\n+\t/* Hardware reset complete. Reset the domain's software state. */\n+\treturn dlb2_domain_reset_software_state(hw, domain);\n+}\n",
    "prefixes": [
        "v4",
        "07/27"
    ]
}