get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update: a complete representation of the resource is expected).
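
For illustration, a minimal sketch of fetching this resource programmatically,
assuming Python with the requests library; the URL is the one shown in the
exchange below, and format=json is the usual Django REST Framework query
parameter for raw JSON instead of this browsable page:

    import requests

    # Fetch patch 77523 as JSON (read-only; no authentication required).
    resp = requests.get(
        "https://patches.dpdk.org/api/patches/77523/",
        params={"format": "json"},
    )
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])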

GET /api/patches/77523/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 77523,
    "url": "http://patches.dpdk.org/api/patches/77523/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599855987-25976-18-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599855987-25976-18-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599855987-25976-18-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-09-11T20:26:22",
    "name": "[17/22] event/dlb2: add eventdev stop and close",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f701b6dda00997439d07ea6fe74776ad86716a3d",
    "submitter": {
        "id": 826,
        "url": "http://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599855987-25976-18-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 12164,
            "url": "http://patches.dpdk.org/api/series/12164/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12164",
            "date": "2020-09-11T20:26:05",
            "name": "Add DLB2 PMD",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/12164/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/77523/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/77523/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E1651A04C1;\n\tFri, 11 Sep 2020 22:33:09 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 892401C23B;\n\tFri, 11 Sep 2020 22:30:39 +0200 (CEST)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n by dpdk.org (Postfix) with ESMTP id 189321C126\n for <dev@dpdk.org>; Fri, 11 Sep 2020 22:30:06 +0200 (CEST)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 11 Sep 2020 13:30:06 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by orsmga005.jf.intel.com with ESMTP; 11 Sep 2020 13:30:05 -0700"
        ],
        "IronPort-SDR": [
            "\n u34hunR+DryUGEW/8yfzugEZN2TMS+XMaZF51Q2uK2dcJQ2hIlqt/CzQokpCE7YhdgaFC79Ebg\n S5jJ2QK+0awg==",
            "\n okEx3qqNfa4ku1eVCoLI07XTzDYEgjZn/PqCEXftCPXM36yyusUqChRQsZmQhSsrh0cKrOstVN\n LJG4s3Klj/fg=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9741\"; a=\"156244387\"",
            "E=Sophos;i=\"5.76,417,1592895600\"; d=\"scan'208\";a=\"156244387\"",
            "E=Sophos;i=\"5.76,417,1592895600\"; d=\"scan'208\";a=\"481453654\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,\n harry.van.haaren@intel.com, jerinj@marvell.com",
        "Date": "Fri, 11 Sep 2020 15:26:22 -0500",
        "Message-Id": "<1599855987-25976-18-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1599855987-25976-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1599855987-25976-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 17/22] event/dlb2: add eventdev stop and close",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for eventdev stop and close entry points.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\n---\n drivers/event/dlb2/dlb2.c                  | 261 +++++++++++++++++++++++++++--\n drivers/event/dlb2/dlb2_iface.c            |   6 +\n drivers/event/dlb2/dlb2_iface.h            |   6 +\n drivers/event/dlb2/pf/base/dlb2_resource.c |  91 ++++++++++\n drivers/event/dlb2/pf/dlb2_pf.c            |  52 ++++++\n 5 files changed, 401 insertions(+), 15 deletions(-)",
    "diff": "diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c\nindex 8911c55..43b85d7 100644\n--- a/drivers/event/dlb2/dlb2.c\n+++ b/drivers/event/dlb2/dlb2.c\n@@ -77,21 +77,6 @@ static struct dlb2_port_low_level_io_functions qm_mmio_fns;\n struct process_local_port_data\n dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];\n \n-/*\n- * DUMMY - added so that xstats path will compile/link.\n- * Will be replaced by real version in a subsequent\n- * patch.\n- */\n-uint32_t\n-dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,\n-\t\t     struct dlb2_eventdev_queue *queue)\n-{\n-\tRTE_SET_USED(dlb2);\n-\tRTE_SET_USED(queue);\n-\n-\treturn 0;\n-}\n-\n static void\n dlb2_free_qe_mem(struct dlb2_port *qm_port)\n {\n@@ -3527,6 +3512,250 @@ dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,\n }\n \n static void\n+dlb2_flush_port(struct rte_eventdev *dev, int port_id)\n+{\n+\tstruct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);\n+\teventdev_stop_flush_t flush;\n+\tstruct rte_event ev;\n+\tuint8_t dev_id;\n+\tvoid *arg;\n+\tint i;\n+\n+\tflush = dev->dev_ops->dev_stop_flush;\n+\tdev_id = dev->data->dev_id;\n+\targ = dev->data->dev_stop_flush_arg;\n+\n+\twhile (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {\n+\t\tif (flush)\n+\t\t\tflush(dev_id, ev, arg);\n+\n+\t\tif (dlb2->ev_ports[port_id].qm_port.is_directed)\n+\t\t\tcontinue;\n+\n+\t\tev.op = RTE_EVENT_OP_RELEASE;\n+\n+\t\trte_event_enqueue_burst(dev_id, port_id, &ev, 1);\n+\t}\n+\n+\t/* Enqueue any additional outstanding releases */\n+\tev.op = RTE_EVENT_OP_RELEASE;\n+\n+\tfor (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--)\n+\t\trte_event_enqueue_burst(dev_id, port_id, &ev, 1);\n+}\n+\n+static uint32_t\n+dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,\n+\t\t\t struct dlb2_eventdev_queue *queue)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_get_ldb_queue_depth_args cfg;\n+\tint ret;\n+\n+\tcfg.queue_id = queue->qm_queue.id;\n+\n+\tret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\treturn ret;\n+\t}\n+\n+\treturn cfg.response.id;\n+}\n+\n+static uint32_t\n+dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,\n+\t\t\t struct dlb2_eventdev_queue *queue)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_get_dir_queue_depth_args cfg;\n+\tint ret;\n+\n+\tcfg.queue_id = queue->qm_queue.id;\n+\n+\tret = dlb2_iface_get_dir_queue_depth(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: get_dir_queue_depth ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\treturn ret;\n+\t}\n+\n+\treturn cfg.response.id;\n+}\n+\n+uint32_t\n+dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,\n+\t\t     struct dlb2_eventdev_queue *queue)\n+{\n+\tif (queue->qm_queue.is_directed)\n+\t\treturn dlb2_get_dir_queue_depth(dlb2, queue);\n+\telse\n+\t\treturn dlb2_get_ldb_queue_depth(dlb2, queue);\n+}\n+\n+static bool\n+dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,\n+\t\t    struct dlb2_eventdev_queue *queue)\n+{\n+\treturn dlb2_get_queue_depth(dlb2, queue) == 0;\n+}\n+\n+static bool\n+dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < dlb2->num_queues; i++) {\n+\t\tif (dlb2->ev_queues[i].num_links == 0)\n+\t\t\tcontinue;\n+\t\tif (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn 
true;\n+}\n+\n+static bool\n+dlb2_queues_empty(struct dlb2_eventdev *dlb2)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < dlb2->num_queues; i++) {\n+\t\tif (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static void\n+dlb2_drain(struct rte_eventdev *dev)\n+{\n+\tstruct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);\n+\tstruct dlb2_eventdev_port *ev_port = NULL;\n+\tuint8_t dev_id;\n+\tint i;\n+\n+\tdev_id = dev->data->dev_id;\n+\n+\twhile (!dlb2_linked_queues_empty(dlb2)) {\n+\t\t/* Flush all the ev_ports, which will drain all their connected\n+\t\t * queues.\n+\t\t */\n+\t\tfor (i = 0; i < dlb2->num_ports; i++)\n+\t\t\tdlb2_flush_port(dev, i);\n+\t}\n+\n+\t/* The queues are empty, but there may be events left in the ports. */\n+\tfor (i = 0; i < dlb2->num_ports; i++)\n+\t\tdlb2_flush_port(dev, i);\n+\n+\t/* If the domain's queues are empty, we're done. */\n+\tif (dlb2_queues_empty(dlb2))\n+\t\treturn;\n+\n+\t/* Else, there must be at least one unlinked load-balanced queue.\n+\t * Select a load-balanced port with which to drain the unlinked\n+\t * queue(s).\n+\t */\n+\tfor (i = 0; i < dlb2->num_ports; i++) {\n+\t\tev_port = &dlb2->ev_ports[i];\n+\n+\t\tif (!ev_port->qm_port.is_directed)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == dlb2->num_ports) {\n+\t\tDLB2_LOG_ERR(\"internal error: no LDB ev_ports\\n\");\n+\t\treturn;\n+\t}\n+\n+\trte_errno = 0;\n+\trte_event_port_unlink(dev_id, ev_port->id, NULL, 0);\n+\n+\tif (rte_errno) {\n+\t\tDLB2_LOG_ERR(\"internal error: failed to unlink ev_port %d\\n\",\n+\t\t\t     ev_port->id);\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < dlb2->num_queues; i++) {\n+\t\tuint8_t qid, prio;\n+\t\tint ret;\n+\n+\t\tif (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))\n+\t\t\tcontinue;\n+\n+\t\tqid = i;\n+\t\tprio = 0;\n+\n+\t\t/* Link the ev_port to the queue */\n+\t\tret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);\n+\t\tif (ret != 1) {\n+\t\t\tDLB2_LOG_ERR(\"internal error: failed to link ev_port %d to queue %d\\n\",\n+\t\t\t\t     ev_port->id, qid);\n+\t\t\treturn;\n+\t\t}\n+\n+\t\t/* Flush the queue */\n+\t\twhile (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))\n+\t\t\tdlb2_flush_port(dev, ev_port->id);\n+\n+\t\t/* Drain any extant events in the ev_port. */\n+\t\tdlb2_flush_port(dev, ev_port->id);\n+\n+\t\t/* Unlink the ev_port from the queue */\n+\t\tret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);\n+\t\tif (ret != 1) {\n+\t\t\tDLB2_LOG_ERR(\"internal error: failed to unlink ev_port %d to queue %d\\n\",\n+\t\t\t\t     ev_port->id, qid);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+}\n+\n+static void\n+dlb2_eventdev_stop(struct rte_eventdev *dev)\n+{\n+\t/* FIXME: Handle the case that app threads are waiting in\n+\t * rte_event_dequeue_burst() (either blocked on interrupt or in umwait,\n+\t * or waiting with a timeout). 
Looking into proposing a new function to\n+\t * wake blocked threads that the app must call before stopping a device.\n+\t */\n+\tstruct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);\n+\n+\trte_spinlock_lock(&dlb2->qm_instance.resource_lock);\n+\n+\tif (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {\n+\t\tDLB2_LOG_DBG(\"Internal error: already stopped\\n\");\n+\t\trte_spinlock_unlock(&dlb2->qm_instance.resource_lock);\n+\t\treturn;\n+\t} else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {\n+\t\tDLB2_LOG_ERR(\"Internal error: bad state %d for dev_stop\\n\",\n+\t\t\t     (int)dlb2->run_state);\n+\t\trte_spinlock_unlock(&dlb2->qm_instance.resource_lock);\n+\t\treturn;\n+\t}\n+\n+\tdlb2->run_state = DLB2_RUN_STATE_STOPPING;\n+\n+\trte_spinlock_unlock(&dlb2->qm_instance.resource_lock);\n+\n+\tdlb2_drain(dev);\n+\n+\tdlb2->run_state = DLB2_RUN_STATE_STOPPED;\n+}\n+\n+static int\n+dlb2_eventdev_close(struct rte_eventdev *dev)\n+{\n+\tdlb2_hw_reset_sched_domain(dev, false);\n+\n+\treturn 0;\n+}\n+\n+static void\n dlb2_entry_points_init(struct rte_eventdev *dev)\n {\n \tstruct dlb2_eventdev *dlb2;\n@@ -3536,6 +3765,8 @@ dlb2_entry_points_init(struct rte_eventdev *dev)\n \t\t.dev_infos_get    = dlb2_eventdev_info_get,\n \t\t.dev_configure    = dlb2_eventdev_configure,\n \t\t.dev_start        = dlb2_eventdev_start,\n+\t\t.dev_stop         = dlb2_eventdev_stop,\n+\t\t.dev_close        = dlb2_eventdev_close,\n \t\t.queue_def_conf   = dlb2_eventdev_queue_default_conf_get,\n \t\t.queue_setup      = dlb2_eventdev_queue_setup,\n \t\t.port_def_conf    = dlb2_eventdev_port_default_conf_get,\ndiff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c\nindex 8228bb3..eac2ea2 100644\n--- a/drivers/event/dlb2/dlb2_iface.c\n+++ b/drivers/event/dlb2/dlb2_iface.c\n@@ -80,3 +80,9 @@ int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,\n \n int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,\n \t\t\t\t     struct dlb2_start_domain_args *cfg);\n+\n+int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle,\n+\t\t\t\tstruct dlb2_get_ldb_queue_depth_args *args);\n+\n+int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle,\n+\t\t\t\tstruct dlb2_get_dir_queue_depth_args *args);\ndiff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h\nindex d5bb6be..bcd9446 100644\n--- a/drivers/event/dlb2/dlb2_iface.h\n+++ b/drivers/event/dlb2/dlb2_iface.h\n@@ -66,4 +66,10 @@ extern int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,\n extern int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,\n \t\t\t\tstruct dlb2_start_domain_args *cfg);\n \n+extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle,\n+\t\t\t\tstruct dlb2_get_ldb_queue_depth_args *args);\n+\n+extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle,\n+\t\t\t\tstruct dlb2_get_dir_queue_depth_args *args);\n+\n #endif /* _DLB2_IFACE_H_ */\ndiff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c\nindex 4c4c0b4..a835e97 100644\n--- a/drivers/event/dlb2/pf/base/dlb2_resource.c\n+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c\n@@ -5930,3 +5930,94 @@ dlb2_hw_start_domain(struct dlb2_hw *hw,\n \n \treturn 0;\n }\n+\n+static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,\n+\t\t\t\t\t u32 domain_id,\n+\t\t\t\t\t u32 queue_id,\n+\t\t\t\t\t bool vdev_req,\n+\t\t\t\t\t unsigned int vf_id)\n+{\n+\tDLB2_HW_DBG(hw, \"DLB get directed queue depth:\\n\");\n+\tif (vdev_req)\n+\t\tDLB2_HW_DBG(hw, 
\"(Request from VF %d)\\n\", vf_id);\n+\tDLB2_HW_DBG(hw, \"\\tDomain ID: %d\\n\", domain_id);\n+\tDLB2_HW_DBG(hw, \"\\tQueue ID: %d\\n\", queue_id);\n+}\n+\n+int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,\n+\t\t\t\tu32 domain_id,\n+\t\t\t\tstruct dlb2_get_dir_queue_depth_args *args,\n+\t\t\t\tstruct dlb2_cmd_response *resp,\n+\t\t\t\tbool vdev_req,\n+\t\t\t\tunsigned int vdev_id)\n+{\n+\tstruct dlb2_dir_pq_pair *queue;\n+\tstruct dlb2_hw_domain *domain;\n+\tint id;\n+\n+\tid = domain_id;\n+\n+\tdlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,\n+\t\t\t\t     vdev_req, vdev_id);\n+\n+\tdomain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);\n+\tif (!domain) {\n+\t\tresp->status = DLB2_ST_INVALID_DOMAIN_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tid = args->queue_id;\n+\n+\tqueue = dlb2_get_domain_used_dir_pq(id, vdev_req, domain);\n+\tif (!queue) {\n+\t\tresp->status = DLB2_ST_INVALID_QID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tresp->id = dlb2_dir_queue_depth(hw, queue);\n+\n+\treturn 0;\n+}\n+\n+static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,\n+\t\t\t\t\t u32 domain_id,\n+\t\t\t\t\t u32 queue_id,\n+\t\t\t\t\t bool vdev_req,\n+\t\t\t\t\t unsigned int vf_id)\n+{\n+\tDLB2_HW_DBG(hw, \"DLB get load-balanced queue depth:\\n\");\n+\tif (vdev_req)\n+\t\tDLB2_HW_DBG(hw, \"(Request from VF %d)\\n\", vf_id);\n+\tDLB2_HW_DBG(hw, \"\\tDomain ID: %d\\n\", domain_id);\n+\tDLB2_HW_DBG(hw, \"\\tQueue ID: %d\\n\", queue_id);\n+}\n+\n+int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,\n+\t\t\t\tu32 domain_id,\n+\t\t\t\tstruct dlb2_get_ldb_queue_depth_args *args,\n+\t\t\t\tstruct dlb2_cmd_response *resp,\n+\t\t\t\tbool vdev_req,\n+\t\t\t\tunsigned int vdev_id)\n+{\n+\tstruct dlb2_hw_domain *domain;\n+\tstruct dlb2_ldb_queue *queue;\n+\n+\tdlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,\n+\t\t\t\t     vdev_req, vdev_id);\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\tif (!domain) {\n+\t\tresp->status = DLB2_ST_INVALID_DOMAIN_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tqueue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);\n+\tif (!queue) {\n+\t\tresp->status = DLB2_ST_INVALID_QID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tresp->id = dlb2_ldb_queue_depth(hw, queue);\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c\nindex 084ba64..a901a0e 100644\n--- a/drivers/event/dlb2/pf/dlb2_pf.c\n+++ b/drivers/event/dlb2/pf/dlb2_pf.c\n@@ -532,6 +532,56 @@ dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,\n \treturn ret;\n }\n \n+static int\n+dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,\n+\t\t\t    struct dlb2_get_ldb_queue_depth_args *args)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,\n+\t\t\t\t\t  handle->domain_id,\n+\t\t\t\t\t  args,\n+\t\t\t\t\t  &response,\n+\t\t\t\t\t  false,\n+\t\t\t\t\t  0);\n+\n+\targs->response = response;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Exiting %s() with ret=%d\\n\",\n+\t\t  __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,\n+\t\t\t    struct dlb2_get_dir_queue_depth_args *args)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tint ret = 0;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = 
dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,\n+\t\t\t\t\t  handle->domain_id,\n+\t\t\t\t\t  args,\n+\t\t\t\t\t  &response,\n+\t\t\t\t\t  false,\n+\t\t\t\t\t  0);\n+\n+\targs->response = response;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Exiting %s() with ret=%d\\n\",\n+\t\t  __func__, ret);\n+\n+\treturn ret;\n+}\n+\n static void\n dlb2_pf_iface_fn_ptrs_init(void)\n {\n@@ -550,6 +600,8 @@ dlb2_pf_iface_fn_ptrs_init(void)\n \tdlb2_iface_dir_port_create = dlb2_pf_dir_port_create;\n \tdlb2_iface_map_qid = dlb2_pf_map_qid;\n \tdlb2_iface_unmap_qid = dlb2_pf_unmap_qid;\n+\tdlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;\n+\tdlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;\n \tdlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;\n \tdlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;\n \tdlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;\n",
    "prefixes": [
        "17/22"
    ]
}
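
The Allow header above lists PUT and PATCH, so this resource is writable for
authorized users. A sketch of the write side, assuming Python with the
requests library and a Patchwork API token with maintainer rights on the
project (YOUR_TOKEN is a placeholder, not a real credential; writable fields
such as state are per the Patchwork REST API documentation):

    import requests

    # Partially update patch 77523: change only its state.
    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/77523/",
        headers={"Authorization": "Token YOUR_TOKEN"},
        data={"state": "accepted"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])

The mbox URL in the payload can likewise be downloaded and fed to git am to
apply the patch to a local tree.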