get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body change).

put:
Update a patch (full replacement of the writable fields).
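
A minimal retrieval sketch, assuming Python with the third-party requests package; the URL is the public endpoint shown in the captured response below, and the format=json query parameter selects DRF's plain JSON renderer instead of this browsable view:

    import requests

    # Fetch this patch from the public Patchwork instance as plain JSON.
    resp = requests.get(
        "http://patches.dpdk.org/api/patches/77500/",
        params={"format": "json"},
    )
    resp.raise_for_status()

    patch = resp.json()
    # Field names mirror the response body captured below.
    print(patch["name"], "->", patch["state"], "/", patch["check"])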

GET /api/patches/77500/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 77500,
    "url": "http://patches.dpdk.org/api/patches/77500/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599851920-16802-18-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599851920-16802-18-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599851920-16802-18-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-09-11T19:18:35",
    "name": "[v4,17/22] event/dlb: add eventdev stop and close",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "22c14592756ad75ca152b7a8e8906e81a2463520",
    "submitter": {
        "id": 826,
        "url": "http://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599851920-16802-18-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 12163,
            "url": "http://patches.dpdk.org/api/series/12163/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12163",
            "date": "2020-09-11T19:18:18",
            "name": "Add DLB PMD",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/12163/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/77500/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/77500/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BF78BA04C1;\n\tFri, 11 Sep 2020 21:24:54 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id B8C1A1C21F;\n\tFri, 11 Sep 2020 21:22:30 +0200 (CEST)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by dpdk.org (Postfix) with ESMTP id 2E7191C1BD\n for <dev@dpdk.org>; Fri, 11 Sep 2020 21:22:16 +0200 (CEST)",
            "from fmsmga005.fm.intel.com ([10.253.24.32])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 11 Sep 2020 12:22:15 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by fmsmga005.fm.intel.com with ESMTP; 11 Sep 2020 12:22:13 -0700"
        ],
        "IronPort-SDR": [
            "\n IO6Xh/UQoYN6bkCPKUzGzC9c4nvXL3asUvaQo+cNFhvHpElAJ1FX0snH0OyD7ts01nzMfhoBtI\n LFCRPKG3TqGw==",
            "\n cZQP57zg29Er3PQd7oRZnsFjZ9SJM0sEBZGz8kDkXepdKYEl8hDalyPbgFAdyZy5DDZ90FdcbS\n U3TwLtOK62GQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9741\"; a=\"146570561\"",
            "E=Sophos;i=\"5.76,416,1592895600\"; d=\"scan'208\";a=\"146570561\"",
            "E=Sophos;i=\"5.76,416,1592895600\"; d=\"scan'208\";a=\"506375706\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,\n harry.van.haaren@intel.com, jerinj@marvell.com",
        "Date": "Fri, 11 Sep 2020 14:18:35 -0500",
        "Message-Id": "<1599851920-16802-18-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v4 17/22] event/dlb: add eventdev stop and close",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for eventdev stop and close entry points.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\n---\n drivers/event/dlb/dlb.c                  | 256 +++++++++++++++++++++++++++++--\n drivers/event/dlb/dlb_iface.c            |   6 +\n drivers/event/dlb/dlb_iface.h            |   6 +\n drivers/event/dlb/pf/base/dlb_resource.c |  89 +++++++++++\n drivers/event/dlb/pf/dlb_pf.c            |  47 ++++++\n 5 files changed, 393 insertions(+), 11 deletions(-)",
    "diff": "diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c\nindex ba2323b..1166aa3 100644\n--- a/drivers/event/dlb/dlb.c\n+++ b/drivers/event/dlb/dlb.c\n@@ -90,17 +90,6 @@ dlb_event_enqueue_forward_burst_delayed(void *event_port,\n \t\t\t\t\tconst struct rte_event events[],\n \t\t\t\t\tuint16_t num);\n \n-uint32_t\n-dlb_get_queue_depth(struct dlb_eventdev *dlb,\n-\t\t    struct dlb_eventdev_queue *queue)\n-{\n-\t/* DUMMY FOR NOW So \"xstats\" patch compiles */\n-\tRTE_SET_USED(dlb);\n-\tRTE_SET_USED(queue);\n-\n-\treturn 0;\n-}\n-\n static int\n dlb_hw_query_resources(struct dlb_eventdev *dlb)\n {\n@@ -3640,6 +3629,249 @@ dlb_event_dequeue_sparse(void *event_port, struct rte_event *ev, uint64_t wait)\n \treturn dlb_event_dequeue_burst_sparse(event_port, ev, 1, wait);\n }\n \n+static uint32_t\n+dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb,\n+\t\t\tstruct dlb_eventdev_queue *queue)\n+{\n+\tstruct dlb_hw_dev *handle = &dlb->qm_instance;\n+\tstruct dlb_get_ldb_queue_depth_args cfg;\n+\tstruct dlb_cmd_response response;\n+\tint ret;\n+\n+\tcfg.queue_id = queue->qm_queue.id;\n+\tcfg.response = (uintptr_t)&response;\n+\n+\tret = dlb_iface_get_ldb_queue_depth(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB_LOG_ERR(\"dlb: get_ldb_queue_depth ret=%d (driver status: %s)\\n\",\n+\t\t\t    ret, dlb_error_strings[response.status]);\n+\t\treturn ret;\n+\t}\n+\n+\treturn response.id;\n+}\n+\n+static uint32_t\n+dlb_get_dir_queue_depth(struct dlb_eventdev *dlb,\n+\t\t\tstruct dlb_eventdev_queue *queue)\n+{\n+\tstruct dlb_hw_dev *handle = &dlb->qm_instance;\n+\tstruct dlb_get_dir_queue_depth_args cfg;\n+\tstruct dlb_cmd_response response;\n+\tint ret;\n+\n+\tcfg.queue_id = queue->qm_queue.id;\n+\tcfg.response = (uintptr_t)&response;\n+\n+\tret = dlb_iface_get_dir_queue_depth(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB_LOG_ERR(\"dlb: get_dir_queue_depth ret=%d (driver status: %s)\\n\",\n+\t\t\t    ret, dlb_error_strings[response.status]);\n+\t\treturn ret;\n+\t}\n+\n+\treturn response.id;\n+}\n+\n+uint32_t\n+dlb_get_queue_depth(struct dlb_eventdev *dlb,\n+\t\t    struct dlb_eventdev_queue *queue)\n+{\n+\tif (queue->qm_queue.is_directed)\n+\t\treturn dlb_get_dir_queue_depth(dlb, queue);\n+\telse\n+\t\treturn dlb_get_ldb_queue_depth(dlb, queue);\n+}\n+\n+static bool\n+dlb_queue_is_empty(struct dlb_eventdev *dlb,\n+\t\t   struct dlb_eventdev_queue *queue)\n+{\n+\treturn dlb_get_queue_depth(dlb, queue) == 0;\n+}\n+\n+static bool\n+dlb_linked_queues_empty(struct dlb_eventdev *dlb)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < dlb->num_queues; i++) {\n+\t\tif (dlb->ev_queues[i].num_links == 0)\n+\t\t\tcontinue;\n+\t\tif (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static bool\n+dlb_queues_empty(struct dlb_eventdev *dlb)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < dlb->num_queues; i++) {\n+\t\tif (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static void\n+dlb_flush_port(struct rte_eventdev *dev, int port_id)\n+{\n+\tstruct dlb_eventdev *dlb = dlb_pmd_priv(dev);\n+\teventdev_stop_flush_t flush;\n+\tstruct rte_event ev;\n+\tuint8_t dev_id;\n+\tvoid *arg;\n+\tint i;\n+\n+\tflush = dev->dev_ops->dev_stop_flush;\n+\tdev_id = dev->data->dev_id;\n+\targ = dev->data->dev_stop_flush_arg;\n+\n+\twhile (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {\n+\t\tif (flush)\n+\t\t\tflush(dev_id, ev, arg);\n+\n+\t\tif (dlb->ev_ports[port_id].qm_port.is_directed)\n+\t\t\tcontinue;\n+\n+\t\tev.op = 
RTE_EVENT_OP_RELEASE;\n+\n+\t\trte_event_enqueue_burst(dev_id, port_id, &ev, 1);\n+\t}\n+\n+\t/* Enqueue any additional outstanding releases */\n+\tev.op = RTE_EVENT_OP_RELEASE;\n+\n+\tfor (i = dlb->ev_ports[port_id].outstanding_releases; i > 0; i--)\n+\t\trte_event_enqueue_burst(dev_id, port_id, &ev, 1);\n+}\n+\n+void\n+dlb_drain(struct rte_eventdev *dev)\n+{\n+\tstruct dlb_eventdev *dlb = dlb_pmd_priv(dev);\n+\tstruct dlb_eventdev_port *ev_port = NULL;\n+\tuint8_t dev_id;\n+\tint i;\n+\n+\tdev_id = dev->data->dev_id;\n+\n+\twhile (!dlb_linked_queues_empty(dlb)) {\n+\t\t/* Flush all the ev_ports, which will drain all their connected\n+\t\t * queues.\n+\t\t */\n+\t\tfor (i = 0; i < dlb->num_ports; i++)\n+\t\t\tdlb_flush_port(dev, i);\n+\t}\n+\n+\t/* The queues are empty, but there may be events left in the ports. */\n+\tfor (i = 0; i < dlb->num_ports; i++)\n+\t\tdlb_flush_port(dev, i);\n+\n+\t/* If the domain's queues are empty, we're done. */\n+\tif (dlb_queues_empty(dlb))\n+\t\treturn;\n+\n+\t/* Else, there must be at least one unlinked load-balanced queue.\n+\t * Select a load-balanced port with which to drain the unlinked\n+\t * queue(s).\n+\t */\n+\tfor (i = 0; i < dlb->num_ports; i++) {\n+\t\tev_port = &dlb->ev_ports[i];\n+\n+\t\tif (!ev_port->qm_port.is_directed)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == dlb->num_ports) {\n+\t\tDLB_LOG_ERR(\"internal error: no LDB ev_ports\\n\");\n+\t\treturn;\n+\t}\n+\n+\trte_errno = 0;\n+\trte_event_port_unlink(dev_id, ev_port->id, NULL, 0);\n+\n+\tif (rte_errno) {\n+\t\tDLB_LOG_ERR(\"internal error: failed to unlink ev_port %d\\n\",\n+\t\t\t    ev_port->id);\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < dlb->num_queues; i++) {\n+\t\tuint8_t qid, prio;\n+\t\tint ret;\n+\n+\t\tif (dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))\n+\t\t\tcontinue;\n+\n+\t\tqid = i;\n+\t\tprio = 0;\n+\n+\t\t/* Link the ev_port to the queue */\n+\t\tret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);\n+\t\tif (ret != 1) {\n+\t\t\tDLB_LOG_ERR(\"internal error: failed to link ev_port %d to queue %d\\n\",\n+\t\t\t\t    ev_port->id, qid);\n+\t\t\treturn;\n+\t\t}\n+\n+\t\t/* Flush the queue */\n+\t\twhile (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))\n+\t\t\tdlb_flush_port(dev, ev_port->id);\n+\n+\t\t/* Drain any extant events in the ev_port. 
*/\n+\t\tdlb_flush_port(dev, ev_port->id);\n+\n+\t\t/* Unlink the ev_port from the queue */\n+\t\tret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);\n+\t\tif (ret != 1) {\n+\t\t\tDLB_LOG_ERR(\"internal error: failed to unlink ev_port %d to queue %d\\n\",\n+\t\t\t\t    ev_port->id, qid);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+}\n+\n+static void\n+dlb_eventdev_stop(struct rte_eventdev *dev)\n+{\n+\tstruct dlb_eventdev *dlb = dlb_pmd_priv(dev);\n+\n+\trte_spinlock_lock(&dlb->qm_instance.resource_lock);\n+\n+\tif (dlb->run_state == DLB_RUN_STATE_STOPPED) {\n+\t\tDLB_LOG_DBG(\"Internal error: already stopped\\n\");\n+\t\trte_spinlock_unlock(&dlb->qm_instance.resource_lock);\n+\t\treturn;\n+\t} else if (dlb->run_state != DLB_RUN_STATE_STARTED) {\n+\t\tDLB_LOG_ERR(\"Internal error: bad state %d for dev_stop\\n\",\n+\t\t\t    (int)dlb->run_state);\n+\t\trte_spinlock_unlock(&dlb->qm_instance.resource_lock);\n+\t\treturn;\n+\t}\n+\n+\tdlb->run_state = DLB_RUN_STATE_STOPPING;\n+\n+\trte_spinlock_unlock(&dlb->qm_instance.resource_lock);\n+\n+\tdlb_drain(dev);\n+\n+\tdlb->run_state = DLB_RUN_STATE_STOPPED;\n+}\n+\n+static int\n+dlb_eventdev_close(struct rte_eventdev *dev)\n+{\n+\tdlb_hw_reset_sched_domain(dev, false);\n+\n+\treturn 0;\n+}\n+\n void\n dlb_entry_points_init(struct rte_eventdev *dev)\n {\n@@ -3649,6 +3881,8 @@ dlb_entry_points_init(struct rte_eventdev *dev)\n \t\t.dev_infos_get    = dlb_eventdev_info_get,\n \t\t.dev_configure    = dlb_eventdev_configure,\n \t\t.dev_start        = dlb_eventdev_start,\n+\t\t.dev_stop         = dlb_eventdev_stop,\n+\t\t.dev_close        = dlb_eventdev_close,\n \t\t.queue_def_conf   = dlb_eventdev_queue_default_conf_get,\n \t\t.port_def_conf    = dlb_eventdev_port_default_conf_get,\n \t\t.queue_setup      = dlb_eventdev_queue_setup,\ndiff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c\nindex e0cade1..d35c832 100644\n--- a/drivers/event/dlb/dlb_iface.c\n+++ b/drivers/event/dlb/dlb_iface.c\n@@ -85,3 +85,9 @@ int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,\n int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,\n \t\t\t\t  struct dlb_get_sn_occupancy_args *args);\n \n+int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,\n+\t\t\t\t     struct dlb_get_ldb_queue_depth_args *args);\n+\n+int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,\n+\t\t\t\t     struct dlb_get_dir_queue_depth_args *args);\n+\ndiff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h\nindex 8c905ab..9f61135 100644\n--- a/drivers/event/dlb/dlb_iface.h\n+++ b/drivers/event/dlb/dlb_iface.h\n@@ -73,4 +73,10 @@ extern int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,\n extern int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,\n \t\t\t\t  struct dlb_get_sn_occupancy_args *args);\n \n+extern int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,\n+\t\t\t\t    struct dlb_get_ldb_queue_depth_args *args);\n+\n+extern int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,\n+\t\t\t\t    struct dlb_get_dir_queue_depth_args *args);\n+\n #endif /* _DLB_IFACE_H */\ndiff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c\nindex 829a977..ff2c195 100644\n--- a/drivers/event/dlb/pf/base/dlb_resource.c\n+++ b/drivers/event/dlb/pf/base/dlb_resource.c\n@@ -6811,3 +6811,92 @@ int dlb_hw_start_domain(struct dlb_hw *hw,\n \n \treturn 0;\n }\n+\n+static void dlb_log_get_dir_queue_depth(struct dlb_hw *hw,\n+\t\t\t\t\tu32 domain_id,\n+\t\t\t\t\tu32 
queue_id)\n+{\n+\tDLB_HW_INFO(hw, \"DLB get directed queue depth:\\n\");\n+\tDLB_HW_INFO(hw, \"\\tDomain ID: %d\\n\", domain_id);\n+\tDLB_HW_INFO(hw, \"\\tQueue ID: %d\\n\", queue_id);\n+}\n+\n+int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,\n+\t\t\t       u32 domain_id,\n+\t\t\t       struct dlb_get_dir_queue_depth_args *args,\n+\t\t\t       struct dlb_cmd_response *resp)\n+{\n+\tstruct dlb_dir_pq_pair *queue;\n+\tstruct dlb_domain *domain;\n+\tint id;\n+\n+\tid = domain_id;\n+\n+\tdlb_log_get_dir_queue_depth(hw, domain_id, args->queue_id);\n+\n+\tdomain = dlb_get_domain_from_id(hw, id);\n+\tif (!domain) {\n+\t\tresp->status = DLB_ST_INVALID_DOMAIN_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tid = args->queue_id;\n+\n+\tqueue = dlb_get_domain_used_dir_pq(args->queue_id, domain);\n+\tif (!queue) {\n+\t\tresp->status = DLB_ST_INVALID_QID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tresp->id = dlb_dir_queue_depth(hw, queue);\n+\n+\treturn 0;\n+}\n+\n+static void dlb_log_get_ldb_queue_depth(struct dlb_hw *hw,\n+\t\t\t\t\tu32 domain_id,\n+\t\t\t\t\tu32 queue_id)\n+{\n+\tDLB_HW_INFO(hw, \"DLB get load-balanced queue depth:\\n\");\n+\tDLB_HW_INFO(hw, \"\\tDomain ID: %d\\n\", domain_id);\n+\tDLB_HW_INFO(hw, \"\\tQueue ID: %d\\n\", queue_id);\n+}\n+\n+int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,\n+\t\t\t       u32 domain_id,\n+\t\t\t       struct dlb_get_ldb_queue_depth_args *args,\n+\t\t\t       struct dlb_cmd_response *resp)\n+{\n+\tunion dlb_lsp_qid_aqed_active_cnt r0;\n+\tunion dlb_lsp_qid_atq_enqueue_cnt r1;\n+\tunion dlb_lsp_qid_ldb_enqueue_cnt r2;\n+\tstruct dlb_ldb_queue *queue;\n+\tstruct dlb_domain *domain;\n+\n+\tdlb_log_get_ldb_queue_depth(hw, domain_id, args->queue_id);\n+\n+\tdomain = dlb_get_domain_from_id(hw, domain_id);\n+\tif (!domain) {\n+\t\tresp->status = DLB_ST_INVALID_DOMAIN_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tqueue = dlb_get_domain_ldb_queue(args->queue_id, domain);\n+\tif (!queue) {\n+\t\tresp->status = DLB_ST_INVALID_QID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tr0.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));\n+\n+\tr1.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));\n+\n+\tr2.val = DLB_CSR_RD(hw,\n+\t\t\t    DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));\n+\n+\tresp->id = r0.val + r1.val + r2.val;\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c\nindex 237ade9..870de67 100644\n--- a/drivers/event/dlb/pf/dlb_pf.c\n+++ b/drivers/event/dlb/pf/dlb_pf.c\n@@ -564,6 +564,50 @@ dlb_pf_unmap_qid(struct dlb_hw_dev *handle,\n \treturn ret;\n }\n \n+static int\n+dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,\n+\t\t\t   struct dlb_get_ldb_queue_depth_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,\n+\t\t\t\t\t handle->domain_id,\n+\t\t\t\t\t args,\n+\t\t\t\t\t &response);\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,\n+\t\t\t   struct dlb_get_dir_queue_depth_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret = 0;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = 
dlb_hw_get_dir_queue_depth(&dlb_dev->hw,\n+\t\t\t\t\t handle->domain_id,\n+\t\t\t\t\t args,\n+\t\t\t\t\t &response);\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n static void\n dlb_pf_iface_fn_ptrs_init(void)\n {\n@@ -583,10 +627,13 @@ dlb_pf_iface_fn_ptrs_init(void)\n \tdlb_iface_unmap_qid = dlb_pf_unmap_qid;\n \tdlb_iface_sched_domain_start = dlb_pf_sched_domain_start;\n \tdlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;\n+\tdlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;\n+\tdlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;\n \tdlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;\n \tdlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;\n \tdlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;\n \tdlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;\n+\n }\n \n /* PCI DEV HOOKS */\n",
    "prefixes": [
        "v4",
        "17/22"
    ]
}
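
Per the Allow header above, this record also accepts PUT and PATCH. A hedged sketch of a partial update, assuming a Patchwork API token belonging to a maintainer of the project; the token string and the new state value here are placeholders, not real credentials or a recommended change:

    import requests

    TOKEN = "0123456789abcdef"  # placeholder; substitute a real maintainer API token

    # PATCH performs a partial update: only the fields present in the
    # JSON body are changed, everything else is left as-is.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/77500/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},  # placeholder state value
    )
    resp.raise_for_status()
    print(resp.json()["state"])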