get:
Show a patch.

patch:
Update a patch (partial).

put:
Update a patch.
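For illustration, here is a minimal Python sketch of how these endpoints might be exercised against the record shown below. The endpoint URL and the `state` field come from the response; the token value is hypothetical, and the assumption that PUT/PATCH require an authenticated maintainer account is not stated in this page.

```python
import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 81974
# Hypothetical token; write access (PUT/PATCH) is assumed to need credentials.
HEADERS = {"Authorization": "Token 0123456789abcdef"}

# GET: show the patch (read access needs no authentication).
patch = requests.get(f"{BASE}/patches/{PATCH_ID}/").json()
print(patch["name"], patch["state"])

# PATCH: partial update, e.g. changing only the state field.
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                      json={"state": "accepted"}, headers=HEADERS)
resp.raise_for_status()
```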

GET /api/patches/81974/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 81974,
    "url": "https://patches.dpdk.org/api/patches/81974/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1603477826-31374-13-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1603477826-31374-13-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1603477826-31374-13-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-10-23T18:30:15",
    "name": "[v3,12/23] event/dlb2: add queue setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5242fd74af66cab9144017f75f1cfa1a5f2abfe8",
    "submitter": {
        "id": 826,
        "url": "https://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1603477826-31374-13-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 13284,
            "url": "https://patches.dpdk.org/api/series/13284/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13284",
            "date": "2020-10-23T18:30:03",
            "name": "Add DLB2 PMD",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/13284/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/81974/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/81974/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E7072A04DD;\n\tFri, 23 Oct 2020 20:33:36 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 920E072E7;\n\tFri, 23 Oct 2020 20:29:05 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n by dpdk.org (Postfix) with ESMTP id AB6CF5A93\n for <dev@dpdk.org>; Fri, 23 Oct 2020 20:28:46 +0200 (CEST)",
            "from fmsmga005.fm.intel.com ([10.253.24.32])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 23 Oct 2020 11:28:44 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by fmsmga005.fm.intel.com with ESMTP; 23 Oct 2020 11:28:43 -0700"
        ],
        "IronPort-SDR": [
            "\n ZFQOtGrQGf9TPDJVTW3AY6VXFTl4A5GJebNKl1Qd79zfcYi0gqh5Ek0DAxI+F1uUTm+nPHowyW\n S6I8amvK+Pkw==",
            "\n nPFdso4lCRldk7sz/TxA426EDWGXHs86HSDZ4opH4F2m3vNqXBMWDlqdfoU+U4IoCQbZ14YNqy\n DsJ53LqrBvqA=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9783\"; a=\"231907474\"",
            "E=Sophos;i=\"5.77,409,1596524400\"; d=\"scan'208\";a=\"231907474\"",
            "E=Sophos;i=\"5.77,409,1596524400\"; d=\"scan'208\";a=\"524764046\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "John McNamara <john.mcnamara@intel.com>,\n Marko Kovacevic <marko.kovacevic@intel.com>",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,\n harry.van.haaren@intel.com, jerinj@marvell.com",
        "Date": "Fri, 23 Oct 2020 13:30:15 -0500",
        "Message-Id": "<1603477826-31374-13-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1603477826-31374-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1599855987-25976-2-git-send-email-timothy.mcdaniel@intel.com>\n <1603477826-31374-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 12/23] event/dlb2: add queue setup",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Load balanced (ldb) queues are setup here.\nDirected queues are not set up until link time, at which\npoint we know the directed port ID. Directed queue setup\nwill only fail if this queue is already setup or there are\nno directed queues left to configure.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\nReviewed-by: Gage Eads <gage.eads@intel.com>\n---\n doc/guides/eventdevs/dlb2.rst              |  73 +++--\n drivers/event/dlb2/dlb2.c                  | 312 +++++++++++++++++++\n drivers/event/dlb2/dlb2_iface.c            |  12 +\n drivers/event/dlb2/dlb2_iface.h            |  12 +\n drivers/event/dlb2/pf/base/dlb2_resource.c | 464 +++++++++++++++++++++++++++++\n drivers/event/dlb2/pf/dlb2_main.c          |  10 +\n drivers/event/dlb2/pf/dlb2_pf.c            |  82 +++++\n 7 files changed, 926 insertions(+), 39 deletions(-)",
    "diff": "diff --git a/doc/guides/eventdevs/dlb2.rst b/doc/guides/eventdevs/dlb2.rst\nindex 5f6c486..e57e009 100644\n--- a/doc/guides/eventdevs/dlb2.rst\n+++ b/doc/guides/eventdevs/dlb2.rst\n@@ -47,45 +47,40 @@ setup argument and the per-port ``new_event_threshold`` argument apply as\n defined in the eventdev header file. The limit is applied to all enqueues,\n regardless of whether it will consume a directed or load-balanced credit.\n \n-Load-balanced and Directed Ports\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-DLB2 ports come in two flavors: load-balanced and directed. The eventdev API\n-does not have the same concept, but it has a similar one: ports and queues that\n-are singly-linked (i.e. linked to a single queue or port, respectively).\n-\n-The ``rte_event_dev_info_get()`` function reports the number of available\n-event ports and queues (among other things). For the DLB2 PMD, max_event_ports\n-and max_event_queues report the number of available load-balanced ports and\n-queues, and max_single_link_event_port_queue_pairs reports the number of\n-available directed ports and queues.\n-\n-When a scheduling domain is created in ``rte_event_dev_configure()``, the user\n-specifies ``nb_event_ports`` and ``nb_single_link_event_port_queues``, which\n-control the total number of ports (load-balanced and directed) and the number\n-of directed ports. Hence, the number of requested load-balanced ports is\n-``nb_event_ports - nb_single_link_event_ports``. The ``nb_event_queues`` field\n-specifies the total number of queues (load-balanced and directed). The number\n-of directed queues comes from ``nb_single_link_event_port_queues``, since\n-directed ports and queues come in pairs.\n-\n-When a port is setup, the ``RTE_EVENT_PORT_CFG_SINGLE_LINK`` flag determines\n-whether it should be configured as a directed (the flag is set) or a\n-load-balanced (the flag is unset) port. Similarly, the\n-``RTE_EVENT_QUEUE_CFG_SINGLE_LINK`` queue configuration flag controls\n-whether it is a directed or load-balanced queue.\n-\n-Load-balanced ports can only be linked to load-balanced queues, and directed\n-ports can only be linked to directed queues. Furthermore, directed ports can\n-only be linked to a single directed queue (and vice versa), and that link\n-cannot change after the eventdev is started.\n-\n-The eventdev API does not have a directed scheduling type. To support directed\n-traffic, the dlb PMD detects when an event is being sent to a directed queue\n-and overrides its scheduling type. Note that the originally selected scheduling\n-type (atomic, ordered, or parallel) is not preserved, and an event's sched_type\n-will be set to ``RTE_SCHED_TYPE_ATOMIC`` when it is dequeued from a directed\n-port.\n+Load-Balanced Queues\n+~~~~~~~~~~~~~~~~~~~\n+\n+A load-balanced queue can support atomic and ordered scheduling, or atomic and\n+unordered scheduling, but not atomic and unordered and ordered scheduling. A\n+queue's scheduling types are controlled by the event queue configuration.\n+\n+If the user sets the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag, the\n+``nb_atomic_order_sequences`` determines the supported scheduling types.\n+With non-zero ``nb_atomic_order_sequences``, the queue is configured for atomic\n+and ordered scheduling. In this case, ``RTE_SCHED_TYPE_PARALLEL`` scheduling is\n+supported by scheduling those events as ordered events.  Note that when the\n+event is dequeued, its sched_type will be ``RTE_SCHED_TYPE_ORDERED``. 
Else if\n+``nb_atomic_order_sequences`` is zero, the queue is configured for atomic and\n+unordered scheduling. In this case, ``RTE_SCHED_TYPE_ORDERED`` is unsupported.\n+\n+If the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag is not set, schedule_type\n+dictates the queue's scheduling type.\n+\n+The ``nb_atomic_order_sequences`` queue configuration field sets the ordered\n+queue's reorder buffer size.  DLB2 has 4 groups of ordered queues, where each\n+group is configured to contain either 1 queue with 1024 reorder entries, 2\n+queues with 512 reorder entries, and so on down to 32 queues with 32 entries.\n+\n+When a load-balanced queue is created, the PMD will configure a new sequence\n+number group on-demand if num_sequence_numbers does not match a pre-existing\n+group with available reorder buffer entries. If all sequence number groups are\n+in use, no new group will be created and queue configuration will fail. (Note\n+that when the PMD is used with a virtual DLB2 device, it cannot change the\n+sequence number configuration.)\n+\n+The queue's ``nb_atomic_flows`` parameter is ignored by the DLB2 PMD, because\n+the DLB2 does not limit the number of flows a queue can track. In the DLB2, all\n+load-balanced queues can use the full 16-bit flow ID range.\n \n Flow ID\n ~~~~~~~\ndiff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c\nindex ef1c000..8c1e06d 100644\n--- a/drivers/event/dlb2/dlb2.c\n+++ b/drivers/event/dlb2/dlb2.c\n@@ -688,6 +688,317 @@ dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,\n \tqueue_conf->priority = 0;\n }\n \n+static int32_t\n+dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_get_sn_allocation_args cfg;\n+\tint ret;\n+\n+\tcfg.group = group;\n+\n+\tret = dlb2_iface_get_sn_allocation(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: get_sn_allocation ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\treturn ret;\n+\t}\n+\n+\treturn cfg.response.id;\n+}\n+\n+static int\n+dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_set_sn_allocation_args cfg;\n+\tint ret;\n+\n+\tcfg.num = num;\n+\tcfg.group = group;\n+\n+\tret = dlb2_iface_set_sn_allocation(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: set_sn_allocation ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\treturn ret;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int32_t\n+dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_get_sn_occupancy_args cfg;\n+\tint ret;\n+\n+\tcfg.group = group;\n+\n+\tret = dlb2_iface_get_sn_occupancy(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: get_sn_occupancy ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\treturn ret;\n+\t}\n+\n+\treturn cfg.response.id;\n+}\n+\n+/* Query the current sequence number allocations and, if they conflict with the\n+ * requested LDB queue configuration, attempt to re-allocate sequence numbers.\n+ * This is best-effort; if it fails, the PMD will attempt to configure the\n+ * load-balanced queue and return an error.\n+ */\n+static void\n+dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,\n+\t\t\t   const struct rte_event_queue_conf *queue_conf)\n+{\n+\tint grp_occupancy[DLB2_NUM_SN_GROUPS];\n+\tint 
grp_alloc[DLB2_NUM_SN_GROUPS];\n+\tint i, sequence_numbers;\n+\n+\tsequence_numbers = (int)queue_conf->nb_atomic_order_sequences;\n+\n+\tfor (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {\n+\t\tint total_slots;\n+\n+\t\tgrp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);\n+\t\tif (grp_alloc[i] < 0)\n+\t\t\treturn;\n+\n+\t\ttotal_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];\n+\n+\t\tgrp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);\n+\t\tif (grp_occupancy[i] < 0)\n+\t\t\treturn;\n+\n+\t\t/* DLB has at least one available slot for the requested\n+\t\t * sequence numbers, so no further configuration required.\n+\t\t */\n+\t\tif (grp_alloc[i] == sequence_numbers &&\n+\t\t    grp_occupancy[i] < total_slots)\n+\t\t\treturn;\n+\t}\n+\n+\t/* None of the sequence number groups are configured for the requested\n+\t * sequence numbers, so we have to reconfigure one of them. This is\n+\t * only possible if a group is not in use.\n+\t */\n+\tfor (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {\n+\t\tif (grp_occupancy[i] == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == DLB2_NUM_SN_GROUPS) {\n+\t\tprintf(\"[%s()] No groups with %d sequence_numbers are available or have free slots\\n\",\n+\t\t       __func__, sequence_numbers);\n+\t\treturn;\n+\t}\n+\n+\t/* Attempt to configure slot i with the requested number of sequence\n+\t * numbers. Ignore the return value -- if this fails, the error will be\n+\t * caught during subsequent queue configuration.\n+\t */\n+\tdlb2_set_sn_allocation(dlb2, i, sequence_numbers);\n+}\n+\n+static int32_t\n+dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,\n+\t\t\t struct dlb2_eventdev_queue *ev_queue,\n+\t\t\t const struct rte_event_queue_conf *evq_conf)\n+{\n+\tstruct dlb2_hw_dev *handle = &dlb2->qm_instance;\n+\tstruct dlb2_queue *queue = &ev_queue->qm_queue;\n+\tstruct dlb2_create_ldb_queue_args cfg;\n+\tint32_t ret;\n+\tuint32_t qm_qid;\n+\tint sched_type = -1;\n+\n+\tif (evq_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {\n+\t\tif (evq_conf->nb_atomic_order_sequences != 0)\n+\t\t\tsched_type = RTE_SCHED_TYPE_ORDERED;\n+\t\telse\n+\t\t\tsched_type = RTE_SCHED_TYPE_PARALLEL;\n+\t} else\n+\t\tsched_type = evq_conf->schedule_type;\n+\n+\tcfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;\n+\tcfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;\n+\tcfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;\n+\n+\tif (sched_type != RTE_SCHED_TYPE_ORDERED) {\n+\t\tcfg.num_sequence_numbers = 0;\n+\t\tcfg.num_qid_inflights = 2048;\n+\t}\n+\n+\t/* App should set this to the number of hardware flows they want, not\n+\t * the overall number of flows they're going to use. E.g. 
if app is\n+\t * using 64 flows and sets compression to 64, best-case they'll get\n+\t * 64 unique hashed flows in hardware.\n+\t */\n+\tswitch (evq_conf->nb_atomic_flows) {\n+\t/* Valid DLB2 compression levels */\n+\tcase 64:\n+\tcase 128:\n+\tcase 256:\n+\tcase 512:\n+\tcase (1 * 1024): /* 1K */\n+\tcase (2 * 1024): /* 2K */\n+\tcase (4 * 1024): /* 4K */\n+\tcase (64 * 1024): /* 64K */\n+\t\tcfg.lock_id_comp_level = evq_conf->nb_atomic_flows;\n+\t\tbreak;\n+\tdefault:\n+\t\t/* Invalid compression level */\n+\t\tcfg.lock_id_comp_level = 0; /* no compression */\n+\t}\n+\n+\tif (ev_queue->depth_threshold == 0) {\n+\t\tcfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;\n+\t\tev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;\n+\t} else\n+\t\tcfg.depth_threshold = ev_queue->depth_threshold;\n+\n+\tret = dlb2_iface_ldb_queue_create(handle, &cfg);\n+\tif (ret < 0) {\n+\t\tDLB2_LOG_ERR(\"dlb2: create LB event queue error, ret=%d (driver status: %s)\\n\",\n+\t\t\t     ret, dlb2_error_strings[cfg.response.status]);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tqm_qid = cfg.response.id;\n+\n+\t/* Save off queue config for debug, resource lookups, and reconfig */\n+\tqueue->num_qid_inflights = cfg.num_qid_inflights;\n+\tqueue->num_atm_inflights = cfg.num_atomic_inflights;\n+\n+\tqueue->sched_type = sched_type;\n+\tqueue->config_state = DLB2_CONFIGURED;\n+\n+\tDLB2_LOG_DBG(\"Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\\n\",\n+\t\t     qm_qid,\n+\t\t     cfg.num_atomic_inflights,\n+\t\t     cfg.num_sequence_numbers,\n+\t\t     cfg.num_qid_inflights);\n+\n+\treturn qm_qid;\n+}\n+\n+static int\n+dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,\n+\t\t\t      struct dlb2_eventdev_queue *ev_queue,\n+\t\t\t      const struct rte_event_queue_conf *queue_conf)\n+{\n+\tstruct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);\n+\tint32_t qm_qid;\n+\n+\tif (queue_conf->nb_atomic_order_sequences)\n+\t\tdlb2_program_sn_allocation(dlb2, queue_conf);\n+\n+\tqm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);\n+\tif (qm_qid < 0) {\n+\t\tDLB2_LOG_ERR(\"Failed to create the load-balanced queue\\n\");\n+\n+\t\treturn qm_qid;\n+\t}\n+\n+\tdlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;\n+\n+\tev_queue->qm_queue.id = qm_qid;\n+\n+\treturn 0;\n+}\n+\n+static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)\n+{\n+\tint i, num = 0;\n+\n+\tfor (i = 0; i < dlb2->num_queues; i++) {\n+\t\tif (dlb2->ev_queues[i].setup_done &&\n+\t\t    dlb2->ev_queues[i].qm_queue.is_directed)\n+\t\t\tnum++;\n+\t}\n+\n+\treturn num;\n+}\n+\n+static void\n+dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,\n+\t\t\t struct dlb2_eventdev_queue *ev_queue)\n+{\n+\tstruct dlb2_eventdev_port *ev_port;\n+\tint i, j;\n+\n+\tfor (i = 0; i < dlb2->num_ports; i++) {\n+\t\tev_port = &dlb2->ev_ports[i];\n+\n+\t\tfor (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {\n+\t\t\tif (!ev_port->link[j].valid ||\n+\t\t\t    ev_port->link[j].queue_id != ev_queue->id)\n+\t\t\t\tcontinue;\n+\n+\t\t\tev_port->link[j].valid = false;\n+\t\t\tev_port->num_links--;\n+\t\t}\n+\t}\n+\n+\tev_queue->num_links = 0;\n+}\n+\n+static int\n+dlb2_eventdev_queue_setup(struct rte_eventdev *dev,\n+\t\t\t  uint8_t ev_qid,\n+\t\t\t  const struct rte_event_queue_conf *queue_conf)\n+{\n+\tstruct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);\n+\tstruct dlb2_eventdev_queue *ev_queue;\n+\tint ret;\n+\n+\tif (queue_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (ev_qid >= dlb2->num_queues)\n+\t\treturn -EINVAL;\n+\n+\tev_queue = 
&dlb2->ev_queues[ev_qid];\n+\n+\tev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &\n+\t\tRTE_EVENT_QUEUE_CFG_SINGLE_LINK;\n+\tev_queue->id = ev_qid;\n+\tev_queue->conf = *queue_conf;\n+\n+\tif (!ev_queue->qm_queue.is_directed) {\n+\t\tret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);\n+\t} else {\n+\t\t/* The directed queue isn't setup until link time, at which\n+\t\t * point we know its directed port ID. Directed queue setup\n+\t\t * will only fail if this queue is already setup or there are\n+\t\t * no directed queues left to configure.\n+\t\t */\n+\t\tret = 0;\n+\n+\t\tev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;\n+\n+\t\tif (ev_queue->setup_done ||\n+\t\t    dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)\n+\t\t\tret = -EINVAL;\n+\t}\n+\n+\t/* Tear down pre-existing port->queue links */\n+\tif (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)\n+\t\tdlb2_queue_link_teardown(dlb2, ev_queue);\n+\n+\tif (!ret)\n+\t\tev_queue->setup_done = true;\n+\n+\treturn ret;\n+}\n+\n static void\n dlb2_entry_points_init(struct rte_eventdev *dev)\n {\n@@ -696,6 +1007,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)\n \t\t.dev_infos_get    = dlb2_eventdev_info_get,\n \t\t.dev_configure    = dlb2_eventdev_configure,\n \t\t.queue_def_conf   = dlb2_eventdev_queue_default_conf_get,\n+\t\t.queue_setup      = dlb2_eventdev_queue_setup,\n \t\t.port_def_conf    = dlb2_eventdev_port_default_conf_get,\n \t\t.dump             = dlb2_eventdev_dump,\n \t\t.xstats_get       = dlb2_eventdev_xstats_get,\ndiff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c\nindex a829b9b..4c07574 100644\n--- a/drivers/event/dlb2/dlb2_iface.c\n+++ b/drivers/event/dlb2/dlb2_iface.c\n@@ -31,3 +31,15 @@ int (*dlb2_iface_sched_domain_create)(struct dlb2_hw_dev *handle,\n \t\t\t\tstruct dlb2_create_sched_domain_args *args);\n \n void (*dlb2_iface_domain_reset)(struct dlb2_eventdev *dlb2);\n+\n+int (*dlb2_iface_ldb_queue_create)(struct dlb2_hw_dev *handle,\n+\t\t\t\t   struct dlb2_create_ldb_queue_args *cfg);\n+\n+int (*dlb2_iface_get_sn_allocation)(struct dlb2_hw_dev *handle,\n+\t\t\t\t    struct dlb2_get_sn_allocation_args *args);\n+\n+int (*dlb2_iface_set_sn_allocation)(struct dlb2_hw_dev *handle,\n+\t\t\t\t    struct dlb2_set_sn_allocation_args *args);\n+\n+int (*dlb2_iface_get_sn_occupancy)(struct dlb2_hw_dev *handle,\n+\t\t\t\t   struct dlb2_get_sn_occupancy_args *args);\ndiff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h\nindex 6663dab..4c88fe0 100644\n--- a/drivers/event/dlb2/dlb2_iface.h\n+++ b/drivers/event/dlb2/dlb2_iface.h\n@@ -30,4 +30,16 @@ extern int (*dlb2_iface_sched_domain_create)(struct dlb2_hw_dev *handle,\n \t\t\t\t struct dlb2_create_sched_domain_args *args);\n \n extern void (*dlb2_iface_domain_reset)(struct dlb2_eventdev *dlb2);\n+\n+extern int (*dlb2_iface_ldb_queue_create)(struct dlb2_hw_dev *handle,\n+\t\t\t\t  struct dlb2_create_ldb_queue_args *cfg);\n+\n+extern int (*dlb2_iface_get_sn_allocation)(struct dlb2_hw_dev *handle,\n+\t\t\t\t   struct dlb2_get_sn_allocation_args *args);\n+\n+extern int (*dlb2_iface_set_sn_allocation)(struct dlb2_hw_dev *handle,\n+\t\t\t\t   struct dlb2_set_sn_allocation_args *args);\n+\n+extern int (*dlb2_iface_get_sn_occupancy)(struct dlb2_hw_dev *handle,\n+\t\t\t\t  struct dlb2_get_sn_occupancy_args *args);\n #endif /* _DLB2_IFACE_H_ */\ndiff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c\nindex 63a68a6..e6ea0d7 100644\n--- 
a/drivers/event/dlb2/pf/base/dlb2_resource.c\n+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c\n@@ -3509,3 +3509,467 @@ unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)\n \n \treturn num;\n }\n+\n+\n+static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,\n+\t\t\t\t     struct dlb2_hw_domain *domain,\n+\t\t\t\t     struct dlb2_ldb_queue *queue,\n+\t\t\t\t     struct dlb2_create_ldb_queue_args *args,\n+\t\t\t\t     bool vdev_req,\n+\t\t\t\t     unsigned int vdev_id)\n+{\n+\tunion dlb2_sys_vf_ldb_vqid_v r0 = { {0} };\n+\tunion dlb2_sys_vf_ldb_vqid2qid r1 = { {0} };\n+\tunion dlb2_sys_ldb_qid2vqid r2 = { {0} };\n+\tunion dlb2_sys_ldb_vasqid_v r3 = { {0} };\n+\tunion dlb2_lsp_qid_ldb_infl_lim r4 = { {0} };\n+\tunion dlb2_lsp_qid_aqed_active_lim r5 = { {0} };\n+\tunion dlb2_aqed_pipe_qid_hid_width r6 = { {0} };\n+\tunion dlb2_sys_ldb_qid_its r7 = { {0} };\n+\tunion dlb2_lsp_qid_atm_depth_thrsh r8 = { {0} };\n+\tunion dlb2_lsp_qid_naldb_depth_thrsh r9 = { {0} };\n+\tunion dlb2_aqed_pipe_qid_fid_lim r10 = { {0} };\n+\tunion dlb2_chp_ord_qid_sn_map r11 = { {0} };\n+\tunion dlb2_sys_ldb_qid_cfg_v r12 = { {0} };\n+\tunion dlb2_sys_ldb_qid_v r13 = { {0} };\n+\n+\tstruct dlb2_sn_group *sn_group;\n+\tunsigned int offs;\n+\n+\t/* QID write permissions are turned on when the domain is started */\n+\tr3.field.vasqid_v = 0;\n+\n+\toffs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +\n+\t\tqueue->id.phys_id;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r3.val);\n+\n+\t/*\n+\t * Unordered QIDs get 4K inflights, ordered get as many as the number\n+\t * of sequence numbers.\n+\t */\n+\tr4.field.limit = args->num_qid_inflights;\n+\n+\tDLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r4.val);\n+\n+\tr5.field.limit = queue->aqed_limit;\n+\n+\tif (r5.field.limit > DLB2_MAX_NUM_AQED_ENTRIES)\n+\t\tr5.field.limit = DLB2_MAX_NUM_AQED_ENTRIES;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_QID_AQED_ACTIVE_LIM(queue->id.phys_id),\n+\t\t    r5.val);\n+\n+\tswitch (args->lock_id_comp_level) {\n+\tcase 64:\n+\t\tr6.field.compress_code = 1;\n+\t\tbreak;\n+\tcase 128:\n+\t\tr6.field.compress_code = 2;\n+\t\tbreak;\n+\tcase 256:\n+\t\tr6.field.compress_code = 3;\n+\t\tbreak;\n+\tcase 512:\n+\t\tr6.field.compress_code = 4;\n+\t\tbreak;\n+\tcase 1024:\n+\t\tr6.field.compress_code = 5;\n+\t\tbreak;\n+\tcase 2048:\n+\t\tr6.field.compress_code = 6;\n+\t\tbreak;\n+\tcase 4096:\n+\t\tr6.field.compress_code = 7;\n+\t\tbreak;\n+\tcase 0:\n+\tcase 65536:\n+\t\tr6.field.compress_code = 0;\n+\t}\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_AQED_PIPE_QID_HID_WIDTH(queue->id.phys_id),\n+\t\t    r6.val);\n+\n+\t/* Don't timestamp QEs that pass through this queue */\n+\tr7.field.qid_its = 0;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_SYS_LDB_QID_ITS(queue->id.phys_id),\n+\t\t    r7.val);\n+\n+\tr8.field.thresh = args->depth_threshold;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_QID_ATM_DEPTH_THRSH(queue->id.phys_id),\n+\t\t    r8.val);\n+\n+\tr9.field.thresh = args->depth_threshold;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue->id.phys_id),\n+\t\t    r9.val);\n+\n+\t/*\n+\t * This register limits the number of inflight flows a queue can have\n+\t * at one time.  It has an upper bound of 2048, but can be\n+\t * over-subscribed. 
512 is chosen so that a single queue doesn't use\n+\t * the entire atomic storage, but can use a substantial portion if\n+\t * needed.\n+\t */\n+\tr10.field.qid_fid_limit = 512;\n+\n+\tDLB2_CSR_WR(hw,\n+\t\t    DLB2_AQED_PIPE_QID_FID_LIM(queue->id.phys_id),\n+\t\t    r10.val);\n+\n+\t/* Configure SNs */\n+\tsn_group = &hw->rsrcs.sn_groups[queue->sn_group];\n+\tr11.field.mode = sn_group->mode;\n+\tr11.field.slot = queue->sn_slot;\n+\tr11.field.grp  = sn_group->id;\n+\n+\tDLB2_CSR_WR(hw, DLB2_CHP_ORD_QID_SN_MAP(queue->id.phys_id), r11.val);\n+\n+\tr12.field.sn_cfg_v = (args->num_sequence_numbers != 0);\n+\tr12.field.fid_cfg_v = (args->num_atomic_inflights != 0);\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), r12.val);\n+\n+\tif (vdev_req) {\n+\t\toffs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;\n+\n+\t\tr0.field.vqid_v = 1;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), r0.val);\n+\n+\t\tr1.field.qid = queue->id.phys_id;\n+\n+\t\tDLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), r1.val);\n+\n+\t\tr2.field.vqid = queue->id.virt_id;\n+\n+\t\tDLB2_CSR_WR(hw,\n+\t\t\t    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),\n+\t\t\t    r2.val);\n+\t}\n+\n+\tr13.field.qid_v = 1;\n+\n+\tDLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), r13.val);\n+}\n+\n+static int\n+dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,\n+\t\t\t\t  struct dlb2_ldb_queue *queue,\n+\t\t\t\t  struct dlb2_create_ldb_queue_args *args)\n+{\n+\tint slot = -1;\n+\tint i;\n+\n+\tqueue->sn_cfg_valid = false;\n+\n+\tif (args->num_sequence_numbers == 0)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {\n+\t\tstruct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];\n+\n+\t\tif (group->sequence_numbers_per_queue ==\n+\t\t    args->num_sequence_numbers &&\n+\t\t    !dlb2_sn_group_full(group)) {\n+\t\t\tslot = dlb2_sn_group_alloc_slot(group);\n+\t\t\tif (slot >= 0)\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tif (slot == -1) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: no sequence number slots available\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tqueue->sn_cfg_valid = true;\n+\tqueue->sn_group = i;\n+\tqueue->sn_slot = slot;\n+\treturn 0;\n+}\n+\n+static int\n+dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,\n+\t\t\t\tstruct dlb2_hw_domain *domain,\n+\t\t\t\tstruct dlb2_ldb_queue *queue,\n+\t\t\t\tstruct dlb2_create_ldb_queue_args *args)\n+{\n+\tint ret;\n+\n+\tret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Attach QID inflights */\n+\tqueue->num_qid_inflights = args->num_qid_inflights;\n+\n+\t/* Attach atomic inflights */\n+\tqueue->aqed_limit = args->num_atomic_inflights;\n+\n+\tdomain->num_avail_aqed_entries -= args->num_atomic_inflights;\n+\tdomain->num_used_aqed_entries += args->num_atomic_inflights;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,\n+\t\t\t\t  u32 domain_id,\n+\t\t\t\t  struct dlb2_create_ldb_queue_args *args,\n+\t\t\t\t  struct dlb2_cmd_response *resp,\n+\t\t\t\t  bool vdev_req,\n+\t\t\t\t  unsigned int vdev_id)\n+{\n+\tstruct dlb2_hw_domain *domain;\n+\tint i;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\n+\tif (domain == NULL) {\n+\t\tresp->status = DLB2_ST_INVALID_DOMAIN_ID;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!domain->configured) {\n+\t\tresp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (domain->started) {\n+\t\tresp->status = 
DLB2_ST_DOMAIN_STARTED;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (dlb2_list_empty(&domain->avail_ldb_queues)) {\n+\t\tresp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (args->num_sequence_numbers) {\n+\t\tfor (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {\n+\t\t\tstruct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];\n+\n+\t\t\tif (group->sequence_numbers_per_queue ==\n+\t\t\t    args->num_sequence_numbers &&\n+\t\t\t    !dlb2_sn_group_full(group))\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {\n+\t\t\tresp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\tif (args->num_qid_inflights > 4096) {\n+\t\tresp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Inflights must be <= number of sequence numbers if ordered */\n+\tif (args->num_sequence_numbers != 0 &&\n+\t    args->num_qid_inflights > args->num_sequence_numbers) {\n+\t\tresp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (domain->num_avail_aqed_entries < args->num_atomic_inflights) {\n+\t\tresp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (args->num_atomic_inflights &&\n+\t    args->lock_id_comp_level != 0 &&\n+\t    args->lock_id_comp_level != 64 &&\n+\t    args->lock_id_comp_level != 128 &&\n+\t    args->lock_id_comp_level != 256 &&\n+\t    args->lock_id_comp_level != 512 &&\n+\t    args->lock_id_comp_level != 1024 &&\n+\t    args->lock_id_comp_level != 2048 &&\n+\t    args->lock_id_comp_level != 4096 &&\n+\t    args->lock_id_comp_level != 65536) {\n+\t\tresp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,\n+\t\t\t       u32 domain_id,\n+\t\t\t       struct dlb2_create_ldb_queue_args *args,\n+\t\t\t       bool vdev_req,\n+\t\t\t       unsigned int vdev_id)\n+{\n+\tDLB2_HW_DBG(hw, \"DLB2 create load-balanced queue arguments:\\n\");\n+\tif (vdev_req)\n+\t\tDLB2_HW_DBG(hw, \"(Request from vdev %d)\\n\", vdev_id);\n+\tDLB2_HW_DBG(hw, \"\\tDomain ID:                  %d\\n\",\n+\t\t    domain_id);\n+\tDLB2_HW_DBG(hw, \"\\tNumber of sequence numbers: %d\\n\",\n+\t\t    args->num_sequence_numbers);\n+\tDLB2_HW_DBG(hw, \"\\tNumber of QID inflights:    %d\\n\",\n+\t\t    args->num_qid_inflights);\n+\tDLB2_HW_DBG(hw, \"\\tNumber of ATM inflights:    %d\\n\",\n+\t\t    args->num_atomic_inflights);\n+}\n+\n+/**\n+ * dlb2_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.\n+ * @hw:\tContains the current state of the DLB2 hardware.\n+ * @domain_id: Domain ID\n+ * @args: User-provided arguments.\n+ * @resp: Response to user.\n+ * @vdev_req: Request came from a virtual device.\n+ * @vdev_id: If vdev_req is true, this contains the virtual device's ID.\n+ *\n+ * Return: returns < 0 on error, 0 otherwise. 
If the driver is unable to\n+ * satisfy a request, resp->status will be set accordingly.\n+ */\n+int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,\n+\t\t\t     u32 domain_id,\n+\t\t\t     struct dlb2_create_ldb_queue_args *args,\n+\t\t\t     struct dlb2_cmd_response *resp,\n+\t\t\t     bool vdev_req,\n+\t\t\t     unsigned int vdev_id)\n+{\n+\tstruct dlb2_hw_domain *domain;\n+\tstruct dlb2_ldb_queue *queue;\n+\tint ret;\n+\n+\tdlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);\n+\n+\t/*\n+\t * Verify that hardware resources are available before attempting to\n+\t * satisfy the request. This simplifies the error unwinding code.\n+\t */\n+\tret = dlb2_verify_create_ldb_queue_args(hw,\n+\t\t\t\t\t\tdomain_id,\n+\t\t\t\t\t\targs,\n+\t\t\t\t\t\tresp,\n+\t\t\t\t\t\tvdev_req,\n+\t\t\t\t\t\tvdev_id);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tdomain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);\n+\tif (domain == NULL) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: domain not found\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tqueue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));\n+\tif (queue == NULL) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: no available ldb queues\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);\n+\tif (ret < 0) {\n+\t\tDLB2_HW_ERR(hw,\n+\t\t\t    \"[%s():%d] Internal error: failed to attach the ldb queue resources\\n\",\n+\t\t\t    __func__, __LINE__);\n+\t\treturn ret;\n+\t}\n+\n+\tdlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);\n+\n+\tqueue->num_mappings = 0;\n+\n+\tqueue->configured = true;\n+\n+\t/*\n+\t * Configuration succeeded, so move the resource from the 'avail' to\n+\t * the 'used' list.\n+\t */\n+\tdlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);\n+\n+\tdlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);\n+\n+\tresp->status = 0;\n+\tresp->id = (vdev_req) ? 
queue->id.virt_id : queue->id.phys_id;\n+\n+\treturn 0;\n+}\n+\n+int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)\n+{\n+\tif (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)\n+\t\treturn -EINVAL;\n+\n+\treturn hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;\n+}\n+\n+int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,\n+\t\t\t\t\t     unsigned int group_id)\n+{\n+\tif (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)\n+\t\treturn -EINVAL;\n+\n+\treturn dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);\n+}\n+\n+static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,\n+\t\t\t\t\t\tunsigned int group_id,\n+\t\t\t\t\t\tunsigned long val)\n+{\n+\tDLB2_HW_DBG(hw, \"DLB2 set group sequence numbers:\\n\");\n+\tDLB2_HW_DBG(hw, \"\\tGroup ID: %u\\n\", group_id);\n+\tDLB2_HW_DBG(hw, \"\\tValue:    %lu\\n\", val);\n+}\n+\n+int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,\n+\t\t\t\t    unsigned int group_id,\n+\t\t\t\t    unsigned long val)\n+{\n+\tu32 valid_allocations[] = {64, 128, 256, 512, 1024};\n+\tunion dlb2_ro_pipe_grp_sn_mode r0 = { {0} };\n+\tstruct dlb2_sn_group *group;\n+\tint mode;\n+\n+\tif (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)\n+\t\treturn -EINVAL;\n+\n+\tgroup = &hw->rsrcs.sn_groups[group_id];\n+\n+\t/*\n+\t * Once the first load-balanced queue using an SN group is configured,\n+\t * the group cannot be changed.\n+\t */\n+\tif (group->slot_use_bitmap != 0)\n+\t\treturn -EPERM;\n+\n+\tfor (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)\n+\t\tif (val == valid_allocations[mode])\n+\t\t\tbreak;\n+\n+\tif (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)\n+\t\treturn -EINVAL;\n+\n+\tgroup->mode = mode;\n+\tgroup->sequence_numbers_per_queue = val;\n+\n+\tr0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;\n+\tr0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;\n+\n+\tDLB2_CSR_WR(hw, DLB2_RO_PIPE_GRP_SN_MODE, r0.val);\n+\n+\tdlb2_log_set_group_sequence_numbers(hw, group_id, val);\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/event/dlb2/pf/dlb2_main.c b/drivers/event/dlb2/pf/dlb2_main.c\nindex 285ad2c..14bb04b 100644\n--- a/drivers/event/dlb2/pf/dlb2_main.c\n+++ b/drivers/event/dlb2/pf/dlb2_main.c\n@@ -613,3 +613,13 @@ dlb2_pf_reset_domain(struct dlb2_hw *hw, u32 id)\n {\n \treturn dlb2_reset_domain(hw, id, NOT_VF_REQ, PF_ID_ZERO);\n }\n+\n+int\n+dlb2_pf_create_ldb_queue(struct dlb2_hw *hw,\n+\t\t\t u32 id,\n+\t\t\t struct dlb2_create_ldb_queue_args *args,\n+\t\t\t struct dlb2_cmd_response *resp)\n+{\n+\treturn dlb2_hw_create_ldb_queue(hw, id, args, resp, NOT_VF_REQ,\n+\t\t\t\t\tPF_ID_ZERO);\n+}\ndiff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c\nindex 9310f72..a6cd178 100644\n--- a/drivers/event/dlb2/pf/dlb2_pf.c\n+++ b/drivers/event/dlb2/pf/dlb2_pf.c\n@@ -150,6 +150,84 @@ dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)\n \t\tDLB2_LOG_ERR(\"dlb2_pf_reset_domain err %d\", ret);\n }\n \n+static int\n+dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,\n+\t\t\t struct dlb2_create_ldb_queue_args *cfg)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,\n+\t\t\t\t       handle->domain_id,\n+\t\t\t\t       cfg,\n+\t\t\t\t       &response);\n+\n+\tcfg->response = response;\n+\n+\tDLB2_INFO(dev->dlb2_device, \"Exiting %s() with ret=%d\\n\",\n+\t\t  __func__, ret);\n+\n+\treturn 
ret;\n+}\n+\n+static int\n+dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,\n+\t\t\t struct dlb2_get_sn_occupancy_args *args)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tint ret;\n+\n+\tret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,\n+\t\t\t\t\t\t       args->group);\n+\n+\tresponse.id = ret;\n+\tresponse.status = 0;\n+\n+\targs->response = response;\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,\n+\t\t\t  struct dlb2_get_sn_allocation_args *args)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tint ret;\n+\n+\tret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);\n+\n+\tresponse.id = ret;\n+\tresponse.status = 0;\n+\n+\targs->response = response;\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,\n+\t\t\t  struct dlb2_set_sn_allocation_args *args)\n+{\n+\tstruct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;\n+\tstruct dlb2_cmd_response response = {0};\n+\tint ret;\n+\n+\tret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,\n+\t\t\t\t\t      args->num);\n+\n+\tresponse.status = 0;\n+\n+\targs->response = response;\n+\n+\treturn ret;\n+}\n+\n static void\n dlb2_pf_iface_fn_ptrs_init(void)\n {\n@@ -161,6 +239,10 @@ dlb2_pf_iface_fn_ptrs_init(void)\n \tdlb2_iface_get_num_resources = dlb2_pf_get_num_resources;\n \tdlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;\n \tdlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;\n+\tdlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;\n+\tdlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;\n+\tdlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;\n+\tdlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;\n }\n \n /* PCI DEV HOOKS */\n",
    "prefixes": [
        "v3",
        "12/23"
    ]
}
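The URLs embedded in the response can be followed directly. As a hedged sketch (file names are arbitrary, and the `git am` step is only suggested as the usual way such mboxes are consumed), the `mbox` field and `series[0]["mbox"]` can be downloaded like this:

```python
import requests

patch = requests.get("https://patches.dpdk.org/api/patches/81974/").json()

# "mbox" is the raw mail for this single patch;
# series[0]["mbox"] covers the whole series it belongs to.
for name, url in [("patch.mbox", patch["mbox"]),
                  ("series.mbox", patch["series"][0]["mbox"])]:
    with open(name, "wb") as f:
        f.write(requests.get(url).content)

# Either file can then be applied to a source tree with `git am <file>`.
```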