get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
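
For illustration only, and not part of the recorded exchange below: the GET method described above can be driven from C with libcurl, as in the following sketch. It assumes anonymous read access (public Patchwork instances allow unauthenticated GET), that libcurl is available, and that the server honours an "Accept: application/json" header; the token-authentication note for PUT/PATCH reflects a typical Patchwork deployment and is an assumption, not something shown in this record.

#include <stdio.h>
#include <curl/curl.h>

/*
 * Fetch the JSON record for patch 71461 and let libcurl write the body to
 * stdout (its default behaviour when no write callback is installed).
 * Updating the patch via PUT/PATCH would additionally require an
 * "Authorization: Token <key>" header and maintainer permissions
 * (assumed here, not shown in the response below).
 */
int main(void)
{
    CURL *curl = curl_easy_init();
    if (curl == NULL)
        return 1;

    struct curl_slist *hdrs =
        curl_slist_append(NULL, "Accept: application/json");

    curl_easy_setopt(curl, CURLOPT_URL,
                     "https://patches.dpdk.org/api/patches/71461/");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);

    CURLcode res = curl_easy_perform(curl);

    curl_slist_free_all(hdrs);
    curl_easy_cleanup(curl);
    return res == CURLE_OK ? 0 : 1;
}

The same request, issued from a browser with ?format=api, produced the response captured below.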

GET /api/patches/71461/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 71461,
    "url": "https://patches.dpdk.org/api/patches/71461/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200612212434.6852-6-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200612212434.6852-6-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200612212434.6852-6-timothy.mcdaniel@intel.com",
    "date": "2020-06-12T21:24:12",
    "name": "[05/27] event/dlb: add DLB documentation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a4da26878934f52fa743ced8bbcf40456163ef60",
    "submitter": {
        "id": 826,
        "url": "https://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200612212434.6852-6-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 10445,
            "url": "https://patches.dpdk.org/api/series/10445/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=10445",
            "date": "2020-06-12T21:24:07",
            "name": "V1 event/dlb add Intel DLB PMD",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/10445/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/71461/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/71461/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9894BA00BE;\n\tFri, 12 Jun 2020 23:27:33 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id E3AE11BFBB;\n\tFri, 12 Jun 2020 23:26:30 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n by dpdk.org (Postfix) with ESMTP id D99991BF81\n for <dev@dpdk.org>; Fri, 12 Jun 2020 23:26:24 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 12 Jun 2020 14:26:22 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by orsmga003.jf.intel.com with ESMTP; 12 Jun 2020 14:26:22 -0700"
        ],
        "IronPort-SDR": [
            "\n O8nA2gLtSR16oVpX4QMwPoLfHVQhL48NbM54oGM/5plcoHgWJorj+u8NhkGkWjdYaw3GPYAVJY\n X7tyIbKRzzhQ==",
            "\n LeDKunlTjcR+q1CP53QAxEHE2aRSIaSPKz6aFA9wGGCJyGlPeQo4jdmS5/AAOWC7lFlbJELrmo\n M7F3At6M3v0Q=="
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.73,504,1583222400\"; d=\"scan'208\";a=\"272035782\"",
        "From": "\"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>",
        "To": "jerinj@marvell.com",
        "Cc": "dev@dpdk.org,\n\tgage.eads@intel.com,\n\tharry.van.haaren@intel.com",
        "Date": "Fri, 12 Jun 2020 16:24:12 -0500",
        "Message-Id": "<20200612212434.6852-6-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 2.13.6",
        "In-Reply-To": "<20200612212434.6852-1-timothy.mcdaniel@intel.com>",
        "References": "<20200612212434.6852-1-timothy.mcdaniel@intel.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH 05/27] event/dlb: add DLB documentation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Change-Id: I269bfe9fb4ac53a9f81d33718f3a808fd8216c74\nSigned-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>\n---\n doc/guides/eventdevs/dlb.rst | 497 +++++++++++++++++++++++++++++++++++++++++++\n 1 file changed, 497 insertions(+)\n create mode 100644 doc/guides/eventdevs/dlb.rst",
    "diff": "diff --git a/doc/guides/eventdevs/dlb.rst b/doc/guides/eventdevs/dlb.rst\nnew file mode 100644\nindex 000000000..21e48fea0\n--- /dev/null\n+++ b/doc/guides/eventdevs/dlb.rst\n@@ -0,0 +1,497 @@\n+..  SPDX-License-Identifier: BSD-3-Clause\n+    Copyright(c) 2020 Intel Corporation.\n+\n+Driver for the Intel® Dynamic Load Balancer (DLB)\n+==================================================\n+\n+The DPDK dlb poll mode driver supports the Intel® Dynamic Load Balancer.\n+\n+.. note::\n+\n+    This PMD is disabled by default in the build configuration files, owing to\n+    an external dependency on the `Netlink Protocol Library Suite\n+    <http://www.infradead.org/~tgr/libnl/>`_ (libnl-3 and libnl-genl-3) which\n+    must be installed on the board.  Once the Netlink libraries are installed,\n+    the PMD can be enabled by setting CONFIG_RTE_LIBRTE_PMD_DLB_QM=y and\n+    recompiling the DPDK.\n+\n+Prerequisites\n+-------------\n+\n+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup\n+  the basic DPDK environment.\n+\n+- Learn about the DLB device and its capabilities at `Intel Support\n+  <http://www.intel.com/support>`_. FIXME: Add real link when documentation\n+  becomes available.\n+\n+- The DLB kernel module. If it is not included in the machine's OS\n+  distribution, download it from <FIXME: Add 01.org link when available> and\n+  follow the build instructions.\n+\n+Configuration\n+-------------\n+\n+The DLB eventdev supports two modes of operation:\n+\n+* Bifurcated mode: the PMD is created as a vdev device and depends on the Linux\n+  DLB kernel driver for device access. The bifurcated PMD's configuration\n+  accesses are performed through the kernel driver, and (performance-critical)\n+  datapath functions execute entirely in user-space.\n+\n+  This mode supports both PF and VF devices, but is supported on Linux only.\n+\n+* PF PMD mode: the PF PMD is a user-space PMD that uses VFIO to gain direct\n+  device access. To use this operation mode, the PCIe PF device must be bound\n+  to a DPDK-compatible VFIO driver, such as vfio-pci. The PF PMD does not work\n+  with PCIe VFs, but is portable to all environments (Linux, FreeBSD, etc.)\n+  that DPDK supports. (Note: PF PMD testing has been limited to Linux at this\n+  time.)\n+\n+The vdev device can be created from the application code or from the EAL\n+command line like so:\n+\n+* Call ``rte_vdev_init(\"dlb1_event\")`` from the application.\n+\n+* Use ``--vdev=\"dlb1_event\"`` in the EAL options, which will call\n+  rte_vdev_init() internally.\n+\n+Example:\n+\n+.. code-block:: console\n+\n+    ./your_eventdev_application --vdev=\"dlb1_event\"\n+\n+Note: The dlb vdev can be instatiated with the name \"event_dlb\" as well.\n+\n+Eventdev API Notes\n+------------------\n+\n+The DLB provides the functions of a DPDK event device; specifically, it\n+supports atomic, ordered, and parallel scheduling events from queues to ports.\n+However, the DLB hardware is not a perfect match to the eventdev API. Some DLB\n+features are abstracted by the PMD (e.g. directed ports), some are only\n+accessible as vdev command-line parameters, and certain eventdev features are\n+not supported (e.g. the event flow ID is not maintained during scheduling).\n+\n+In general the dlb PMD is designed for ease-of-use and doesn't require a\n+detailed understanding of the hardware, but these details are important when\n+writing high-performance code. 
This section describes the places where the\n+eventdev API and DLB misalign.\n+\n+Wait (timeout_ticks) Parameter\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+The eventdev API rte_event_dequeue_burst(..) can wait for an event to\n+arrive. Three different forms of waiting are supported by the dlb PMD:\n+polling, blocking on a hardware interrupt, and waiting using umonitor/umwait.\n+Which form of wait to use can be specified using the hybrid timeout data\n+structure below. The application should use the appropriate hybrid timeout\n+struct below and cast it to uint32_t or uint64_t, as appropriate.\n+\n+If RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT is set, then the timeout_ticks\n+parameter supplied to rte_event_dequeue_burst(..) is used to control if and how\n+to wait, and dequeue_timeout_ns is ignored.\n+\n+If RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT is not set, then dequeue_timeout_ns\n+supplied to the rte_event_dev_configure API is used to control if and how to\n+wait, and the timeout_ticks value is ignored.\n+\n+The application should use the appropriate hybrid timeout struct below and cast\n+it to uint32_t (for rte_event_dev_configure) or uint64_t (for\n+rte_event_dequeue_burst), as appropriate.\n+\n+Hybrid timeout_ticks\n+^^^^^^^^^^^^^^^^^^^^\n+#. If poll_ticks is not 0 and neither interrupt_wait nor umonitor_wait is set,\n+   then we will busy poll for up to poll_ticks time.\n+#. If the interrupt_wait bit is set and the CQ is empty, then enter kernel\n+   to wait for an interrupt after busy polling for poll_ticks time. There\n+   is no guarantee how much time we spend in the API when using interrupt_wait.\n+#. If umonitor_wait is set, then repeatedly issue a umwait instruction\n+   until the requested number of events have been dequeued, or until\n+   poll_ticks has expired.\n+\n+Note: It is invalid to set both interrupt_wait and umonitor_wait.\n+\n+The hybrid timeout data structures are currently located in\n+drivers/event/dlb/dlb_timeout.h:\n+\n+.. code-block:: c\n+\n+        struct rte_hybrid_timeout_ticks_64 {\n+                RTE_STD_C11\n+                union {\n+                        uint64_t val64;\n+                        struct {\n+                                uint64_t poll_ticks:62;\n+                                uint64_t umonitor_wait:1;\n+                                uint64_t interrupt_wait:1;\n+                        };\n+                };\n+        };\n+        struct rte_hybrid_timeout_ns_32 {\n+                RTE_STD_C11\n+                union {\n+                        uint32_t val32;\n+                        struct {\n+                                uint32_t poll_ns:30;\n+                                uint32_t umonitor_wait:1;\n+                                uint32_t interrupt_wait:1;\n+                        };\n+                };\n+        };\n+\n+VAS Configuration\n+~~~~~~~~~~~~~~~~~\n+\n+A VAS is a scheduling domain, of which there are 32 in the DLB. (Producer\n+ports in one VAS cannot enqueue events to a different VAS, except through the\n+`Data Mover`_.) When a VAS is configured, it allocates load-balanced and\n+directed queues, ports, credits, and other hardware resources. Some VAS\n+resource allocations are user-controlled -- the number of queues, for example\n+-- and others, like credit pools (one directed and one load-balanced pool per\n+VAS), are not.\n+\n+The dlb PMD creates a single VAS per DLB device. 
Supporting multiple VASes\n+per DLB device is a planned feature, where each VAS will be represented as a\n+separate event device.\n+\n+The DLB is a closed system eventdev, and as such the ``nb_events_limit`` device\n+setup argument and the per-port ``new_event_threshold`` argument apply as\n+defined in the eventdev header file. The limit is applied to all enqueues,\n+regardless of whether it will consume a directed or load-balanced credit.\n+\n+Load-balanced and Directed Ports\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+DLB ports come in two flavors: load-balanced and directed. The eventdev API\n+does not have the same concept, but it has a similar one: ports and queues that\n+are singly-linked (i.e. linked to a single queue or port, respectively).\n+\n+The ``rte_event_dev_info_get()`` function reports the number of available\n+event ports and queues (among other things). For the DLB PMD, max_event_ports\n+and max_event_queues report the number of available load-balanced ports and\n+queues, and max_single_link_event_port_queue_pairs reports the number of\n+available directed ports and queues.\n+\n+When a VAS is created in ``rte_event_dev_configure()``, the user specifies\n+``nb_event_ports`` and ``nb_single_link_event_port_queues``, which control the\n+total number of ports (load-balanced and directed) and the number of directed\n+ports. Hence, the number of requested load-balanced ports is ``nb_event_ports\n+- nb_single_link_event_ports``. The ``nb_event_queues`` field specifies the\n+total number of queues (load-balanced and directed). The number of directed\n+queues comes from ``nb_single_link_event_port_queues``, since directed ports\n+and queues come in pairs.\n+\n+When a port is setup, the ``RTE_EVENT_PORT_CFG_SINGLE_LINK`` flag determines\n+whether it should be configured as a directed (the flag is set) or a\n+load-balanced (the flag is unset) port. Similarly, the\n+``RTE_EVENT_QUEUE_CFG_SINGLE_LINK`` queue configuration flag controls\n+whether it is a directed or load-balanced queue.\n+\n+Load-balanced ports can only be linked to load-balanced queues, and directed\n+ports can only be linked to directed queues. Furthermore, directed ports can\n+only be linked to a single directed queue (and vice versa), and that link\n+cannot change after the eventdev is started.\n+\n+The eventdev API doesn't have a directed scheduling type. To support directed\n+traffic, the dlb PMD detects when an event is being sent to a directed queue\n+and overrides its scheduling type. Note that the originally selected scheduling\n+type (atomic, ordered, or parallel) is not preserved, and an event's sched_type\n+will be set to ``RTE_SCHED_TYPE_ATOMIC`` when it is dequeued from a directed\n+port.\n+\n+Flow ID\n+~~~~~~~\n+\n+The flow ID field is not preserved in the event when it is scheduled in the\n+DLB, because the DLB hardware control word format does not have sufficient\n+space to preserve every event field. As a result, the flow ID specified with\n+the enqueued event will not be in the dequeued event. If this field is\n+required, the application should pass it through an out-of-band path (for\n+example in the mbuf's udata64 field, if the event points to an mbuf) or\n+reconstruct the flow ID after receiving the event.\n+\n+Also, the DLB hardware control word supports a 16-bit flow ID. 
Since struct\n+rte_event's flow_id field is 20 bits, the DLB PMD drops the most significant\n+four bits from the event's flow ID.\n+\n+Hardware Credits\n+~~~~~~~~~~~~~~~~\n+\n+DLB uses a hardware credit scheme to prevent software from overflowing hardware\n+event storage, with each unit of storage represented by a credit. A port spends\n+a credit to enqueue an event, and hardware refills the ports with credits as the\n+events are scheduled to ports. Refills come from credit pools, and each port is\n+a member of a load-balanced credit pool and a directed credit pool. The\n+load-balanced credits are used to enqueue to load-balanced queues, and directed\n+credits are used for directed queues.\n+\n+A dlb eventdev contains one load-balanced and one directed credit pool. These\n+pools' sizes are controlled by the nb_events_limit field in struct\n+rte_event_dev_config. The load-balanced pool is sized to contain\n+nb_events_limit credits, and the directed pool is sized to contain\n+nb_events_limit/4 credits. The directed pool size can be overridden with the\n+num_dir_credits vdev argument, like so:\n+\n+    .. code-block:: console\n+\n+       --vdev=dlb1_event,num_dir_credits=<value>\n+\n+This can be used if the default allocation is too low or too high for the\n+specific application needs. The PMD also supports a vdev arg that limits the\n+max_num_events reported by rte_event_dev_info_get():\n+\n+    .. code-block:: console\n+\n+       --vdev=dlb1_event,max_num_events=<value>\n+\n+By default, max_num_events is reported as the total available load-balanced\n+credits. If multiple DLB-based applications are being used, it may be desirable\n+to control how many load-balanced credits each application uses, particularly\n+when application(s) are written to configure nb_events_limit equal to the\n+reported max_num_events.\n+\n+Each port is a member of both credit pools. A port's credit allocation is\n+defined by its low watermark, high watermark, and refill quanta. These three\n+parameters are calculated by the dlb PMD like so:\n+\n+- The load-balanced high watermark is set to the port's enqueue_depth.\n+  The directed high watermark is set to the minimum of the enqueue_depth and\n+  the directed pool size divided by the total number of ports.\n+- The refill quanta is set to half the high watermark.\n+- The low watermark is set to the minimum of 8 and the refill quanta.\n+\n+When the eventdev is started, each port is pre-allocated a high watermark's\n+worth of credits. For example, if an eventdev contains four ports with enqueue\n+depths of 32 and a load-balanced credit pool size of 4096, each port will start\n+with 32 load-balanced credits, and there will be 3968 credits available to\n+replenish the ports. Thus, a single port is not capable of enqueueing up to the\n+nb_events_limit (without any events being dequeued), since the other ports are\n+retaining their initial credit allocation; in short, all ports must enqueue in\n+order to reach the limit.\n+\n+If a port attempts to enqueue and has no credits available, the enqueue\n+operation will fail and the application must retry the enqueue. 
Credits are\n+replenished asynchronously by the DLB hardware.\n+\n+Software Credits\n+~~~~~~~~~~~~~~~~\n+\n+The DLB is a \"closed system\" event dev, and the DLB PMD layers a software\n+credit scheme on top of the hardware credit scheme in order to comply with\n+the per-port backpressure described in the eventdev API.\n+\n+The DLB's hardware scheme is local to a queue/pipeline stage: a port spends a\n+credit when it enqueues to a queue, and credits are later replenished after the\n+events are dequeued and released.\n+\n+In the software credit scheme, a credit is consumed when a new (.op =\n+RTE_EVENT_OP_NEW) event is injected into the system, and the credit is\n+replenished when the event is released from the system (either explicitly with\n+RTE_EVENT_OP_RELEASE or implicitly in dequeue_burst()).\n+\n+In this model, an event is \"in the system\" from its first enqueue into eventdev\n+until it is last dequeued. If the event goes through multiple event queues, it\n+is still considered \"in the system\" while a worker thread is processing it.\n+\n+A port will fail to enqueue if the number of events in the system exceeds its\n+``new_event_threshold`` (specified at port setup time). A port will also fail\n+to enqueue if it lacks enough hardware credits to enqueue; load-balanced\n+credits are used to enqueue to a load-balanced queue, and directed credits are\n+used to enqueue to a directed queue.\n+\n+The out-of-credit situations are typically transient, and an eventdev\n+application using the DLB ought to retry its enqueues if they fail.\n+If enqueue fails, DLB PMD sets rte_errno as follows:\n+\n+- -ENOSPC: Credit exhaustion (either hardware or software)\n+- -EINVAL: Invalid argument, such as port ID, queue ID, or sched_type.\n+\n+Depending on the pipeline the application has constructed, it's possible to\n+enter a credit deadlock scenario wherein the worker thread lacks the credit\n+to enqueue an event, and it must dequeue an event before it can recover the\n+credit. If the worker thread retries its enqueue indefinitely, it will not\n+make forward progress. Such deadlock is possible if the application has event\n+\"loops\", in which an event is dequeued from queue A and later enqueued back to\n+queue A.\n+\n+Due to this, workers should stop retrying after a time, release the events they\n+are attempting to enqueue, and dequeue more events. It is important that the\n+worker release the events and not simply set them aside to retry the enqueue\n+again later, because the port has limited history list size (by default, twice\n+the port's dequeue_depth).\n+\n+Priority\n+~~~~~~~~\n+\n+The DLB supports event priority and per-port queue service priority, as\n+described in the eventdev header file. The DLB does not support 'global' event\n+queue priority established at queue creation time.\n+\n+DLB supports 8 event and queue service priority levels. For both priority\n+types, the PMD uses the upper three bits of the priority field to determine the\n+DLB priority, discarding the 5 least significant bits. The 5 least significant\n+event priority bits are not preserved when an event is enqueued.\n+\n+Load-Balanced Queues\n+~~~~~~~~~~~~~~~~~~~~\n+\n+A load-balanced queue can support atomic and ordered scheduling, or atomic and\n+unordered scheduling, but not atomic and unordered and ordered scheduling. 
A\n+queue's scheduling types are controlled by the event queue configuration.\n+\n+If the user sets the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag, the\n+``nb_atomic_order_sequences`` determines the supported scheduling types.\n+With non-zero ``nb_atomic_order_sequences``, the queue is configured for atomic\n+and ordered scheduling. In this case, ``RTE_SCHED_TYPE_PARALLEL`` scheduling is\n+supported by scheduling those events as ordered events.  Note that when the\n+event is dequeued, its sched_type will be ``RTE_SCHED_TYPE_ORDERED``. Else if\n+``nb_atomic_order_sequences`` is zero, the queue is configured for atomic and\n+unordered scheduling. In this case, ``RTE_SCHED_TYPE_ORDERED`` is unsupported.\n+\n+If the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag is not set, schedule_type\n+dictates the queue's scheduling type.\n+\n+The ``nb_atomic_order_sequences`` queue configuration field sets the ordered\n+queue's reorder buffer size.  DLB has 4 groups of ordered queues, where each\n+group is configured to contain either 1 queue with 1024 reorder entries, 2\n+queues with 512 reorder entries, and so on down to 32 queues with 32 entries.\n+\n+When a load-balanced queue is created, the PMD will configure a new sequence\n+number group on-demand if num_sequence_numbers does not match a pre-existing\n+group with available reorder buffer entries. If all sequence number groups are\n+in use, no new group will be created and queue configuration will fail. (Note\n+that when the PMD is used with a virtual DLB device, it cannot change the\n+sequence number configuration.)\n+\n+The queue's ``nb_atomic_flows`` parameter is ignored by the DLB PMD, because\n+the DLB doesn't limit the number of flows a queue can track. In the DLB, all\n+load-balanced queues can use the full 16-bit flow ID range.\n+\n+Reconfiguration\n+~~~~~~~~~~~~~~~\n+\n+The Eventdev API allows one to reconfigure a device, its ports, and its queues\n+by first stopping the device, calling the configuration function(s), then\n+restarting the device. The DLB doesn't support configuring an individual queue\n+or port without first reconfiguring the entire device, however, so there are\n+certain reconfiguration sequences that are valid in the eventdev API but not\n+supported by the PMD.\n+\n+Specifically, the PMD supports the following configuration sequence:\n+1. Configure and start the device\n+2. Stop the device\n+3. (Optional) Reconfigure the device\n+4. (Optional) If step 3 is run:\n+\n+   a. Setup queue(s). The reconfigured queue(s) lose their previous port links.\n+   b. The reconfigured port(s) lose their previous queue links.\n+\n+5. (Optional, only if steps 4a and 4b are run) Link port(s) to queue(s)\n+6. Restart the device. If the device is reconfigured in step 3 but one or more\n+   of its ports or queues are not, the PMD will apply their previous\n+   configuration (including port->queue links) at this time.\n+\n+The PMD does not support the following configuration sequences:\n+1. Configure and start the device\n+2. Stop the device\n+3. Setup queue or setup port\n+4. Start the device\n+\n+This sequence is not supported because the event device must be reconfigured\n+before its ports or queues can be.\n+\n+Ordered Fragments\n+~~~~~~~~~~~~~~~~~\n+\n+The DLB has a fourth enqueue type: partial enqueue. 
When a thread is processing\n+an ordered event, it can perform up to 16 \"partial\" enqueues, which allows a\n+single received ordered event to result in multiple reordered events.\n+\n+For example, consider the case where three events (A, then B, then C) are\n+enqueued with ordered scheduling and are received by three different ports.\n+The ports that receive A and C forward events A' and C', while the port that\n+receives B generates three partial enqueues -- B1', B2', and B3' -- followed by\n+a release operation. The DLB will reorder the events in the following order:\n+\n+A', B1', B2', B3', C'\n+\n+This functionality is not available explicitly through the eventdev API, but\n+the dlb PMD provides it through an additional (DLB-specific) event operation,\n+RTE_EVENT_DLB_OP_FRAG.\n+\n+Deferred Scheduling\n+~~~~~~~~~~~~~~~~~~~\n+\n+The DLB PMD's default behavior for managing a CQ is to \"pop\" the CQ once per\n+dequeued event before returning from rte_event_dequeue_burst(). This frees the\n+corresponding entries in the CQ, which enables the DLB to schedule more events\n+to it.\n+\n+To support applications seeking finer-grained scheduling control -- for example\n+deferring scheduling to get the best possible priority scheduling and\n+load-balancing -- the PMD supports a deferred scheduling mode. In this mode,\n+the CQ entry is not popped until the *subsequent* rte_event_dequeue_burst()\n+call. This mode only applies to load-balanced event ports with dequeue depth of\n+1.\n+\n+To enable deferred scheduling, use the defer_sched vdev argument like so:\n+\n+    .. code-block:: console\n+\n+       --vdev=dlb1_event,defer_sched=on\n+\n+Atomic Inflights Allocation\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+In the last stage prior to scheduling an atomic event to a CQ, DLB holds the\n+inflight event in a temporary buffer that is divided among load-balanced\n+queues. If a queue's atomic buffer storage fills up, this can result in\n+head-of-line-blocking. For example:\n+- An LDB queue allocated N atomic buffer entries\n+- All N entries are filled with events from flow X, which is pinned to CQ 0.\n+\n+Until CQ 0 releases 1+ events, no other atomic flows for that LDB queue can be\n+scheduled. The likelihood of this case depends on the eventdev configuration,\n+traffic behavior, event processing latency, potential for a worker to be\n+interrupted or otherwise delayed, etc.\n+\n+By default, the PMD allocates 16 buffer entries for each load-balanced queue,\n+which provides an even division across all 128 queues but potentially wastes\n+buffer space (e.g. if not all queues are used, or aren't used for atomic\n+scheduling).\n+\n+The PMD provides a dev arg to override the default per-queue allocation. To\n+increase a vdev's per-queue atomic-inflight allocation to (for example) 64:\n+\n+    .. code-block:: console\n+\n+       --vdev=dlb1_event,atm_inflights=64\n",
    "prefixes": [
        "05/27"
    ]
}
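
The "Wait (timeout_ticks) Parameter" section of the documentation carried in the diff above is easier to follow with a concrete caller in view. The sketch below is illustrative only: the hybrid timeout structure is copied from the proposed drivers/event/dlb/dlb_timeout.h in this (since superseded) series and is not part of mainline DPDK, rte_event_dequeue_burst() is the standard eventdev API, and the poll budget, device ID, and port ID are hypothetical placeholders. It also assumes the device was configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, which the documentation requires for the per-call timeout to be honoured.

#include <stdint.h>
#include <rte_eventdev.h>

/*
 * Hybrid timeout layout as proposed in the patch above
 * (drivers/event/dlb/dlb_timeout.h); reproduced so the sketch is
 * self-contained. RTE_STD_C11 is dropped in favour of plain C11
 * anonymous members.
 */
struct rte_hybrid_timeout_ticks_64 {
    union {
        uint64_t val64;
        struct {
            uint64_t poll_ticks:62;
            uint64_t umonitor_wait:1;
            uint64_t interrupt_wait:1;
        };
    };
};

/*
 * Dequeue with "busy poll, then block on an interrupt" semantics, as
 * described under "Wait (timeout_ticks) Parameter". dev_id, port_id and
 * the poll budget are illustrative placeholders.
 */
uint16_t
dequeue_hybrid(uint8_t dev_id, uint8_t port_id,
               struct rte_event *evs, uint16_t nb)
{
    struct rte_hybrid_timeout_ticks_64 wait = { .val64 = 0 };

    wait.poll_ticks = 1000;    /* busy-poll budget, in device ticks */
    wait.interrupt_wait = 1;   /* then sleep on a CQ interrupt */

    return rte_event_dequeue_burst(dev_id, port_id, evs, nb, wait.val64);
}

Casting through val64 is what the documentation means by using the hybrid timeout struct and casting it to uint64_t: the bit-field view sets the wait policy, and the flat 64-bit value is what rte_event_dequeue_burst() actually receives as timeout_ticks.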