@@ -1196,10 +1196,10 @@ Cavium OCTEON TX timvf
M: Pavan Nikhilesh <pbhagavatula@marvell.com>
F: drivers/event/octeontx/timvf_*
-Intel DLB2
+Intel DLB
M: Timothy McDaniel <timothy.mcdaniel@intel.com>
-F: drivers/event/dlb2/
-F: doc/guides/eventdevs/dlb2.rst
+F: drivers/event/dlb/
+F: doc/guides/eventdevs/dlb.rst
Marvell OCTEON TX2
M: Pavan Nikhilesh <pbhagavatula@marvell.com>
@@ -1031,9 +1031,9 @@ test_eventdev_selftest_dpaa2(void)
}
static int
-test_eventdev_selftest_dlb2(void)
+test_eventdev_selftest_dlb(void)
{
- return test_eventdev_selftest_impl("dlb2_event", "");
+ return test_eventdev_selftest_impl("dlb_event", "");
}
REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
@@ -1043,4 +1043,4 @@ REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
test_eventdev_selftest_octeontx2);
REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
-REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
+REGISTER_TEST_COMMAND(eventdev_selftest_dlb, test_eventdev_selftest_dlb);
@@ -139,11 +139,10 @@
/* QEDE PMD defines */
#define RTE_LIBRTE_QEDE_FW ""
-/* DLB2 defines */
-#define RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL 1000
-#define RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE 0
-#undef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
-#define RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA 32
-#define RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH 256
+/* DLB defines */
+#define RTE_LIBRTE_PMD_DLB_POLL_INTERVAL 1000
+#undef RTE_LIBRTE_PMD_DLB_QUELL_STATS
+#define RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA 32
+#define RTE_LIBRTE_PMD_DLB_DEFAULT_DEPTH_THRESH 256
#endif /* _RTE_CONFIG_H_ */
@@ -55,7 +55,7 @@ The public API headers are grouped by topics:
[dpaa2_cmdif] (@ref rte_pmd_dpaa2_cmdif.h),
[dpaa2_qdma] (@ref rte_pmd_dpaa2_qdma.h),
[crypto_scheduler] (@ref rte_cryptodev_scheduler.h),
- [dlb2] (@ref rte_pmd_dlb2.h),
+ [dlb] (@ref rte_pmd_dlb.h),
[ifpga] (@ref rte_pmd_ifpga.h)
- **memory**:
@@ -7,7 +7,7 @@ USE_MDFILE_AS_MAINPAGE = @TOPDIR@/doc/api/doxy-api-index.md
INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/drivers/bus/vdev \
@TOPDIR@/drivers/crypto/scheduler \
- @TOPDIR@/drivers/event/dlb2 \
+ @TOPDIR@/drivers/event/dlb \
@TOPDIR@/drivers/mempool/dpaa2 \
@TOPDIR@/drivers/net/ark \
@TOPDIR@/drivers/net/bnxt \
similarity index 84%
rename from doc/guides/eventdevs/dlb2.rst
rename to doc/guides/eventdevs/dlb.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2020 Intel Corporation.
-Driver for the Intel® Dynamic Load Balancer (DLB2)
+Driver for the Intel® Dynamic Load Balancer (DLB)
==================================================
The DPDK dlb poll mode driver supports the Intel® Dynamic Load Balancer,
@@ -16,34 +16,34 @@ the basic DPDK environment.
Configuration
-------------
-The DLB2 PF PMD is a user-space PMD that uses VFIO to gain direct
+The DLB PF PMD is a user-space PMD that uses VFIO to gain direct
device access. To use this operation mode, the PCIe PF device must be bound
to a DPDK-compatible VFIO driver, such as vfio-pci.
Eventdev API Notes
------------------
-The DLB2 provides the functions of a DPDK event device; specifically, it
+The DLB PMD provides the functions of a DPDK event device; specifically, it
supports atomic, ordered, and parallel scheduling events from queues to ports.
-However, the DLB2 hardware is not a perfect match to the eventdev API. Some DLB2
+However, the DLB hardware is not a perfect match to the eventdev API. Some DLB
features are abstracted by the PMD such as directed ports.
In general the dlb PMD is designed for ease-of-use and does not require a
detailed understanding of the hardware, but these details are important when
writing high-performance code. This section describes the places where the
-eventdev API and DLB2 misalign.
+eventdev API and DLB misalign.
Scheduling Domain Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-DLB2 supports 32 scheduling domains.
+DLB supports 32 scheduling domains.
When one is configured, it allocates load-balanced and
directed queues, ports, credits, and other hardware resources. Some
resource allocations are user-controlled -- the number of queues, for example
-- and others, like credit pools (one directed and one load-balanced pool per
scheduling domain), are not.
-The DLB2 is a closed system eventdev, and as such the ``nb_events_limit`` device
+The DLB is a closed system eventdev, and as such the ``nb_events_limit`` device
setup argument and the per-port ``new_event_threshold`` argument apply as
defined in the eventdev header file. The limit is applied to all enqueues,
regardless of whether it will consume a directed or load-balanced credit.
@@ -68,7 +68,7 @@ If the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag is not set, schedule_type
dictates the queue's scheduling type.
The ``nb_atomic_order_sequences`` queue configuration field sets the ordered
-queue's reorder buffer size. DLB2 has 2 groups of ordered queues, where each
+queue's reorder buffer size. DLB has 2 groups of ordered queues, where each
group is configured to contain either 1 queue with 1024 reorder entries, 2
queues with 512 reorder entries, and so on down to 32 queues with 32 entries.
@@ -76,22 +76,22 @@ When a load-balanced queue is created, the PMD will configure a new sequence
number group on-demand if num_sequence_numbers does not match a pre-existing
group with available reorder buffer entries. If all sequence number groups are
in use, no new group will be created and queue configuration will fail. (Note
-that when the PMD is used with a virtual DLB2 device, it cannot change the
+that when the PMD is used with a virtual DLB device, it cannot change the
sequence number configuration.)
-The queue's ``nb_atomic_flows`` parameter is ignored by the DLB2 PMD, because
-the DLB2 does not limit the number of flows a queue can track. In the DLB2, all
+The queue's ``nb_atomic_flows`` parameter is ignored by the DLB PMD, because
+the DLB does not limit the number of flows a queue can track. In the DLB, all
load-balanced queues can use the full 16-bit flow ID range.
Load-balanced and Directed Ports
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-DLB2 ports come in two flavors: load-balanced and directed. The eventdev API
+DLB ports come in two flavors: load-balanced and directed. The eventdev API
does not have the same concept, but it has a similar one: ports and queues that
are singly-linked (i.e. linked to a single queue or port, respectively).
The ``rte_event_dev_info_get()`` function reports the number of available
-event ports and queues (among other things). For the DLB2 PMD, max_event_ports
+event ports and queues (among other things). For the DLB PMD, max_event_ports
and max_event_queues report the number of available load-balanced ports and
queues, and max_single_link_event_port_queue_pairs reports the number of
available directed ports and queues.
@@ -132,12 +132,12 @@ Flow ID
~~~~~~~
The flow ID field is preserved in the event when it is scheduled in the
-DLB2.
+DLB.
Hardware Credits
~~~~~~~~~~~~~~~~
-DLB2 uses a hardware credit scheme to prevent software from overflowing hardware
+DLB uses a hardware credit scheme to prevent software from overflowing hardware
event storage, with each unit of storage represented by a credit. A port spends
a credit to enqueue an event, and hardware refills the ports with credits as the
events are scheduled to ports. Refills come from credit pools.
@@ -156,7 +156,7 @@ num_dir_credits vdev argument, like so:
.. code-block:: console
- --vdev=dlb1_event,num_dir_credits=<value>
+ --vdev=dlb_event,num_dir_credits=<value>
This can be used if the default allocation is too low or too high for the
specific application needs. The PMD also supports a vdev arg that limits the
@@ -164,10 +164,10 @@ max_num_events reported by rte_event_dev_info_get():
.. code-block:: console
- --vdev=dlb1_event,max_num_events=<value>
+ --vdev=dlb_event,max_num_events=<value>
By default, max_num_events is reported as the total available load-balanced
-credits. If multiple DLB2-based applications are being used, it may be desirable
+credits. If multiple DLB-based applications are being used, it may be desirable
to control how many load-balanced credits each application uses, particularly
when application(s) are written to configure nb_events_limit equal to the
reported max_num_events.
@@ -193,16 +193,16 @@ order to reach the limit.
If a port attempts to enqueue and has no credits available, the enqueue
operation will fail and the application must retry the enqueue. Credits are
-replenished asynchronously by the DLB2 hardware.
+replenished asynchronously by the DLB hardware.
Software Credits
~~~~~~~~~~~~~~~~
-The DLB2 is a "closed system" event dev, and the DLB2 PMD layers a software
+The DLB is a "closed system" event dev, and the DLB PMD layers a software
credit scheme on top of the hardware credit scheme in order to comply with
the per-port backpressure described in the eventdev API.
-The DLB2's hardware scheme is local to a queue/pipeline stage: a port spends a
+The DLB's hardware scheme is local to a queue/pipeline stage: a port spends a
credit when it enqueues to a queue, and credits are later replenished after the
events are dequeued and released.
@@ -222,8 +222,8 @@ credits are used to enqueue to a load-balanced queue, and directed credits are
used to enqueue to a directed queue.
The out-of-credit situations are typically transient, and an eventdev
-application using the DLB2 ought to retry its enqueues if they fail.
-If enqueue fails, DLB2 PMD sets rte_errno as follows:
+application using the DLB ought to retry its enqueues if they fail.
+If enqueue fails, DLB PMD sets rte_errno as follows:
- -ENOSPC: Credit exhaustion (either hardware or software)
- -EINVAL: Invalid argument, such as port ID, queue ID, or sched_type.
@@ -245,12 +245,12 @@ the port's dequeue_depth).
Priority
~~~~~~~~
-The DLB2 supports event priority and per-port queue service priority, as
-described in the eventdev header file. The DLB2 does not support 'global' event
+The DLB supports event priority and per-port queue service priority, as
+described in the eventdev header file. The DLB does not support 'global' event
queue priority established at queue creation time.
-DLB2 supports 4 event and queue service priority levels. For both priority types,
-the PMD uses the upper three bits of the priority field to determine the DLB2
+DLB supports 4 event and queue service priority levels. For both priority types,
+the PMD uses the upper three bits of the priority field to determine the DLB
priority, discarding the 5 least significant bits. But least significant bit out
of 3 priority bits is effectively ignored for binning into 4 priorities. The
discarded 5 least significant event priority bits are not preserved when an event
@@ -265,7 +265,7 @@ Reconfiguration
The Eventdev API allows one to reconfigure a device, its ports, and its queues
by first stopping the device, calling the configuration function(s), then
-restarting the device. The DLB2 does not support configuring an individual queue
+restarting the device. The DLB does not support configuring an individual queue
or port without first reconfiguring the entire device, however, so there are
certain reconfiguration sequences that are valid in the eventdev API but not
supported by the PMD.
@@ -296,9 +296,9 @@ before its ports or queues can be.
Deferred Scheduling
~~~~~~~~~~~~~~~~~~~
-The DLB2 PMD's default behavior for managing a CQ is to "pop" the CQ once per
+The DLB PMD's default behavior for managing a CQ is to "pop" the CQ once per
dequeued event before returning from rte_event_dequeue_burst(). This frees the
-corresponding entries in the CQ, which enables the DLB2 to schedule more events
+corresponding entries in the CQ, which enables the DLB to schedule more events
to it.
To support applications seeking finer-grained scheduling control -- for example
@@ -312,12 +312,12 @@ To enable deferred scheduling, use the defer_sched vdev argument like so:
.. code-block:: console
- --vdev=dlb1_event,defer_sched=on
+ --vdev=dlb_event,defer_sched=on
Atomic Inflights Allocation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In the last stage prior to scheduling an atomic event to a CQ, DLB2 holds the
+In the last stage prior to scheduling an atomic event to a CQ, DLB holds the
inflight event in a temporary buffer that is divided among load-balanced
queues. If a queue's atomic buffer storage fills up, this can result in
head-of-line-blocking. For example:
@@ -340,12 +340,12 @@ increase a vdev's per-queue atomic-inflight allocation to (for example) 64:
.. code-block:: console
- --vdev=dlb1_event,atm_inflights=64
+ --vdev=dlb_event,atm_inflights=64
QID Depth Threshold
~~~~~~~~~~~~~~~~~~~
-DLB2 supports setting and tracking queue depth thresholds. Hardware uses
+DLB supports setting and tracking queue depth thresholds. Hardware uses
the thresholds to track how full a queue is compared to its threshold.
Four buckets are used
@@ -354,7 +354,7 @@ Four buckets are used
- Greater than 75%, but less than or equal to 100% of depth threshold
- Greater than 100% of depth thresholds
-Per queue threshold metrics are tracked in the DLB2 xstats, and are also
+Per queue threshold metrics are tracked in the DLB xstats, and are also
returned in the impl_opaque field of each received event.
The per qid threshold can be specified as part of the device args, and
@@ -363,19 +363,19 @@ shown below.
.. code-block:: console
- --vdev=dlb2_event,qid_depth_thresh=all:<threshold_value>
- --vdev=dlb2_event,qid_depth_thresh=qidA-qidB:<threshold_value>
- --vdev=dlb2_event,qid_depth_thresh=qid:<threshold_value>
+ --vdev=dlb_event,qid_depth_thresh=all:<threshold_value>
+ --vdev=dlb_event,qid_depth_thresh=qidA-qidB:<threshold_value>
+ --vdev=dlb_event,qid_depth_thresh=qid:<threshold_value>
Class of service
~~~~~~~~~~~~~~~~
-DLB2 supports provisioning the DLB2 bandwidth into 4 classes of service.
+DLB supports provisioning the DLB bandwidth into 4 classes of service.
-- Class 4 corresponds to 40% of the DLB2 hardware bandwidth
-- Class 3 corresponds to 30% of the DLB2 hardware bandwidth
-- Class 2 corresponds to 20% of the DLB2 hardware bandwidth
-- Class 1 corresponds to 10% of the DLB2 hardware bandwidth
+- Class 4 corresponds to 40% of the DLB hardware bandwidth
+- Class 3 corresponds to 30% of the DLB hardware bandwidth
+- Class 2 corresponds to 20% of the DLB hardware bandwidth
+- Class 1 corresponds to 10% of the DLB hardware bandwidth
- Class 0 corresponds to don't care
The classes are applied globally to the set of ports contained in this
@@ -387,4 +387,4 @@ Class of service can be specified in the devargs, as follows
.. code-block:: console
- --vdev=dlb2_event,cos=<0..4>
+ --vdev=dlb_event,cos=<0..4>
@@ -11,7 +11,7 @@ application through the eventdev API.
:maxdepth: 2
:numbered:
- dlb2
+ dlb
dpaa
dpaa2
dsw
@@ -94,6 +94,11 @@ New Features
* Added support for preferred busy polling.
+* **Updated DLB driver.**
+
+ * Added support for v2.5 hardware.
+ * Renamed the DLB2 driver to DLB; the renamed driver supports both v2.0 and v2.5 hardware.
+
* **Updated testpmd.**
* Added a command line option to configure forced speed for Ethernet port.
similarity index 99%
rename from drivers/event/dlb2/dlb2.c
rename to drivers/event/dlb/dlb2.c
@@ -667,15 +667,8 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
}
/* Does this platform support umonitor/umwait? */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
- if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
- RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
- DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
- RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
- return -EINVAL;
- }
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG))
dlb2->umwait_allowed = true;
- }
rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
@@ -930,8 +923,9 @@ dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
}
if (ev_queue->depth_threshold == 0) {
- cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
- ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
+ cfg.depth_threshold = RTE_LIBRTE_PMD_DLB_DEFAULT_DEPTH_THRESH;
+ ev_queue->depth_threshold =
+ RTE_LIBRTE_PMD_DLB_DEFAULT_DEPTH_THRESH;
} else
cfg.depth_threshold = ev_queue->depth_threshold;
@@ -1623,7 +1617,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
ev_port->outstanding_releases = 0;
ev_port->inflight_credits = 0;
- ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;
+ ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
ev_port->dlb2 = dlb2; /* reverse link */
/* Tear down pre-existing port->queue links */
@@ -1718,8 +1712,9 @@ dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
cfg.port_id = qm_port_id;
if (ev_queue->depth_threshold == 0) {
- cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
- ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
+ cfg.depth_threshold = RTE_LIBRTE_PMD_DLB_DEFAULT_DEPTH_THRESH;
+ ev_queue->depth_threshold =
+ RTE_LIBRTE_PMD_DLB_DEFAULT_DEPTH_THRESH;
} else
cfg.depth_threshold = ev_queue->depth_threshold;
@@ -2747,7 +2742,7 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
DLB2_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
DLB2_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
-#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
+#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
if (ev->op != RTE_EVENT_OP_RELEASE) {
DLB2_INC_STAT(ev_port->stats.queue[ev->queue_id].enq_ok, 1);
DLB2_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
@@ -3070,7 +3065,7 @@ dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
} else {
- uint64_t poll_interval = RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL;
+ uint64_t poll_interval = RTE_LIBRTE_PMD_DLB_POLL_INTERVAL;
uint64_t curr_ticks = rte_get_timer_cycles();
uint64_t init_ticks = curr_ticks;
similarity index 100%
rename from drivers/event/dlb2/dlb2_iface.c
rename to drivers/event/dlb/dlb2_iface.c
similarity index 100%
rename from drivers/event/dlb2/dlb2_iface.h
rename to drivers/event/dlb/dlb2_iface.h
similarity index 100%
rename from drivers/event/dlb2/dlb2_inline_fns.h
rename to drivers/event/dlb/dlb2_inline_fns.h
similarity index 100%
rename from drivers/event/dlb2/dlb2_log.h
rename to drivers/event/dlb/dlb2_log.h
similarity index 99%
rename from drivers/event/dlb2/dlb2_priv.h
rename to drivers/event/dlb/dlb2_priv.h
@@ -12,7 +12,7 @@
#include <rte_config.h>
#include "dlb2_user.h"
#include "dlb2_log.h"
-#include "rte_pmd_dlb2.h"
+#include "rte_pmd_dlb.h"
-#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
+#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
#define DLB2_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
@@ -20,7 +20,8 @@
#define DLB2_INC_STAT(_stat, _incr_val)
#endif
-#define EVDEV_DLB2_NAME_PMD dlb2_event
+/* common name for all dlb devs (dlb v2.0, dlb v2.5 ...) */
+#define EVDEV_DLB2_NAME_PMD dlb_event
/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
@@ -320,7 +321,7 @@ struct dlb2_port {
bool gen_bit;
uint16_t dir_credits;
uint32_t dequeue_depth;
- enum dlb2_token_pop_mode token_pop_mode;
+ enum dlb_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
union {
similarity index 99%
rename from drivers/event/dlb2/dlb2_selftest.c
rename to drivers/event/dlb/dlb2_selftest.c
@@ -22,7 +22,7 @@
#include <rte_pause.h>
#include "dlb2_priv.h"
-#include "rte_pmd_dlb2.h"
+#include "rte_pmd_dlb.h"
#define MAX_PORTS 32
#define MAX_QIDS 32
@@ -1105,13 +1105,13 @@ test_deferred_sched(void)
return -1;
}
- ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DEFERRED_POP);
+ ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DEFERRED_POP);
if (ret < 0) {
printf("%d: Error setting deferred scheduling\n", __LINE__);
goto err;
}
- ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 1, DEFERRED_POP);
+ ret = rte_pmd_dlb_set_token_pop_mode(evdev, 1, DEFERRED_POP);
if (ret < 0) {
printf("%d: Error setting deferred scheduling\n", __LINE__);
goto err;
@@ -1257,7 +1257,7 @@ test_delayed_pop(void)
return -1;
}
- ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DELAYED_POP);
+ ret = rte_pmd_dlb_set_token_pop_mode(evdev, 0, DELAYED_POP);
if (ret < 0) {
printf("%d: Error setting deferred scheduling\n", __LINE__);
goto err;
similarity index 100%
rename from drivers/event/dlb2/dlb2_user.h
rename to drivers/event/dlb/dlb2_user.h
similarity index 100%
rename from drivers/event/dlb2/dlb2_xstats.c
rename to drivers/event/dlb/dlb2_xstats.c
similarity index 89%
rename from drivers/event/dlb2/meson.build
rename to drivers/event/dlb/meson.build
@@ -14,10 +14,10 @@ sources = files('dlb2.c',
'pf/dlb2_main.c',
'pf/dlb2_pf.c',
'pf/base/dlb2_resource.c',
- 'rte_pmd_dlb2.c',
+ 'rte_pmd_dlb.c',
'dlb2_selftest.c'
)
-headers = files('rte_pmd_dlb2.h')
+headers = files('rte_pmd_dlb.h')
deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_hw_types.h
rename to drivers/event/dlb/pf/base/dlb2_hw_types.h
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_osdep.h
rename to drivers/event/dlb/pf/base/dlb2_osdep.h
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_osdep_bitmap.h
rename to drivers/event/dlb/pf/base/dlb2_osdep_bitmap.h
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_osdep_list.h
rename to drivers/event/dlb/pf/base/dlb2_osdep_list.h
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_osdep_types.h
rename to drivers/event/dlb/pf/base/dlb2_osdep_types.h
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_regs.h
rename to drivers/event/dlb/pf/base/dlb2_regs.h
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_resource.c
rename to drivers/event/dlb/pf/base/dlb2_resource.c
similarity index 100%
rename from drivers/event/dlb2/pf/base/dlb2_resource.h
rename to drivers/event/dlb/pf/base/dlb2_resource.h
similarity index 100%
rename from drivers/event/dlb2/pf/dlb2_main.c
rename to drivers/event/dlb/pf/dlb2_main.c
similarity index 100%
rename from drivers/event/dlb2/pf/dlb2_main.h
rename to drivers/event/dlb/pf/dlb2_main.h
similarity index 100%
rename from drivers/event/dlb2/pf/dlb2_pf.c
rename to drivers/event/dlb/pf/dlb2_pf.c
similarity index 88%
rename from drivers/event/dlb2/rte_pmd_dlb2.c
rename to drivers/event/dlb/rte_pmd_dlb.c
@@ -5,14 +5,14 @@
#include <rte_eventdev.h>
#include <eventdev_pmd.h>
-#include "rte_pmd_dlb2.h"
+#include "rte_pmd_dlb.h"
#include "dlb2_priv.h"
#include "dlb2_inline_fns.h"
int
-rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
+rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
uint8_t port_id,
- enum dlb2_token_pop_mode mode)
+ enum dlb_token_pop_mode mode)
{
struct dlb2_eventdev *dlb2;
struct rte_eventdev *dev;
similarity index 88%
rename from drivers/event/dlb2/rte_pmd_dlb2.h
rename to drivers/event/dlb/rte_pmd_dlb.h
@@ -3,13 +3,13 @@
*/
/*!
- * @file rte_pmd_dlb2.h
+ * @file rte_pmd_dlb.h
*
* @brief DLB PMD-specific functions
*/
-#ifndef _RTE_PMD_DLB2_H_
-#define _RTE_PMD_DLB2_H_
+#ifndef _RTE_PMD_DLB_H_
+#define _RTE_PMD_DLB_H_
#ifdef __cplusplus
extern "C" {
@@ -23,7 +23,7 @@ extern "C" {
*
* Selects the token pop mode for a DLB2 port.
*/
-enum dlb2_token_pop_mode {
+enum dlb_token_pop_mode {
/* Pop the CQ tokens immediately after dequeueing. */
AUTO_POP,
/* Pop CQ tokens after (dequeue_depth - 1) events are released.
@@ -61,9 +61,9 @@ enum dlb2_token_pop_mode {
__rte_experimental
int
-rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
+rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
uint8_t port_id,
- enum dlb2_token_pop_mode mode);
+ enum dlb_token_pop_mode mode);
#ifdef __cplusplus
}
similarity index 60%
rename from drivers/event/dlb2/version.map
rename to drivers/event/dlb/version.map
@@ -5,5 +5,5 @@ DPDK_21 {
EXPERIMENTAL {
global:
- rte_pmd_dlb2_set_token_pop_mode;
+ rte_pmd_dlb_set_token_pop_mode;
};
@@ -5,7 +5,7 @@ if is_windows
subdir_done()
endif
-drivers = ['dlb2', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
+drivers = ['dlb', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
'dsw']
if not (toolchain == 'gcc' and cc.version().version_compare('<4.8.6') and
dpdk_conf.has('RTE_ARCH_ARM64'))