@@ -138,6 +138,17 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
}
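+
+/* Check whether the device is maintenance-free, i.e. needs no rte_event_maintain() calls. */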
+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+}
+
static inline int
evt_service_setup(uint32_t service_id)
{
@@ -15,6 +15,7 @@ sources = files(
+ 'test_atomic_common.c',
'test_order_atq.c',
'test_order_common.c',
'test_order_queue.c',
'test_perf_atq.c',
'test_perf_common.c',
'test_perf_queue.c',
new file mode 100644
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#include "test_atomic_common.h"
+
+static inline bool
+test_done(struct test_order *const t)
+{
+ return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
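+/* Event producer: enqueue nb_pkts events, tagging each with a per-flow sequence number. */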
+static inline int
+atomic_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_order *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ uint32_t *producer_flow_seq = t->producer_flow_seq;
+ const uint32_t nb_flows = t->nb_flows;
+ uint64_t count = 0;
+ struct rte_mbuf *m;
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+ __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev = (struct rte_event) {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = p->queue_id,
+ .sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_type = RTE_EVENT_TYPE_CPU,
+ .sub_event_type = 0
+ };
+
+ while (count < nb_pkts && t->err == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ /* Maintain seq number per flow */
+
+ const flow_id_t flow = rte_rand_max(nb_flows);
+
+ *order_mbuf_flow_id(t, m) = flow;
+ *order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
+
+ ev.flow_id = flow;
+ ev.mbuf = m;
+
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+ if (t->err)
+ break;
+ rte_pause();
+ }
+
+ count++;
+ }
+
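+ /* Some devices need periodic maintenance; keep the port serviced until the test ends. */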
+ if (!evt_is_maintenance_free(dev_id)) {
+ while (!test_done(t)) {
+ rte_event_maintain(dev_id, port, RTE_EVENT_DEV_MAINT_OP_FLUSH);
+ rte_pause();
+ }
+ }
+
+ return 0;
+}
+
+int
+atomic_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_order *t = evt_test_priv(test);
+
+ /* launch workers */
+
+ int wkr_idx = 0;
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ wkr_idx++;
+ }
+
+ /* launch producer */
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ ret = rte_eal_remote_launch(atomic_producer, &t->prod, plcore);
+ if (ret) {
+ evt_err("failed to launch order_producer %d", plcore);
+ return ret;
+ }
+
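+ /* Wait for completion; if the outstanding count stalls for IDLE_TIMEOUT seconds, report a deadlock. */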
+ uint64_t prev_time = rte_get_timer_cycles();
+ int64_t prev_outstanding_pkts = -1;
+
+ while (t->err == false) {
+ uint64_t current_time = rte_get_timer_cycles();
+ int64_t outstanding_pkts = rte_atomic_load_explicit(
+ &t->outstand_pkts, rte_memory_order_relaxed);
+
+ if (outstanding_pkts <= 0) {
+ t->result = EVT_TEST_SUCCESS;
+ break;
+ }
+
+ if (current_time - prev_time > rte_get_timer_hz() * IDLE_TIMEOUT) {
+ printf(CLGRN "\r%" PRId64 CLNRM, outstanding_pkts);
+ fflush(stdout);
+ if (prev_outstanding_pkts == outstanding_pkts) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No events processed during one period, deadlock");
+ t->err = true;
+ break;
+ }
+ prev_outstanding_pkts = outstanding_pkts;
+ prev_time = current_time;
+ }
+ }
+ printf("\r");
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#ifndef _TEST_ATOMIC_COMMON_H
+#define _TEST_ATOMIC_COMMON_H
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_eventdev.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+#include "test_order_common.h"
+
+#define IDLE_TIMEOUT 1
+
+static inline uint32_t
+get_lock_idx(uint32_t stage, flow_id_t flow, uint32_t nb_flows)
+{
+ return (stage * nb_flows) + flow;
+}
+
+static inline bool
+atomic_spinlock_trylock(rte_spinlock_t atomic_locks[],
+ uint32_t stage,
+ uint32_t flow,
+ uint32_t nb_flows)
+{
+ return rte_spinlock_trylock(&atomic_locks[get_lock_idx(stage, flow, nb_flows)]);
+}
+
+static inline void
+atomic_spinlock_unlock(rte_spinlock_t atomic_locks[],
+ uint32_t stage,
+ uint32_t flow,
+ uint32_t nb_flows)
+{
+ rte_spinlock_unlock(&atomic_locks[get_lock_idx(stage, flow, nb_flows)]);
+}
+
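+/* At most one port may hold a given (stage, flow) at a time, so the trylock must succeed. */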
+static inline void
+atomic_lock_verify(rte_spinlock_t atomic_locks[],
+ uint32_t stage,
+ uint32_t flow,
+ uint32_t nb_flows,
+ struct test_order *const t,
+ uint32_t port)
+{
+ if (!atomic_spinlock_trylock(atomic_locks, stage, flow, nb_flows)) {
+ evt_err("q=%u, flow=%x atomicity error: port %u tried to take held spinlock %p",
+ stage, flow, port,
+ &atomic_locks[get_lock_idx(stage, flow, nb_flows)]);
+ t->err = true;
+ }
+}
+
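+/* Allocate and initialize one spinlock per (stage, flow) combination. */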
+static inline rte_spinlock_t *
+atomic_init_locks(uint32_t nb_stages, uint32_t nb_flows)
+{
+ const uint32_t num_locks = nb_stages * nb_flows;
+
+ rte_spinlock_t *atomic_locks = rte_calloc(NULL, num_locks, sizeof(rte_spinlock_t), 0);
+
+ if (atomic_locks == NULL) {
+ evt_err("Unable to allocate memory for spinlocks.");
+ return NULL;
+ }
+
+ for (uint32_t i = 0; i < num_locks; i++)
+ rte_spinlock_init(&atomic_locks[i]);
+
+ return atomic_locks;
+}
+
+static inline flow_id_t *
+order_mbuf_flow_id(struct test_order *t, struct rte_mbuf *mbuf)
+{
+ return RTE_MBUF_DYNFIELD(mbuf, t->flow_id_dynfield_offset, flow_id_t *);
+}
+
+int atomic_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+
+#endif /* _TEST_ATOMIC_COMMON_H */
@@ -166,6 +166,18 @@ New Features
See the :doc:`../compressdevs/zsda` guide for more details on the new driver.
+* **Added atomic tests to the dpdk-test-eventdev test application.**
+
+ Added two atomic tests: ``atomic_queue`` and ``atomic_atq``. They work in the
+ same way as the corresponding ordered tests but use atomic queues exclusively.
+ Atomicity is verified with one spinlock per stage and flow: a worker that
+ fails to take the lock for its event has detected two ports processing the
+ same flow concurrently.
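+
+ For example (an illustrative invocation; the EAL arguments and core lists
+ depend on the target platform)::
+
+     dpdk-test-eventdev -l 0-3 -- --test=atomic_queue --plcores=1 --wlcores=2,3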
Removed Items
-------------