@@ -259,6 +259,7 @@ struct sw_evdev {
uint64_t sched_no_iq_enqueues;
uint64_t sched_no_cq_enqueues;
uint64_t sched_cq_qid_called;
+ uint64_t sched_last_iter_bitmask;
uint8_t started;
uint32_t credit_update_quanta;
@@ -566,6 +566,8 @@ sw_event_schedule(struct rte_eventdev *dev)
rte_service_component_attr_set(sw->service_id,
RTE_SERVICE_ATTR_USEFUL_WORK_LAST_ITER, work_done);
+ uint64_t cqs_scheds_last_iter = 0;
+
/* push all the internal buffered QEs in port->cq_ring to the
* worker cores: aka, do the ring transfers batched.
*/
@@ -585,6 +587,7 @@ sw_event_schedule(struct rte_eventdev *dev)
&sw->cq_ring_space[i]);
port->cq_buf_count = 0;
no_enq = 0;
+ cqs_scheds_last_iter |= (1ULL << i);
} else {
sw->cq_ring_space[i] =
rte_event_ring_free_count(worker) -
@@ -604,4 +607,13 @@ sw_event_schedule(struct rte_eventdev *dev)
sw->sched_min_burst = sw->sched_min_burst_size;
}
+ /* Record which eventdev ports had events scheduled to them this
+ * iteration. If 64 or more ports are active, always report that
+ * all eventdev ports had events scheduled.
+ */
+ if (likely(sw->port_count < 64)) {
+ sw->sched_last_iter_bitmask = cqs_scheds_last_iter;
+ } else {
+ sw->sched_last_iter_bitmask = UINT64_MAX;
+ }
}
@@ -873,15 +873,15 @@ xstats_tests(struct test *t)
int ret = rte_event_dev_xstats_names_get(evdev,
RTE_EVENT_DEV_XSTATS_DEVICE,
0, xstats_names, ids, XSTATS_MAX);
- if (ret != 6) {
- printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ if (ret != 7) {
+ printf("%d: expected 7 stats, got return %d\n", __LINE__, ret);
return -1;
}
ret = rte_event_dev_xstats_get(evdev,
RTE_EVENT_DEV_XSTATS_DEVICE,
0, ids, values, ret);
- if (ret != 6) {
- printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ if (ret != 7) {
+ printf("%d: expected 7 stats, got return %d\n", __LINE__, ret);
return -1;
}
@@ -959,7 +959,7 @@ xstats_tests(struct test *t)
ret = rte_event_dev_xstats_get(evdev,
RTE_EVENT_DEV_XSTATS_DEVICE,
0, ids, values, num_stats);
- static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+ static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4};
for (i = 0; (signed int)i < ret; i++) {
if (expected[i] != values[i]) {
printf(
@@ -975,7 +975,7 @@ xstats_tests(struct test *t)
0, NULL, 0);
/* ensure reset statistics are zero-ed */
- static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+ static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0};
ret = rte_event_dev_xstats_get(evdev,
RTE_EVENT_DEV_XSTATS_DEVICE,
0, ids, values, num_stats);
@@ -1460,7 +1460,7 @@ xstats_id_reset_tests(struct test *t)
for (i = 0; i < XSTATS_MAX; i++)
ids[i] = i;
-#define NUM_DEV_STATS 6
+#define NUM_DEV_STATS 7
/* Device names / values */
int num_stats = rte_event_dev_xstats_names_get(evdev,
RTE_EVENT_DEV_XSTATS_DEVICE,
@@ -1504,8 +1504,9 @@ xstats_id_reset_tests(struct test *t)
static const char * const dev_names[] = {
"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+ "dev_sched_last_iter_bitmask",
};
- uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+ uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4};
for (i = 0; (int)i < ret; i++) {
unsigned int id;
uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
@@ -1518,8 +1519,8 @@ xstats_id_reset_tests(struct test *t)
}
if (val != dev_expected[i]) {
printf("%d: %s value incorrect, expected %"
- PRIu64" got %d\n", __LINE__, dev_names[i],
- dev_expected[i], id);
+ PRIu64" got %ld\n", __LINE__, dev_names[i],
+ dev_expected[i], val);
goto fail;
}
/* reset to zero */
@@ -1542,11 +1543,11 @@ xstats_id_reset_tests(struct test *t)
}
};
-/* 48 is stat offset from start of the devices whole xstats.
+/* 49 is stat offset from start of the device's whole xstats.
* This WILL break every time we add a statistic to a port
* or the device, but there is no other way to test
*/
-#define PORT_OFF 48
+#define PORT_OFF 49
/* num stats for the tested port. CQ size adds more stats to a port */
#define NUM_PORT_STATS 21
/* the port to test. */
@@ -1670,7 +1671,7 @@ xstats_id_reset_tests(struct test *t)
/* queue offset from start of the devices whole xstats.
* This will break every time we add a statistic to a device/port/queue
*/
-#define QUEUE_OFF 90
+#define QUEUE_OFF 91
const uint32_t queue = 0;
num_stats = rte_event_dev_xstats_names_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE, queue,
@@ -17,6 +17,7 @@ enum xstats_type {
/* device instance specific */
no_iq_enq,
no_cq_enq,
+ sched_last_iter_bitmask,
/* port_specific */
rx_used,
rx_free,
@@ -57,6 +58,8 @@ get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
case calls: return sw->sched_called;
case no_iq_enq: return sw->sched_no_iq_enqueues;
case no_cq_enq: return sw->sched_no_cq_enqueues;
+ case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
+
default: return -1;
}
}
@@ -177,9 +180,10 @@ sw_xstats_init(struct sw_evdev *sw)
*/
static const char * const dev_stats[] = { "rx", "tx", "drop",
"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+ "sched_last_iter_bitmask",
};
static const enum xstats_type dev_types[] = { rx, tx, dropped,
- calls, no_iq_enq, no_cq_enq,
+ calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
};
/* all device stats are allowed to be reset */