@@ -84,6 +84,16 @@ evt_parse_queue_priority(struct evt_options *opt, const char *arg __rte_unused)
return 0;
}
+static int
+evt_parse_deq_tmo_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint32(&(opt->deq_tmo_nsec), arg);
+
+ return ret;
+}
+
static int
evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused)
{
@@ -240,6 +250,7 @@ usage(char *program)
"\t--worker_deq_depth : dequeue depth of the worker\n"
"\t--fwd_latency : perform fwd_latency measurement\n"
"\t--queue_priority : enable queue priority\n"
+ "\t--deq_tmo_nsec : global dequeue timeout\n"
"\t--prod_type_ethdev : use ethernet device as producer.\n"
"\t--prod_type_timerdev : use event timer device as producer.\n"
"\t expity_nsec would be the timeout\n"
@@ -311,6 +322,7 @@ static struct option lgopts[] = {
{ EVT_SCHED_TYPE_LIST, 1, 0, 0 },
{ EVT_FWD_LATENCY, 0, 0, 0 },
{ EVT_QUEUE_PRIORITY, 0, 0, 0 },
+ { EVT_DEQ_TMO_NSEC, 1, 0, 0 },
{ EVT_PROD_ETHDEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV_BURST, 0, 0, 0 },
@@ -342,6 +354,7 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt)
{ EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
{ EVT_FWD_LATENCY, evt_parse_fwd_latency},
{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
+ { EVT_DEQ_TMO_NSEC, evt_parse_deq_tmo_nsec},
{ EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
{ EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
{ EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst},
@@ -31,6 +31,7 @@
#define EVT_SCHED_TYPE_LIST ("stlist")
#define EVT_FWD_LATENCY ("fwd_latency")
#define EVT_QUEUE_PRIORITY ("queue_priority")
+#define EVT_DEQ_TMO_NSEC ("deq_tmo_nsec")
#define EVT_PROD_ETHDEV ("prod_type_ethdev")
#define EVT_PROD_TIMERDEV ("prod_type_timerdev")
#define EVT_PROD_TIMERDEV_BURST ("prod_type_timerdev_burst")
@@ -71,6 +72,7 @@ struct evt_options {
uint8_t dev_id;
uint32_t fwd_latency:1;
uint32_t q_priority:1;
+ uint32_t deq_tmo_nsec;
enum evt_prod_type prod_type;
uint8_t timdev_use_burst;
uint8_t timdev_cnt;
@@ -113,18 +113,35 @@ static int
order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
int ret;
-
const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
/* number of active worker cores + 1 producer */
const uint8_t nb_ports = nb_workers + 1;
+ struct rte_event_dev_info info;
+
+ rte_event_dev_info_get(opt->dev_id, &info);
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %u",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %u",
+ opt->deq_tmo_nsec);
+ }
+ }
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = NB_QUEUES,/* one all types queue */
.nb_event_ports = nb_ports,
- .nb_events_limit = 4096,
+ .nb_events_limit = info.max_num_events,
.nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth = 128,
- .nb_event_port_enqueue_depth = 128,
+ .nb_event_port_dequeue_depth =
+ info.max_event_port_dequeue_depth,
+ .nb_event_port_enqueue_depth =
+ info.max_event_port_enqueue_depth,
};
ret = rte_event_dev_configure(opt->dev_id, &config);
@@ -113,18 +113,35 @@ static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
int ret;
-
const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
/* number of active worker cores + 1 producer */
const uint8_t nb_ports = nb_workers + 1;
+ struct rte_event_dev_info info;
+
+ rte_event_dev_info_get(opt->dev_id, &info);
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %u",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %u",
+ opt->deq_tmo_nsec);
+ }
+ }
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = NB_QUEUES,/* q0 ordered, q1 atomic */
.nb_event_ports = nb_ports,
- .nb_events_limit = 4096,
+ .nb_events_limit = info.max_num_events,
.nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth = 128,
- .nb_event_port_enqueue_depth = 128,
+ .nb_event_port_dequeue_depth =
+ info.max_event_port_dequeue_depth,
+ .nb_event_port_enqueue_depth =
+ info.max_event_port_enqueue_depth,
};
ret = rte_event_dev_configure(opt->dev_id, &config);
@@ -175,7 +175,21 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
return ret;
}
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < dev_info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = dev_info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %u",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > dev_info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = dev_info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %u",
+ opt->deq_tmo_nsec);
+ }
+ }
+
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = dev_info.max_num_events,
@@ -176,7 +176,21 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
return ret;
}
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < dev_info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = dev_info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %u",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > dev_info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = dev_info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %u",
+ opt->deq_tmo_nsec);
+ }
+ }
+
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = dev_info.max_num_events,
@@ -314,7 +314,21 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
rte_event_dev_info_get(opt->dev_id, &info);
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %u",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %u",
+ opt->deq_tmo_nsec);
+ }
+ }
+
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = info.max_num_events,
@@ -334,7 +334,22 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
rte_event_dev_info_get(opt->dev_id, &info);
+
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %u",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %u",
+ opt->deq_tmo_nsec);
+ }
+ }
+
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = info.max_num_events,
@@ -129,31 +129,38 @@ The following are the application command-line options:
Use event timer adapter as producer.
- * ``--prod_type_timerdev_burst``
+* ``--prod_type_timerdev_burst``
- Use burst mode event timer adapter as producer.
+ Use burst mode event timer adapter as producer.
- * ``--timer_tick_nsec``
+* ``--timer_tick_nsec``
- Used to dictate number of nano seconds between bucket traversal of the
- event timer adapter. Refer `rte_event_timer_adapter_conf`.
+ Used to dictate number of nano seconds between bucket traversal of the
+ event timer adapter. Refer `rte_event_timer_adapter_conf`.
- * ``--max_tmo_nsec``
+* ``--max_tmo_nsec``
- Used to configure event timer adapter max arm timeout in nano seconds.
+ Used to configure event timer adapter max arm timeout in nano seconds.
- * ``--expiry_nsec``
+* ``--expiry_nsec``
- Dictate the number of nano seconds after which the event timer expires.
+ Dictate the number of nano seconds after which the event timer expires.
- * ``--nb_timers``
+* ``--nb_timers``
- Number of event timers each producer core will generate.
+ Number of event timers each producer core will generate.
- * ``--nb_timer_adptrs``
+* ``--nb_timer_adptrs``
+
+ Number of event timer adapters to be used. Each adapter is used in
+ round robin manner by the producer cores.
+
+* ``--deq_tmo_nsec``
+
+ Global dequeue timeout for all the event ports. If the provided dequeue
+ timeout is outside the range supported by the event device, it is
+ adjusted to the nearest supported (lowest or highest) dequeue timeout.
- Number of event timer adapters to be used. Each adapter is used in
- round robin manner by the producer cores.
Eventdev Tests
--------------
@@ -225,6 +232,7 @@ Supported application command line options are following::
--nb_flows
--nb_pkts
--worker_deq_depth
+ --deq_tmo_nsec
Example
^^^^^^^
@@ -287,6 +295,7 @@ Supported application command line options are following::
--nb_flows
--nb_pkts
--worker_deq_depth
+ --deq_tmo_nsec
Example
^^^^^^^
@@ -386,6 +395,7 @@ Supported application command line options are following::
--expiry_nsec
--nb_timers
--nb_timer_adptrs
+ --deq_tmo_nsec
Example
^^^^^^^
@@ -485,6 +495,7 @@ Supported application command line options are following::
--expiry_nsec
--nb_timers
--nb_timer_adptrs
+ --deq_tmo_nsec
Example
^^^^^^^
@@ -598,6 +609,7 @@ Supported application command line options are following::
--stlist
--worker_deq_depth
--prod_type_ethdev
+ --deq_tmo_nsec
.. Note::
@@ -689,6 +701,7 @@ Supported application command line options are following::
--stlist
--worker_deq_depth
--prod_type_ethdev
+ --deq_tmo_nsec
.. Note::