@@ -84,6 +84,16 @@ evt_parse_queue_priority(struct evt_options *opt, const char *arg __rte_unused)
return 0;
}
+/* Parse the --deq_tmo_nsec argument: global dequeue timeout in ns,
+ * stored in opt->deq_tmo_nsec and later fed to
+ * rte_event_dev_config.dequeue_timeout_ns by the test setups.
+ * Returns 0 on success, negative on parse failure (parser_read_uint32).
+ */
+static int
+evt_parse_deq_tmo_nsec(struct evt_options *opt, const char *arg)
+{
+	return parser_read_uint32(&(opt->deq_tmo_nsec), arg);
+}
+
static int
evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused)
{
@@ -240,6 +250,7 @@ usage(char *program)
"\t--worker_deq_depth : dequeue depth of the worker\n"
"\t--fwd_latency : perform fwd_latency measurement\n"
"\t--queue_priority : enable queue priority\n"
+ "\t--deq_tmo_nsec : global dequeue timeout\n"
"\t--prod_type_ethdev : use ethernet device as producer.\n"
"\t--prod_type_timerdev : use event timer device as producer.\n"
"\t expity_nsec would be the timeout\n"
@@ -311,6 +322,7 @@ static struct option lgopts[] = {
{ EVT_SCHED_TYPE_LIST, 1, 0, 0 },
{ EVT_FWD_LATENCY, 0, 0, 0 },
{ EVT_QUEUE_PRIORITY, 0, 0, 0 },
+ { EVT_DEQ_TMO_NSEC, 1, 0, 0 },
{ EVT_PROD_ETHDEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV_BURST, 0, 0, 0 },
@@ -342,6 +354,7 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt)
{ EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
{ EVT_FWD_LATENCY, evt_parse_fwd_latency},
{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
+ { EVT_DEQ_TMO_NSEC, evt_parse_deq_tmo_nsec},
{ EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
{ EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
{ EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst},
@@ -31,6 +31,7 @@
#define EVT_SCHED_TYPE_LIST ("stlist")
#define EVT_FWD_LATENCY ("fwd_latency")
#define EVT_QUEUE_PRIORITY ("queue_priority")
+#define EVT_DEQ_TMO_NSEC ("deq_tmo_nsec")
#define EVT_PROD_ETHDEV ("prod_type_ethdev")
#define EVT_PROD_TIMERDEV ("prod_type_timerdev")
#define EVT_PROD_TIMERDEV_BURST ("prod_type_timerdev_burst")
@@ -71,6 +72,7 @@ struct evt_options {
uint8_t dev_id;
uint32_t fwd_latency:1;
uint32_t q_priority:1;
+ uint32_t deq_tmo_nsec;
enum evt_prod_type prod_type;
uint8_t timdev_use_burst;
uint8_t timdev_cnt;
@@ -119,6 +119,7 @@ order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
const uint8_t nb_ports = nb_workers + 1;
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = NB_QUEUES,/* one all types queue */
.nb_event_ports = nb_ports,
.nb_events_limit = 4096,
@@ -119,6 +119,7 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
const uint8_t nb_ports = nb_workers + 1;
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = NB_QUEUES,/* q0 ordered, q1 atomic */
.nb_event_ports = nb_ports,
.nb_events_limit = 4096,
@@ -176,6 +176,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = dev_info.max_num_events,
@@ -177,6 +177,7 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = dev_info.max_num_events,
@@ -315,6 +315,7 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
rte_event_dev_info_get(opt->dev_id, &info);
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = info.max_num_events,
@@ -335,6 +335,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
rte_event_dev_info_get(opt->dev_id, &info);
const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = info.max_num_events,