@@ -42,10 +42,362 @@ pipeline_atq_nb_event_queues(struct evt_options *opt)
return (eth_count * opt->nb_stages);
}
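+
+/*
+ * Single stage, MT safe Tx: an ATOMIC event has already been through its
+ * only stage and is transmitted directly; any other event is forwarded
+ * back as ATOMIC so that the next dequeue can transmit it.
+ */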
+static int
+pipeline_atq_worker_single_stage_safe(void *arg)
+{
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ pipeline_tx_pkt_safe(ev.mbuf);
+ w->processed_pkts++;
+ continue;
+ }
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+
+ return 0;
+}
+
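+/*
+ * Same as the single stage safe worker, but transmits through the
+ * mt-unsafe Tx helper for ethdevs whose Tx queues are not MT safe.
+ */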
+static int
+pipeline_atq_worker_single_stage_unsafe(void *arg)
+{
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ pipeline_tx_pkt_unsafe(ev.mbuf, t);
+ w->processed_pkts++;
+ } else {
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+ }
+
+ return 0;
+}
+
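+/*
+ * Burst variant of the single stage safe worker: transmitted events are
+ * released back to the eventdev, the rest are forwarded as ATOMIC and
+ * enqueued as a burst.
+ */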
+static int
+pipeline_atq_worker_single_stage_burst_safe(void *arg)
+{
+ int i;
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ pipeline_tx_pkt_safe(ev[i].mbuf);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ } else
+ pipeline_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+
+ return 0;
+}
+
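+/*
+ * Burst variant with mt-unsafe Tx: packets go through the unsafe burst
+ * Tx helper and pipeline_tx_flush() is called whenever a dequeue comes
+ * back empty so buffered packets are not stranded.
+ */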
+static int
+pipeline_atq_worker_single_stage_burst_unsafe(void *arg)
+{
+ int i;
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ struct rte_event ev[BURST_SIZE + 1];
+ const uint16_t nb_ports = rte_eth_dev_count();
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ pipeline_tx_flush(t, nb_ports);
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ pipeline_tx_unsafe_burst(ev[i].mbuf, t);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ } else
+ pipeline_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+
+ return 0;
+}
+
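+/*
+ * Multi stage pipeline, MT safe Tx: the stage is derived from
+ * queue_id % nb_stages. On the last stage ATOMIC events are transmitted
+ * and anything else is forced to ATOMIC for the final pass; earlier
+ * stages bump queue_id and forward with the configured sched type.
+ */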
+static int
+pipeline_atq_worker_multi_stage_safe(void *arg)
+{
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ const uint8_t last_queue = t->opt->nb_stages - 1;
+ const uint8_t nb_stages = t->opt->nb_stages;
+ uint8_t *const sched_type_list = &t->sched_type_list[0];
+ uint8_t cq_id;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ cq_id = ev.queue_id % nb_stages;
+
+ if (cq_id == last_queue) {
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ pipeline_tx_pkt_safe(ev.mbuf);
+ w->processed_pkts++;
+ continue;
+ }
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev.queue_id++;
+ pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ }
+
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+ return 0;
+}
+
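+/*
+ * Multi stage worker using the mt-unsafe Tx helper on the last stage.
+ */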
+static int
+pipeline_atq_worker_multi_stage_unsafe(void *arg)
+{
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ const uint8_t last_queue = t->opt->nb_stages - 1;
+ const uint8_t nb_stages = t->opt->nb_stages;
+ uint8_t *const sched_type_list = &t->sched_type_list[0];
+ uint8_t cq_id;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ cq_id = ev.queue_id % nb_stages;
+
+ if (cq_id == last_queue) {
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ pipeline_tx_pkt_unsafe(ev.mbuf, t);
+ w->processed_pkts++;
+ continue;
+ }
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev.queue_id++;
+ pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ }
+
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+ return 0;
+}
+
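+/*
+ * Burst variant of the multi stage safe worker: last stage ATOMIC events
+ * are transmitted and released, everything else is forwarded and
+ * enqueued as a burst.
+ */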
+static int
+pipeline_atq_worker_multi_stage_burst_safe(void *arg)
+{
+ int i;
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ uint8_t *const sched_type_list = &t->sched_type_list[0];
+ const uint8_t last_queue = t->opt->nb_stages - 1;
+ const uint8_t nb_stages = t->opt->nb_stages;
+ uint8_t cq_id;
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ cq_id = ev[i].queue_id % nb_stages;
+
+ if (cq_id == last_queue) {
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ pipeline_tx_pkt_safe(ev[i].mbuf);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ continue;
+ }
+
+ pipeline_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev[i].queue_id++;
+ pipeline_fwd_event(&ev[i],
+ sched_type_list[cq_id]);
+ }
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+ return 0;
+}
+
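+/*
+ * Burst, multi stage, mt-unsafe Tx: same forwarding logic as above, with
+ * the buffered Tx path flushed whenever a dequeue returns nothing.
+ */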
+static int
+pipeline_atq_worker_multi_stage_burst_unsafe(void *arg)
+{
+ int i;
+ struct worker_data *w = arg;
+ struct test_pipeline *t = w->t;
+ const uint8_t dev = w->dev_id;
+ const uint8_t port = w->port_id;
+ uint8_t *const sched_type_list = &t->sched_type_list[0];
+ const uint8_t last_queue = t->opt->nb_stages - 1;
+ const uint8_t nb_stages = t->opt->nb_stages;
+ uint8_t cq_id;
+ struct rte_event ev[BURST_SIZE + 1];
+ const uint16_t nb_ports = rte_eth_dev_count();
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ pipeline_tx_flush(t, nb_ports);
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ cq_id = ev[i].queue_id % nb_stages;
+
+ if (cq_id == last_queue) {
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ pipeline_tx_unsafe_burst(ev[i].mbuf, t);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ continue;
+ }
+
+ pipeline_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev[i].queue_id++;
+ pipeline_fwd_event(&ev[i],
+ sched_type_list[cq_id]);
+ }
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+ return 0;
+}
+
static int
worker_wrapper(void *arg)
{
- RTE_SET_USED(arg);
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const bool mt_safe = !w->t->mt_unsafe;
+ const uint8_t nb_stages = opt->nb_stages;
+ RTE_SET_USED(opt);
+
+ /* allow compiler to optimize */
+ if (nb_stages == 1) {
+ if (!burst && mt_safe)
+ return pipeline_atq_worker_single_stage_safe(arg);
+ else if (!burst && !mt_safe)
+ return pipeline_atq_worker_single_stage_unsafe(
+ arg);
+ else if (burst && mt_safe)
+ return pipeline_atq_worker_single_stage_burst_safe(
+ arg);
+ else if (burst && !mt_safe)
+ return pipeline_atq_worker_single_stage_burst_unsafe(
+ arg);
+ } else {
+ if (!burst && mt_safe)
+ return pipeline_atq_worker_multi_stage_safe(arg);
+ else if (!burst && !mt_safe)
+ return pipeline_atq_worker_multi_stage_unsafe(arg);
+ else if (burst && mt_safe)
+ return pipeline_atq_worker_multi_stage_burst_safe(
+ arg);
+ else if (burst && !mt_safe)
+ return pipeline_atq_worker_multi_stage_burst_unsafe(
+ arg);
+
+ }
rte_panic("invalid worker\n");
}