@@ -243,6 +243,9 @@ eventdev_resource_setup(void)
/* Event port configuration */
eventdev_rsrc->ops.event_port_setup();
+ /* Rx/Tx adapters configuration */
+ eventdev_rsrc->ops.adapter_setup(ethdev_count);
+
/* Start event device service */
ret = rte_event_dev_service_id_get(eventdev_rsrc->event_d_id,
&service_id);
@@ -6,6 +6,9 @@
#define __L2FWD_EVENTDEV_H__
#include <rte_common.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_mbuf.h>
#include <rte_spinlock.h>
#include "l2fwd_common.h"
@@ -37,6 +40,18 @@ struct eventdev_ports {
rte_spinlock_t lock;
};
+/* Book-keeping for the event eth Rx adapter(s) created at setup time. */
+struct eventdev_rx_adptr {
+	uint32_t service_id; /* Adapter service id; set only when the adapter runs as a service */
+	uint8_t nb_rx_adptr; /* Number of ids stored in rx_adptr[] */
+	uint8_t *rx_adptr;   /* malloc()'d array of Rx adapter ids */
+};
+
+/* Book-keeping for the event eth Tx adapter(s) created at setup time. */
+struct eventdev_tx_adptr {
+	uint32_t service_id; /* Adapter service id; set only when the adapter runs as a service */
+	uint8_t nb_tx_adptr; /* Number of ids stored in tx_adptr[] */
+	uint8_t *tx_adptr;   /* malloc()'d array of Tx adapter ids */
+};
+
struct eventdev_setup_ops {
event_queue_setup_cb event_queue_setup;
event_port_setup_cb event_port_setup;
@@ -50,6 +65,8 @@ struct eventdev_resources {
struct rte_event_port_conf def_p_conf;
struct l2fwd_port_statistics *stats;
/* Default port config. */
+ struct eventdev_rx_adptr rx_adptr;
+ struct eventdev_tx_adptr tx_adptr;
uint8_t disable_implicit_release;
struct eventdev_setup_ops ops;
struct rte_mempool *pkt_pool;
@@ -192,10 +192,127 @@ event_queue_setup_generic(uint16_t ethdev_count, uint32_t event_queue_cfg)
eventdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}
+/* Rx/Tx adapter setup for the generic (non internal-port) event device
+ * model: one Rx and one Tx adapter shared by all ethdev ports. Maps the
+ * adapters' service cores, links the Tx adapter event port to the last
+ * event queue and starts both adapters. Exits the application on any
+ * failure.
+ */
+static void
+rx_tx_adapter_setup_generic(uint16_t ethdev_count)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+		.rx_queue_flags = 0,
+		.ev = {
+			.queue_id = 0,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		}
+	};
+	uint8_t event_d_id = eventdev_rsrc->event_d_id;
+	uint8_t rx_adptr_id = 0;
+	uint8_t tx_adptr_id = 0;
+	uint8_t tx_port_id = 0;
+	uint32_t service_id;
+	int32_t ret, i;
+
+	/* Rx adapter setup */
+	eventdev_rsrc->rx_adptr.nb_rx_adptr = 1;
+	eventdev_rsrc->rx_adptr.rx_adptr = malloc(sizeof(uint8_t) *
+					eventdev_rsrc->rx_adptr.nb_rx_adptr);
+	if (!eventdev_rsrc->rx_adptr.rx_adptr) {
+		free(eventdev_rsrc->evp.event_p_id);
+		free(eventdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Rx adapter");
+	}
+
+	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
+					      &eventdev_rsrc->def_p_conf);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "failed to create rx adapter");
+
+	/* Configure user requested sync mode */
+	eth_q_conf.ev.sched_type = eventdev_rsrc->sync_mode;
+	for (i = 0; i < ethdev_count; i++) {
+		/* One event queue per ethdev; -1 adds all Rx queues of port i */
+		eth_q_conf.ev.queue_id = eventdev_rsrc->evq.event_q_id[i];
+		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, i, -1,
+							 &eth_q_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Failed to add queues to Rx adapter");
+	}
+
+	/* -ESRCH means the adapter does not use a service function */
+	ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
+	if (ret != -ESRCH && ret != 0)
+		rte_exit(EXIT_FAILURE,
+			 "Error getting the service ID for rx adptr\n");
+
+	if (ret == 0) {
+		/* service_id is only written on success; don't read it on -ESRCH */
+		rte_service_runstate_set(service_id, 1);
+		rte_service_set_runstate_mapped_check(service_id, 0);
+		eventdev_rsrc->rx_adptr.service_id = service_id;
+	}
+
+	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+			 rx_adptr_id);
+
+	eventdev_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;
+
+	/* Tx adapter setup */
+	eventdev_rsrc->tx_adptr.nb_tx_adptr = 1;
+	eventdev_rsrc->tx_adptr.tx_adptr = malloc(sizeof(uint8_t) *
+					eventdev_rsrc->tx_adptr.nb_tx_adptr);
+	if (!eventdev_rsrc->tx_adptr.tx_adptr) {
+		free(eventdev_rsrc->rx_adptr.rx_adptr);
+		free(eventdev_rsrc->evp.event_p_id);
+		free(eventdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Tx adapter");
+	}
+
+	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
+					      &eventdev_rsrc->def_p_conf);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "failed to create tx adapter");
+
+	for (i = 0; i < ethdev_count; i++) {
+		/* -1 adds all Tx queues of port i */
+		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, i, -1);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to add queues to Tx adapter");
+	}
+
+	ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
+	if (ret != -ESRCH && ret != 0)
+		rte_exit(EXIT_FAILURE, "Failed to get Tx adapter service ID");
+
+	if (ret == 0) {
+		rte_service_runstate_set(service_id, 1);
+		rte_service_set_runstate_mapped_check(service_id, 0);
+		eventdev_rsrc->tx_adptr.service_id = service_id;
+	}
+
+	/* Link the Tx adapter's event port to the last event queue, which
+	 * carries the packets destined for transmission.
+	 */
+	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
+	if (ret)
+		rte_exit(EXIT_FAILURE,
+			 "Failed to get Tx adapter port id: %d\n", ret);
+
+	ret = rte_event_port_link(event_d_id, tx_port_id,
+				  &eventdev_rsrc->evq.event_q_id[
+					eventdev_rsrc->evq.nb_queues - 1],
+				  NULL, 1);
+	if (ret != 1)
+		rte_exit(EXIT_FAILURE,
+			 "Unable to link Tx adapter port to Tx queue:err = %d",
+			 ret);
+
+	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
+			 tx_adptr_id);
+
+	eventdev_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
+}
+
void
eventdev_set_generic_ops(struct eventdev_setup_ops *ops)
{
	ops->eventdev_setup = eventdev_setup_generic;
	ops->event_queue_setup = event_queue_setup_generic;
	ops->event_port_setup = event_port_setup_generic;
+	/* Hook for Rx/Tx adapter configuration (generic, service-based model) */
+	ops->adapter_setup = rx_tx_adapter_setup_generic;
}
@@ -186,10 +186,90 @@ event_queue_setup_internal_port(uint16_t ethdev_count, uint32_t event_queue_cfg)
}
}
+/* Rx/Tx adapter setup for event devices with internal ports: one Rx and
+ * one Tx adapter per ethdev port (adapter id == port id), no service
+ * core mapping required. Exits the application on any failure.
+ */
+static void
+rx_tx_adapter_setup_internal_port(uint16_t ethdev_count)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+		.rx_queue_flags = 0,
+		.ev = {
+			.queue_id = 0,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		}
+	};
+	uint8_t event_d_id = eventdev_rsrc->event_d_id;
+	int32_t ret, i;
+
+	eventdev_rsrc->rx_adptr.nb_rx_adptr = ethdev_count;
+	eventdev_rsrc->rx_adptr.rx_adptr = malloc(sizeof(uint8_t) *
+					eventdev_rsrc->rx_adptr.nb_rx_adptr);
+	if (!eventdev_rsrc->rx_adptr.rx_adptr) {
+		free(eventdev_rsrc->evp.event_p_id);
+		free(eventdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Rx adapter");
+	}
+
+	for (i = 0; i < ethdev_count; i++) {
+		ret = rte_event_eth_rx_adapter_create(i, event_d_id,
+						      &eventdev_rsrc->def_p_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to create rx adapter[%d]", i);
+
+		/* Configure user requested sync mode; -1 adds all Rx queues */
+		eth_q_conf.ev.queue_id = eventdev_rsrc->evq.event_q_id[i];
+		eth_q_conf.ev.sched_type = eventdev_rsrc->sync_mode;
+		ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &eth_q_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Failed to add queues to Rx adapter");
+
+		ret = rte_event_eth_rx_adapter_start(i);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Rx adapter[%d] start failed", i);
+
+		eventdev_rsrc->rx_adptr.rx_adptr[i] = i;
+	}
+
+	eventdev_rsrc->tx_adptr.nb_tx_adptr = ethdev_count;
+	eventdev_rsrc->tx_adptr.tx_adptr = malloc(sizeof(uint8_t) *
+					eventdev_rsrc->tx_adptr.nb_tx_adptr);
+	if (!eventdev_rsrc->tx_adptr.tx_adptr) {
+		free(eventdev_rsrc->rx_adptr.rx_adptr);
+		free(eventdev_rsrc->evp.event_p_id);
+		free(eventdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Tx adapter");
+	}
+
+	for (i = 0; i < ethdev_count; i++) {
+		ret = rte_event_eth_tx_adapter_create(i, event_d_id,
+						      &eventdev_rsrc->def_p_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to create tx adapter[%d]", i);
+
+		/* -1 adds all Tx queues of port i */
+		ret = rte_event_eth_tx_adapter_queue_add(i, i, -1);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to add queues to Tx adapter");
+
+		ret = rte_event_eth_tx_adapter_start(i);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Tx adapter[%d] start failed", i);
+
+		eventdev_rsrc->tx_adptr.tx_adptr[i] = i;
+	}
+}
+
void
eventdev_set_internal_port_ops(struct eventdev_setup_ops *ops)
{
	ops->eventdev_setup = eventdev_setup_internal_port;
	ops->event_queue_setup = event_queue_setup_internal_port;
	ops->event_port_setup = event_port_setup_internal_port;
+	/* Hook for Rx/Tx adapter configuration (internal-port model) */
+	ops->adapter_setup = rx_tx_adapter_setup_internal_port;
}