[02/10] eventdev: add DSW device and queue configuration

Message ID 20180830142719.28569-3-mattias.ronnblom@ericsson.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series: Add the Distributed Software Event Device

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Mattias Rönnblom Aug. 30, 2018, 2:27 p.m. UTC
  Allow queue- and device-level configuration of a DSW event device, as
well as retrieval of contextual information from it.
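
Not part of the patch: a minimal sketch of how an application might
exercise this configuration path through the public eventdev API. It
assumes the device was created with the EAL option --vdev=event_dsw0;
the queue/port counts are arbitrary, and error handling is abbreviated.

#include <stdlib.h>
#include <rte_eal.h>
#include <rte_eventdev.h>

int
main(int argc, char *argv[])
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config config = { 0 };
	struct rte_event_queue_conf queue_conf;
	int dev_id;

	if (rte_eal_init(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "EAL initialization failed\n");

	dev_id = rte_event_dev_get_dev_id("event_dsw0");
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "No event_dsw0 device found\n");

	/* Retrieve the limits reported by dsw_info_get(). */
	rte_event_dev_info_get(dev_id, &info);

	/* Arbitrary sizing, kept within the reported maximums. */
	config.nb_event_queues = 2;
	config.nb_event_ports = 4;
	config.nb_events_limit = info.max_num_events;
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

	if (rte_event_dev_configure(dev_id, &config) < 0)
		rte_exit(EXIT_FAILURE, "Device configuration failed\n");

	/* Fetch the PMD defaults (dsw_queue_def_conf()) and set up
	 * queue 0 with them.
	 */
	rte_event_queue_default_conf_get(dev_id, 0, &queue_conf);
	if (rte_event_queue_setup(dev_id, 0, &queue_conf) < 0)
		rte_exit(EXIT_FAILURE, "Queue setup failed\n");

	return 0;
}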

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 drivers/event/dsw/dsw_evdev.c | 87 +++++++++++++++++++++++++++++++++++
 drivers/event/dsw/dsw_evdev.h | 28 +++++++++++
 2 files changed, 115 insertions(+)
  

Patch

diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 6990bbc9e..1500d2426 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -9,6 +9,91 @@ 
 
 #define EVENTDEV_NAME_DSW_PMD event_dsw
 
+static int
+dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+		const struct rte_event_queue_conf *conf)
+{
+	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+	struct dsw_queue *queue = &dsw->queues[queue_id];
+
+	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
+		return -ENOTSUP;
+
+	if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
+		return -ENOTSUP;
+
+	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
+	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
+	 * the queue will only have a single serving port, no
+	 * migration will ever happen, so the extra TYPE_ATOMIC
+	 * migration overhead is avoided.
+	 */
+	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
+		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
+	else /* atomic or parallel */
+		queue->schedule_type = conf->schedule_type;
+
+	queue->num_serving_ports = 0;
+
+	return 0;
+}
+
+static void
+dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
+		   uint8_t queue_id __rte_unused,
+		   struct rte_event_queue_conf *queue_conf)
+{
+	*queue_conf = (struct rte_event_queue_conf) {
+		.nb_atomic_flows = 4096,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+	};
+}
+
+static void
+dsw_queue_release(struct rte_eventdev *dev __rte_unused,
+		  uint8_t queue_id __rte_unused)
+{
+}
+
+static void
+dsw_info_get(struct rte_eventdev *dev __rte_unused,
+	     struct rte_event_dev_info *info)
+{
+	*info = (struct rte_event_dev_info) {
+		.driver_name = DSW_PMD_NAME,
+		.max_event_queues = DSW_MAX_QUEUES,
+		.max_event_queue_flows = DSW_MAX_FLOWS,
+		.max_event_queue_priority_levels = 1,
+		.max_event_priority_levels = 1,
+		.max_event_ports = DSW_MAX_PORTS,
+		.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
+		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
+		.max_num_events = DSW_MAX_EVENTS,
+		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
+		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
+	};
+}
+
+static int
+dsw_configure(const struct rte_eventdev *dev)
+{
+	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+	const struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+	dsw->num_queues = conf->nb_event_queues;
+
+	return 0;
+}
+
+static struct rte_eventdev_ops dsw_evdev_ops = {
+	.queue_setup = dsw_queue_setup,
+	.queue_def_conf = dsw_queue_def_conf,
+	.queue_release = dsw_queue_release,
+	.dev_infos_get = dsw_info_get,
+	.dev_configure = dsw_configure,
+};
+
 static int
 dsw_probe(struct rte_vdev_device *vdev)
 {
@@ -23,6 +108,8 @@  dsw_probe(struct rte_vdev_device *vdev)
 	if (dev == NULL)
 		return -EFAULT;
 
+	dev->dev_ops = &dsw_evdev_ops;
+
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 9a0f4c357..5eda8d114 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -9,8 +9,36 @@ 
 
 #define DSW_PMD_NAME RTE_STR(event_dsw)
 
+/* Code changes are required to allow more ports. */
+#define DSW_MAX_PORTS (64)
+#define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
+#define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
+
+#define DSW_MAX_QUEUES (16)
+
+#define DSW_MAX_EVENTS (16384)
+
+/* Code changes are required to allow more flows than 32k. */
+#define DSW_MAX_FLOWS_BITS (15)
+#define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
+#define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
+
+struct dsw_queue {
+	uint8_t schedule_type;
+	uint16_t num_serving_ports;
+};
+
 struct dsw_evdev {
 	struct rte_eventdev_data *data;
+
+	struct dsw_queue queues[DSW_MAX_QUEUES];
+	uint8_t num_queues;
 };
 
+static inline struct dsw_evdev *
+dsw_pmd_priv(const struct rte_eventdev *eventdev)
+{
+	return eventdev->data->dev_private;
+}
+
 #endif
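
As an aside (not part of the patch), a hypothetical snippet
illustrating the policy encoded in dsw_queue_setup() above, assuming an
already-configured DSW device identified by dev_id:

#include <stdio.h>
#include <errno.h>
#include <rte_eventdev.h>

static void
try_queue_types(uint8_t dev_id)
{
	struct rte_event_queue_conf conf = {
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.nb_atomic_order_sequences = 4096,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};

	/* Ordered queues are rejected by dsw_queue_setup(); the
	 * PMD's return value propagates to the caller.
	 */
	if (rte_event_queue_setup(dev_id, 0, &conf) == -ENOTSUP)
		printf("Ordered queues unsupported, as expected.\n");

	/* A single-link queue is accepted; internally it is
	 * scheduled as atomic, avoiding the "fake" parallel
	 * flow_id assignment.
	 */
	conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
	conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	if (rte_event_queue_setup(dev_id, 0, &conf) == 0)
		printf("Single-link queue set up.\n");
}

The single-link-as-atomic mapping is purely internal: with only one
serving port, no flow migration can occur, so the queue's
application-visible semantics are unchanged.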