From patchwork Tue Sep 11 08:02:08 2018
From: Mattias Rönnblom
To: jerin.jacob@caviumnetworks.com
Cc: bruce.richardson@intel.com, dev@dpdk.org, Mattias Rönnblom
Date: Tue, 11 Sep 2018 10:02:08 +0200
Message-Id: <20180911080216.3017-3-mattias.ronnblom@ericsson.com>
In-Reply-To: <20180911080216.3017-1-mattias.ronnblom@ericsson.com>
References: <20180911080216.3017-1-mattias.ronnblom@ericsson.com>
Subject: [dpdk-dev] [PATCH v3 02/10] event/dsw: add DSW device and queue configuration

Allow queue- and device-level configuration of, and retrieval of
contextual information from, a DSW event device.

Signed-off-by: Mattias Rönnblom
---
 drivers/event/dsw/dsw_evdev.c | 87 +++++++++++++++++++++++++++++++++++
 drivers/event/dsw/dsw_evdev.h | 28 +++++++++++
 2 files changed, 115 insertions(+)

diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 6990bbc9e..1500d2426 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -9,6 +9,91 @@
 
 #define EVENTDEV_NAME_DSW_PMD event_dsw
 
+static int
+dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+		const struct rte_event_queue_conf *conf)
+{
+	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+	struct dsw_queue *queue = &dsw->queues[queue_id];
+
+	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
+		return -ENOTSUP;
+
+	if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
+		return -ENOTSUP;
+
+	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
+	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
+	 * the queue will only have a single serving port, no
+	 * migration will ever happen, so the extra TYPE_ATOMIC
+	 * migration overhead is avoided.
+	 */
+	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
+		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
+	else /* atomic or parallel */
+		queue->schedule_type = conf->schedule_type;
+
+	queue->num_serving_ports = 0;
+
+	return 0;
+}
+
+static void
+dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
+		   uint8_t queue_id __rte_unused,
+		   struct rte_event_queue_conf *queue_conf)
+{
+	*queue_conf = (struct rte_event_queue_conf) {
+		.nb_atomic_flows = 4096,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+	};
+}
+
+static void
+dsw_queue_release(struct rte_eventdev *dev __rte_unused,
+		  uint8_t queue_id __rte_unused)
+{
+}
+
+static void
+dsw_info_get(struct rte_eventdev *dev __rte_unused,
+	     struct rte_event_dev_info *info)
+{
+	*info = (struct rte_event_dev_info) {
+		.driver_name = DSW_PMD_NAME,
+		.max_event_queues = DSW_MAX_QUEUES,
+		.max_event_queue_flows = DSW_MAX_FLOWS,
+		.max_event_queue_priority_levels = 1,
+		.max_event_priority_levels = 1,
+		.max_event_ports = DSW_MAX_PORTS,
+		.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
+		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
+		.max_num_events = DSW_MAX_EVENTS,
+		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
+		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
+	};
+}
+
+static int
+dsw_configure(const struct rte_eventdev *dev)
+{
+	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+	const struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+	dsw->num_queues = conf->nb_event_queues;
+
+	return 0;
+}
+
+static struct rte_eventdev_ops dsw_evdev_ops = {
+	.queue_setup = dsw_queue_setup,
+	.queue_def_conf = dsw_queue_def_conf,
+	.queue_release = dsw_queue_release,
+	.dev_infos_get = dsw_info_get,
+	.dev_configure = dsw_configure,
+};
+
 static int
 dsw_probe(struct rte_vdev_device *vdev)
 {
@@ -23,6 +108,8 @@ dsw_probe(struct rte_vdev_device *vdev)
 	if (dev == NULL)
 		return -EFAULT;
 
+	dev->dev_ops = &dsw_evdev_ops;
+
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 9a0f4c357..5eda8d114 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -9,8 +9,36 @@
 
 #define DSW_PMD_NAME RTE_STR(event_dsw)
 
+/* Code changes are required to allow more ports. */
+#define DSW_MAX_PORTS (64)
+#define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
+#define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
+
+#define DSW_MAX_QUEUES (16)
+
+#define DSW_MAX_EVENTS (16384)
+
+/* Code changes are required to allow more flows than 32k. */
+#define DSW_MAX_FLOWS_BITS (15)
+#define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
+#define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
+
+struct dsw_queue {
+	uint8_t schedule_type;
+	uint16_t num_serving_ports;
+};
+
 struct dsw_evdev {
 	struct rte_eventdev_data *data;
+
+	struct dsw_queue queues[DSW_MAX_QUEUES];
+	uint8_t num_queues;
 };
 
+static inline struct dsw_evdev *
+dsw_pmd_priv(const struct rte_eventdev *eventdev)
+{
+	return eventdev->data->dev_private;
+}
+
 #endif
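
For reviewers' context, below is a minimal sketch of how an application could
exercise the ops introduced by this patch through the public eventdev API,
assuming a DSW instance has already been created (e.g. with the
--vdev=event_dsw0 EAL option). The helper name setup_dsw and the queue/port
sizing values are illustrative only, and port setup and linking are left out,
since those ops arrive later in this series.

#include <rte_eventdev.h>

static int
setup_dsw(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config config;
	struct rte_event_queue_conf queue_conf;
	int rc;

	/* Exercises dsw_info_get(). */
	rc = rte_event_dev_info_get(dev_id, &info);
	if (rc < 0)
		return rc;

	/* Exercises dsw_configure(); at this point in the series the
	 * driver only consumes nb_event_queues.
	 */
	config = (struct rte_event_dev_config) {
		.nb_event_queues = 1,
		.nb_event_ports = 4,
		.nb_events_limit = info.max_num_events,
		.nb_event_queue_flows = info.max_event_queue_flows,
		.nb_event_port_dequeue_depth =
			info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth =
			info.max_event_port_enqueue_depth
	};
	rc = rte_event_dev_configure(dev_id, &config);
	if (rc < 0)
		return rc;

	/* Exercises dsw_queue_def_conf() and dsw_queue_setup(). The
	 * defaults give an atomic queue; a RTE_SCHED_TYPE_ORDERED or
	 * ALL_TYPES queue would be rejected with -ENOTSUP.
	 */
	rc = rte_event_queue_default_conf_get(dev_id, 0, &queue_conf);
	if (rc < 0)
		return rc;

	return rte_event_queue_setup(dev_id, 0, &queue_conf);
}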