From patchwork Tue Jan 9 12:20:14 2018
X-Patchwork-Submitter: "Liang, Ma"
X-Patchwork-Id: 33181
From: Liang Ma
To: jerin.jacob@caviumnetworks.com
Cc: dev@dpdk.org, harry.van.haaren@intel.com, bruce.richardson@intel.com, deepak.k.jain@intel.com, john.geary@intel.com, peter.mccarthy@intel.com, seanbh@gmail.com
Date: Tue, 9 Jan 2018 12:20:14 +0000
Message-Id: <1515500423-107720-4-git-send-email-liang.j.ma@intel.com>
X-Mailer: git-send-email 2.7.5
In-Reply-To: <1515500423-107720-1-git-send-email-liang.j.ma@intel.com>
References: <1513941830-186503-1-git-send-email-liang.j.ma@intel.com> <1515500423-107720-1-git-send-email-liang.j.ma@intel.com>
Subject: [dpdk-dev] [PATCH v5 03/12] event/opdl: add event queue config get/set support
List-Id: DPDK patches and discussions

Signed-off-by: Liang Ma
Signed-off-by: Peter Mccarthy
---
 drivers/event/opdl/opdl_evdev.c | 108 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)

diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index cad000a..11ac8fc 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -27,6 +27,110 @@
 static void
 opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
 
+static int
+opdl_queue_setup(struct rte_eventdev *dev,
+		uint8_t queue_id,
+		const struct rte_event_queue_conf *conf)
+{
+	enum queue_type type;
+
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	/* Extra sanity check, probably not needed */
+	if (queue_id == OPDL_INVALID_QID) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+				"Invalid queue id %u requested\n",
+				dev->data->dev_id,
+				queue_id);
+		return -EINVAL;
+	}
+
+	if (device->nb_q_md > device->max_queue_nb) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+				"Max number of queues %u exceeded by request %u\n",
+				dev->data->dev_id,
+				device->max_queue_nb,
+				device->nb_q_md);
+		return -EINVAL;
+	}
+
+	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+	    & conf->event_queue_cfg) {
+		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+				"QUEUE_CFG_ALL_TYPES not supported\n",
+				dev->data->dev_id);
+		return -ENOTSUP;
+	} else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
+		   & conf->event_queue_cfg) {
+		type = OPDL_Q_TYPE_SINGLE_LINK;
+	} else {
+		switch (conf->schedule_type) {
+		case RTE_SCHED_TYPE_ORDERED:
+			type = OPDL_Q_TYPE_ORDERED;
+			break;
+		case RTE_SCHED_TYPE_ATOMIC:
+			type = OPDL_Q_TYPE_ATOMIC;
+			break;
+		case RTE_SCHED_TYPE_PARALLEL:
+			type = OPDL_Q_TYPE_ORDERED;
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					"Unknown queue type %d requested\n",
+					dev->data->dev_id,
+					conf->event_queue_cfg);
+			return -EINVAL;
+		}
+	}
+	/* Check if queue id has been setup already */
+	for (uint32_t i = 0; i < device->nb_q_md; i++) {
+		if (device->q_md[i].ext_id == queue_id) {
+			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					"queue id %u already setup\n",
+					dev->data->dev_id,
+					queue_id);
+			return -EINVAL;
+		}
+	}
+
+	device->q_md[device->nb_q_md].ext_id = queue_id;
+	device->q_md[device->nb_q_md].type = type;
+	device->q_md[device->nb_q_md].setup = 1;
+	device->nb_q_md++;
+
+	return 1;
+}
+
+static void
+opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+	RTE_SET_USED(queue_id);
+
+	if (device->data->dev_started)
+		return;
+
+}
+
+static void
+opdl_queue_def_conf(struct rte_eventdev *dev,
+		uint8_t queue_id,
+		struct rte_event_queue_conf *conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+
+	static const struct rte_event_queue_conf default_conf = {
+		.nb_atomic_flows = 1024,
+		.nb_atomic_order_sequences = 1,
+		.event_queue_cfg = 0,
+		.schedule_type = RTE_SCHED_TYPE_ORDERED,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+	};
+
+	*conf = default_conf;
+}
 
 
 static int
@@ -306,6 +410,10 @@ opdl_probe(struct rte_vdev_device *vdev)
 		.dev_stop = opdl_stop,
 		.dump = opdl_dump,
 
+		.queue_def_conf = opdl_queue_def_conf,
+		.queue_setup = opdl_queue_setup,
+		.queue_release = opdl_queue_release,
+
 		.xstats_get = opdl_xstats_get,
 		.xstats_get_names = opdl_xstats_get_names,
 		.xstats_get_by_name = opdl_xstats_get_by_name,
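
For reference, a minimal usage sketch (not part of the diff above) of how an
application reaches the new queue_def_conf()/queue_setup() callbacks through
the public eventdev API. The vdev name "event_opdl0", the single-queue/
single-port configuration and the helper name are assumptions for
illustration; the device is expected to have been created with an EAL option
such as --vdev=event_opdl0.

/*
 * Usage sketch only: configure one ordered queue on the opdl PMD.
 * Returns 0 on success, a negative errno-style value on failure.
 */
#include <rte_eal.h>
#include <rte_eventdev.h>

static int
setup_one_ordered_queue(int argc, char **argv)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config dev_conf = {0};
	struct rte_event_queue_conf q_conf;
	int ret, dev_id;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return ret;

	/* Assumed instance name; created via --vdev=event_opdl0 */
	dev_id = rte_event_dev_get_dev_id("event_opdl0");
	if (dev_id < 0)
		return dev_id;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	/* One queue and one port, staying within the limits the PMD reports */
	dev_conf.nb_event_queues = 1;
	dev_conf.nb_event_ports = 1;
	dev_conf.nb_events_limit = info.max_num_events;
	dev_conf.nb_event_queue_flows = info.max_event_queue_flows;
	dev_conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	dev_conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0)
		return ret;

	/* Start from the defaults reported by opdl_queue_def_conf() ... */
	ret = rte_event_queue_default_conf_get(dev_id, 0, &q_conf);
	if (ret < 0)
		return ret;

	/*
	 * ... and request an ordered queue, which opdl_queue_setup() maps
	 * to OPDL_Q_TYPE_ORDERED.
	 */
	q_conf.schedule_type = RTE_SCHED_TYPE_ORDERED;
	return rte_event_queue_setup(dev_id, 0, &q_conf);
}

A complete application would follow this with port setup,
rte_event_port_link() to connect the pipeline stages, and
rte_event_dev_start().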