From patchwork Fri Jun 12 21:24:34 2020
X-Patchwork-Submitter: Timothy McDaniel
X-Patchwork-Id: 71481
X-Patchwork-Delegate: jerinj@marvell.com
From: "McDaniel, Timothy"
To: jerinj@marvell.com
Cc: dev@dpdk.org, gage.eads@intel.com, harry.van.haaren@intel.com
Date: Fri, 12 Jun 2020 16:24:34 -0500
Message-Id: <20200612212434.6852-28-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 2.13.6
In-Reply-To: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
References: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 27/27] event/dlb: add eventdev_stop and eventdev_close

Add the eventdev stop and close entry points. Stop drains all in-flight
events from the device's queues and ports, invoking the application's
stop-flush callback (if registered) on each drained event, then marks the
device stopped. Close resets the hardware scheduling domain.

Change-Id: Iff64ba83ead496b3dedeaf323ee09bce1f631a6f
Signed-off-by: McDaniel, Timothy
---
 drivers/event/dlb/dlb.c | 263 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 263 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index afdb18061..d47325132 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -305,6 +305,23 @@ dlb_event_enqueue_forward_burst_delayed(void *event_port,
					    const struct rte_event events[],
					    uint16_t num);
 
+void
+dlb_free_qe_mem(struct dlb_port *qm_port)
+{
+	if (qm_port == NULL)
+		return;
+
+	if (qm_port->qe4) {
+		rte_free(qm_port->qe4);
+		qm_port->qe4 = NULL;
+	}
+
+	if (qm_port->consume_qe) {
+		rte_free(qm_port->consume_qe);
+		qm_port->consume_qe = NULL;
+	}
+}
+
 int
 dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
 {
@@ -1922,6 +1939,250 @@ dlb_eventdev_start(struct rte_eventdev *dev)
 	return 0;
 }
 
+static void
+dlb_flush_port(struct rte_eventdev *dev, int port_id)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	eventdev_stop_flush_t flush;
+	struct rte_event ev;
+	uint8_t dev_id;
+	void *arg;
+	int i;
+
+	flush = dev->dev_ops->dev_stop_flush;
+	dev_id = dev->data->dev_id;
+	arg = dev->data->dev_stop_flush_arg;
+
+	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
+		if (flush)
+			flush(dev_id, ev, arg);
+
+		if (dlb->ev_ports[port_id].qm_port.is_directed)
+			continue;
+
+		ev.op = RTE_EVENT_OP_RELEASE;
+
+		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+	}
+
+	/* Enqueue any additional outstanding releases */
+	ev.op = RTE_EVENT_OP_RELEASE;
+
+	for (i = dlb->ev_ports[port_id].outstanding_releases; i > 0; i--)
+		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+}
+
+static uint32_t
+dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb,
+			struct dlb_eventdev_queue *queue)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_ldb_queue_depth_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.queue_id = queue->qm_queue.id;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_ldb_queue_depth(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_ldb_queue_depth ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+static uint32_t
+dlb_get_dir_queue_depth(struct dlb_eventdev *dlb,
+			struct dlb_eventdev_queue *queue)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_dir_queue_depth_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.queue_id = queue->qm_queue.id;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_dir_queue_depth(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_dir_queue_depth ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+uint32_t
+dlb_get_queue_depth(struct dlb_eventdev *dlb,
+		    struct dlb_eventdev_queue *queue)
+{
+	if (queue->qm_queue.is_directed)
+		return dlb_get_dir_queue_depth(dlb, queue);
+	else
+		return dlb_get_ldb_queue_depth(dlb, queue);
+}
+
+static bool
+dlb_queue_is_empty(struct dlb_eventdev *dlb,
+		   struct dlb_eventdev_queue *queue)
+{
+	return dlb_get_queue_depth(dlb, queue) == 0;
+}
+
+static bool
+dlb_linked_queues_empty(struct dlb_eventdev *dlb)
+{
+	int i;
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (dlb->ev_queues[i].num_links == 0)
+			continue;
+		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			return false;
+	}
+
+	return true;
+}
+
+static bool
+dlb_queues_empty(struct dlb_eventdev *dlb)
+{
+	int i;
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			return false;
+	}
+
+	return true;
+}
+
+void
+dlb_drain(struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_eventdev_port *ev_port = NULL;
+	uint8_t dev_id;
+	int i;
+
+	dev_id = dev->data->dev_id;
+
+	while (!dlb_linked_queues_empty(dlb)) {
+		/* Flush all the ev_ports, which will drain all their connected
+		 * queues.
+		 */
+		for (i = 0; i < dlb->num_ports; i++)
+			dlb_flush_port(dev, i);
+	}
+
+	/* The queues are empty, but there may be events left in the ports. */
+	for (i = 0; i < dlb->num_ports; i++)
+		dlb_flush_port(dev, i);
+
+	/* If the domain's queues are empty, we're done. */
+	if (dlb_queues_empty(dlb))
+		return;
+
+	/* Else, there must be at least one unlinked load-balanced queue.
+	 * Select a load-balanced port with which to drain the unlinked
+	 * queue(s).
+	 */
+	for (i = 0; i < dlb->num_ports; i++) {
+		ev_port = &dlb->ev_ports[i];
+
+		if (!ev_port->qm_port.is_directed)
+			break;
+	}
+
+	if (i == dlb->num_ports) {
+		DLB_LOG_ERR("internal error: no LDB ev_ports\n");
+		return;
+	}
+
+	rte_errno = 0;
+	rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
+
+	if (rte_errno) {
+		DLB_LOG_ERR("internal error: failed to unlink ev_port %d\n",
+			    ev_port->id);
+		return;
+	}
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		uint8_t qid, prio;
+		int ret;
+
+		if (dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			continue;
+
+		qid = i;
+		prio = 0;
+
+		/* Link the ev_port to the queue */
+		ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
+		if (ret != 1) {
+			DLB_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
+				    ev_port->id, qid);
+			return;
+		}
+
+		/* Flush the queue */
+		while (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			dlb_flush_port(dev, ev_port->id);
+
+		/* Drain any extant events in the ev_port. */
+		dlb_flush_port(dev, ev_port->id);
+
+		/* Unlink the ev_port from the queue */
+		ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
+		if (ret != 1) {
+			DLB_LOG_ERR("internal error: failed to unlink ev_port %d from queue %d\n",
+				    ev_port->id, qid);
+			return;
+		}
+	}
+}
+
+static void
+dlb_eventdev_stop(struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+
+	rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED) {
+		DLB_LOG_DBG("Internal error: already stopped\n");
+		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+		return;
+	} else if (dlb->run_state != DLB_RUN_STATE_STARTED) {
+		DLB_LOG_ERR("Internal error: bad state %d for dev_stop\n",
+			    (int)dlb->run_state);
+		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+		return;
+	}
+
+	dlb->run_state = DLB_RUN_STATE_STOPPING;
+
+	rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+	dlb_drain(dev);
+
+	dlb->run_state = DLB_RUN_STATE_STOPPED;
+}
+
+static int
+dlb_eventdev_close(struct rte_eventdev *dev)
+{
+	dlb_hw_reset_sched_domain(dev, false);
+
+	return 0;
+}
+
+
 static inline int
 dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
			     struct dlb_eventdev_port *ev_port)
@@ -3782,6 +4043,8 @@ dlb_entry_points_init(struct rte_eventdev *dev)
		.dev_infos_get = dlb_eventdev_info_get,
		.dev_configure = dlb_eventdev_configure,
		.dev_start = dlb_eventdev_start,
+		.dev_stop = dlb_eventdev_stop,
+		.dev_close = dlb_eventdev_close,
		.queue_def_conf = dlb_eventdev_queue_default_conf_get,
		.queue_setup = dlb_eventdev_queue_setup,
		.queue_release = dlb_eventdev_queue_release,
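
Note (not part of the patch): below is a minimal sketch of the application-side
shutdown sequence these two entry points serve, assuming the standard
rte_eventdev API of this DPDK generation and that in-flight events carry mbufs.
The callback and helper names are illustrative only, not part of this series.

#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Invoked by dev_stop (via dlb_flush_port()) for each event drained while
 * the device is being stopped, so the application can reclaim resources.
 */
static void
stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);

	/* Assumption: the application enqueues mbuf-carrying events. */
	rte_pktmbuf_free(ev.mbuf);
}

/* Illustrative helper: orderly shutdown of an already-started eventdev. */
static int
shutdown_eventdev(uint8_t dev_id)
{
	/* Optional: without a registered callback, drained events are
	 * simply discarded by dlb_flush_port().
	 */
	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb,
						   NULL);

	/* dev_stop: drains all linked and unlinked queues via dlb_drain(). */
	rte_event_dev_stop(dev_id);

	/* dev_close: resets the hardware scheduling domain. */
	return rte_event_dev_close(dev_id);
}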