From patchwork Thu Jul 30 19:50:14 2020
X-Patchwork-Submitter: Timothy McDaniel <timothy.mcdaniel@intel.com>
X-Patchwork-Id: 75077
X-Patchwork-Delegate: jerinj@marvell.com
From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,
 harry.van.haaren@intel.com, "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
Date: Thu, 30 Jul 2020 14:50:14 -0500
Message-Id: <1596138614-17409-28-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>
References: <1593232671-5690-0-git-send-email-timothy.mcdaniel@intel.com>
 <1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 27/27] event/dlb: add eventdev_stop and eventdev_close
List-Id: DPDK patches and discussions
Sender: "dev" dev-bounces@dpdk.org

From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>

Add the eventdev stop and close entry points. Stop drains all in-flight
events from the device: each port is flushed (invoking the application's
stop-flush callback, if one is registered) until every linked queue
reports a depth of zero, and any unlinked load-balanced queues are
drained through a temporarily linked load-balanced port. Close then
resets the hardware scheduling domain.

Change-Id: Iff64ba83ead496b3dedeaf323ee09bce1f631a6f
Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c | 263 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 263 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 5adc95e..1498bdd 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -303,6 +303,23 @@ int dlb_string_to_int(int *result, const char *str)
 					 const struct rte_event events[],
 					 uint16_t num);
 
+void
+dlb_free_qe_mem(struct dlb_port *qm_port)
+{
+	if (qm_port == NULL)
+		return;
+
+	if (qm_port->qe4) {
+		rte_free(qm_port->qe4);
+		qm_port->qe4 = NULL;
+	}
+
+	if (qm_port->consume_qe) {
+		rte_free(qm_port->consume_qe);
+		qm_port->consume_qe = NULL;
+	}
+}
+
 int
 dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
 {
@@ -1856,6 +1873,250 @@ static inline void dlb_issue_int_arm_hcw(struct dlb_port *qm_port)
 	return 0;
 }
 
+static void
+dlb_flush_port(struct rte_eventdev *dev, int port_id)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	eventdev_stop_flush_t flush;
+	struct rte_event ev;
+	uint8_t dev_id;
+	void *arg;
+	int i;
+
+	flush = dev->dev_ops->dev_stop_flush;
+	dev_id = dev->data->dev_id;
+	arg = dev->data->dev_stop_flush_arg;
+
+	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
+		if (flush)
+			flush(dev_id, ev, arg);
+
+		if (dlb->ev_ports[port_id].qm_port.is_directed)
+			continue;
+
+		ev.op = RTE_EVENT_OP_RELEASE;
+
+		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+	}
+
+	/* Enqueue any additional outstanding releases */
+	ev.op = RTE_EVENT_OP_RELEASE;
+
+	for (i = dlb->ev_ports[port_id].outstanding_releases; i > 0; i--)
+		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+}
+
+static uint32_t
+dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb,
+			struct dlb_eventdev_queue *queue)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_ldb_queue_depth_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.queue_id = queue->qm_queue.id;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_ldb_queue_depth(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_ldb_queue_depth ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+static uint32_t
+dlb_get_dir_queue_depth(struct dlb_eventdev *dlb,
+			struct dlb_eventdev_queue *queue)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_dir_queue_depth_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.queue_id = queue->qm_queue.id;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_dir_queue_depth(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_dir_queue_depth ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+uint32_t
+dlb_get_queue_depth(struct dlb_eventdev *dlb,
+		    struct dlb_eventdev_queue *queue)
+{
+	if (queue->qm_queue.is_directed)
+		return dlb_get_dir_queue_depth(dlb, queue);
+	else
+		return dlb_get_ldb_queue_depth(dlb, queue);
+}
+
+static bool
+dlb_queue_is_empty(struct dlb_eventdev *dlb,
+		   struct dlb_eventdev_queue *queue)
+{
+	return dlb_get_queue_depth(dlb, queue) == 0;
+}
+
+static bool
+dlb_linked_queues_empty(struct dlb_eventdev *dlb)
+{
+	int i;
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (dlb->ev_queues[i].num_links == 0)
+			continue;
+		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			return false;
+	}
+
+	return true;
+}
+
+static bool
+dlb_queues_empty(struct dlb_eventdev *dlb)
+{
+	int i;
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			return false;
+	}
+
+	return true;
+}
+
+void
+dlb_drain(struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_eventdev_port *ev_port = NULL;
+	uint8_t dev_id;
+	int i;
+
+	dev_id = dev->data->dev_id;
+
+	while (!dlb_linked_queues_empty(dlb)) {
+		/* Flush all the ev_ports, which will drain all their connected
+		 * queues.
+		 */
+		for (i = 0; i < dlb->num_ports; i++)
+			dlb_flush_port(dev, i);
+	}
+
+	/* The queues are empty, but there may be events left in the ports. */
+	for (i = 0; i < dlb->num_ports; i++)
+		dlb_flush_port(dev, i);
+
+	/* If the domain's queues are empty, we're done. */
+	if (dlb_queues_empty(dlb))
+		return;
+
+	/* Else, there must be at least one unlinked load-balanced queue.
+	 * Select a load-balanced port with which to drain the unlinked
+	 * queue(s).
+	 */
+	for (i = 0; i < dlb->num_ports; i++) {
+		ev_port = &dlb->ev_ports[i];
+
+		if (!ev_port->qm_port.is_directed)
+			break;
+	}
+
+	if (i == dlb->num_ports) {
+		DLB_LOG_ERR("internal error: no LDB ev_ports\n");
+		return;
+	}
+
+	rte_errno = 0;
+	rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
+
+	if (rte_errno) {
+		DLB_LOG_ERR("internal error: failed to unlink ev_port %d\n",
+			    ev_port->id);
+		return;
+	}
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		uint8_t qid, prio;
+		int ret;
+
+		if (dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			continue;
+
+		qid = i;
+		prio = 0;
+
+		/* Link the ev_port to the queue */
+		ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
+		if (ret != 1) {
+			DLB_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
+				    ev_port->id, qid);
+			return;
+		}
+
+		/* Flush the queue */
+		while (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+			dlb_flush_port(dev, ev_port->id);
+
+		/* Drain any extant events in the ev_port. */
+		dlb_flush_port(dev, ev_port->id);
+
+		/* Unlink the ev_port from the queue */
+		ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
+		if (ret != 1) {
+			DLB_LOG_ERR("internal error: failed to unlink ev_port %d from queue %d\n",
+				    ev_port->id, qid);
+			return;
+		}
+	}
+}
+
+static void
+dlb_eventdev_stop(struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+
+	rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED) {
+		DLB_LOG_DBG("Internal error: already stopped\n");
+		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+		return;
+	} else if (dlb->run_state != DLB_RUN_STATE_STARTED) {
+		DLB_LOG_ERR("Internal error: bad state %d for dev_stop\n",
+			    (int)dlb->run_state);
+		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+		return;
+	}
+
+	dlb->run_state = DLB_RUN_STATE_STOPPING;
+
+	rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+	dlb_drain(dev);
+
+	dlb->run_state = DLB_RUN_STATE_STOPPED;
+}
+
+static int
+dlb_eventdev_close(struct rte_eventdev *dev)
+{
+	dlb_hw_reset_sched_domain(dev, false);
+
+	return 0;
+}
+
+
 static inline int
 dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
 			     struct dlb_eventdev_port *ev_port)
@@ -3710,6 +3971,8 @@ static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
 	.dev_infos_get = dlb_eventdev_info_get,
 	.dev_configure = dlb_eventdev_configure,
 	.dev_start = dlb_eventdev_start,
+	.dev_stop = dlb_eventdev_stop,
+	.dev_close = dlb_eventdev_close,
 	.queue_def_conf = dlb_eventdev_queue_default_conf_get,
 	.queue_setup = dlb_eventdev_queue_setup,
 	.queue_release = dlb_eventdev_queue_release,
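
For context, here is a minimal application-side sketch of how these new entry points are exercised through the public eventdev API. The helper names (app_stop_flush, app_teardown, flushed_events) are illustrative only, and dev_id is assumed to refer to an already configured and started eventdev; the registration call and callback signature are the standard rte_eventdev stop-flush interface that dlb_flush_port() invokes via dev_ops->dev_stop_flush.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_eventdev.h>

static uint64_t flushed_events;

/* Invoked once for every event still in the device when it is stopped;
 * on this PMD the call comes from dlb_flush_port() through
 * dev_ops->dev_stop_flush.
 */
static void
app_stop_flush(uint8_t dev_id __rte_unused, struct rte_event ev __rte_unused,
	       void *arg)
{
	uint64_t *count = arg;

	(*count)++;
}

static int
app_teardown(uint8_t dev_id)
{
	int ret;

	/* Ask the PMD to hand back any in-flight events during stop. */
	ret = rte_event_dev_stop_flush_callback_register(dev_id,
							 app_stop_flush,
							 &flushed_events);
	if (ret != 0)
		return ret;

	/* Drains all ports and queues before returning. */
	rte_event_dev_stop(dev_id);

	printf("flushed %" PRIu64 " in-flight events\n", flushed_events);

	/* Tears down the scheduling domain. */
	return rte_event_dev_close(dev_id);
}

With the callback registered, rte_event_dev_stop() maps onto dlb_eventdev_stop()/dlb_drain() above, and rte_event_dev_close() onto dlb_eventdev_close().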