From patchwork Sat Oct 17 18:21:14 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Timothy McDaniel
X-Patchwork-Id: 81215
X-Patchwork-Delegate: jerinj@marvell.com
From: Timothy McDaniel
To:
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com
Date: Sat, 17 Oct 2020 13:21:14 -0500
Message-Id: <1602958879-8558-18-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1602958879-8558-1-git-send-email-timothy.mcdaniel@intel.com>
References: <1599855987-25976-2-git-send-email-timothy.mcdaniel@intel.com>
 <1602958879-8558-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v2 17/22] event/dlb2: add eventdev stop and close

Add support for the eventdev stop and close entry points.

Signed-off-by: Timothy McDaniel
Reviewed-by: Gage Eads
---
 drivers/event/dlb2/dlb2.c                  | 257 +++++++++++++++++++++++++++--
 drivers/event/dlb2/dlb2_iface.c            |   6 +
 drivers/event/dlb2/dlb2_iface.h            |   6 +
 drivers/event/dlb2/pf/base/dlb2_resource.c |  91 ++++++++++
 drivers/event/dlb2/pf/dlb2_pf.c            |  52 ++++++
 5 files changed, 396 insertions(+), 16 deletions(-)
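
For context, a minimal application-side sketch of the shutdown sequence these
entry points serve: stop the device (optionally with a stop-flush callback for
events still in flight), then close it. This is illustrative only and not part
of the patch; the dev_id value, the app_stop_flush_cb/app_shutdown names, and
the mbuf-freeing callback body are assumptions.

#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>

static void
app_stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);

	/* Events still held by the device at stop time are handed to this
	 * callback by the PMD's flush loop; reclaim whatever they carry.
	 */
	rte_pktmbuf_free(ev.mbuf);
}

static void
app_shutdown(uint8_t dev_id)
{
	/* Optional: register a flush callback before stopping. */
	rte_event_dev_stop_flush_callback_register(dev_id, app_stop_flush_cb,
						   NULL);

	/* dev_stop drains all of the PMD's ports and queues. */
	rte_event_dev_stop(dev_id);

	/* dev_close resets the hardware scheduling domain. */
	rte_event_dev_close(dev_id);
}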

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 417c5d0..483659e 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -74,21 +74,6 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
 struct process_local_port_data
 dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
 
-/*
- * DUMMY - added so that xstats path will compile/link.
- * Will be replaced by real version in a subsequent
- * patch.
- */
-uint32_t
-dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
-		     struct dlb2_eventdev_queue *queue)
-{
-	RTE_SET_USED(dlb2);
-	RTE_SET_USED(queue);
-
-	return 0;
-}
-
 static void
 dlb2_free_qe_mem(struct dlb2_port *qm_port)
 {
@@ -1881,7 +1866,6 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 		return 0; /* Ignore and return success */
 	}
 
-	/* FIXME: How to handle unlink on a directed port? */
 	if (ev_port->qm_port.is_directed) {
 		DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
 			     ev_port->id);
@@ -3416,6 +3400,245 @@ dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
 }
 
 static void
+dlb2_flush_port(struct rte_eventdev *dev, int port_id)
+{
+	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+	eventdev_stop_flush_t flush;
+	struct rte_event ev;
+	uint8_t dev_id;
+	void *arg;
+	int i;
+
+	flush = dev->dev_ops->dev_stop_flush;
+	dev_id = dev->data->dev_id;
+	arg = dev->data->dev_stop_flush_arg;
+
+	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
+		if (flush)
+			flush(dev_id, ev, arg);
+
+		if (dlb2->ev_ports[port_id].qm_port.is_directed)
+			continue;
+
+		ev.op = RTE_EVENT_OP_RELEASE;
+
+		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+	}
+
+	/* Enqueue any additional outstanding releases */
+	ev.op = RTE_EVENT_OP_RELEASE;
+
+	for (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--)
+		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+}
+
+static uint32_t
+dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
+			 struct dlb2_eventdev_queue *queue)
+{
+	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
+	struct dlb2_get_ldb_queue_depth_args cfg;
+	int ret;
+
+	cfg.queue_id = queue->qm_queue.id;
+
+	ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
+	if (ret < 0) {
+		DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
+			     ret, dlb2_error_strings[cfg.response.status]);
+		return ret;
+	}
+
+	return cfg.response.id;
+}
+
+static uint32_t
+dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
+			 struct dlb2_eventdev_queue *queue)
+{
+	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
+	struct dlb2_get_dir_queue_depth_args cfg;
+	int ret;
+
+	cfg.queue_id = queue->qm_queue.id;
+
+	ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
+	if (ret < 0) {
+		DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
+			     ret, dlb2_error_strings[cfg.response.status]);
+		return ret;
+	}
+
+	return cfg.response.id;
+}
+
+uint32_t
+dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
+		     struct dlb2_eventdev_queue *queue)
+{
+	if (queue->qm_queue.is_directed)
+		return dlb2_get_dir_queue_depth(dlb2, queue);
+	else
+		return dlb2_get_ldb_queue_depth(dlb2, queue);
+}
+
+static bool
+dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
+		    struct dlb2_eventdev_queue *queue)
+{
+	return dlb2_get_queue_depth(dlb2, queue) == 0;
+}
+
+static bool
+dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)
+{
+	int i;
+
+	for (i = 0; i < dlb2->num_queues; i++) {
+		if (dlb2->ev_queues[i].num_links == 0)
+			continue;
+		if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
+			return false;
+	}
+
+	return true;
+}
+
+static bool
+dlb2_queues_empty(struct dlb2_eventdev *dlb2)
+{
+	int i;
+
+	for (i = 0; i < dlb2->num_queues; i++) {
+		if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
+			return false;
+	}
+
+	return true;
+}
+
+static void
+dlb2_drain(struct rte_eventdev *dev)
+{
+	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+	struct dlb2_eventdev_port *ev_port = NULL;
+	uint8_t dev_id;
+	int i;
+
+	dev_id = dev->data->dev_id;
+
+	while (!dlb2_linked_queues_empty(dlb2)) {
+		/* Flush all the ev_ports, which will drain all their connected
+		 * queues.
+		 */
+		for (i = 0; i < dlb2->num_ports; i++)
+			dlb2_flush_port(dev, i);
+	}
+
+	/* The queues are empty, but there may be events left in the ports. */
+	for (i = 0; i < dlb2->num_ports; i++)
+		dlb2_flush_port(dev, i);
+
+	/* If the domain's queues are empty, we're done. */
+	if (dlb2_queues_empty(dlb2))
+		return;
+
+	/* Else, there must be at least one unlinked load-balanced queue.
+	 * Select a load-balanced port with which to drain the unlinked
+	 * queue(s).
+	 */
+	for (i = 0; i < dlb2->num_ports; i++) {
+		ev_port = &dlb2->ev_ports[i];
+
+		if (!ev_port->qm_port.is_directed)
+			break;
+	}
+
+	if (i == dlb2->num_ports) {
+		DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
+		return;
+	}
+
+	rte_errno = 0;
+	rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
+
+	if (rte_errno) {
+		DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
+			     ev_port->id);
+		return;
+	}
+
+	for (i = 0; i < dlb2->num_queues; i++) {
+		uint8_t qid, prio;
+		int ret;
+
+		if (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
+			continue;
+
+		qid = i;
+		prio = 0;
+
+		/* Link the ev_port to the queue */
+		ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
+		if (ret != 1) {
+			DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
+				     ev_port->id, qid);
+			return;
+		}
+
+		/* Flush the queue */
+		while (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
+			dlb2_flush_port(dev, ev_port->id);
+
+		/* Drain any extant events in the ev_port. */
+		dlb2_flush_port(dev, ev_port->id);
+
+		/* Unlink the ev_port from the queue */
+		ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
+		if (ret != 1) {
+			DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
+				     ev_port->id, qid);
+			return;
+		}
+	}
+}
+
+static void
+dlb2_eventdev_stop(struct rte_eventdev *dev)
+{
+	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+
+	rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
+
+	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
+		DLB2_LOG_DBG("Internal error: already stopped\n");
+		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
+		return;
+	} else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
+		DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
+			     (int)dlb2->run_state);
+		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
+		return;
+	}
+
+	dlb2->run_state = DLB2_RUN_STATE_STOPPING;
+
+	rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
+
+	dlb2_drain(dev);
+
+	dlb2->run_state = DLB2_RUN_STATE_STOPPED;
+}
+
+static int
+dlb2_eventdev_close(struct rte_eventdev *dev)
+{
+	dlb2_hw_reset_sched_domain(dev, false);
+
+	return 0;
+}
+
+static void
 dlb2_entry_points_init(struct rte_eventdev *dev)
 {
 	struct dlb2_eventdev *dlb2;
@@ -3425,6 +3648,8 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 		.dev_infos_get = dlb2_eventdev_info_get,
 		.dev_configure = dlb2_eventdev_configure,
 		.dev_start = dlb2_eventdev_start,
+		.dev_stop = dlb2_eventdev_stop,
+		.dev_close = dlb2_eventdev_close,
 		.queue_def_conf = dlb2_eventdev_queue_default_conf_get,
 		.queue_setup = dlb2_eventdev_queue_setup,
 		.port_def_conf = dlb2_eventdev_port_default_conf_get,
diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c
index a86191d..5471dd8 100644
--- a/drivers/event/dlb2/dlb2_iface.c
+++ b/drivers/event/dlb2/dlb2_iface.c
@@ -66,3 +66,9 @@ int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,
 
 int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,
				     struct dlb2_start_domain_args *cfg);
+
+int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle,
+				      struct dlb2_get_ldb_queue_depth_args *args);
+
+int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle,
+				      struct dlb2_get_dir_queue_depth_args *args);
diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h
index d5bb6be..bcd9446 100644
--- a/drivers/event/dlb2/dlb2_iface.h
+++ b/drivers/event/dlb2/dlb2_iface.h
@@ -66,4 +66,10 @@ extern int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,
 
 extern int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,
					    struct dlb2_start_domain_args *cfg);
+extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle,
+					     struct dlb2_get_ldb_queue_depth_args *args);
+
+extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle,
+					     struct dlb2_get_dir_queue_depth_args *args);
+
 #endif /* _DLB2_IFACE_H_ */
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index c441d93..71b4604 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -5931,3 +5931,94 @@ dlb2_hw_start_domain(struct dlb2_hw *hw,
 
 	return 0;
 }
+
+static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
+					 u32 domain_id,
+					 u32 queue_id,
+					 bool vdev_req,
+					 unsigned int vf_id)
+{
+	DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
+	if (vdev_req)
+		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
+	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
+	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
+}
+
+int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
+				u32 domain_id,
+				struct dlb2_get_dir_queue_depth_args *args,
+				struct dlb2_cmd_response *resp,
+				bool vdev_req,
+				unsigned int vdev_id)
+{
+	struct dlb2_dir_pq_pair *queue;
+	struct dlb2_hw_domain *domain;
+	int id;
+
+	id = domain_id;
+
+	dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
+				     vdev_req, vdev_id);
+
+	domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
+	if (domain == NULL) {
+		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+		return -EINVAL;
+	}
+
+	id = args->queue_id;
+
+	queue = dlb2_get_domain_used_dir_pq(id, vdev_req, domain);
+	if (queue == NULL) {
+		resp->status = DLB2_ST_INVALID_QID;
+		return -EINVAL;
+	}
+
+	resp->id = dlb2_dir_queue_depth(hw, queue);
+
+	return 0;
+}
+
+static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
+					 u32 domain_id,
+					 u32 queue_id,
+					 bool vdev_req,
+					 unsigned int vf_id)
+{
+	DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
+	if (vdev_req)
+		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
+	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
+	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
+}
+
+int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
+				u32 domain_id,
+				struct dlb2_get_ldb_queue_depth_args *args,
+				struct dlb2_cmd_response *resp,
+				bool vdev_req,
+				unsigned int vdev_id)
+{
+	struct dlb2_hw_domain *domain;
+	struct dlb2_ldb_queue *queue;
+
+	dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
+				     vdev_req, vdev_id);
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+	if (domain == NULL) {
+		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+		return -EINVAL;
+	}
+
+	queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
+	if (queue == NULL) {
+		resp->status = DLB2_ST_INVALID_QID;
+		return -EINVAL;
+	}
+
+	resp->id = dlb2_ldb_queue_depth(hw, queue);
+
+	return 0;
+}
diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c
index c9dc200..99e6d8e 100644
--- a/drivers/event/dlb2/pf/dlb2_pf.c
+++ b/drivers/event/dlb2/pf/dlb2_pf.c
@@ -528,6 +528,56 @@ dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
 	return ret;
 }
 
+static int
+dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
+			    struct dlb2_get_ldb_queue_depth_args *args)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret;
+
+	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+	ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
+					  handle->domain_id,
+					  args,
+					  &response,
+					  false,
+					  0);
+
+	args->response = response;
+
+	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
+			    struct dlb2_get_dir_queue_depth_args *args)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret = 0;
+
+	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+	ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
+					  handle->domain_id,
+					  args,
+					  &response,
+					  false,
+					  0);
+
+	args->response = response;
+
+	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
 static void
 dlb2_pf_iface_fn_ptrs_init(void)
 {
@@ -545,6 +595,8 @@ dlb2_pf_iface_fn_ptrs_init(void)
 	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
 	dlb2_iface_map_qid = dlb2_pf_map_qid;
 	dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
+	dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
+	dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
 	dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
 	dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
 	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;