From patchwork Fri Oct 30 18:27:30 2020
X-Patchwork-Submitter: Timothy McDaniel
X-Patchwork-Id: 83006
X-Patchwork-Delegate: jerinj@marvell.com
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net
Date: Fri, 30 Oct 2020 13:27:30 -0500
Message-Id: <1604082458-15368-16-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1604082458-15368-1-git-send-email-timothy.mcdaniel@intel.com>
References: <20200612212434.6852-2-timothy.mcdaniel@intel.com>
 <1604082458-15368-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v10 15/23] event/dlb: add port unlink and port
 unlinks in progress

Add support for the port unlink(s) eventdev entry points. The unlink
operation is asynchronous and is executed by a control thread; the
unlinks-in-progress function reads a counter shared with that thread.
Port QE and memzone memory are freed here.
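For context, the application-side flow these entry points serve is: request
the unlink through the public eventdev API, then poll until the PMD reports
that no unlinks are still in flight. The sketch below is illustrative only
and is not part of this patch; dev_id, port_id and queue_id are placeholder
values and unlink_queue_and_wait() is a hypothetical helper.

#include <rte_eventdev.h>
#include <rte_errno.h>
#include <rte_pause.h>

/* Hypothetical helper: unlink one queue from a port and busy-wait until the
 * PMD's control thread has finished the asynchronous unmap.
 */
static int
unlink_queue_and_wait(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
        uint8_t queues[1] = { queue_id };

        /* Request the unlink; it may still be in progress when this returns. */
        if (rte_event_port_unlink(dev_id, port_id, queues, 1) != 1)
                return -1; /* rte_errno holds the failure reason */

        /* Poll the in-progress counter; 0 means the control thread is done,
         * a negative value indicates an error.
         */
        while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
                rte_pause();

        return 0;
}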
Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Reviewed-by: Gage Eads <gage.eads@intel.com>
---
 drivers/event/dlb/dlb.c | 166 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 2ad195d..c64f559 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -693,6 +693,169 @@ dlb_eventdev_configure(const struct rte_eventdev *dev)
         return 0;
 }
 
+static int16_t
+dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
+                               uint32_t qm_port_id,
+                               uint16_t qm_qid)
+{
+        struct dlb_unmap_qid_args cfg;
+        struct dlb_cmd_response response;
+        int32_t ret;
+
+        if (handle == NULL)
+                return -EINVAL;
+
+        cfg.response = (uintptr_t)&response;
+        cfg.port_id = qm_port_id;
+        cfg.qid = qm_qid;
+
+        ret = dlb_iface_unmap_qid(handle, &cfg);
+        if (ret < 0)
+                DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
+                            ret, dlb_error_strings[response.status]);
+
+        return ret;
+}
+
+static int
+dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
+                           struct dlb_eventdev_port *ev_port,
+                           struct dlb_eventdev_queue *ev_queue)
+{
+        int ret, i;
+
+        /* Don't unlink until start time. */
+        if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+                return 0;
+
+        for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+                if (ev_port->link[i].valid &&
+                    ev_port->link[i].queue_id == ev_queue->id)
+                        break; /* found */
+        }
+
+        /* This is expected with eventdev API!
+         * It blindly attempts to unmap all queues.
+         */
+        if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+                DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
+                            ev_queue->qm_queue.id,
+                            ev_port->qm_port.id);
+                return 0;
+        }
+
+        ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
+                                             ev_port->qm_port.id,
+                                             ev_queue->qm_queue.id);
+        if (!ret)
+                ev_port->link[i].mapped = false;
+
+        return ret;
+}
+
+static int
+dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
+                         uint8_t queues[], uint16_t nb_unlinks)
+{
+        struct dlb_eventdev_port *ev_port = event_port;
+        struct dlb_eventdev *dlb;
+        int i;
+
+        RTE_SET_USED(dev);
+
+        if (!ev_port->setup_done) {
+                DLB_LOG_ERR("dlb: evport %d is not configured\n",
+                            ev_port->id);
+                rte_errno = -EINVAL;
+                return 0;
+        }
+
+        if (queues == NULL || nb_unlinks == 0) {
+                DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
+                return 0; /* Ignore and return success */
+        }
+
+        if (ev_port->qm_port.is_directed) {
+                DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
+                            ev_port->id);
+                rte_errno = 0;
+                return nb_unlinks; /* as if success */
+        }
+
+        dlb = ev_port->dlb;
+
+        for (i = 0; i < nb_unlinks; i++) {
+                struct dlb_eventdev_queue *ev_queue;
+                int ret, j;
+
+                if (queues[i] >= dlb->num_queues) {
+                        DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
+                        rte_errno = -EINVAL;
+                        return i; /* return index of offending queue */
+                }
+
+                ev_queue = &dlb->ev_queues[queues[i]];
+
+                /* Does a link exist? */
+                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+                        if (ev_port->link[j].queue_id == queues[i] &&
+                            ev_port->link[j].valid)
+                                break;
+
+                if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
+                        continue;
+
+                ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
+                if (ret) {
+                        DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
+                                    ret, ev_port->id, queues[i]);
+                        rte_errno = -ENOENT;
+                        return i; /* return index of offending queue */
+                }
+
+                ev_port->link[j].valid = false;
+                ev_port->num_links--;
+                ev_queue->num_links--;
+        }
+
+        return nb_unlinks;
+}
+
+static int
+dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
+                                      void *event_port)
+{
+        struct dlb_eventdev_port *ev_port = event_port;
+        struct dlb_eventdev *dlb;
+        struct dlb_hw_dev *handle;
+        struct dlb_pending_port_unmaps_args cfg;
+        struct dlb_cmd_response response;
+        int ret;
+
+        RTE_SET_USED(dev);
+
+        if (!ev_port->setup_done) {
+                DLB_LOG_ERR("dlb: evport %d is not configured\n",
+                            ev_port->id);
+                rte_errno = -EINVAL;
+                return 0;
+        }
+
+        cfg.port_id = ev_port->qm_port.id;
+        cfg.response = (uintptr_t)&response;
+        dlb = ev_port->dlb;
+        handle = &dlb->qm_instance;
+        ret = dlb_iface_pending_port_unmaps(handle, &cfg);
+
+        if (ret < 0) {
+                DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
+                            ret, dlb_error_strings[response.status]);
+                return ret;
+        }
+
+        return response.id;
+}
+
 static void
 dlb_eventdev_port_default_conf_get(struct rte_eventdev *dev,
                                    uint8_t port_id,
@@ -1848,6 +2011,9 @@ dlb_entry_points_init(struct rte_eventdev *dev)
                 .queue_setup = dlb_eventdev_queue_setup,
                 .port_setup = dlb_eventdev_port_setup,
                 .port_link = dlb_eventdev_port_link,
+                .port_unlink = dlb_eventdev_port_unlink,
+                .port_unlinks_in_progress =
+                        dlb_eventdev_port_unlinks_in_progress,
                 .dump = dlb_eventdev_dump,
                 .xstats_get = dlb_eventdev_xstats_get,
                 .xstats_get_names = dlb_eventdev_xstats_get_names,