From patchwork Sat Jun 27 04:37:46 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Timothy McDaniel <timothy.mcdaniel@intel.com>
X-Patchwork-Id: 72312
X-Patchwork-Delegate: jerinj@marvell.com
From: Tim McDaniel <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,
 harry.van.haaren@intel.com, "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
Date: Fri, 26 Jun 2020 23:37:46 -0500
Message-Id: <1593232671-5690-23-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>
References: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 22/27] event/dlb: add port_unlink and
 port_unlinks_in_progress

From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>

Add the port_unlink and port_unlinks_in_progress eventdev entry
points. The DLB hardware unmaps load-balanced queues from ports
asynchronously, so port_unlinks_in_progress reports the number of
unmap operations still in flight for a port, allowing the application
to poll for their completion. Unlink requests against directed ports
are ignored, since directed queues are permanently linked.

Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c | 166 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index fb96551..ac97f4d 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -854,6 +854,30 @@ int dlb_string_to_int(int *result, const char *str)
 	return ret;
 }
 
+static int16_t
+dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
+			       uint32_t qm_port_id,
+			       uint16_t qm_qid)
+{
+	struct dlb_unmap_qid_args cfg;
+	struct dlb_cmd_response response;
+	int32_t ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	cfg.response = (uintptr_t)&response;
+	cfg.port_id = qm_port_id;
+	cfg.qid = qm_qid;
+
+	ret = dlb_iface_unmap_qid(handle, &cfg);
+	if (ret < 0)
+		DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+
+	return ret;
+}
+
 /* VDEV-only notes:
  * This function first unmaps all memory mappings and closes the
  * domain's file descriptor, which causes the driver to reset the
@@ -1898,6 +1922,42 @@ int dlb_string_to_int(int *result, const char *str)
 }
 
 static int
+dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
+			   struct dlb_eventdev_port *ev_port,
+			   struct dlb_eventdev_queue *ev_queue)
+{
+	int ret, i;
+
+	/* Don't unlink until start time. */
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+		return 0;
+
+	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		if (ev_port->link[i].valid &&
+		    ev_port->link[i].queue_id == ev_queue->id)
+			break; /* found */
+	}
+
+	/* This is expected with the eventdev API,
+	 * which blindly attempts to unmap all queues.
+	 */
+	if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+		DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
+			    ev_queue->qm_queue.id,
+			    ev_port->qm_port.id);
+		return 0;
+	}
+
+	ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
+					     ev_port->qm_port.id,
+					     ev_queue->qm_queue.id);
+	if (!ret)
+		ev_port->link[i].mapped = false;
+
+	return ret;
+}
+
+static int
 dlb_do_port_link(struct rte_eventdev *dev,
 		 struct dlb_eventdev_queue *ev_queue,
 		 struct dlb_eventdev_port *ev_port,
@@ -2470,6 +2530,109 @@ static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
 	return 0;
 }
 
+static int
+dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
+			 uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct dlb_eventdev_port *ev_port = event_port;
+	struct dlb_eventdev *dlb;
+	int i;
+
+	RTE_SET_USED(dev);
+
+	if (!ev_port->setup_done) {
+		DLB_LOG_ERR("dlb: evport %d is not configured\n",
+			    ev_port->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (!queues || nb_unlinks == 0) {
+		DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
+		return 0; /* Ignore and return success */
+	}
+
+	if (ev_port->qm_port.is_directed) {
+		DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
+			    ev_port->id);
+		rte_errno = 0;
+		return nb_unlinks; /* as if success */
+	}
+
+	dlb = ev_port->dlb;
+
+	for (i = 0; i < nb_unlinks; i++) {
+		struct dlb_eventdev_queue *ev_queue;
+		int ret, j;
+
+		if (queues[i] >= dlb->num_queues) {
+			DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
+			rte_errno = -EINVAL;
+			return i; /* return index of offending queue */
+		}
+
+		ev_queue = &dlb->ev_queues[queues[i]];
+
+		/* Does a link exist? */
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+			if (ev_port->link[j].queue_id == queues[i] &&
+			    ev_port->link[j].valid)
+				break;
+
+		if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
+			continue;
+
+		ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
+		if (ret) {
+			DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
+				    ret, ev_port->id, queues[i]);
+			rte_errno = -ENOENT;
+			return i; /* return index of offending queue */
+		}
+
+		ev_port->link[j].valid = false;
+		ev_port->num_links--;
+		ev_queue->num_links--;
+	}
+
+	return nb_unlinks;
+}
+
+static int
+dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
+				      void *event_port)
+{
+	struct dlb_eventdev_port *ev_port = event_port;
+	struct dlb_eventdev *dlb;
+	struct dlb_hw_dev *handle;
+	struct dlb_pending_port_unmaps_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	RTE_SET_USED(dev);
+
+	if (!ev_port->setup_done) {
+		DLB_LOG_ERR("dlb: evport %d is not configured\n",
+			    ev_port->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	cfg.port_id = ev_port->qm_port.id;
+	cfg.response = (uintptr_t)&response;
+	dlb = ev_port->dlb;
+	handle = &dlb->qm_instance;
+	ret = dlb_iface_pending_port_unmaps(handle, &cfg);
+
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
 static void
 dlb_eventdev_port_release(void *port)
 {
@@ -2576,6 +2739,9 @@ static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
 	.port_setup = dlb_eventdev_port_setup,
 	.port_release = dlb_eventdev_port_release,
 	.port_link = dlb_eventdev_port_link,
+	.port_unlink = dlb_eventdev_port_unlink,
+	.port_unlinks_in_progress =
+		dlb_eventdev_port_unlinks_in_progress,
 };
 
 /* Expose PMD's eventdev interface */
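
For reference, the two new entry points surface to applications through
the public eventdev API as rte_event_port_unlink() and
rte_event_port_unlinks_in_progress(). A minimal usage sketch follows; it
is illustrative only and not part of this patch, and it assumes dev_id,
port_id, and queue_id were set up earlier with the usual
rte_event_dev_configure()/rte_event_queue_setup()/rte_event_port_setup()
sequence:

	#include <rte_eventdev.h>
	#include <rte_pause.h>

	/* Unlink one queue from a port, then wait for the DLB's
	 * asynchronous unmap to finish before touching the queue again.
	 * (Hypothetical helper, not part of the driver.)
	 */
	static int
	unlink_and_quiesce(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
	{
		uint8_t queues[1] = { queue_id };

		if (rte_event_port_unlink(dev_id, port_id, queues, 1) != 1)
			return -1; /* rte_errno holds the failure reason */

		/* unlinks_in_progress returns the number of unmaps still in
		 * flight; poll until none remain (negative means error, which
		 * also terminates the loop).
		 */
		while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
			rte_pause();

		return 0;
	}

Such a polling loop is the reason the driver exposes
port_unlinks_in_progress at all: since the hardware completes QID unmaps
asynchronously, a synchronous-looking unlink needs a completion query.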