From patchwork Fri Dec 14 13:18:35 2018
X-Patchwork-Submitter: Michal Krawczyk
X-Patchwork-Id: 48867
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Michal Krawczyk
To: dev@dpdk.org
Cc: gtzalik@dpdk.org, mw@dpdk.org, matua@amazon.com, rk@semihalf.com,
 stable@dpdk.org
Date: Fri, 14 Dec 2018 14:18:35 +0100
Message-Id: <20181214131846.22439-10-mk@semihalf.com>
X-Mailer: git-send-email 2.14.1
In-Reply-To: <20181214131846.22439-1-mk@semihalf.com>
References: <20181214131846.22439-1-mk@semihalf.com>
Subject: [dpdk-dev] [PATCH 09/20] net/ena: destroy queues if start failed
List-Id: DPDK patches and discussions

From: Rafal Kozik

If the start function fails, the previously created queues have to be
removed.

ena_queue_restart_all() and ena_queue_restart() are renamed to
ena_queue_start_all() and ena_queue_start().

ena_free_io_queues_all() is renamed to ena_queue_stop_all().
Fixes: df238f84c0a2 ("net/ena: recreate HW IO rings on start and stop")
Cc: stable@dpdk.org

Signed-off-by: Rafal Kozik
Acked-by: Michal Krawczyk
---
 drivers/net/ena/ena_ethdev.c | 95 ++++++++++++++++++++++++++++----------------
 1 file changed, 60 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 5f7dec086..deb97151d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -244,10 +244,12 @@ static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,
 			   int wait_to_complete);
 static int ena_create_io_queue(struct ena_ring *ring);
-static void ena_free_io_queues_all(struct ena_adapter *adapter);
-static int ena_queue_restart(struct ena_ring *ring);
-static int ena_queue_restart_all(struct rte_eth_dev *dev,
-				 enum ena_ring_type ring_type);
+static void ena_queue_stop(struct ena_ring *ring);
+static void ena_queue_stop_all(struct rte_eth_dev *dev,
+			      enum ena_ring_type ring_type);
+static int ena_queue_start(struct ena_ring *ring);
+static int ena_queue_start_all(struct rte_eth_dev *dev,
+			       enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
 static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info);
@@ -805,9 +807,6 @@ static void ena_tx_queue_release(void *queue)
 	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
 		"API violation");
 
-	/* Free all bufs */
-	ena_tx_queue_release_bufs(ring);
-
 	/* Free ring resources */
 	if (ring->push_buf_intermediate_buf)
 		rte_free(ring->push_buf_intermediate_buf);
@@ -872,8 +871,8 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static int ena_queue_restart_all(struct rte_eth_dev *dev,
-				 enum ena_ring_type ring_type)
+static int ena_queue_start_all(struct rte_eth_dev *dev,
+			       enum ena_ring_type ring_type)
 {
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
@@ -901,18 +900,25 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
 					      "Inconsistent state of tx queues\n");
 			}
 
-			rc = ena_queue_restart(&queues[i]);
+			rc = ena_queue_start(&queues[i]);
 
 			if (rc) {
 				PMD_INIT_LOG(ERR,
-					     "failed to restart queue %d type(%d)",
+					     "failed to start queue %d type(%d)",
 					     i, ring_type);
-				return rc;
+				goto err;
 			}
 		}
 	}
 
 	return 0;
+
+err:
+	while (i--)
+		if (queues[i].configured)
+			ena_queue_stop(&queues[i]);
+
+	return rc;
 }
 
 static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
@@ -1101,19 +1107,19 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		return rc;
 
-	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
+	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
 	if (rc)
 		return rc;
 
-	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
+	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
 	if (rc)
-		return rc;
+		goto err_start_tx;
 
 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
 	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
 		rc = ena_rss_init_default(adapter);
 		if (rc)
-			return rc;
+			goto err_rss_init;
 	}
 
 	ena_stats_restart(dev);
@@ -1128,6 +1134,12 @@ static int ena_start(struct rte_eth_dev *dev)
 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
 
 	return 0;
+
+err_rss_init:
+	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
+err_start_tx:
+	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
+	return rc;
 }
 
 static void ena_stop(struct rte_eth_dev *dev)
@@ -1136,7 +1148,8 @@ static void ena_stop(struct rte_eth_dev *dev)
 		(struct ena_adapter *)(dev->data->dev_private);
 
 	rte_timer_stop_sync(&adapter->timer_wd);
-	ena_free_io_queues_all(adapter);
+	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
+	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
 
 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
 }
 
@@ -1199,36 +1212,46 @@ static int ena_create_io_queue(struct ena_ring *ring)
 	return 0;
 }
 
-static void ena_free_io_queues_all(struct ena_adapter *adapter)
+static void ena_queue_stop(struct ena_ring *ring)
 {
-	struct rte_eth_dev *eth_dev = adapter->rte_dev;
-	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int i;
-	uint16_t ena_qid;
-	uint16_t nb_rxq = eth_dev->data->nb_rx_queues;
-	uint16_t nb_txq = eth_dev->data->nb_tx_queues;
-
-	for (i = 0; i < nb_txq; ++i) {
-		ena_qid = ENA_IO_TXQ_IDX(i);
-		ena_com_destroy_io_queue(ena_dev, ena_qid);
+	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
 
-		ena_tx_queue_release_bufs(&adapter->tx_ring[i]);
+	if (ring->type == ENA_RING_TYPE_RX) {
+		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
+		ena_rx_queue_release_bufs(ring);
+	} else {
+		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
+		ena_tx_queue_release_bufs(ring);
 	}
+}
 
-	for (i = 0; i < nb_rxq; ++i) {
-		ena_qid = ENA_IO_RXQ_IDX(i);
-		ena_com_destroy_io_queue(ena_dev, ena_qid);
+static void ena_queue_stop_all(struct rte_eth_dev *dev,
+			      enum ena_ring_type ring_type)
+{
+	struct ena_adapter *adapter =
+		(struct ena_adapter *)(dev->data->dev_private);
+	struct ena_ring *queues = NULL;
+	uint16_t nb_queues, i;
 
-		ena_rx_queue_release_bufs(&adapter->rx_ring[i]);
+	if (ring_type == ENA_RING_TYPE_RX) {
+		queues = adapter->rx_ring;
+		nb_queues = dev->data->nb_rx_queues;
+	} else {
+		queues = adapter->tx_ring;
+		nb_queues = dev->data->nb_tx_queues;
 	}
+
+	for (i = 0; i < nb_queues; ++i)
+		if (queues[i].configured)
+			ena_queue_stop(&queues[i]);
 }
 
-static int ena_queue_restart(struct ena_ring *ring)
+static int ena_queue_start(struct ena_ring *ring)
 {
 	int rc, bufs_num;
 
 	ena_assert_msg(ring->configured == 1,
-		       "Trying to restart unconfigured queue\n");
+		       "Trying to start unconfigured queue\n");
 
 	rc = ena_create_io_queue(ring);
 	if (rc) {
@@ -1245,6 +1268,8 @@ static int ena_queue_restart(struct ena_ring *ring)
 	bufs_num = ring->ring_size - 1;
 	rc = ena_populate_rx_queue(ring, bufs_num);
 	if (rc != bufs_num) {
+		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
+					 ENA_IO_RXQ_IDX(ring->id));
 		PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
 		return ENA_COM_FAULT;
 	}
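
The core of the change is the rollback in ena_queue_start_all(): queues are
started one by one and, if any of them fails, the ones already started are
stopped again in reverse order before the error is propagated. For
illustration only, here is a minimal, self-contained C sketch of that
pattern; the queue type and the queue_start()/queue_stop()/queue_start_all()
helpers below are simplified stand-ins for the driver's ena_ring and
ena_queue_start()/ena_queue_stop(), not the actual PMD code.

/*
 * Sketch of the "start all, unwind on failure" pattern used above.
 * queue_start() is rigged to fail for queue 2 to exercise the error path.
 */
#include <stdio.h>
#include <stdbool.h>

#define NB_QUEUES 4

struct queue {
	bool configured;
	bool started;
};

/* Hypothetical stand-in for ena_queue_start(); fails for idx == 2. */
static int queue_start(struct queue *q, int idx)
{
	if (idx == 2)
		return -1;
	q->started = true;
	return 0;
}

/* Hypothetical stand-in for ena_queue_stop(). */
static void queue_stop(struct queue *q)
{
	q->started = false;
}

static int queue_start_all(struct queue *queues, int nb_queues)
{
	int i, rc = 0;

	for (i = 0; i < nb_queues; i++) {
		if (!queues[i].configured)
			continue;
		rc = queue_start(&queues[i], i);
		if (rc) {
			fprintf(stderr, "failed to start queue %d\n", i);
			goto err;
		}
	}

	return 0;

err:
	/* Roll back: stop every queue started before the failing one. */
	while (i--)
		if (queues[i].configured)
			queue_stop(&queues[i]);

	return rc;
}

int main(void)
{
	struct queue queues[NB_QUEUES] = {
		{ .configured = true }, { .configured = true },
		{ .configured = true }, { .configured = true },
	};

	if (queue_start_all(queues, NB_QUEUES) != 0)
		printf("start failed, all started queues rolled back\n");

	return 0;
}

Running the sketch prints the rollback message, mirroring what ena_start()
now does when ena_queue_start_all() fails for one of the ring types.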