From patchwork Sat May 27 08:22:04 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wei Dai X-Patchwork-Id: 24742 Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id 8FE7F378B; Sat, 27 May 2017 10:31:25 +0200 (CEST) Received: from mga02.intel.com (mga02.intel.com [134.134.136.20]) by dpdk.org (Postfix) with ESMTP id C9A72374C for ; Sat, 27 May 2017 10:31:20 +0200 (CEST) Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 27 May 2017 01:31:20 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos; i="5.38,400,1491289200"; d="scan'208"; a="1135329743" Received: from dpdk6.bj.intel.com ([172.16.182.81]) by orsmga001.jf.intel.com with ESMTP; 27 May 2017 01:31:18 -0700 From: Wei Dai To: wenzhuo.lu@intel.com, konstantin.ananyev@intel.com, helin.zhang@intel.com, jingjing.wu@intel.com Cc: dev@dpdk.org, Wei Dai Date: Sat, 27 May 2017 16:22:04 +0800 Message-Id: <1495873329-43303-3-git-send-email-wei.dai@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1495873329-43303-1-git-send-email-wei.dai@intel.com> References: <1495873329-43303-1-git-send-email-wei.dai@intel.com> Subject: [dpdk-dev] [PATCH 2/7] ethdev: add support of restoration of queue state X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" As dev->dev_ops->dev_start may change dev->data->rx_queue_state[] and dev->data->tx_queue_state[], this patch adds rxq_restore_state[] and txq_restore_state[] for restoration. In the restoration process, the PMD should start or stop each Rx or Tx queue according to dev->data->rxq_restore_state[] or dev->data->txq_restore_state[]. 
Signed-off-by: Wei Dai --- lib/librte_ether/rte_ethdev.c | 87 +++++++++++++++++++++++++++++++++++++++---- lib/librte_ether/rte_ethdev.h | 5 ++- 2 files changed, 83 insertions(+), 9 deletions(-) diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index a5a9519..97c0044 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -504,6 +504,7 @@ int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id) { struct rte_eth_dev *dev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -522,14 +523,18 @@ rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id) return 0; } - return dev->dev_ops->rx_queue_start(dev, rx_queue_id); - + ret = dev->dev_ops->rx_queue_start(dev, rx_queue_id); + if (!ret) + dev->data->rxq_restore_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + return ret; } int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id) { struct rte_eth_dev *dev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -548,14 +553,18 @@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id) return 0; } - return dev->dev_ops->rx_queue_stop(dev, rx_queue_id); - + ret = dev->dev_ops->rx_queue_stop(dev, rx_queue_id); + if (!ret) + dev->data->rxq_restore_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + return ret; } int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id) { struct rte_eth_dev *dev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -574,14 +583,18 @@ rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id) return 0; } - return dev->dev_ops->tx_queue_start(dev, tx_queue_id); - + ret = dev->dev_ops->tx_queue_start(dev, tx_queue_id); + if (!ret) + dev->data->txq_restore_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + return ret; } int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id) { struct rte_eth_dev *dev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -600,8 +613,11 
@@ rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id) return 0; } - return dev->dev_ops->tx_queue_stop(dev, tx_queue_id); - + ret = dev->dev_ops->tx_queue_stop(dev, tx_queue_id); + if (!ret) + dev->data->txq_restore_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + return ret; } static int @@ -863,6 +879,50 @@ _rte_eth_dev_reset(struct rte_eth_dev *dev) } static void +rte_eth_dev_rx_queue_restore(uint8_t port_id, uint16_t queue_id) +{ + struct rte_eth_dev *dev; + uint16_t q = queue_id; + + dev = &rte_eth_devices[port_id]; + + if (dev->data->in_restoration == 0) { + dev->data->rxq_restore_state[q] = dev->data->rx_queue_state[q]; + return; + } + + if (dev->data->rxq_restore_state[q] != dev->data->rx_queue_state[q]) { + if (dev->data->rxq_restore_state[q] + == RTE_ETH_QUEUE_STATE_STARTED) + rte_eth_dev_rx_queue_start(port_id, q); + else + rte_eth_dev_rx_queue_stop(port_id, q); + } +} + +static void +rte_eth_dev_tx_queue_restore(uint8_t port_id, uint16_t queue_id) +{ + struct rte_eth_dev *dev; + uint16_t q = queue_id; + + dev = &rte_eth_devices[port_id]; + + if (dev->data->in_restoration == 0) { + dev->data->txq_restore_state[q] = dev->data->tx_queue_state[q]; + return; + } + + if (dev->data->txq_restore_state[q] != dev->data->tx_queue_state[q]) { + if (dev->data->txq_restore_state[q] + == RTE_ETH_QUEUE_STATE_STARTED) + rte_eth_dev_tx_queue_start(port_id, q); + else + rte_eth_dev_tx_queue_stop(port_id, q); + } +} + +static void rte_eth_dev_config_restore(uint8_t port_id) { struct rte_eth_dev *dev; @@ -871,6 +931,7 @@ rte_eth_dev_config_restore(uint8_t port_id) uint16_t i; uint32_t pool = 0; uint64_t pool_mask; + uint16_t q; dev = &rte_eth_devices[port_id]; @@ -915,6 +976,12 @@ rte_eth_dev_config_restore(uint8_t port_id) rte_eth_allmulticast_enable(port_id); else if (rte_eth_allmulticast_get(port_id) == 0) rte_eth_allmulticast_disable(port_id); + + for (q = 0; q < dev->data->nb_rx_queues; q++) + rte_eth_dev_rx_queue_restore(port_id, q); + for (q = 0; q 
< dev->data->nb_tx_queues; q++) + rte_eth_dev_tx_queue_restore(port_id, q); + } int @@ -3531,6 +3598,8 @@ rte_eth_dev_restore(uint8_t port_id) rte_eth_dev_stop(port_id); + dev->data->in_restoration = 1; + ret = dev->dev_ops->dev_uninit(dev); if (ret) return ret; @@ -3568,5 +3637,7 @@ rte_eth_dev_restore(uint8_t port_id) if (dev->dev_ops->dev_restore) ret = dev->dev_ops->dev_restore(dev); + dev->data->in_restoration = 0; + return ret; } diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h index 0298a1f..7a2ce07 100644 --- a/lib/librte_ether/rte_ethdev.h +++ b/lib/librte_ether/rte_ethdev.h @@ -1754,10 +1754,13 @@ struct rte_eth_dev_data { scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */ all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */ dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */ - lro : 1; /**< RX LRO is ON(1) / OFF(0) */ + lro : 1, /**< RX LRO is ON(1) / OFF(0) */ + in_restoration : 1; /**< In Restoration Yes(1) / NO(0) */ uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT]; + uint8_t rxq_restore_state[RTE_MAX_QUEUES_PER_PORT]; /** Queues state: STARTED(1) / STOPPED(0) */ uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT]; + uint8_t txq_restore_state[RTE_MAX_QUEUES_PER_PORT]; /** Queues state: STARTED(1) / STOPPED(0) */ uint32_t dev_flags; /**< Capabilities */ enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */