From patchwork Sun Apr 22 11:58:24 2018
X-Patchwork-Submitter: Qi Zhang <qi.z.zhang@intel.com>
X-Patchwork-Id: 38672
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Qi Zhang <qi.z.zhang@intel.com>
To: thomas@monjalon.net, ferruh.yigit@intel.com
Cc: konstantin.ananyev@intel.com, dev@dpdk.org, beilei.xing@intel.com,
 jingjing.wu@intel.com, wenzhuo.lu@intel.com,
 Qi Zhang <qi.z.zhang@intel.com>
Date: Sun, 22 Apr 2018 19:58:24 +0800
Message-Id: <20180422115824.105219-6-qi.z.zhang@intel.com>
X-Mailer: git-send-email 2.13.6
In-Reply-To: <20180422115824.105219-1-qi.z.zhang@intel.com>
References: <20180212045314.171616-1-qi.z.zhang@intel.com>
 <20180422115824.105219-1-qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v7 5/5] net/i40e: enable runtime queue setup

Expose the runtime queue configuration capability and enhance
i40e_dev_[rx|tx]_queue_setup to handle the case where the device has
already been started.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
v7:
- update i40e.ini.

v5:
- fix first tx queue check.

v4:
- fix rx/tx conflict check.
- no conflict check is needed for the first rx/tx queue at runtime setup.

v3:
- no queue start/stop in setup/release.
- return failure when the required rx/tx function conflicts with the
  existing setup.
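A usage note, not part of the diff: an application can probe the
capability this patch exposes before attempting queue setup on a started
port. Below is a minimal, untested sketch; it assumes the DEV_CAPA_*
flags introduced earlier in this series and an already configured
"port_id":

	struct rte_eth_dev_info dev_info;

	/* Probe the runtime setup capability advertised by the PMD. */
	rte_eth_dev_info_get(port_id, &dev_info);
	if ((dev_info.dev_capa & DEV_CAPA_RUNTIME_RX_QUEUE_SETUP) &&
	    (dev_info.dev_capa & DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) {
		/* Rx/Tx queues may be set up while the port is running. */
	} else {
		/* The port must be stopped before queue setup. */
	}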
 doc/guides/nics/features/i40e.ini |   2 +
 drivers/net/i40e/i40e_ethdev.c    |   4 +
 drivers/net/i40e/i40e_rxtx.c      | 183 +++++++++++++++++++++++++++++++++-----
 3 files changed, 166 insertions(+), 23 deletions(-)

diff --git a/doc/guides/nics/features/i40e.ini b/doc/guides/nics/features/i40e.ini
index e862712c9..5fb5bb296 100644
--- a/doc/guides/nics/features/i40e.ini
+++ b/doc/guides/nics/features/i40e.ini
@@ -52,3 +52,5 @@ x86-32 = Y
 x86-64 = Y
 ARMv8 = Y
 Power8 = Y
+Runtime Rx queue setup = Y
+Runtime Tx queue setup = Y
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 180ac7449..e329042df 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3244,6 +3244,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+	dev_info->dev_capa =
+		DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+		DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
 						sizeof(uint32_t);
 	dev_info->reta_size = pf->hash_lut_size;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index bc660596b..df855ff3a 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1706,6 +1706,75 @@ i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
 	return !((mandatory ^ requested) & supported);
 }
 
+static int
+i40e_dev_first_queue(uint16_t idx, void **queues, int num)
+{
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		if (i != idx && queues[i])
+			return 0;
+	}
+
+	return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+				struct i40e_rx_queue *rxq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int use_def_burst_func =
+		check_rx_burst_bulk_alloc_preconditions(rxq);
+	uint16_t buf_size =
+		(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+			   RTE_PKTMBUF_HEADROOM);
+	int use_scattered_rx =
+		((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+
+	if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to do RX queue initialization");
+		return -EINVAL;
+	}
+
+	if (i40e_dev_first_queue(rxq->queue_id,
+				 dev->data->rx_queues,
+				 dev->data->nb_rx_queues)) {
+		/**
+		 * If it is the first queue to setup,
+		 * set all flags to default and call
+		 * i40e_set_rx_function.
+		 */
+		ad->rx_bulk_alloc_allowed = true;
+		ad->rx_vec_allowed = true;
+		dev->data->scattered_rx = use_scattered_rx;
+		if (use_def_burst_func)
+			ad->rx_bulk_alloc_allowed = false;
+		i40e_set_rx_function(dev);
+		return 0;
+	}
+
+	/* check bulk alloc conflict */
+	if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+		PMD_DRV_LOG(ERR, "Can't use default burst.");
+		return -EINVAL;
+	}
+	/* check scattered conflict */
+	if (!dev->data->scattered_rx && use_scattered_rx) {
+		PMD_DRV_LOG(ERR, "Scattered rx is required.");
+		return -EINVAL;
+	}
+	/* check vector conflict */
+	if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+		PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1834,25 +1903,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	i40e_reset_rx_queue(rxq);
 	rxq->q_set = TRUE;
-	dev->data->rx_queues[queue_idx] = rxq;
-
-	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
-	if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "satisfied. Rx Burst Bulk Alloc function will be "
-			     "used on port=%d, queue=%d.",
-			     rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
-	} else {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "not satisfied, Scattered Rx is requested, "
-			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
-			     "not enabled on port=%d, queue=%d.",
-			     rxq->port_id, rxq->queue_id);
-		ad->rx_bulk_alloc_allowed = false;
-	}
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->enabled_tc & (1 << i)))
@@ -1867,6 +1917,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		rxq->dcb_tc = i;
 	}
 
+	if (dev->data->dev_started) {
+		if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+			i40e_dev_rx_queue_release(rxq);
+			return -EINVAL;
+		}
+	} else {
+		use_def_burst_func =
+			check_rx_burst_bulk_alloc_preconditions(rxq);
+		if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+			PMD_INIT_LOG(DEBUG,
+				     "Rx Burst Bulk Alloc Preconditions are "
+				     "satisfied. Rx Burst Bulk Alloc function will be "
+				     "used on port=%d, queue=%d.",
+				     rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+		} else {
+			PMD_INIT_LOG(DEBUG,
+				     "Rx Burst Bulk Alloc Preconditions are "
+				     "not satisfied, Scattered Rx is requested, "
+				     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+				     "not enabled on port=%d, queue=%d.",
+				     rxq->port_id, rxq->queue_id);
+			ad->rx_bulk_alloc_allowed = false;
+		}
+	}
+
+	dev->data->rx_queues[queue_idx] = rxq;
 
 	return 0;
 }
@@ -2012,6 +2090,55 @@ i40e_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
 	return !((mandatory ^ requested) & supported);
 }
 
+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+				struct i40e_tx_queue *txq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to do TX queue initialization");
+		return -EINVAL;
+	}
+
+	if (i40e_dev_first_queue(txq->queue_id,
+				 dev->data->tx_queues,
+				 dev->data->nb_tx_queues)) {
+		/**
+		 * If it is the first queue to setup,
+		 * set all flags to default and call
+		 * i40e_set_tx_function.
+		 */
+		ad->tx_simple_allowed = true;
+		ad->tx_vec_allowed = true;
+		i40e_set_tx_function_flag(dev, txq);
+		i40e_set_tx_function(dev);
+		return 0;
+	}
+
+	/* check vector conflict */
+	if (ad->tx_vec_allowed) {
+		if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+		    i40e_txq_vec_setup(txq)) {
+			PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+			return -EINVAL;
+		}
+	}
+	/* check simple tx conflict */
+	if (ad->tx_simple_allowed) {
+		if (((txq->txq_flags & I40E_SIMPLE_FLAGS) !=
+		     I40E_SIMPLE_FLAGS) ||
+		    txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
+			PMD_DRV_LOG(ERR, "Non-simple tx is required.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 int
 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -2194,10 +2321,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	i40e_reset_tx_queue(txq);
 	txq->q_set = TRUE;
-	dev->data->tx_queues[queue_idx] = txq;
-
-	/* Use a simple TX queue without offloads or multi segs if possible */
-	i40e_set_tx_function_flag(dev, txq);
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->enabled_tc & (1 << i)))
@@ -2212,6 +2335,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		txq->dcb_tc = i;
 	}
 
+	if (dev->data->dev_started) {
+		if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+			i40e_dev_tx_queue_release(txq);
+			return -EINVAL;
+		}
+	} else {
+		/**
+		 * Use a simple TX queue without offloads or
+		 * multi segs if possible
+		 */
+		i40e_set_tx_function_flag(dev, txq);
+	}
+	dev->data->tx_queues[queue_idx] = txq;
+
 	return 0;
 }
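
A follow-up usage note, again illustrative rather than part of the patch:
with runtime setup in place, an application can add and start an Rx queue
on a running port, provided the queue index was already accounted for in
rte_eth_dev_configure(). A minimal, untested sketch; "port_id", "new_qid",
"nb_desc" and "mb_pool" are placeholders. Since setup/release no longer
start or stop queues (see the v3 note above), the explicit queue start is
still the application's job:

	int ret;

	/* Set up the new queue while the port keeps running. */
	ret = rte_eth_rx_queue_setup(port_id, new_qid, nb_desc,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	/* Setup does not start the queue; do that explicitly. */
	if (ret == 0)
		ret = rte_eth_dev_rx_queue_start(port_id, new_qid);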