From patchwork Tue Jun 23 02:38:32 2020
X-Patchwork-Submitter: "Xu, Ting" <ting.xu@intel.com>
X-Patchwork-Id: 71992
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, qiming.yang@intel.com, jingjing.wu@intel.com,
	beilei.xing@intel.com, marko.kovacevic@intel.com,
	john.mcnamara@intel.com, Ting Xu <ting.xu@intel.com>
Date: Tue, 23 Jun 2020 10:38:32 +0800
Message-Id: <20200623023838.5608-7-ting.xu@intel.com>
In-Reply-To: <20200623023838.5608-1-ting.xu@intel.com>
References: <20200605201737.33766-1-ting.xu@intel.com>
	<20200623023838.5608-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v5 06/12] net/ice: add Rx queue init in DCF

From: Qi Zhang <qi.z.zhang@intel.com>

Enable Rx queue initialization during device start in DCF.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 drivers/net/ice/ice_dcf.h        |  1 +
 drivers/net/ice/ice_dcf_ethdev.c | 83 ++++++++++++++++++++++++++++++++
 2 files changed, 84 insertions(+)

diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 152266e3c..dcb2a0283 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -53,6 +53,7 @@ struct ice_dcf_hw {
 	uint8_t *rss_lut;
 	uint8_t *rss_key;
 	uint64_t supported_rxdid;
+	uint16_t num_queue_pairs;
 };
 
 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 676a504fd..5afd07f96 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -42,14 +42,97 @@ ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
 	return 0;
 }
 
+static int
+ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
+{
+	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
+	uint16_t buf_size, max_pkt_len, len;
+
+	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_hdr_len = 0;
+	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
+	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+	/* Check if the jumbo frame and maximum packet length are set
+	 * correctly.
+	 */
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
+		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is enabled",
+				    (uint32_t)RTE_ETHER_MAX_LEN,
+				    (uint32_t)ICE_FRAME_SIZE_MAX);
+			return -EINVAL;
+		}
+	} else {
+		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
+		    max_pkt_len > RTE_ETHER_MAX_LEN) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled",
+				    (uint32_t)RTE_ETHER_MIN_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN);
+			return -EINVAL;
+		}
+	}
+
+	rxq->max_pkt_len = max_pkt_len;
+	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
+		dev_data->scattered_rx = 1;
+	}
+
+	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
+	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	IAVF_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int
+ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
+{
+	struct ice_rx_queue **rxq =
+		(struct ice_rx_queue **)dev->data->rx_queues;
+	int i, ret;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!rxq[i] || !rxq[i]->q_set)
+			continue;
+		ret = ice_dcf_init_rxq(dev, rxq[i]);
+		if (ret)
+			return ret;
+	}
+
+	ice_set_rx_function(dev);
+	ice_set_tx_function(dev);
+
+	return 0;
+}
+
 static int
 ice_dcf_dev_start(struct rte_eth_dev *dev)
 {
 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
 	struct ice_adapter *ad = &dcf_ad->parent;
+	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
+	int ret;
 
 	ad->pf.adapter_stopped = 0;
 
+	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+				      dev->data->nb_tx_queues);
+
+	ret = ice_dcf_init_rx_queues(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to init queues");
+		return ret;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 
 	return 0;
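
To illustrate the sizing logic in ice_dcf_init_rxq() in isolation, here is a
minimal standalone sketch (not part of the patch). The constants are
hard-coded stand-ins that mirror the DPDK/ice definitions of the time, so it
builds without DPDK headers; treat the exact values as illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the DPDK/ice constants used above */
#define RTE_PKTMBUF_HEADROOM   128  /* default mbuf headroom */
#define RTE_ETHER_MIN_LEN       64
#define RTE_ETHER_MAX_LEN     1518
#define ICE_FRAME_SIZE_MAX    9728
#define ICE_RLAN_CTX_DBUF_S      7  /* Rx buffer length unit: 128 bytes */
#define ICE_SUPPORT_CHAIN_NUM    5  /* max mbufs chained per Rx packet */

/* RTE_ALIGN rounds up to the next multiple of align (a power of two) */
#define ALIGN_CEIL(v, align) (((v) + (align) - 1) & ~((align) - 1))

static int
rx_sizing(uint16_t data_room, uint16_t max_rx_pkt_len, int jumbo,
	  uint16_t *rx_buf_len, uint16_t *max_pkt_len)
{
	uint16_t buf_size = data_room - RTE_PKTMBUF_HEADROOM;
	uint16_t len;

	/* HW programs the Rx buffer length in 128-byte granules */
	*rx_buf_len = ALIGN_CEIL(buf_size, 1 << ICE_RLAN_CTX_DBUF_S);
	/* a packet may span up to ICE_SUPPORT_CHAIN_NUM buffers */
	len = ICE_SUPPORT_CHAIN_NUM * *rx_buf_len;
	*max_pkt_len = len < max_rx_pkt_len ? len : max_rx_pkt_len;

	if (jumbo)
		return (*max_pkt_len > RTE_ETHER_MAX_LEN &&
			*max_pkt_len <= ICE_FRAME_SIZE_MAX) ? 0 : -1;
	return (*max_pkt_len >= RTE_ETHER_MIN_LEN &&
		*max_pkt_len <= RTE_ETHER_MAX_LEN) ? 0 : -1;
}

int main(void)
{
	uint16_t buf_len, pkt_len;

	/* 2176 = 2048-byte data room + 128-byte headroom, the common
	 * RTE_MBUF_DEFAULT_BUF_SIZE; request 9000-byte jumbo frames.
	 */
	if (rx_sizing(2176, 9000, 1, &buf_len, &pkt_len) == 0)
		printf("rx_buf_len=%u max_pkt_len=%u\n", buf_len, pkt_len);
	return 0;
}

With the default 2048-byte data room this prints rx_buf_len=2048
max_pkt_len=9000, and the 9000-byte frame passes the jumbo check since it
lies between RTE_ETHER_MAX_LEN and ICE_FRAME_SIZE_MAX.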
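
For context on when this path runs: ice_dcf_dev_start() is the PMD's
dev_start callback, so the new ice_dcf_init_rx_queues() call executes when an
application starts the port through the standard ethdev API. A hypothetical
application-side sequence is sketched below (port number, pool sizing, and
descriptor counts are assumptions; error handling trimmed):

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

int main(int argc, char **argv)
{
	struct rte_eth_conf port_conf = {0};
	struct rte_mempool *mp;
	uint16_t port_id = 0;	/* assumes the DCF port probed as port 0 */

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	mp = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_socket_id());
	if (mp == NULL)
		return -1;

	/* one Rx and one Tx queue; these counts feed num_queue_pairs */
	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) != 0)
		return -1;

	/* queue setup marks the queue q_set; HW init is deferred to start */
	if (rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				   NULL, mp) != 0)
		return -1;
	if (rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				   NULL) != 0)
		return -1;

	/* invokes the PMD's dev_start -> ice_dcf_dev_start(), which now
	 * runs ice_dcf_init_rx_queues() before reporting link up
	 */
	if (rte_eth_dev_start(port_id) != 0)
		return -1;

	return 0;
}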