From patchwork Fri Jun 5 20:17:34 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 70883
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, qiming.yang@intel.com, john.mcnamara@intel.com,
 marko.kovacevic@intel.com
Date: Fri, 5 Jun 2020 20:17:34 +0000
Message-Id: <20200605201737.33766-10-ting.xu@intel.com>
In-Reply-To: <20200605201737.33766-1-ting.xu@intel.com>
References: <20200605201737.33766-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v1 09/12] net/ice: add queue start and stop for DCF

From: Qi Zhang

Add queue start and stop in DCF. Support queue enable and disable
through the virtual channel. Add support for Rx queue mbuf allocation
and queue reset.

Signed-off-by: Qi Zhang
---
 drivers/net/ice/ice_dcf.c        |  57 ++++++
 drivers/net/ice/ice_dcf.h        |   3 +-
 drivers/net/ice/ice_dcf_ethdev.c | 309 +++++++++++++++++++++++++++++++
 3 files changed, 368 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index d864ae894..56b8c0d25 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -940,3 +940,60 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 	rte_free(map_info);
 	return err;
 }
+
+int
+ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
+{
+	struct virtchnl_queue_select queue_select;
+	struct dcf_virtchnl_cmd args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = hw->vsi_res->vsi_id;
+	if (rx)
+		queue_select.rx_queues |= 1 << qid;
+	else
+		queue_select.tx_queues |= 1 << qid;
+
+	memset(&args, 0, sizeof(args));
+	if (on)
+		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
+	else
+		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
+
+	args.req_msg = (u8 *)&queue_select;
+	args.req_msglen = sizeof(queue_select);
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+	return err;
+}
+
+int
+ice_dcf_disable_queues(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_queue_select queue_select;
+	struct dcf_virtchnl_cmd args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = hw->vsi_res->vsi_id;
+
+	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
+	args.req_msg = (u8 *)&queue_select;
+	args.req_msglen = sizeof(queue_select);
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES");
+		return err;
+	}
+	return 0;
+}
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 9470d1df7..68e1661c0 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -70,5 +70,6 @@ void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 int ice_dcf_init_rss(struct ice_dcf_hw *hw);
 int ice_dcf_configure_queues(struct ice_dcf_hw *hw);
 int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
-
+int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);
+int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 9605fb8ed..59113fc4b 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -226,6 +226,259 @@ static int ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 	return 0;
 }
+static int
+alloc_rxq_mbufs(struct ice_rx_queue *rxq)
+{
+	volatile union ice_32b_rx_flex_desc *rxd;
+	struct rte_mbuf *mbuf = NULL;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		mbuf = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!mbuf)) {
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+			return -ENOMEM;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+		rxd = &rxq->rx_ring[i];
+		rxd->read.pkt_addr = dma_addr;
+		rxd->read.hdr_addr = 0;
+		rxd->read.rsvd1 = 0;
+		rxd->read.rsvd2 = 0;
+
+		rxq->sw_ring[i].mbuf = (void *)mbuf;
+	}
+
+	return 0;
+}
+
+static int
+ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct iavf_hw *hw = &ad->real_hw.avf;
+	struct ice_rx_queue *rxq;
+	int err = 0;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	err = alloc_rxq_mbufs(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+		return err;
+	}
+
+	rte_wmb();
+
+	/* Init the RX tail register. */
+	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	IAVF_WRITE_FLUSH(hw);
+
+	/* Ready to switch the queue on */
+	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+			    rx_queue_id);
+	else
+		dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return err;
+}
+
+static inline void
+reset_rx_queue(struct ice_rx_queue *rxq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (!rxq)
+		return;
+
+	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
+
+	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+	for (i = 0; i < ICE_RX_MAX_BURST; i++)
+		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+
+	/* for rx bulk */
+	rxq->rx_nb_avail = 0;
+	rxq->rx_next_avail = 0;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+}
+
+static inline void
+reset_tx_queue(struct ice_tx_queue *txq)
+{
+	struct ice_tx_entry *txe;
+	uint32_t i, size;
+	uint16_t prev;
+
+	if (!txq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+		return;
+	}
+
+	txe = txq->sw_ring;
+	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
+	for (i = 0; i < size; i++)
+		((volatile char *)txq->tx_ring)[i] = 0;
+
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i].cmd_type_offset_bsz =
+			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_tail = 0;
+	txq->nb_tx_used = 0;
+
+	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+	txq->nb_tx_free = txq->nb_tx_desc - 1;
+
+	txq->tx_next_dd = txq->tx_rs_thresh - 1;
+	txq->tx_next_rs = txq->tx_rs_thresh - 1;
+}
+
+static int
+ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &ad->real_hw;
+	struct ice_rx_queue *rxq;
+	int err;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxq->rx_rel_mbufs(rxq);
+	reset_rx_queue(rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+static int
+ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct iavf_hw *hw = &ad->real_hw.avf;
+	struct ice_tx_queue *txq;
+	int err = 0;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Init the TX tail register. */
+	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
+	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+	IAVF_WRITE_FLUSH(hw);
+
+	/* Ready to switch the queue on */
+	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
+
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+			    tx_queue_id);
+	else
+		dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return err;
+}
+
+static int
+ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &ad->real_hw;
+	struct ice_tx_queue *txq;
+	int err;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	txq->tx_rel_mbufs(txq);
+	reset_tx_queue(txq);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+static int
+ice_dcf_start_queues(struct rte_eth_dev *dev)
+{
+	struct ice_rx_queue *rxq;
+	struct ice_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq->tx_deferred_start)
+			continue;
+		if (ice_dcf_tx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
+			return -1;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq->rx_deferred_start)
+			continue;
+		if (ice_dcf_rx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start queue %u", i);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
 static int
 ice_dcf_dev_start(struct rte_eth_dev *dev)
 {
@@ -266,20 +519,72 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
+	if (dev->data->dev_conf.intr_conf.rxq != 0) {
+		rte_intr_disable(intr_handle);
+		rte_intr_enable(intr_handle);
+	}
+
+	ret = ice_dcf_start_queues(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to enable queues");
+		return ret;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 	return 0;
 }
+static void
+ice_dcf_stop_queues(struct rte_eth_dev *dev)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &ad->real_hw;
+	struct ice_rx_queue *rxq;
+	struct ice_tx_queue *txq;
+	int ret, i;
+
+	/* Stop all queues */
+	ret = ice_dcf_disable_queues(hw);
+	if (ret)
+		PMD_DRV_LOG(WARNING, "Failed to stop queues");
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		txq->tx_rel_mbufs(txq);
+		reset_tx_queue(txq);
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		rxq->rx_rel_mbufs(rxq);
+		reset_rx_queue(rxq);
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+}
+
 static void
 ice_dcf_dev_stop(struct rte_eth_dev *dev)
 {
 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 	struct ice_adapter *ad = &dcf_ad->parent;
 	if (ad->pf.adapter_stopped == 1)
 		return;
+	ice_dcf_stop_queues(dev);
+
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 }
@@ -476,6 +781,10 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.tx_queue_setup = ice_tx_queue_setup,
 	.rx_queue_release = ice_rx_queue_release,
 	.tx_queue_release = ice_tx_queue_release,
+	.rx_queue_start = ice_dcf_rx_queue_start,
+	.tx_queue_start = ice_dcf_tx_queue_start,
+	.rx_queue_stop = ice_dcf_rx_queue_stop,
+	.tx_queue_stop = ice_dcf_tx_queue_stop,
 	.link_update = ice_dcf_link_update,
 	.stats_get = ice_dcf_stats_get,
 	.stats_reset = ice_dcf_stats_reset,
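
For illustration only (not part of the patch): the four new ops wired into
ice_dcf_eth_dev_ops are the callbacks behind the generic ethdev per-queue
start/stop API. A minimal sketch of how an application might exercise them
on a DCF port, assuming port_id refers to a configured and started port
whose queue 0 was set up with deferred_start = 1 in its rte_eth_rxconf and
rte_eth_txconf; the helper name below is hypothetical:

	/* Illustrative usage sketch -- not from this patch. */
	#include <rte_ethdev.h>

	static int
	bring_up_queue0(uint16_t port_id)
	{
		int ret;

		/* Dispatches to the PMD's .tx_queue_start callback,
		 * i.e. ice_dcf_tx_queue_start() on a DCF port.
		 */
		ret = rte_eth_dev_tx_queue_start(port_id, 0);
		if (ret != 0)
			return ret;

		/* Dispatches to .rx_queue_start (ice_dcf_rx_queue_start),
		 * which allocates the ring mbufs before enabling the
		 * queue over the virtual channel.
		 */
		ret = rte_eth_dev_rx_queue_start(port_id, 0);
		if (ret != 0) {
			/* Roll back the already-started Tx queue. */
			rte_eth_dev_tx_queue_stop(port_id, 0);
			return ret;
		}

		return 0;
	}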