From patchwork Mon Dec  3 07:06:52 2018
X-Patchwork-Submitter: Wenzhuo Lu <wenzhuo.lu@intel.com>
X-Patchwork-Id: 48480
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: Wenzhuo Lu <wenzhuo.lu@intel.com>, Qiming Yang <qiming.yang@intel.com>,
 Xiaoyun Li <xiaoyun.li@intel.com>, Jingjing Wu <jingjing.wu@intel.com>
Date: Mon, 3 Dec 2018 15:06:52 +0800
Message-Id: <1543820821-108122-12-git-send-email-wenzhuo.lu@intel.com>
In-Reply-To: <1543820821-108122-1-git-send-email-wenzhuo.lu@intel.com>
References: <1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>
 <1543820821-108122-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH v2 11/20] net/ice: support RX queue interruption

Add the below ops:
rx_queue_intr_enable
rx_queue_intr_disable

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
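For context: these two ops back the generic ethdev calls
rte_eth_dev_rx_intr_enable()/rte_eth_dev_rx_intr_disable(), which dispatch
through dev_ops. A minimal application-side sketch, assuming a started port
with Rx interrupts configured; port_id and queue_id are placeholder
variables, error handling omitted:

  #include <rte_ethdev.h>

  /* Arm the interrupt: the next packet on (port_id, queue_id) fires the
   * queue's MSI-X vector (reaches ice_rx_queue_intr_enable() below). */
  rte_eth_dev_rx_intr_enable(port_id, queue_id);

  /* ... block on the queue's event fd, e.g. with rte_epoll_wait() ... */

  /* Disarm again while the queue is being busy-polled. */
  rte_eth_dev_rx_intr_disable(port_id, queue_id);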
"queue %d is binding to vect %d", + base_queue + i, msix_vect); + /* set ITR0 value */ + ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10); + ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val); + ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx); + } +} + +static void +ice_vsi_queues_bind_intr(struct ice_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint16_t msix_vect = vsi->msix_intr; + uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); + uint16_t queue_idx = 0; + int record = 0; + int i; + + /* clear Rx/Tx queue interrupt */ + for (i = 0; i < vsi->nb_used_qps; i++) { + ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); + ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); + } + + /* PF bind interrupt */ + if (rte_intr_dp_is_en(intr_handle)) { + queue_idx = 0; + record = 1; + } + + for (i = 0; i < vsi->nb_used_qps; i++) { + if (nb_msix <= 1) { + if (!rte_intr_allow_others(intr_handle)) + msix_vect = ICE_MISC_VEC_ID; + + /* uio mapping all queue to one msix_vect */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, + vsi->nb_used_qps - i); + + for (; !!record && i < vsi->nb_used_qps; i++) + intr_handle->intr_vec[queue_idx + i] = + msix_vect; + break; + } + + /* vfio 1:1 queue/msix_vect mapping */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, 1); + + if (!!record) + intr_handle->intr_vec[queue_idx + i] = msix_vect; + + msix_vect++; + nb_msix--; + } +} + +static void +ice_vsi_enable_queues_intr(struct ice_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle)) + for (i = 0; i < vsi->nb_used_qps; i++) { + msix_intr = vsi->msix_intr + i; + ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), + GLINT_DYN_CTL_INTENA_M | + GLINT_DYN_CTL_CLEARPBA_M | + GLINT_DYN_CTL_ITR_INDX_M | + GLINT_DYN_CTL_WB_ON_ITR_M); + } + else + ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), + GLINT_DYN_CTL_INTENA_M | + GLINT_DYN_CTL_CLEARPBA_M | + GLINT_DYN_CTL_ITR_INDX_M | + GLINT_DYN_CTL_WB_ON_ITR_M); +} + +static void +ice_vsi_disable_queues_intr(struct ice_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint16_t msix_intr, i; + + /* disable interrupt and also clear all the exist config */ + for (i = 0; i < vsi->nb_qps; i++) { + ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); + ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); + rte_wmb(); + } + + if (rte_intr_allow_others(intr_handle)) + /* vfio-pci */ + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), + GLINT_DYN_CTL_WB_ON_ITR_M); + } + else + /* igb_uio */ + ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M); +} + +static int +ice_rxq_intr_setup(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_vsi *vsi = pf->main_vsi; + uint32_t intr_vector = 0; + + rte_intr_disable(intr_handle); 
@@ -1436,6 +1622,10 @@ static int ice_init_rss(struct ice_pf *pf)
 		goto rx_err;
 	}
 
+	/* enable Rx interrupt and map Rx queues to interrupt vectors */
+	if (ice_rxq_intr_setup(dev))
+		return -EIO;
+
 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
@@ -1470,6 +1660,7 @@ static int ice_init_rss(struct ice_pf *pf)
 {
 	struct rte_eth_dev_data *data = dev->data;
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_vsi *main_vsi = pf->main_vsi;
 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint16_t i;
@@ -1488,6 +1679,9 @@ static int ice_init_rss(struct ice_pf *pf)
 	for (i = 0; i < data->nb_tx_queues; i++)
 		ice_tx_queue_stop(dev, i);
 
+	/* disable all queue interrupts */
+	ice_vsi_disable_queues_intr(main_vsi);
+
 	/* Clear all queues and release mbufs */
 	ice_clear_queues(dev);
@@ -2338,6 +2532,46 @@ static int ice_macaddr_set(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
+				    uint16_t queue_id)
+{
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t val;
+	uint16_t msix_intr;
+
+	ICE_PROC_SECONDARY_CHECK;
+
+	msix_intr = intr_handle->intr_vec[queue_id];
+
+	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+	      GLINT_DYN_CTL_ITR_INDX_M;
+	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
+
+	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
+	rte_intr_enable(&pci_dev->intr_handle);
+
+	return 0;
+}
+
+static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
+				     uint16_t queue_id)
+{
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint16_t msix_intr;
+
+	ICE_PROC_SECONDARY_CHECK;
+
+	msix_intr = intr_handle->intr_vec[queue_id];
+
+	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
+
+	return 0;
+}
+
 static int
 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi,
 		      struct ice_vsi_vlan_pvid_info *info)
 {
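For reference, an interrupt-driven receive loop in the style of
examples/l3fwd-power that exercises both new ops; a sketch only, assuming a
port configured as in the note above (port_id and queue_id are placeholders,
error handling omitted):

  #include <rte_ethdev.h>
  #include <rte_interrupts.h>
  #include <rte_mbuf.h>

  struct rte_mbuf *pkts[32];
  struct rte_epoll_event ev;

  /* register the queue's interrupt event fd with this thread's epoll */
  rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
  			    RTE_INTR_EVENT_ADD, NULL);

  for (;;) {
  	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

  	if (nb > 0) {
  		/* ... process nb packets ... */
  		continue;
  	}
  	/* ring is empty: arm the queue interrupt and sleep until traffic */
  	rte_eth_dev_rx_intr_enable(port_id, queue_id);
  	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
  	rte_eth_dev_rx_intr_disable(port_id, queue_id);
  }

A production loop would re-poll the ring once more after arming the
interrupt to close the race with a packet arriving in between, as
l3fwd-power does. Note also the two mapping paths in the patch: under
vfio-pci each queue gets its own MSI-X vector (the 1:1 path), while under
igb_uio all queues share vector 0, which is why the disable path writes
GLINT_DYN_CTL(0).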