From patchwork Tue Aug 11 07:59:08 2020
X-Patchwork-Submitter: Steve Yang <stevex.yang@intel.com>
X-Patchwork-Id: 75383
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: SteveX Yang <stevex.yang@intel.com>
To: jingjing.wu@intel.com, beilei.xing@intel.com, dev@dpdk.org
Cc: qiming.yang@intel.com, SteveX Yang <stevex.yang@intel.com>
Date: Tue, 11 Aug 2020 07:59:08 +0000
Message-Id: <20200811075910.20954-6-stevex.yang@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200811075910.20954-1-stevex.yang@intel.com>
References: <20200811075910.20954-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [PATCH 5/7] net/iavf: fix multiple interrupts for VF

One interrupt vector should be able to serve multiple Rx queues
(1 vector : n queues). This patch fixes the interrupt binding logic by
reworking the mapping code: Rx queues are distributed across the
available Rx MSI-X vectors in round-robin order, and the IRQ map
message sent to the PF only covers the vectors that are actually used.
For example, with 4 Rx queues and 2 Rx vectors available, queues 0 and
2 are bound to vector 1 and queues 1 and 3 to vector 2.
Fixes: 69dd4c3d0898 ("net/avf: enable queue and device")

Signed-off-by: SteveX Yang <stevex.yang@intel.com>
---
 drivers/net/iavf/iavf_vchnl.c | 56 ++++++++++++++++++++++++++++-------
 1 file changed, 45 insertions(+), 11 deletions(-)

diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 33acea54a..614ea7e79 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -686,20 +687,53 @@ int
 iavf_config_irq_map(struct iavf_adapter *adapter)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	uint8_t *cmd_buffer = NULL;
 	struct virtchnl_irq_map_info *map_info;
 	struct virtchnl_vector_map *vecmap;
-	struct iavf_cmd_info args;
-	int len, i, err;
+	struct rte_eth_dev *dev = adapter->eth_dev;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint32_t vec, cmd_buffer_size, max_vectors, nb_msix, msix_base, i;
+	uint16_t rxq_map[vf->vf_res->max_vectors];
+	int err;
 
-	len = sizeof(struct virtchnl_irq_map_info) +
-	      sizeof(struct virtchnl_vector_map) * vf->nb_msix;
+	memset(rxq_map, 0, sizeof(rxq_map));
+	if (dev->data->dev_conf.intr_conf.rxq &&
+	    rte_intr_allow_others(intr_handle)) {
+		msix_base = IAVF_RX_VEC_START;
+		max_vectors = vf->vf_res->max_vectors - 1;
+		nb_msix = RTE_MIN(max_vectors, intr_handle->nb_efd);
+
+		vec = msix_base;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[vec] |= 1 << i;
+			intr_handle->intr_vec[i] = vec++;
+			if (vec >= vf->vf_res->max_vectors)
+				vec = msix_base;
+		}
+	} else {
+		msix_base = IAVF_MISC_VEC_ID;
+		nb_msix = 1;
 
-	map_info = rte_zmalloc("map_info", len, 0);
-	if (!map_info)
-		return -ENOMEM;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq_map[msix_base] |= 1 << i;
+			if (rte_intr_dp_is_en(intr_handle))
+				intr_handle->intr_vec[i] = msix_base;
+		}
+	}
 
-	map_info->num_vectors = vf->nb_msix;
-	for (i = 0; i < vf->nb_msix; i++) {
+	cmd_buffer_size = sizeof(struct virtchnl_irq_map_info) +
+			  sizeof(struct virtchnl_vector_map) * nb_msix;
+	cmd_buffer = rte_zmalloc("iavf", cmd_buffer_size, 0);
+	if (!cmd_buffer) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
+		return IAVF_ERR_NO_MEMORY;
+	}
+
+	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
+	map_info->num_vectors = nb_msix;
+	for (i = 0; i < nb_msix; i++) {
 		vecmap = &map_info->vecmap[i];
 		vecmap->vsi_id = vf->vsi_res->vsi_id;
 		vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
@@ -709,8 +743,8 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	}
 
 	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
-	args.in_args = (u8 *)map_info;
-	args.in_args_size = len;
+	args.in_args = (u8 *)cmd_buffer;
+	args.in_args_size = cmd_buffer_size;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 	err = iavf_execute_vf_cmd(adapter, &args);
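
For reference, the round-robin distribution of Rx queues across the Rx
vectors can be illustrated with the minimal standalone sketch below. It
mirrors the mapping loop added in iavf_config_irq_map(); the macros
RX_VEC_START, MAX_VECTORS and NB_RX_QUEUES are illustrative
placeholders chosen for this example, not driver symbols.

  #include <stdio.h>
  #include <stdint.h>

  #define RX_VEC_START 1   /* vector 0 is reserved for misc/admin events */
  #define MAX_VECTORS  3   /* total vectors granted to the VF (example) */
  #define NB_RX_QUEUES 4   /* number of configured Rx queues (example) */

  int main(void)
  {
          uint16_t rxq_map[MAX_VECTORS] = {0};
          uint32_t intr_vec[NB_RX_QUEUES];
          uint32_t vec = RX_VEC_START;
          uint32_t i;

          /* Assign each Rx queue to a vector in round-robin order and
           * record, per vector, a bitmap of the queues it serves.
           */
          for (i = 0; i < NB_RX_QUEUES; i++) {
                  rxq_map[vec] |= 1 << i;
                  intr_vec[i] = vec++;
                  if (vec >= MAX_VECTORS)
                          vec = RX_VEC_START;
          }

          for (i = 0; i < NB_RX_QUEUES; i++)
                  printf("rx queue %u -> vector %u\n",
                         (unsigned)i, (unsigned)intr_vec[i]);
          for (vec = RX_VEC_START; vec < MAX_VECTORS; vec++)
                  printf("vector %u queue bitmap: 0x%04x\n",
                         (unsigned)vec, (unsigned)rxq_map[vec]);
          return 0;
  }

With 4 Rx queues and vectors 1-2 available, this prints queues 0/2 on
vector 1 and queues 1/3 on vector 2, i.e. one vector serving n queues,
which is the mapping the patch programs via VIRTCHNL_OP_CONFIG_IRQ_MAP.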