From patchwork Fri Sep 16 06:16:28 2022
X-Patchwork-Submitter: "Pei, Andy"
X-Patchwork-Id: 116388
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Andy Pei
To: dev@dpdk.org
Cc: chenbo.xia@intel.com, rosen.xu@intel.com, wei.huang@intel.com,
 gang.cao@intel.com, maxime.coquelin@redhat.com
Subject: [PATCH v3 6/8] vdpa/ifc: support dynamic enable/disable queue
Date: Fri, 16 Sep 2022 14:16:28 +0800
Message-Id: <1663308990-621-7-git-send-email-andy.pei@intel.com>
In-Reply-To: <1663308990-621-1-git-send-email-andy.pei@intel.com>
References: <1661229305-240952-2-git-send-email-andy.pei@intel.com>
 <1663308990-621-1-git-send-email-andy.pei@intel.com>

From: Huang Wei

Support dynamically enabling or disabling queues. From the front end,
such as QEMU, the user can configure queues with ethtool. For example,
"ethtool -L eth0 combined 3" enables 3 queue pairs.
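Each "combined" channel seen by ethtool is one queue pair, i.e. two vrings
on the vhost side, which is why the driver below validates the vring index
against max_queues * 2. A minimal sketch of that index mapping follows; the
helper names are illustrative only and are not part of this patch:

	/* Illustrative only -- not part of the patch. Virtio-net uses
	 * even vring indexes for RX and odd ones for TX, so queue pair
	 * q maps to vrings 2q and 2q + 1.
	 */
	static inline int rx_vring_idx(int qpair) { return qpair * 2; }
	static inline int tx_vring_idx(int qpair) { return qpair * 2 + 1; }

	/* "ethtool -L eth0 combined 3" therefore toggles vrings 0..5. */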
Signed-off-by: Huang Wei
Signed-off-by: Andy Pei
---
 drivers/vdpa/ifc/base/ifcvf.c | 99 +++++++++++++++++++++++++++++++++++++++++++
 drivers/vdpa/ifc/base/ifcvf.h |  6 +++
 drivers/vdpa/ifc/ifcvf_vdpa.c | 93 +++++++++++++++++++++++++++++++++-------
 3 files changed, 183 insertions(+), 15 deletions(-)

diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
index 619b034..792d258 100644
--- a/drivers/vdpa/ifc/base/ifcvf.c
+++ b/drivers/vdpa/ifc/base/ifcvf.c
@@ -227,6 +227,105 @@
 	}
 }
 
+int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+	struct ifcvf_pci_common_cfg *cfg;
+	u8 *lm_cfg;
+	u16 notify_off;
+	int msix_vector;
+
+	if (i >= (int)hw->nr_vring)
+		return -1;
+
+	cfg = hw->common_cfg;
+	if (!cfg) {
+		WARNINGOUT("common_cfg in HW is NULL.\n");
+		return -1;
+	}
+
+	ifcvf_enable_multiqueue(hw);
+
+	IFCVF_WRITE_REG16(i, &cfg->queue_select);
+	msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+	if (msix_vector != (i + 1)) {
+		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+		msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+		if (msix_vector == IFCVF_MSI_NO_VECTOR) {
+			WARNINGOUT("queue %u, msix vec alloc failed\n", i);
+			return -1;
+		}
+	}
+
+	io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+			&cfg->queue_desc_hi);
+	io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+			&cfg->queue_avail_hi);
+	io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+			&cfg->queue_used_hi);
+	IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+
+	lm_cfg = hw->lm_cfg;
+	if (lm_cfg) {
+		if (hw->device_type == IFCVF_BLK)
+			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+				i * IFCVF_LM_CFG_SIZE) =
+				(u32)hw->vring[i].last_avail_idx |
+				((u32)hw->vring[i].last_used_idx << 16);
+		else
+			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+				(i / 2) * IFCVF_LM_CFG_SIZE +
+				(i % 2) * 4) =
+				(u32)hw->vring[i].last_avail_idx |
+				((u32)hw->vring[i].last_used_idx << 16);
+	}
+
+	notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
+	hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
+			notify_off * hw->notify_off_multiplier);
+	IFCVF_WRITE_REG16(1, &cfg->queue_enable);
+
+	return 0;
+}
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+	struct ifcvf_pci_common_cfg *cfg;
+	u32 ring_state;
+	u8 *lm_cfg;
+
+	if (i >= (int)hw->nr_vring)
+		return;
+
+	cfg = hw->common_cfg;
+	if (!cfg) {
+		WARNINGOUT("common_cfg in HW is NULL.\n");
+		return;
+	}
+
+	IFCVF_WRITE_REG16(i, &cfg->queue_select);
+	IFCVF_WRITE_REG16(0, &cfg->queue_enable);
+
+	lm_cfg = hw->lm_cfg;
+	if (lm_cfg) {
+		if (hw->device_type == IFCVF_BLK) {
+			ring_state = *(u32 *)(lm_cfg +
+				IFCVF_LM_RING_STATE_OFFSET +
+				i * IFCVF_LM_CFG_SIZE);
+			hw->vring[i].last_avail_idx =
+				(u16)(ring_state & IFCVF_16_BIT_MASK);
+		} else {
+			ring_state = *(u32 *)(lm_cfg +
+				IFCVF_LM_RING_STATE_OFFSET +
+				(i / 2) * IFCVF_LM_CFG_SIZE +
+				(i % 2) * 4);
+			hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
+		}
+		hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
+	}
+}
+
 STATIC int
 ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
index 1e133c0..3726da7 100644
--- a/drivers/vdpa/ifc/base/ifcvf.h
+++ b/drivers/vdpa/ifc/base/ifcvf.h
@@ -164,6 +164,12 @@ struct ifcvf_hw {
 ifcvf_get_features(struct ifcvf_hw *hw);
 
 int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i);
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i);
+
+int
 ifcvf_start_hw(struct ifcvf_hw *hw);
 
 void
diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
index b00afdb..32bc1c9 100644
--- a/drivers/vdpa/ifc/ifcvf_vdpa.c
+++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
@@ -1282,13 +1282,59 @@ struct rte_vdpa_dev_info {
 }
 
 static int
+ifcvf_config_vring(struct ifcvf_internal *internal, int vring)
+{
+	struct ifcvf_hw *hw = &internal->hw;
+	int vid = internal->vid;
+	struct rte_vhost_vring vq;
+	uint64_t gpa;
+
+	if (hw->vring[vring].enable) {
+		rte_vhost_get_vhost_vring(vid, vring, &vq);
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
+			return -1;
+		}
+		hw->vring[vring].desc = gpa;
+
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for available ring.");
+			return -1;
+		}
+		hw->vring[vring].avail = gpa;
+
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for used ring.");
+			return -1;
+		}
+		hw->vring[vring].used = gpa;
+
+		hw->vring[vring].size = vq.size;
+		rte_vhost_get_vring_base(vid, vring,
+				&hw->vring[vring].last_avail_idx,
+				&hw->vring[vring].last_used_idx);
+		ifcvf_enable_vring_hw(&internal->hw, vring);
+	} else {
+		ifcvf_disable_vring_hw(&internal->hw, vring);
+		rte_vhost_set_vring_base(vid, vring,
+				hw->vring[vring].last_avail_idx,
+				hw->vring[vring].last_used_idx);
+	}
+
+	return 0;
+}
+
+static int
 ifcvf_set_vring_state(int vid, int vring, int state)
 {
 	struct rte_vdpa_device *vdev;
 	struct internal_list *list;
 	struct ifcvf_internal *internal;
 	struct ifcvf_hw *hw;
-	struct ifcvf_pci_common_cfg *cfg;
+	bool enable = !!state;
 	int ret = 0;
 
 	vdev = rte_vhost_get_vdpa_device(vid);
@@ -1298,6 +1344,9 @@ struct rte_vdpa_dev_info {
 		return -1;
 	}
 
+	DRV_LOG(INFO, "%s queue %d of vDPA device %s",
+		enable ? "enable" : "disable", vring, vdev->device->name);
+
 	internal = list->internal;
 	if (vring < 0 || vring >= internal->max_queues * 2) {
 		DRV_LOG(ERR, "Vring index %d not correct", vring);
@@ -1305,27 +1354,41 @@ struct rte_vdpa_dev_info {
 	}
 
 	hw = &internal->hw;
+	hw->vring[vring].enable = enable;
+
 	if (!internal->configured)
-		goto exit;
+		return 0;
 
-	cfg = hw->common_cfg;
-	IFCVF_WRITE_REG16(vring, &cfg->queue_select);
-	IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
+	unset_notify_relay(internal);
 
-	if (!state && hw->vring[vring].enable) {
-		ret = vdpa_disable_vfio_intr(internal);
-		if (ret)
-			return ret;
+	ret = vdpa_enable_vfio_intr(internal, false);
+	if (ret) {
+		DRV_LOG(ERR, "failed to set vfio interrupt of vDPA device %s",
+			vdev->device->name);
+		return ret;
 	}
 
-	if (state && !hw->vring[vring].enable) {
-		ret = vdpa_enable_vfio_intr(internal, false);
-		if (ret)
-			return ret;
+	ret = ifcvf_config_vring(internal, vring);
+	if (ret) {
+		DRV_LOG(ERR, "failed to configure queue %d of vDPA device %s",
+			vring, vdev->device->name);
+		return ret;
+	}
+
+	ret = setup_notify_relay(internal);
+	if (ret) {
+		DRV_LOG(ERR, "failed to setup notify relay of vDPA device %s",
+			vdev->device->name);
+		return ret;
+	}
+
+	ret = rte_vhost_host_notifier_ctrl(vid, vring, enable);
+	if (ret) {
+		DRV_LOG(ERR, "vDPA device %s queue %d host notifier ctrl fail",
+			vdev->device->name, vring);
+		return ret;
 	}
 
-exit:
-	hw->vring[vring].enable = !!state;
 	return 0;
 }
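
For reference, both new base functions exchange ring state with the device
through a packed 32-bit word in the live-migration config space:
last_avail_idx in the low 16 bits, last_used_idx in the high 16 bits. A
minimal standalone sketch of that layout (assuming only stdint.h; these
helpers are illustrative, not driver code):

	#include <stdint.h>

	/* Illustrative pack/unpack of the 32-bit ring-state word that
	 * ifcvf_enable_vring_hw() writes to, and ifcvf_disable_vring_hw()
	 * reads from, the LM config space. Not part of the patch.
	 */
	static inline uint32_t
	ring_state_pack(uint16_t last_avail_idx, uint16_t last_used_idx)
	{
		/* low 16 bits: last_avail_idx; high 16: last_used_idx */
		return (uint32_t)last_avail_idx |
			((uint32_t)last_used_idx << 16);
	}

	static inline void
	ring_state_unpack(uint32_t ring_state,
			uint16_t *last_avail_idx, uint16_t *last_used_idx)
	{
		*last_avail_idx = (uint16_t)(ring_state & 0xffff);
		*last_used_idx = (uint16_t)(ring_state >> 16);
	}

Note that in the net path of ifcvf_disable_vring_hw() the patch restores
last_avail_idx from the high half as well (ring_state >> 16), matching
where the net device reports its synchronized index; only the BLK path
uses the low 16 bits (IFCVF_16_BIT_MASK) for last_avail_idx.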