From patchwork Mon May 15 06:47:00 2023
X-Patchwork-Submitter: "Li, Miao"
X-Patchwork-Id: 126848
X-Patchwork-Delegate: thomas@monjalon.net
From: Miao Li <miao.li@intel.com>
To: dev@dpdk.org
Cc: skori@marvell.com, thomas@monjalon.net, david.marchand@redhat.com,
 ferruh.yigit@amd.com, chenbo.xia@intel.com, yahui.cao@intel.com,
 Anatoly Burakov
Subject: [PATCH v1 4/4] bus/pci: add VFIO sparse mmap support
Date: Mon, 15 May 2023 06:47:00 +0000
Message-Id: <20230515064700.624054-5-miao.li@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230515064700.624054-1-miao.li@intel.com>
References: <20230418053012.10667-1-chenbo.xia@intel.com>
 <20230515064700.624054-1-miao.li@intel.com>

This patch adds sparse mmap support to the PCI bus. Sparse mmap is a
capability defined in VFIO which allows multiple mmap areas within one
VFIO region. With this patch, the sparse mmap areas are mapped into one
contiguous virtual address region that follows the device-specific BAR
layout, so a driver can still access every mapped area through
'bar_base_address + bar_offset'.
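For illustration only (not part of the diff below): a minimal sketch of the
driver-side access pattern the paragraph above describes. The names
read_reg32, bar_base and reg_offset are hypothetical; the point is simply
that, because every sparse area is overlaid inside one contiguous
reservation, plain base-plus-offset arithmetic keeps working as long as the
offset falls inside a mapped area.

#include <stdint.h>

/* Hypothetical helper: read a 32-bit register from a sparse-mapped BAR.
 * bar_base is the contiguous reservation set up by the mapping code;
 * reg_offset must land inside one of the mapped sparse areas. */
static inline uint32_t
read_reg32(const volatile void *bar_base, uint64_t reg_offset)
{
	return *(const volatile uint32_t *)
			((const volatile uint8_t *)bar_base + reg_offset);
}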
Signed-off-by: Miao Li <miao.li@intel.com>
Signed-off-by: Chenbo Xia <chenbo.xia@intel.com>
---
 drivers/bus/pci/linux/pci_vfio.c | 104 +++++++++++++++++++++++++++----
 drivers/bus/pci/private.h        |   2 +
 2 files changed, 94 insertions(+), 12 deletions(-)

diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index f6289c907f..304c168e01 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -673,6 +673,54 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
 	return 0;
 }
 
+static int
+pci_vfio_sparse_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
+		int bar_index, int additional_flags)
+{
+	struct pci_map *bar = &vfio_res->maps[bar_index];
+	struct vfio_region_sparse_mmap_area *sparse;
+	void *bar_addr;
+	uint32_t i;
+
+	if (bar->size == 0) {
+		RTE_LOG(DEBUG, EAL, "Bar size is 0, skip BAR%d\n", bar_index);
+		return 0;
+	}
+
+	/* reserve the address using an inaccessible mapping */
+	bar_addr = mmap(bar->addr, bar->size, 0, MAP_PRIVATE |
+			MAP_ANONYMOUS | additional_flags, -1, 0);
+	if (bar_addr != MAP_FAILED) {
+		void *map_addr = NULL;
+		for (i = 0; i < bar->nr_areas; i++) {
+			sparse = &bar->areas[i];
+			if (sparse->size) {
+				void *addr = RTE_PTR_ADD(bar_addr, sparse->offset);
+				map_addr = pci_map_resource(addr, vfio_dev_fd,
+					bar->offset + sparse->offset, sparse->size,
+					RTE_MAP_FORCE_ADDRESS);
+				if (map_addr == NULL) {
+					munmap(bar_addr, bar->size);
+					RTE_LOG(ERR, EAL, "Failed to map pci BAR%d\n",
+						bar_index);
+					goto err_map;
+				}
+			}
+		}
+	} else {
+		RTE_LOG(ERR, EAL, "Failed to create inaccessible mapping for BAR%d\n",
+			bar_index);
+		goto err_map;
+	}
+
+	bar->addr = bar_addr;
+	return 0;
+
+err_map:
+	bar->nr_areas = 0;
+	return -1;
+}
+
 /*
  * region info may contain capability headers, so we need to keep reallocating
  * the memory until we match allocated memory size with argsz.
@@ -875,6 +923,8 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
 
 	for (i = 0; i < vfio_res->nb_maps; i++) {
 		void *bar_addr;
+		struct vfio_info_cap_header *hdr;
+		struct vfio_region_info_cap_sparse_mmap *sparse;
 
 		ret = pci_vfio_get_region_info(vfio_dev_fd, &reg, i);
 		if (ret < 0) {
@@ -920,12 +970,33 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
 		maps[i].size = reg->size;
 		maps[i].path = NULL; /* vfio doesn't have per-resource paths */
 
-		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
-		if (ret < 0) {
-			RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
-					pci_addr, i, strerror(errno));
-			free(reg);
-			goto err_vfio_res;
+		hdr = pci_vfio_info_cap(reg, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
+
+		if (hdr != NULL) {
+			sparse = container_of(hdr,
+				struct vfio_region_info_cap_sparse_mmap, header);
+			if (sparse->nr_areas > 0) {
+				maps[i].nr_areas = sparse->nr_areas;
+				maps[i].areas = sparse->areas;
+			}
+		}
+
+		if (maps[i].nr_areas > 0) {
+			ret = pci_vfio_sparse_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+			if (ret < 0) {
+				RTE_LOG(ERR, EAL, "%s sparse mapping BAR%i failed: %s\n",
+						pci_addr, i, strerror(errno));
+				free(reg);
+				goto err_vfio_res;
+			}
+		} else {
+			ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+			if (ret < 0) {
+				RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
+						pci_addr, i, strerror(errno));
+				free(reg);
+				goto err_vfio_res;
+			}
 		}
 
 		dev->mem_resource[i].addr = maps[i].addr;
@@ -1008,11 +1079,20 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
 	maps = vfio_res->maps;
 
 	for (i = 0; i < vfio_res->nb_maps; i++) {
-		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
-		if (ret < 0) {
-			RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
-					pci_addr, i, strerror(errno));
-			goto err_vfio_dev_fd;
+		if (maps[i].nr_areas > 0) {
+			ret = pci_vfio_sparse_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+			if (ret < 0) {
+				RTE_LOG(ERR, EAL, "%s sparse mapping BAR%i failed: %s\n",
+						pci_addr, i, strerror(errno));
+				goto err_vfio_dev_fd;
+			}
+		} else {
+			ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+			if (ret < 0) {
+				RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
+						pci_addr, i, strerror(errno));
+				goto err_vfio_dev_fd;
+			}
 		}
 
 		dev->mem_resource[i].addr = maps[i].addr;
@@ -1062,7 +1142,7 @@ find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
 			break;
 	}
 
-	if (vfio_res == NULL)
+	if (vfio_res == NULL)
 		return vfio_res;
 
 	RTE_LOG(INFO, EAL, "Releasing PCI mapped resource for %s\n",
diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
index 2d6991ccb7..8b0ce73533 100644
--- a/drivers/bus/pci/private.h
+++ b/drivers/bus/pci/private.h
@@ -121,6 +121,8 @@ struct pci_map {
 	uint64_t offset;
 	uint64_t size;
 	uint64_t phaddr;
+	uint32_t nr_areas;
+	struct vfio_region_sparse_mmap_area *areas;
 };
 
 struct pci_msix_table {
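For context, a minimal standalone sketch of the reserve-then-overlay
technique that pci_vfio_sparse_mmap_bar() applies above, written with plain
mmap() calls instead of the EAL's pci_map_resource()/RTE_MAP_FORCE_ADDRESS
helpers; the struct area table, fd and sizes are hypothetical inputs for
illustration, not DPDK or VFIO APIs.

#include <stdint.h>
#include <sys/mman.h>

struct area {
	uint64_t offset;	/* offset of the sparse area within the BAR */
	uint64_t size;		/* length of the sparse area */
};

/* Reserve one contiguous, inaccessible range the size of the whole BAR,
 * then overlay each file-backed sparse area at its fixed offset inside
 * that reservation. Returns the BAR base address or NULL on failure. */
static void *
sparse_map(int fd, uint64_t bar_offset, size_t bar_size,
		const struct area *areas, int nr_areas)
{
	int i;
	/* PROT_NONE anonymous mapping only reserves the address range */
	uint8_t *base = mmap(NULL, bar_size, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	for (i = 0; i < nr_areas; i++) {
		/* MAP_FIXED places the real mapping inside the reservation */
		void *p = mmap(base + areas[i].offset, areas[i].size,
				PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
				fd, (off_t)(bar_offset + areas[i].offset));
		if (p == MAP_FAILED) {
			munmap(base, bar_size);
			return NULL;
		}
	}
	return base;
}

The patch records nr_areas and the area table per BAR in struct pci_map so
that the secondary-process path can detect sparse-mapped BARs and repeat the
same per-area mappings.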