From patchwork Wed Oct 27 10:00:26 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ding, Xuan" X-Patchwork-Id: 103030 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D535FA0547; Wed, 27 Oct 2021 12:12:23 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9CAC34068C; Wed, 27 Oct 2021 12:12:23 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id C09134003F for ; Wed, 27 Oct 2021 12:12:21 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10149"; a="253676205" X-IronPort-AV: E=Sophos;i="5.87,186,1631602800"; d="scan'208";a="253676205" Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2021 03:12:20 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.87,186,1631602800"; d="scan'208";a="597307054" Received: from dpdk-xuanding-dev2.sh.intel.com ([10.67.119.250]) by orsmga004.jf.intel.com with ESMTP; 27 Oct 2021 03:12:17 -0700 From: Xuan Ding To: dev@dpdk.org, maxime.coquelin@redhat.com, chenbo.xia@intel.com Cc: jiayu.hu@intel.com, Xuan Ding , anatoly.burakov@intel.com Date: Wed, 27 Oct 2021 10:00:26 +0000 Message-Id: <20211027100026.99650-1-xuan.ding@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20211027095538.98512-1-xuan.ding@intel.com> References: <20211027095538.98512-1-xuan.ding@intel.com> Subject: [dpdk-dev] [PATCH v2] vhost: remove async dma map status X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Async dma map 
status flag was added to prevent the unnecessary unmap when DMA devices are bound to the kernel driver. This brings maintenance cost for a lot of code. This patch removes the dma map status by using rte_errno instead. This patch relies on the following patch to fix a partial unmap check in vfio unmapping API. [1] https://www.mail-archive.com/dev@dpdk.org/msg226464.html Cc: anatoly.burakov@intel.com Signed-off-by: Xuan Ding Reviewed-by: Maxime Coquelin --- v2: * Fix a typo in commit log. --- lib/vhost/vhost.h | 3 -- lib/vhost/vhost_user.c | 70 ++++++++---------------------------------- 2 files changed, 13 insertions(+), 60 deletions(-) diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h index 6c6a2da2c9..71fddf3592 100644 --- a/lib/vhost/vhost.h +++ b/lib/vhost/vhost.h @@ -370,9 +370,6 @@ struct virtio_net { uint32_t nr_vring; int async_copy; - /* Record the dma map status for each region. */ - bool *async_map_status; - int extbuf; int linearbuf; struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2]; diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index 720d1c1c9d..9489d03e45 100644 --- a/lib/vhost/vhost_user.c +++ b/lib/vhost/vhost_user.c @@ -144,7 +144,7 @@ get_blk_size(int fd) } static int -async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool do_map) +async_dma_map(struct rte_vhost_mem_region *region, bool do_map) { uint64_t host_iova; int ret = 0; @@ -156,8 +156,6 @@ async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool d region->host_user_addr, host_iova, region->size); - *dma_map_success = ret == 0; - if (ret) { /* * DMA device may bind with kernel driver, in this case, @@ -175,26 +173,24 @@ async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool d return 0; VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n"); - return ret; - + /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */ + return 0; } } else { - /* No need to do vfio unmap if the map failed. 
*/ - if (!*dma_map_success) - return 0; - /* Remove mapped region from the default container of DPDK. */ ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD, region->host_user_addr, host_iova, region->size); if (ret) { + /* like DMA map, ignore the kernel driver case when unmap. */ + if (rte_errno == EINVAL) + return 0; + VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n"); return ret; } - /* Clear the flag once the unmap succeeds. */ - *dma_map_success = 0; } return ret; @@ -213,7 +209,7 @@ free_mem_region(struct virtio_net *dev) reg = &dev->mem->regions[i]; if (reg->host_user_addr) { if (dev->async_copy && rte_vfio_is_enabled("vfio")) - async_dma_map(reg, &dev->async_map_status[i], false); + async_dma_map(reg, false); munmap(reg->mmap_addr, reg->mmap_size); close(reg->fd); @@ -228,11 +224,6 @@ vhost_backend_cleanup(struct virtio_net *dev) free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; - - if (dev->async_map_status) { - rte_free(dev->async_map_status); - dev->async_map_status = NULL; - } } rte_free(dev->guest_pages); @@ -688,19 +679,6 @@ numa_realloc(struct virtio_net *dev, int index) } dev->mem = mem; - if (dev->async_copy && rte_vfio_is_enabled("vfio")) { - if (dev->async_map_status == NULL) { - dev->async_map_status = rte_zmalloc_socket("async-dma-map-status", - sizeof(bool) * dev->mem->nregions, 0, node); - if (!dev->async_map_status) { - VHOST_LOG_CONFIG(ERR, - "(%d) failed to realloc dma mapping status on node\n", - dev->vid); - return dev; - } - } - } - gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp), RTE_CACHE_LINE_SIZE, node); if (!gp) { @@ -1231,7 +1209,6 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd, static int vhost_user_mmap_region(struct virtio_net *dev, struct rte_vhost_mem_region *region, - uint32_t region_index, uint64_t mmap_offset) { void *mmap_addr; @@ -1294,16 +1271,14 @@ vhost_user_mmap_region(struct virtio_net *dev, if (dev->async_copy) { if (add_guest_pages(dev, 
region, alignment) < 0) { - VHOST_LOG_CONFIG(ERR, - "adding guest pages to region failed.\n"); + VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n"); return -1; } if (rte_vfio_is_enabled("vfio")) { - ret = async_dma_map(region, &dev->async_map_status[region_index], true); + ret = async_dma_map(region, true); if (ret) { - VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA " - "engine failed\n"); + VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n"); return -1; } } @@ -1381,11 +1356,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; - - if (dev->async_map_status) { - rte_free(dev->async_map_status); - dev->async_map_status = NULL; - } } /* Flush IOTLB cache as previous HVAs are now invalid */ @@ -1426,17 +1396,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, goto free_guest_pages; } - if (dev->async_copy) { - dev->async_map_status = rte_zmalloc_socket("async-dma-map-status", - sizeof(bool) * memory->nregions, 0, numa_node); - if (!dev->async_map_status) { - VHOST_LOG_CONFIG(ERR, - "(%d) failed to allocate memory for dma mapping status\n", - dev->vid); - goto free_mem_table; - } - } - for (i = 0; i < memory->nregions; i++) { reg = &dev->mem->regions[i]; @@ -1453,7 +1412,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, mmap_offset = memory->regions[i].mmap_offset; - if (vhost_user_mmap_region(dev, reg, i, mmap_offset) < 0) { + if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) { VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i); goto free_mem_table; } @@ -1501,10 +1460,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; - if (dev->async_map_status) { - rte_free(dev->async_map_status); - dev->async_map_status = NULL; - } + free_guest_pages: rte_free(dev->guest_pages); dev->guest_pages = NULL;