[21.02,3/3] vhost: refactor memory regions mapping

Message ID 20201116113620.587073-4-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Headers
Series vhost: vhost_user_set_mem_table refactoring |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS
ci/iol-intel-Functional fail Functional Testing issues
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/travis-robot success Travis build: passed

Commit Message

Maxime Coquelin Nov. 16, 2020, 11:36 a.m. UTC
  This patch moves memory region mmapping and related
preparation in a dedicated function in order to simplify
VHOST_USER_SET_MEM_TABLE request handling function.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/vhost_user.c | 178 ++++++++++++++++++----------------
 1 file changed, 94 insertions(+), 84 deletions(-)
  

Comments

Chenbo Xia Dec. 9, 2020, 2:16 p.m. UTC | #1
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Monday, November 16, 2020 7:36 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; Ding, Xuan
> <xuan.ding@intel.com>
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH 21.02 3/3] vhost: refactor memory regions mapping
> 
> This patch moves memory region mmapping and related
> preparation in a dedicated function in order to simplify
> VHOST_USER_SET_MEM_TABLE request handling function.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/librte_vhost/vhost_user.c | 178 ++++++++++++++++++----------------
>  1 file changed, 94 insertions(+), 84 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index 2ee22ef76d..24109bd4c1 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -1097,6 +1097,96 @@ vhost_user_postcopy_register(struct virtio_net *dev,
> int main_fd,
>  	return 0;
>  }
> 
> +static int
> +vhost_user_mmap_region(struct virtio_net *dev,
> +		struct rte_vhost_mem_region *region,
> +		uint64_t mmap_offset)
> +{
> +	void *mmap_addr;
> +	uint64_t mmap_size;
> +	uint64_t alignment;
> +	int populate;
> +
> +	/* Check for memory_size + mmap_offset overflow */
> +	if (mmap_offset >= -region->size) {
> +		VHOST_LOG_CONFIG(ERR,
> +				"mmap_offset (%#"PRIx64") and memory_size "
> +				"(%#"PRIx64") overflow\n",
> +				mmap_offset, region->size);
> +		return -1;
> +	}
> +
> +	mmap_size = region->size + mmap_offset;
> +
> +	/* mmap() without flag of MAP_ANONYMOUS, should be called with
> length
> +	 * argument aligned with hugepagesz at older longterm version Linux,
> +	 * like 2.6.32 and 3.2.72, or mmap() will fail with EINVAL.
> +	 *
> +	 * To avoid failure, make sure in caller to keep length aligned.
> +	 */
> +	alignment = get_blk_size(region->fd);
> +	if (alignment == (uint64_t)-1) {
> +		VHOST_LOG_CONFIG(ERR,
> +				"couldn't get hugepage size through fstat\n");
> +		return -1;
> +	}
> +	mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
> +	if (mmap_size == 0) {
> +		/*
> +		 * It could happen if initial mmap_size + alignment overflows
> +		 * the sizeof uint64, which could happen if either mmap_size
> or
> +		 * alignment value is wrong.
> +		 *
> +		 * mmap() kernel implementation would return an error, but
> +		 * better catch it before and provide useful info in the logs.
> +		 */
> +		VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
> +				"or alignment (0x%" PRIx64 ") is invalid\n",
> +				region->size + mmap_offset, alignment);
> +		return -1;
> +	}
> +
> +	populate = dev->async_copy ? MAP_POPULATE : 0;
> +	mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
> +			MAP_SHARED | populate, region->fd, 0);
> +
> +	if (mmap_addr == MAP_FAILED) {
> +		VHOST_LOG_CONFIG(ERR, "mmap failed (%s).\n", strerror(errno));
> +		return -1;
> +	}
> +
> +	region->mmap_addr = mmap_addr;
> +	region->mmap_size = mmap_size;
> +	region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
> mmap_offset;
> +
> +	if (dev->async_copy)
> +		if (add_guest_pages(dev, region, alignment) < 0) {
> +			VHOST_LOG_CONFIG(ERR,
> +					"adding guest pages to region failed.\n");
> +			return -1;
> +		}
> +
> +	VHOST_LOG_CONFIG(INFO,
> +			"guest memory region size: 0x%" PRIx64 "\n"
> +			"\t guest physical addr: 0x%" PRIx64 "\n"
> +			"\t guest virtual  addr: 0x%" PRIx64 "\n"
> +			"\t host  virtual  addr: 0x%" PRIx64 "\n"
> +			"\t mmap addr : 0x%" PRIx64 "\n"
> +			"\t mmap size : 0x%" PRIx64 "\n"
> +			"\t mmap align: 0x%" PRIx64 "\n"
> +			"\t mmap off  : 0x%" PRIx64 "\n",
> +			region->size,
> +			region->guest_phys_addr,
> +			region->guest_user_addr,
> +			region->host_user_addr,
> +			(uint64_t)(uintptr_t)mmap_addr,
> +			mmap_size,
> +			alignment,
> +			mmap_offset);
> +
> +	return 0;
> +}
> +
>  static int
>  vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg
> *msg,
>  			int main_fd)
> @@ -1104,12 +1194,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> struct VhostUserMsg *msg,
>  	struct virtio_net *dev = *pdev;
>  	struct VhostUserMemory *memory = &msg->payload.memory;
>  	struct rte_vhost_mem_region *reg;
> -	void *mmap_addr;
> -	uint64_t mmap_size;
> +
>  	uint64_t mmap_offset;
> -	uint64_t alignment;
>  	uint32_t i;
> -	int populate;
> 
>  	if (validate_msg_fds(msg, memory->nregions) != 0)
>  		return RTE_VHOST_MSG_RESULT_ERR;
> @@ -1171,7 +1258,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> struct VhostUserMsg *msg,
>  			dev->vid);
>  		goto free_guest_pages;
>  	}
> -	dev->mem->nregions = memory->nregions;
> 
>  	for (i = 0; i < memory->nregions; i++) {
>  		reg = &dev->mem->regions[i];
> @@ -1189,88 +1275,12 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> struct VhostUserMsg *msg,
> 
>  		mmap_offset = memory->regions[i].mmap_offset;
> 
> -		/* Check for memory_size + mmap_offset overflow */
> -		if (mmap_offset >= -reg->size) {
> -			VHOST_LOG_CONFIG(ERR,
> -				"mmap_offset (%#"PRIx64") and memory_size "
> -				"(%#"PRIx64") overflow\n",
> -				mmap_offset, reg->size);
> -			goto free_mem_table;
> -		}
> -
> -		mmap_size = reg->size + mmap_offset;
> -
> -		/* mmap() without flag of MAP_ANONYMOUS, should be called
> -		 * with length argument aligned with hugepagesz at older
> -		 * longterm version Linux, like 2.6.32 and 3.2.72, or
> -		 * mmap() will fail with EINVAL.
> -		 *
> -		 * to avoid failure, make sure in caller to keep length
> -		 * aligned.
> -		 */
> -		alignment = get_blk_size(reg->fd);
> -		if (alignment == (uint64_t)-1) {
> -			VHOST_LOG_CONFIG(ERR,
> -				"couldn't get hugepage size through fstat\n");
> -			goto free_mem_table;
> -		}
> -		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
> -		if (mmap_size == 0) {
> -			/*
> -			 * It could happen if initial mmap_size + alignment
> -			 * overflows the sizeof uint64, which could happen if
> -			 * either mmap_size or alignment value is wrong.
> -			 *
> -			 * mmap() kernel implementation would return an error,
> -			 * but better catch it before and provide useful info
> -			 * in the logs.
> -			 */
> -			VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
> -					"or alignment (0x%" PRIx64 ") is invalid\n",
> -					reg->size + mmap_offset, alignment);
> -			goto free_mem_table;
> -		}
> -
> -		populate = dev->async_copy ? MAP_POPULATE : 0;
> -		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
> -				 MAP_SHARED | populate, reg->fd, 0);
> -
> -		if (mmap_addr == MAP_FAILED) {
> -			VHOST_LOG_CONFIG(ERR,
> -				"mmap region %u failed.\n", i);
> +		if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
> +			VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i);
>  			goto free_mem_table;
>  		}
> 
> -		reg->mmap_addr = mmap_addr;
> -		reg->mmap_size = mmap_size;
> -		reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
> -				      mmap_offset;
> -
> -		if (dev->async_copy)
> -			if (add_guest_pages(dev, reg, alignment) < 0) {
> -				VHOST_LOG_CONFIG(ERR,
> -					"adding guest pages to region %u failed.\n",
> -					i);
> -				goto free_mem_table;
> -			}
> -
> -		VHOST_LOG_CONFIG(INFO,
> -			"guest memory region %u, size: 0x%" PRIx64 "\n"
> -			"\t guest physical addr: 0x%" PRIx64 "\n"
> -			"\t guest virtual  addr: 0x%" PRIx64 "\n"
> -			"\t host  virtual  addr: 0x%" PRIx64 "\n"
> -			"\t mmap addr : 0x%" PRIx64 "\n"
> -			"\t mmap size : 0x%" PRIx64 "\n"
> -			"\t mmap align: 0x%" PRIx64 "\n"
> -			"\t mmap off  : 0x%" PRIx64 "\n",
> -			i, reg->size,
> -			reg->guest_phys_addr,
> -			reg->guest_user_addr,
> -			reg->host_user_addr,
> -			(uint64_t)(uintptr_t)mmap_addr,
> -			mmap_size,
> -			alignment,
> -			mmap_offset);
> +		dev->mem->nregions++;
>  	}
> 
>  	if (vhost_user_postcopy_register(dev, main_fd, msg) < 0)
> --
> 2.26.2

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
  

Patch

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 2ee22ef76d..24109bd4c1 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1097,6 +1097,96 @@  vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
 	return 0;
 }
 
+static int
+vhost_user_mmap_region(struct virtio_net *dev,
+		struct rte_vhost_mem_region *region,
+		uint64_t mmap_offset)
+{
+	void *mmap_addr;
+	uint64_t mmap_size;
+	uint64_t alignment;
+	int populate;
+
+	/* Check for memory_size + mmap_offset overflow */
+	if (mmap_offset >= -region->size) {
+		VHOST_LOG_CONFIG(ERR,
+				"mmap_offset (%#"PRIx64") and memory_size "
+				"(%#"PRIx64") overflow\n",
+				mmap_offset, region->size);
+		return -1;
+	}
+
+	mmap_size = region->size + mmap_offset;
+
+	/* mmap() without flag of MAP_ANONYMOUS, should be called with length
+	 * argument aligned with hugepagesz at older longterm version Linux,
+	 * like 2.6.32 and 3.2.72, or mmap() will fail with EINVAL.
+	 *
+	 * To avoid failure, make sure in caller to keep length aligned.
+	 */
+	alignment = get_blk_size(region->fd);
+	if (alignment == (uint64_t)-1) {
+		VHOST_LOG_CONFIG(ERR,
+				"couldn't get hugepage size through fstat\n");
+		return -1;
+	}
+	mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
+	if (mmap_size == 0) {
+		/*
+		 * It could happen if initial mmap_size + alignment overflows
+		 * the sizeof uint64, which could happen if either mmap_size or
+		 * alignment value is wrong.
+		 *
+		 * mmap() kernel implementation would return an error, but
+		 * better catch it before and provide useful info in the logs.
+		 */
+		VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
+				"or alignment (0x%" PRIx64 ") is invalid\n",
+				region->size + mmap_offset, alignment);
+		return -1;
+	}
+
+	populate = dev->async_copy ? MAP_POPULATE : 0;
+	mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
+			MAP_SHARED | populate, region->fd, 0);
+
+	if (mmap_addr == MAP_FAILED) {
+		VHOST_LOG_CONFIG(ERR, "mmap failed (%s).\n", strerror(errno));
+		return -1;
+	}
+
+	region->mmap_addr = mmap_addr;
+	region->mmap_size = mmap_size;
+	region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
+
+	if (dev->async_copy)
+		if (add_guest_pages(dev, region, alignment) < 0) {
+			VHOST_LOG_CONFIG(ERR,
+					"adding guest pages to region failed.\n");
+			return -1;
+		}
+
+	VHOST_LOG_CONFIG(INFO,
+			"guest memory region size: 0x%" PRIx64 "\n"
+			"\t guest physical addr: 0x%" PRIx64 "\n"
+			"\t guest virtual  addr: 0x%" PRIx64 "\n"
+			"\t host  virtual  addr: 0x%" PRIx64 "\n"
+			"\t mmap addr : 0x%" PRIx64 "\n"
+			"\t mmap size : 0x%" PRIx64 "\n"
+			"\t mmap align: 0x%" PRIx64 "\n"
+			"\t mmap off  : 0x%" PRIx64 "\n",
+			region->size,
+			region->guest_phys_addr,
+			region->guest_user_addr,
+			region->host_user_addr,
+			(uint64_t)(uintptr_t)mmap_addr,
+			mmap_size,
+			alignment,
+			mmap_offset);
+
+	return 0;
+}
+
 static int
 vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			int main_fd)
@@ -1104,12 +1194,9 @@  vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	struct virtio_net *dev = *pdev;
 	struct VhostUserMemory *memory = &msg->payload.memory;
 	struct rte_vhost_mem_region *reg;
-	void *mmap_addr;
-	uint64_t mmap_size;
+
 	uint64_t mmap_offset;
-	uint64_t alignment;
 	uint32_t i;
-	int populate;
 
 	if (validate_msg_fds(msg, memory->nregions) != 0)
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -1171,7 +1258,6 @@  vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			dev->vid);
 		goto free_guest_pages;
 	}
-	dev->mem->nregions = memory->nregions;
 
 	for (i = 0; i < memory->nregions; i++) {
 		reg = &dev->mem->regions[i];
@@ -1189,88 +1275,12 @@  vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 
 		mmap_offset = memory->regions[i].mmap_offset;
 
-		/* Check for memory_size + mmap_offset overflow */
-		if (mmap_offset >= -reg->size) {
-			VHOST_LOG_CONFIG(ERR,
-				"mmap_offset (%#"PRIx64") and memory_size "
-				"(%#"PRIx64") overflow\n",
-				mmap_offset, reg->size);
-			goto free_mem_table;
-		}
-
-		mmap_size = reg->size + mmap_offset;
-
-		/* mmap() without flag of MAP_ANONYMOUS, should be called
-		 * with length argument aligned with hugepagesz at older
-		 * longterm version Linux, like 2.6.32 and 3.2.72, or
-		 * mmap() will fail with EINVAL.
-		 *
-		 * to avoid failure, make sure in caller to keep length
-		 * aligned.
-		 */
-		alignment = get_blk_size(reg->fd);
-		if (alignment == (uint64_t)-1) {
-			VHOST_LOG_CONFIG(ERR,
-				"couldn't get hugepage size through fstat\n");
-			goto free_mem_table;
-		}
-		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
-		if (mmap_size == 0) {
-			/*
-			 * It could happen if initial mmap_size + alignment
-			 * overflows the sizeof uint64, which could happen if
-			 * either mmap_size or alignment value is wrong.
-			 *
-			 * mmap() kernel implementation would return an error,
-			 * but better catch it before and provide useful info
-			 * in the logs.
-			 */
-			VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
-					"or alignment (0x%" PRIx64 ") is invalid\n",
-					reg->size + mmap_offset, alignment);
-			goto free_mem_table;
-		}
-
-		populate = dev->async_copy ? MAP_POPULATE : 0;
-		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
-				 MAP_SHARED | populate, reg->fd, 0);
-
-		if (mmap_addr == MAP_FAILED) {
-			VHOST_LOG_CONFIG(ERR,
-				"mmap region %u failed.\n", i);
+		if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
+			VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i);
 			goto free_mem_table;
 		}
 
-		reg->mmap_addr = mmap_addr;
-		reg->mmap_size = mmap_size;
-		reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
-				      mmap_offset;
-
-		if (dev->async_copy)
-			if (add_guest_pages(dev, reg, alignment) < 0) {
-				VHOST_LOG_CONFIG(ERR,
-					"adding guest pages to region %u failed.\n",
-					i);
-				goto free_mem_table;
-			}
-
-		VHOST_LOG_CONFIG(INFO,
-			"guest memory region %u, size: 0x%" PRIx64 "\n"
-			"\t guest physical addr: 0x%" PRIx64 "\n"
-			"\t guest virtual  addr: 0x%" PRIx64 "\n"
-			"\t host  virtual  addr: 0x%" PRIx64 "\n"
-			"\t mmap addr : 0x%" PRIx64 "\n"
-			"\t mmap size : 0x%" PRIx64 "\n"
-			"\t mmap align: 0x%" PRIx64 "\n"
-			"\t mmap off  : 0x%" PRIx64 "\n",
-			i, reg->size,
-			reg->guest_phys_addr,
-			reg->guest_user_addr,
-			reg->host_user_addr,
-			(uint64_t)(uintptr_t)mmap_addr,
-			mmap_size,
-			alignment,
-			mmap_offset);
+		dev->mem->nregions++;
 	}
 
 	if (vhost_user_postcopy_register(dev, main_fd, msg) < 0)