[RFC,09/27] vhost: add page size info to IOTLB entry

Message ID 20230331154259.1447831-10-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: Add VDUSE support to Vhost library

Commit Message

Maxime Coquelin March 31, 2023, 3:42 p.m. UTC
VDUSE will close the file descriptor after having mapped
the shared memory, so it will not be possible to get the
page size afterwards.

This patch adds a new page_shift field to the IOTLB entry,
so that the information will be passed at IOTLB cache
insertion time. The information is stored as a bit shift
value so that the IOTLB entry keeps fitting in a single
cacheline.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/iotlb.c      | 46 ++++++++++++++++++++----------------------
 lib/vhost/iotlb.h      |  2 +-
 lib/vhost/vhost.h      |  1 -
 lib/vhost/vhost_user.c |  8 +++++---
 4 files changed, 28 insertions(+), 29 deletions(-)
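
The bit-shift encoding works because page sizes are always powers of
two, so log2(page_size) round-trips losslessly and fits in a byte next
to the existing uint8_t perm field. A minimal sketch of the round-trip
(hypothetical helper names, not part of the patch):

#include <stdint.h>

/* A shift of up to 63 covers any 64-bit page size. */
static inline uint8_t
page_size_to_shift(uint64_t page_size)
{
	return (uint8_t)__builtin_ctzll(page_size); /* e.g. 4096 -> 12 */
}

static inline uint64_t
page_shift_to_size(uint8_t page_shift)
{
	return UINT64_C(1) << page_shift;           /* e.g. 12 -> 4096 */
}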
  

Comments

Chenbo Xia April 25, 2023, 6:20 a.m. UTC | #1
Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, March 31, 2023 11:43 PM
> To: dev@dpdk.org; david.marchand@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>; mkp@redhat.com; fbl@redhat.com;
> jasowang@redhat.com; Liang, Cunming <cunming.liang@intel.com>; Xie, Yongji
> <xieyongji@bytedance.com>; echaudro@redhat.com; eperezma@redhat.com;
> amorenoz@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [RFC 09/27] vhost: add page size info to IOTLB entry
> 
> [snip]
> 
> @@ -263,6 +260,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
>  	new_node->uaddr = uaddr;
>  	new_node->uoffset = uoffset;
>  	new_node->size = size;
> +	new_node->page_shift = __builtin_ctz(page_size);

__builtin_ctzll ?

Thanks,
Chenbo
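
For context on the question: __builtin_ctz() takes an unsigned int, so
the uint64_t page_size argument is silently truncated to its low 32
bits before the trailing zeros are counted. A standalone sketch of the
failure mode (illustrative values, not from the patch; real page sizes
fit in 32 bits, so this is defensive correctness rather than a bug hit
in practice):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t v = UINT64_C(1) << 32;

	/* __builtin_ctz(v) would see (unsigned int)v == 0, and
	 * __builtin_ctz(0) is undefined behavior. */
	printf("%d\n", __builtin_ctzll(v)); /* prints 32 */

	return 0;
}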

  
Maxime Coquelin May 3, 2023, 1:57 p.m. UTC | #2
Hi Chenbo,

On 4/25/23 08:20, Xia, Chenbo wrote:
> Hi Maxime,
> 
>> Subject: [RFC 09/27] vhost: add page size info to IOTLB entry
>>
>> [snip]
>>
>> @@ -263,6 +260,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
>>   	new_node->uaddr = uaddr;
>>   	new_node->uoffset = uoffset;
>>   	new_node->size = size;
>> +	new_node->page_shift = __builtin_ctz(page_size);
> 
> __builtin_ctzll ?

Indeed, that's better. Weird I don't get a warning!
Fixed in v1.

Thanks,
Maxime

> Thanks,
> Chenbo
  
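The missing diagnostic is expected rather than weird: neither GCC nor
Clang reports the implicit narrowing of an integer argument under the
usual -Wall -Wextra; it takes -Wconversion, which is not among DPDK's
default warning flags. A minimal reproducer, assuming GCC:

#include <stdint.h>

int
shift_of(uint64_t page_size)
{
	/* uint64_t silently converted to unsigned int here: quiet with
	 * -Wall -Wextra, flagged only by -Wconversion. */
	return __builtin_ctz(page_size);
}
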

Patch

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 51f118bc48..188dfb8e38 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -19,14 +19,14 @@  struct vhost_iotlb_entry {
 	uint64_t uaddr;
 	uint64_t uoffset;
 	uint64_t size;
+	uint8_t page_shift;
 	uint8_t perm;
 };
 
 #define IOTLB_CACHE_SIZE 2048
 
 static bool
-vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entry *b,
-		uint64_t align)
+vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entry *b)
 {
 	uint64_t a_start, a_end, b_start;
 
@@ -38,44 +38,41 @@  vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entr
 
 	/* Assumes entry a lower than entry b */
 	RTE_ASSERT(a_start < b_start);
-	a_end = RTE_ALIGN_CEIL(a_start + a->size, align);
-	b_start = RTE_ALIGN_FLOOR(b_start, align);
+	a_end = RTE_ALIGN_CEIL(a_start + a->size, RTE_BIT64(a->page_shift));
+	b_start = RTE_ALIGN_FLOOR(b_start, RTE_BIT64(b->page_shift));
 
 	return a_end > b_start;
 }
 
 static void
-vhost_user_iotlb_set_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node)
+vhost_user_iotlb_set_dump(struct vhost_iotlb_entry *node)
 {
-	uint64_t align, start;
+	uint64_t start;
 
 	start = node->uaddr + node->uoffset;
-	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
-
-	mem_set_dump((void *)(uintptr_t)start, node->size, false, align);
+	mem_set_dump((void *)(uintptr_t)start, node->size, false, RTE_BIT64(node->page_shift));
 }
 
 static void
-vhost_user_iotlb_clear_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node,
+vhost_user_iotlb_clear_dump(struct vhost_iotlb_entry *node,
 		struct vhost_iotlb_entry *prev, struct vhost_iotlb_entry *next)
 {
-	uint64_t align, start, end;
+	uint64_t start, end;
 
 	start = node->uaddr + node->uoffset;
 	end = start + node->size;
 
-	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
-
 	/* Skip first page if shared with previous entry. */
-	if (vhost_user_iotlb_share_page(prev, node, align))
-		start = RTE_ALIGN_CEIL(start, align);
+	if (vhost_user_iotlb_share_page(prev, node))
+		start = RTE_ALIGN_CEIL(start, RTE_BIT64(node->page_shift));
 
 	/* Skip last page if shared with next entry. */
-	if (vhost_user_iotlb_share_page(node, next, align))
-		end = RTE_ALIGN_FLOOR(end, align);
+	if (vhost_user_iotlb_share_page(node, next))
+		end = RTE_ALIGN_FLOOR(end, RTE_BIT64(node->page_shift));
 
 	if (end > start)
-		mem_set_dump((void *)(uintptr_t)start, end - start, false, align);
+		mem_set_dump((void *)(uintptr_t)start, end - start, false,
+			RTE_BIT64(node->page_shift));
 }
 
 static struct vhost_iotlb_entry *
@@ -198,7 +195,7 @@  vhost_user_iotlb_cache_remove_all(struct virtio_net *dev)
 	vhost_user_iotlb_wr_lock_all(dev);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
-		vhost_user_iotlb_set_dump(dev, node);
+		vhost_user_iotlb_set_dump(node);
 
 		TAILQ_REMOVE(&dev->iotlb_list, node, next);
 		vhost_user_iotlb_pool_put(dev, node);
@@ -223,7 +220,7 @@  vhost_user_iotlb_cache_random_evict(struct virtio_net *dev)
 		if (!entry_idx) {
 			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);
 
-			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
+			vhost_user_iotlb_clear_dump(node, prev_node, next_node);
 
 			TAILQ_REMOVE(&dev->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(dev, node);
@@ -239,7 +236,7 @@  vhost_user_iotlb_cache_random_evict(struct virtio_net *dev)
 
 void
 vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
-				uint64_t uoffset, uint64_t size, uint8_t perm)
+				uint64_t uoffset, uint64_t size, uint64_t page_size, uint8_t perm)
 {
 	struct vhost_iotlb_entry *node, *new_node;
 
@@ -263,6 +260,7 @@  vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 	new_node->uaddr = uaddr;
 	new_node->uoffset = uoffset;
 	new_node->size = size;
+	new_node->page_shift = __builtin_ctz(page_size);
 	new_node->perm = perm;
 
 	vhost_user_iotlb_wr_lock_all(dev);
@@ -276,7 +274,7 @@  vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 			vhost_user_iotlb_pool_put(dev, new_node);
 			goto unlock;
 		} else if (node->iova > new_node->iova) {
-			vhost_user_iotlb_set_dump(dev, new_node);
+			vhost_user_iotlb_set_dump(new_node);
 
 			TAILQ_INSERT_BEFORE(node, new_node, next);
 			dev->iotlb_cache_nr++;
@@ -284,7 +282,7 @@  vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 		}
 	}
 
-	vhost_user_iotlb_set_dump(dev, new_node);
+	vhost_user_iotlb_set_dump(new_node);
 
 	TAILQ_INSERT_TAIL(&dev->iotlb_list, new_node, next);
 	dev->iotlb_cache_nr++;
@@ -313,7 +311,7 @@  vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t si
 		if (iova < node->iova + node->size) {
 			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);
 
-			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
+			vhost_user_iotlb_clear_dump(node, prev_node, next_node);
 
 			TAILQ_REMOVE(&dev->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(dev, node);
diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index bee36c5903..81ca04df21 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -58,7 +58,7 @@  vhost_user_iotlb_wr_unlock_all(struct virtio_net *dev)
 }
 
 void vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
-					uint64_t uoffset, uint64_t size, uint8_t perm);
+		uint64_t uoffset, uint64_t size, uint64_t page_size, uint8_t perm);
 void vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t size);
 uint64_t vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t iova,
 					uint64_t *size, uint8_t perm);
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 67cc4a2fdb..4ace5ab081 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -1016,6 +1016,5 @@  mbuf_is_consumed(struct rte_mbuf *m)
 	return true;
 }
 
-uint64_t hua_to_alignment(struct rte_vhost_memory *mem, void *ptr);
 void mem_set_dump(void *ptr, size_t size, bool enable, uint64_t alignment);
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 93673d3902..a989f2c46d 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -743,7 +743,7 @@  log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	return log_gpa;
 }
 
-uint64_t
+static uint64_t
 hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
 {
 	struct rte_vhost_mem_region *r;
@@ -2632,7 +2632,7 @@  vhost_user_iotlb_msg(struct virtio_net **pdev,
 	struct virtio_net *dev = *pdev;
 	struct vhost_iotlb_msg *imsg = &ctx->msg.payload.iotlb;
 	uint16_t i;
-	uint64_t vva, len;
+	uint64_t vva, len, pg_sz;
 
 	switch (imsg->type) {
 	case VHOST_IOTLB_UPDATE:
@@ -2641,7 +2641,9 @@  vhost_user_iotlb_msg(struct virtio_net **pdev,
 		if (!vva)
 			return RTE_VHOST_MSG_RESULT_ERR;
 
-		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, 0, len, imsg->perm);
+		pg_sz = hua_to_alignment(dev->mem, (void *)(uintptr_t)vva);
+
+		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, 0, len, pg_sz, imsg->perm);
 
 		for (i = 0; i < dev->nr_vring; i++) {
 			struct vhost_virtqueue *vq = dev->virtqueue[i];
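
As a footnote on the boundary arithmetic the patch preserves: two
entries "share a page" when the last page of the lower entry, rounded
up, overlaps the first page of the higher entry, rounded down. A
self-contained sketch with made-up addresses, mirroring what
RTE_ALIGN_CEIL()/RTE_ALIGN_FLOOR() do for power-of-two alignments:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_CEIL(x, pg)  (((x) + (pg) - 1) & ~((pg) - 1))
#define ALIGN_FLOOR(x, pg) ((x) & ~((pg) - 1))

int
main(void)
{
	uint64_t pg = UINT64_C(1) << 12; /* 4 KiB pages, shift = 12 */

	/* Entry a covers [0x1000, 0x2800), entry b starts at 0x2900:
	 * both touch the page at 0x2000, so clearing the dump flag on
	 * one entry must skip that shared page. */
	uint64_t a_end   = ALIGN_CEIL(UINT64_C(0x1000) + UINT64_C(0x1800), pg); /* 0x3000 */
	uint64_t b_start = ALIGN_FLOOR(UINT64_C(0x2900), pg);                   /* 0x2000 */

	printf("share a page: %s\n", a_end > b_start ? "yes" : "no"); /* yes */

	return 0;
}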