[RFC,08/27] vhost: add offset field to IOTLB entries

Message ID 20230331154259.1447831-9-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series Add VDUSE support to Vhost library

Commit Message

Maxime Coquelin March 31, 2023, 3:42 p.m. UTC
  This patch is preliminary work to prepare for VDUSE
support, for which we need to keep track of the mmapped base
address and offset so that the mapping can be unmapped later
when the IOTLB entry is invalidated.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/iotlb.c      | 30 ++++++++++++++++++------------
 lib/vhost/iotlb.h      |  2 +-
 lib/vhost/vhost_user.c |  2 +-
 3 files changed, 20 insertions(+), 14 deletions(-)
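
To make the intent concrete, here is a minimal sketch (not part of the patch; the struct copy, helper names and length handling are illustrative assumptions): address translation needs uaddr + uoffset, while unmapping on invalidation needs the original mmap() base, i.e. uaddr alone, plus a length that accounts for the offset.

#include <stdint.h>
#include <sys/mman.h>

/* Mirrors the patched vhost_iotlb_entry layout (perm omitted for brevity). */
struct example_iotlb_entry {
	uint64_t iova;    /* guest I/O virtual address */
	uint64_t uaddr;   /* mmap() base address of the backing mapping */
	uint64_t uoffset; /* offset of the buffer within that mapping */
	uint64_t size;    /* size of the buffer */
};

/* IOVA -> host virtual address: the usable start is uaddr + uoffset. */
static inline uint64_t
example_iova_to_hva(const struct example_iotlb_entry *e, uint64_t iova)
{
	return e->uaddr + e->uoffset + (iova - e->iova);
}

/* On invalidation, the unmap must target the mmap() base, not the buffer
 * start; the length assumes the mapping covers exactly uoffset + size bytes. */
static inline void
example_invalidate(const struct example_iotlb_entry *e)
{
	munmap((void *)(uintptr_t)e->uaddr, e->uoffset + e->size);
}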
  

Comments

Chenbo Xia April 25, 2023, 6:20 a.m. UTC | #1
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, March 31, 2023 11:43 PM
> To: dev@dpdk.org; david.marchand@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>; mkp@redhat.com; fbl@redhat.com;
> jasowang@redhat.com; Liang, Cunming <cunming.liang@intel.com>; Xie, Yongji
> <xieyongji@bytedance.com>; echaudro@redhat.com; eperezma@redhat.com;
> amorenoz@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [RFC 08/27] vhost: add offset field to IOTLB entries
> 
> This patch is preliminary work to prepare for VDUSE
> support, for which we need to keep track of the mmapped base
> address and offset so that the mapping can be unmapped later
> when the IOTLB entry is invalidated.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/vhost/iotlb.c      | 30 ++++++++++++++++++------------
>  lib/vhost/iotlb.h      |  2 +-
>  lib/vhost/vhost_user.c |  2 +-
>  3 files changed, 20 insertions(+), 14 deletions(-)
> 
> diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
> index a91115cf1c..51f118bc48 100644
> --- a/lib/vhost/iotlb.c
> +++ b/lib/vhost/iotlb.c
> @@ -17,6 +17,7 @@ struct vhost_iotlb_entry {
> 
>  	uint64_t iova;
>  	uint64_t uaddr;
> +	uint64_t uoffset;
>  	uint64_t size;
>  	uint8_t perm;
>  };
> @@ -27,15 +28,18 @@ static bool
>  vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct
> vhost_iotlb_entry *b,
>  		uint64_t align)
>  {
> -	uint64_t a_end, b_start;
> +	uint64_t a_start, a_end, b_start;
> 
>  	if (a == NULL || b == NULL)
>  		return false;
> 
> +	a_start = a->uaddr + a->uoffset;
> +	b_start = b->uaddr + b->uoffset;
> +
>  	/* Assumes entry a lower than entry b */
> -	RTE_ASSERT(a->uaddr < b->uaddr);
> -	a_end = RTE_ALIGN_CEIL(a->uaddr + a->size, align);
> -	b_start = RTE_ALIGN_FLOOR(b->uaddr, align);
> +	RTE_ASSERT(a_start < b_start);
> +	a_end = RTE_ALIGN_CEIL(a_start + a->size, align);
> +	b_start = RTE_ALIGN_FLOOR(b_start, align);
> 
>  	return a_end > b_start;
>  }
> @@ -43,11 +47,12 @@ vhost_user_iotlb_share_page(struct vhost_iotlb_entry
> *a, struct vhost_iotlb_entr
>  static void
>  vhost_user_iotlb_set_dump(struct virtio_net *dev, struct
> vhost_iotlb_entry *node)
>  {
> -	uint64_t align;
> +	uint64_t align, start;
> 
> -	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
> +	start = node->uaddr + node->uoffset;
> +	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
> 
> -	mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, false,
> align);
> +	mem_set_dump((void *)(uintptr_t)start, node->size, false, align);
>  }
> 
>  static void
> @@ -56,10 +61,10 @@ vhost_user_iotlb_clear_dump(struct virtio_net *dev,
> struct vhost_iotlb_entry *no
>  {
>  	uint64_t align, start, end;
> 
> -	start = node->uaddr;
> -	end = node->uaddr + node->size;
> +	start = node->uaddr + node->uoffset;
> +	end = start + node->size;
> 
> -	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
> +	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
> 
>  	/* Skip first page if shared with previous entry. */
>  	if (vhost_user_iotlb_share_page(prev, node, align))
> @@ -234,7 +239,7 @@ vhost_user_iotlb_cache_random_evict(struct virtio_net
> *dev)
> 
>  void
>  vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova,
> uint64_t uaddr,
> -				uint64_t size, uint8_t perm)
> +				uint64_t uoffset, uint64_t size, uint8_t perm)
>  {
>  	struct vhost_iotlb_entry *node, *new_node;
> 
> @@ -256,6 +261,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev,
> uint64_t iova, uint64_t ua
> 
>  	new_node->iova = iova;
>  	new_node->uaddr = uaddr;
> +	new_node->uoffset = uoffset;
>  	new_node->size = size;
>  	new_node->perm = perm;
> 
> @@ -344,7 +350,7 @@ vhost_user_iotlb_cache_find(struct virtio_net *dev,
> uint64_t iova, uint64_t *siz
> 
>  		offset = iova - node->iova;
>  		if (!vva)
> -			vva = node->uaddr + offset;
> +			vva = node->uaddr + node->uoffset + offset;
> 
>  		mapped += node->size - offset;
>  		iova = node->iova + node->size;
> diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
> index 3490b9e6be..bee36c5903 100644
> --- a/lib/vhost/iotlb.h
> +++ b/lib/vhost/iotlb.h
> @@ -58,7 +58,7 @@ vhost_user_iotlb_wr_unlock_all(struct virtio_net *dev)
>  }
> 
>  void vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova,
> uint64_t uaddr,
> -					uint64_t size, uint8_t perm);
> +					uint64_t uoffset, uint64_t size, uint8_t
> perm);
>  void vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova,
> uint64_t size);
>  uint64_t vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t
> iova,
>  					uint64_t *size, uint8_t perm);
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 81ebef0137..93673d3902 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -2641,7 +2641,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
>  		if (!vva)
>  			return RTE_VHOST_MSG_RESULT_ERR;
> 
> -		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, len, imsg-
> >perm);
> +		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, 0, len,
> imsg->perm);
> 
>  		for (i = 0; i < dev->nr_vring; i++) {
>  			struct vhost_virtqueue *vq = dev->virtqueue[i];
> --
> 2.39.2

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
  

Patch

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index a91115cf1c..51f118bc48 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -17,6 +17,7 @@  struct vhost_iotlb_entry {
 
 	uint64_t iova;
 	uint64_t uaddr;
+	uint64_t uoffset;
 	uint64_t size;
 	uint8_t perm;
 };
@@ -27,15 +28,18 @@  static bool
 vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entry *b,
 		uint64_t align)
 {
-	uint64_t a_end, b_start;
+	uint64_t a_start, a_end, b_start;
 
 	if (a == NULL || b == NULL)
 		return false;
 
+	a_start = a->uaddr + a->uoffset;
+	b_start = b->uaddr + b->uoffset;
+
 	/* Assumes entry a lower than entry b */
-	RTE_ASSERT(a->uaddr < b->uaddr);
-	a_end = RTE_ALIGN_CEIL(a->uaddr + a->size, align);
-	b_start = RTE_ALIGN_FLOOR(b->uaddr, align);
+	RTE_ASSERT(a_start < b_start);
+	a_end = RTE_ALIGN_CEIL(a_start + a->size, align);
+	b_start = RTE_ALIGN_FLOOR(b_start, align);
 
 	return a_end > b_start;
 }
@@ -43,11 +47,12 @@  vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entr
 static void
 vhost_user_iotlb_set_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node)
 {
-	uint64_t align;
+	uint64_t align, start;
 
-	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
+	start = node->uaddr + node->uoffset;
+	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
 
-	mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, false, align);
+	mem_set_dump((void *)(uintptr_t)start, node->size, false, align);
 }
 
 static void
@@ -56,10 +61,10 @@  vhost_user_iotlb_clear_dump(struct virtio_net *dev, struct vhost_iotlb_entry *no
 {
 	uint64_t align, start, end;
 
-	start = node->uaddr;
-	end = node->uaddr + node->size;
+	start = node->uaddr + node->uoffset;
+	end = start + node->size;
 
-	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
+	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
 
 	/* Skip first page if shared with previous entry. */
 	if (vhost_user_iotlb_share_page(prev, node, align))
@@ -234,7 +239,7 @@  vhost_user_iotlb_cache_random_evict(struct virtio_net *dev)
 
 void
 vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
-				uint64_t size, uint8_t perm)
+				uint64_t uoffset, uint64_t size, uint8_t perm)
 {
 	struct vhost_iotlb_entry *node, *new_node;
 
@@ -256,6 +261,7 @@  vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 
 	new_node->iova = iova;
 	new_node->uaddr = uaddr;
+	new_node->uoffset = uoffset;
 	new_node->size = size;
 	new_node->perm = perm;
 
@@ -344,7 +350,7 @@  vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t iova, uint64_t *siz
 
 		offset = iova - node->iova;
 		if (!vva)
-			vva = node->uaddr + offset;
+			vva = node->uaddr + node->uoffset + offset;
 
 		mapped += node->size - offset;
 		iova = node->iova + node->size;
diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index 3490b9e6be..bee36c5903 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -58,7 +58,7 @@  vhost_user_iotlb_wr_unlock_all(struct virtio_net *dev)
 }
 
 void vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
-					uint64_t size, uint8_t perm);
+					uint64_t uoffset, uint64_t size, uint8_t perm);
 void vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t size);
 uint64_t vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t iova,
 					uint64_t *size, uint8_t perm);
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 81ebef0137..93673d3902 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2641,7 +2641,7 @@  vhost_user_iotlb_msg(struct virtio_net **pdev,
 		if (!vva)
 			return RTE_VHOST_MSG_RESULT_ERR;
 
-		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, len, imsg->perm);
+		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, 0, len, imsg->perm);
 
 		for (i = 0; i < dev->nr_vring; i++) {
 			struct vhost_virtqueue *vq = dev->virtqueue[i];
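
For illustration, the vhost-user path above keeps passing 0 as uoffset because its regions are already mapped as a whole; a caller that mmaps a region on demand (as the VDUSE support added later in this series is expected to do) could fill the new argument roughly as in the following hedged sketch. The helper name, fd handling and protection flags are assumptions, not code from this series.

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#include "vhost.h"
#include "iotlb.h"

/* Hypothetical caller: map a file-backed region and register it with a
 * non-zero uoffset so it can later be unmapped from its base address. */
static int
example_map_and_insert(struct virtio_net *dev, int fd, uint64_t iova,
		uint64_t fd_offset, uint64_t len, uint8_t perm)
{
	uint64_t page = (uint64_t)sysconf(_SC_PAGESIZE);
	uint64_t aligned_off = fd_offset & ~(page - 1);
	uint64_t uoffset = fd_offset - aligned_off;
	void *base;

	/* mmap() requires a page-aligned file offset... */
	base = mmap(NULL, uoffset + len, PROT_READ | PROT_WRITE, MAP_SHARED,
			fd, (off_t)aligned_off);
	if (base == MAP_FAILED)
		return -1;

	/* ...so the entry records the mmap() base and the residual offset. */
	vhost_user_iotlb_cache_insert(dev, iova, (uint64_t)(uintptr_t)base,
			uoffset, len, perm);

	return 0;
}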