[v2,3/5] vhost: prepare memory regions addresses

Message ID 20200921064837.15957-4-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: vhost add vectorized data path

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Marvin Liu Sept. 21, 2020, 6:48 a.m. UTC
Prepare the memory regions' guest physical addresses for the vectorized
data path. This information will be used by SIMD instructions to find
the matching region index.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
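
For context, a minimal sketch of the kind of SIMD lookup these arrays
enable (not part of this patch; the helper name, AVX-512 availability,
and zero-filled unused slots are all assumptions): broadcast the guest
physical address, compare it against all eight low/high bounds at once,
and take the index of the first region whose range contains it.

#include <immintrin.h>
#include <stdint.h>

/* Hypothetical helper: map a guest physical address to its region
 * index with one pair of AVX-512 compares. Assumes both arrays hold
 * exactly 8 entries and unused slots are zero-filled, so the
 * gpa < high test can never match them. Returns -1 on no match. */
static inline int
gpa_to_region_index(const uint64_t *lows, const uint64_t *highs,
		uint64_t gpa)
{
	__m512i vgpa  = _mm512_set1_epi64(gpa);
	__m512i vlow  = _mm512_loadu_si512(lows);
	__m512i vhigh = _mm512_loadu_si512(highs);

	/* One mask bit per region where low <= gpa < high. */
	__mmask8 hit = _mm512_cmple_epu64_mask(vlow, vgpa) &
		_mm512_cmplt_epu64_mask(vgpa, vhigh);

	return hit ? __builtin_ctz(hit) : -1;
}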
  

Comments

Maxime Coquelin Oct. 6, 2020, 3:06 p.m. UTC | #1
On 9/21/20 8:48 AM, Marvin Liu wrote:
> Prepare the memory regions' guest physical addresses for the vectorized
> data path. This information will be used by SIMD instructions to find
> the matching region index.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 5a5c945551..4a81f18f01 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -52,6 +52,8 @@
>  
>  #define ASYNC_MAX_POLL_SEG 255
>  
> +#define MAX_NREGIONS 8
> +
>  #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
>  #define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
>  
> @@ -375,6 +377,8 @@ struct inflight_mem_info {
>  struct virtio_net {
>  	/* Frontend (QEMU) memory and memory region information */
>  	struct rte_vhost_memory	*mem;
> +	uint64_t		regions_low_addrs[MAX_NREGIONS];
> +	uint64_t		regions_high_addrs[MAX_NREGIONS];

These arrays eat two cache lines, so it would be better to have them in
a dedicated, dynamically allocated structure.

That would be better for the non-vectorized path, as it avoids polluting
its cache with data it never uses. And it would be better for the
vectorized path too: when the data path needs these addresses, it will
touch exactly two cache lines instead of three.

>  	uint64_t		features;
>  	uint64_t		protocol_features;
>  	int			vid;
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index c3c924faec..89e75e9e71 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -1291,6 +1291,17 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  		}
>  	}
>  
> +	RTE_BUILD_BUG_ON(VHOST_MEMORY_MAX_NREGIONS != 8);
> +	if (dev->vectorized) {
> +		for (i = 0; i < memory->nregions; i++) {
> +			dev->regions_low_addrs[i] =
> +				memory->regions[i].guest_phys_addr;
> +			dev->regions_high_addrs[i] =
> +				memory->regions[i].guest_phys_addr +
> +				memory->regions[i].memory_size;
> +		}
> +	}
> +
>  	for (i = 0; i < dev->nr_vring; i++) {
>  		struct vhost_virtqueue *vq = dev->virtqueue[i];
>  
>
  

Patch

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 5a5c945551..4a81f18f01 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -52,6 +52,8 @@
 
 #define ASYNC_MAX_POLL_SEG 255
 
+#define MAX_NREGIONS 8
+
 #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
 #define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
 
@@ -375,6 +377,8 @@ struct inflight_mem_info {
 struct virtio_net {
 	/* Frontend (QEMU) memory and memory region information */
 	struct rte_vhost_memory	*mem;
+	uint64_t		regions_low_addrs[MAX_NREGIONS];
+	uint64_t		regions_high_addrs[MAX_NREGIONS];
 	uint64_t		features;
 	uint64_t		protocol_features;
 	int			vid;
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index c3c924faec..89e75e9e71 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1291,6 +1291,17 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		}
 	}
 
+	RTE_BUILD_BUG_ON(VHOST_MEMORY_MAX_NREGIONS != 8);
+	if (dev->vectorized) {
+		for (i = 0; i < memory->nregions; i++) {
+			dev->regions_low_addrs[i] =
+				memory->regions[i].guest_phys_addr;
+			dev->regions_high_addrs[i] =
+				memory->regions[i].guest_phys_addr +
+				memory->regions[i].memory_size;
+		}
+	}
+
 	for (i = 0; i < dev->nr_vring; i++) {
 		struct vhost_virtqueue *vq = dev->virtqueue[i];