[3/3] net/virtio-user: fix memory hotplug support in vhost-kernel

Message ID 20180905042852.6212-4-tiwei.bie@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Maxime Coquelin
Headers
Series Some fixes/improvements for virtio-user memory table

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation fail Compilation issues

Commit Message

Tiwei Bie Sept. 5, 2018, 4:28 a.m. UTC
  Due to memory hotplug, it's possible to have many more hugepage-backed
memory regions than vhost-kernel supports, which may cause problems.
A better solution is to have virtio-user pass all the memory ranges
reserved by DPDK to vhost-kernel.

Fixes: 12ecb2f63b12 ("net/virtio-user: support memory hotplug")
Cc: stable@dpdk.org

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 drivers/net/virtio/virtio_user/vhost_kernel.c | 38 +++++++++----------
 1 file changed, 18 insertions(+), 20 deletions(-)
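
For context, the table handed to vhost-kernel is the plain struct
vhost_memory from <linux/vhost.h>, submitted with the
VHOST_SET_MEM_TABLE ioctl. Below is a minimal sketch (the helper name
is illustrative, not from the patch) of describing one VA-contiguous
range with the identity GPA == VA mapping that virtio-user uses:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h> /* struct vhost_memory, VHOST_SET_MEM_TABLE */

    /* Describe one VA range to vhost-kernel. virtio-user's "guest" is
     * the local process, so guest_phys_addr == userspace_addr.
     */
    static int
    set_one_region(int vhostfd, void *base_va, uint64_t len)
    {
        struct vhost_memory *vm;
        int ret;

        vm = malloc(sizeof(*vm) + sizeof(struct vhost_memory_region));
        if (!vm)
            return -1;

        vm->nregions = 1;
        vm->padding = 0;
        vm->regions[0].guest_phys_addr = (uint64_t)(uintptr_t)base_va;
        vm->regions[0].userspace_addr = (uint64_t)(uintptr_t)base_va;
        vm->regions[0].memory_size = len;
        vm->regions[0].flags_padding = 0; /* no flags are defined */

        ret = ioctl(vhostfd, VHOST_SET_MEM_TABLE, vm);
        free(vm);
        return ret;
    }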
  

Comments

Burakov, Anatoly Sept. 7, 2018, 9:44 a.m. UTC | #1
On 05-Sep-18 5:28 AM, Tiwei Bie wrote:
> Due to memory hotplug, it's possible to have many more hugepage-backed
> memory regions than vhost-kernel supports, which may cause problems.
> A better solution is to have virtio-user pass all the memory ranges
> reserved by DPDK to vhost-kernel.
> 
> Fixes: 12ecb2f63b12 ("net/virtio-user: support memory hotplug")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> ---
>   drivers/net/virtio/virtio_user/vhost_kernel.c | 38 +++++++++----------
>   1 file changed, 18 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
> index 897fee0af..9338166d9 100644
> --- a/drivers/net/virtio/virtio_user/vhost_kernel.c
> +++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
> @@ -70,41 +70,41 @@ static uint64_t vhost_req_user_to_kernel[] = {
>   	[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
>   };
>   
> -struct walk_arg {
> -	struct vhost_memory_kernel *vm;
> -	uint32_t region_nr;
> -};
>   static int
> -add_memory_region(const struct rte_memseg_list *msl __rte_unused,
> -		const struct rte_memseg *ms, size_t len, void *arg)
> +add_memseg_list(const struct rte_memseg_list *msl, void *arg)
>   {
> -	struct walk_arg *wa = arg;
> +	struct vhost_memory_kernel *vm = arg;
>   	struct vhost_memory_region *mr;
>   	void *start_addr;
> +	uint64_t len;
>   
> -	if (wa->region_nr >= max_regions)
> +	if (vm->nregions >= max_regions)
>   		return -1;
>   
> -	mr = &wa->vm->regions[wa->region_nr++];
> -	start_addr = ms->addr;
> +	start_addr = msl->base_va;
> +	len = msl->page_sz * msl->memseg_arr.len;
> +
> +	mr = &vm->regions[vm->nregions++];
>   
>   	mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
>   	mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
>   	mr->memory_size = len;
> -	mr->mmap_offset = 0;
> +	mr->mmap_offset = 0; /* flags_padding */
> +
> +	PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
> +			vm->nregions - 1, start_addr, len);
>   
>   	return 0;
>   }
>   
> -/* By default, vhost kernel module allows 64 regions, but DPDK allows
> - * 256 segments. As a relief, below function merges those virtually
> - * adjacent memsegs into one region.
> +/* By default, vhost kernel module allows 64 regions, but DPDK may
> + * have much more memory regions. Below function will treat each
> + * contiguous memory space reserved by DPDK as one region.
>    */
>   static struct vhost_memory_kernel *
>   prepare_vhost_memory_kernel(void)
>   {
>   	struct vhost_memory_kernel *vm;
> -	struct walk_arg wa;
>   
>   	vm = malloc(sizeof(struct vhost_memory_kernel) +
>   			max_regions *
> @@ -112,20 +112,18 @@ prepare_vhost_memory_kernel(void)
>   	if (!vm)
>   		return NULL;
>   
> -	wa.region_nr = 0;
> -	wa.vm = vm;
> +	vm->nregions = 0;
> +	vm->padding = 0;
>   
>   	/*
>   	 * The memory lock has already been taken by memory subsystem
>   	 * or virtio_user_start_device().
>   	 */
> -	if (rte_memseg_contig_walk_thread_unsafe(add_memory_region, &wa) < 0) {
> +	if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
>   		free(vm);
>   		return NULL;
>   	}
>   
> -	vm->nregions = wa.region_nr;
> -	vm->padding = 0;
>   	return vm;
>   }
>   
> 

Doesn't that assume single file segments mode?
  
Tiwei Bie Sept. 7, 2018, 11:37 a.m. UTC | #2
On Fri, Sep 07, 2018 at 10:44:22AM +0100, Burakov, Anatoly wrote:
> On 05-Sep-18 5:28 AM, Tiwei Bie wrote:
> > [...]
> 
> Doesn't that assume single file segments mode?

This is to find out the VA ranges reserved by the memory subsystem.
Why does it need to assume single file segments mode?


  
Burakov, Anatoly Sept. 7, 2018, 12:24 p.m. UTC | #3
On 07-Sep-18 12:37 PM, Tiwei Bie wrote:
> On Fri, Sep 07, 2018 at 10:44:22AM +0100, Burakov, Anatoly wrote:
>> On 05-Sep-18 5:28 AM, Tiwei Bie wrote:
>>> [...]
>>
>> Doesn't that assume single file segments mode?
> 
> This is to find out the VA ranges reserved by the memory subsystem.
> Why does it need to assume single file segments mode?

If you are not in single-file segments mode, each individual page in a 
VA-contiguous area will be behind a different fd - so it will be part of 
a different region, would it not?

  
Tiwei Bie Sept. 10, 2018, 4:04 a.m. UTC | #4
On Fri, Sep 07, 2018 at 01:24:05PM +0100, Burakov, Anatoly wrote:
> On 07-Sep-18 12:37 PM, Tiwei Bie wrote:
> > On Fri, Sep 07, 2018 at 10:44:22AM +0100, Burakov, Anatoly wrote:
> > > On 05-Sep-18 5:28 AM, Tiwei Bie wrote:
> > > > [...]
> > > 
> > > Doesn't that assume single file segments mode?
> > 
> > This is to find out the VA ranges reserved by the memory subsystem.
> > Why does it need to assume single file segments mode?
> 
> If you are not in single-file segments mode, each individual page in a
> VA-contiguous area will be behind a different fd - so it will be part of a
> different region, would it not?

The above code is for vhost-kernel. The kernel doesn't need
the fds to access the virtio-user process's memory; it just
needs to know the mappings between GPA (guest physical
address) and VA (virtio-user's virtual address).
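
Put differently (the field names below mirror the real messages; the
struct name itself is made up for this sketch):

    #include <stdint.h>

    /* vhost-user: the backend lives in another process, so each
     * region must carry an fd (passed as SCM_RIGHTS ancillary data)
     * plus an offset for the backend to mmap() the memory itself,
     * which is why per-page fds would matter there.
     */
    struct memory_region_sketch {
        uint64_t guest_phys_addr;
        uint64_t memory_size;
        uint64_t userspace_addr;
        uint64_t mmap_offset; /* vhost-user: offset into the fd */
    };

    /* vhost-kernel: the kernel can already reach the caller's address
     * space, so a region is pure address arithmetic (GPA range to VA
     * range) and the last field is unused padding. No fds involved.
     */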


  
Maxime Coquelin Sept. 11, 2018, 1:10 p.m. UTC | #5
On 09/05/2018 06:28 AM, Tiwei Bie wrote:
> Due to memory hotplug, it's possible to have many more hugepage-backed
> memory regions than vhost-kernel supports, which may cause problems.
> A better solution is to have virtio-user pass all the memory ranges
> reserved by DPDK to vhost-kernel.
> 
> Fixes: 12ecb2f63b12 ("net/virtio-user: support memory hotplug")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> ---
>   drivers/net/virtio/virtio_user/vhost_kernel.c | 38 +++++++++----------
>   1 file changed, 18 insertions(+), 20 deletions(-)


Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
  
Burakov, Anatoly Sept. 17, 2018, 10:18 a.m. UTC | #6
On 10-Sep-18 5:04 AM, Tiwei Bie wrote:
> On Fri, Sep 07, 2018 at 01:24:05PM +0100, Burakov, Anatoly wrote:
>> On 07-Sep-18 12:37 PM, Tiwei Bie wrote:
>>> On Fri, Sep 07, 2018 at 10:44:22AM +0100, Burakov, Anatoly wrote:
>>>> On 05-Sep-18 5:28 AM, Tiwei Bie wrote:
>>>>> [...]
>>>>
>>>> Doesn't that assume single file segments mode?
>>>
>>> This is to find out the VA ranges reserved by the memory subsystem.
>>> Why does it need to assume single file segments mode?
>>
>> If you are not in single-file segments mode, each individual page in a
>> VA-contiguous area will be behind a different fd - so it will be part of a
>> different region, would it not?
> 
> The above code is for vhost-kernel. The kernel doesn't need
> the fds to access the virtio-user process's memory; it just
> needs to know the mappings between GPA (guest physical
> address) and VA (virtio-user's virtual address).
> 

Ah OK. Thanks for clarification!
  

Patch

diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index 897fee0af..9338166d9 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -70,41 +70,41 @@  static uint64_t vhost_req_user_to_kernel[] = {
 	[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
 };
 
-struct walk_arg {
-	struct vhost_memory_kernel *vm;
-	uint32_t region_nr;
-};
 static int
-add_memory_region(const struct rte_memseg_list *msl __rte_unused,
-		const struct rte_memseg *ms, size_t len, void *arg)
+add_memseg_list(const struct rte_memseg_list *msl, void *arg)
 {
-	struct walk_arg *wa = arg;
+	struct vhost_memory_kernel *vm = arg;
 	struct vhost_memory_region *mr;
 	void *start_addr;
+	uint64_t len;
 
-	if (wa->region_nr >= max_regions)
+	if (vm->nregions >= max_regions)
 		return -1;
 
-	mr = &wa->vm->regions[wa->region_nr++];
-	start_addr = ms->addr;
+	start_addr = msl->base_va;
+	len = msl->page_sz * msl->memseg_arr.len;
+
+	mr = &vm->regions[vm->nregions++];
 
 	mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
 	mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
 	mr->memory_size = len;
-	mr->mmap_offset = 0;
+	mr->mmap_offset = 0; /* flags_padding */
+
+	PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
+			vm->nregions - 1, start_addr, len);
 
 	return 0;
 }
 
-/* By default, vhost kernel module allows 64 regions, but DPDK allows
- * 256 segments. As a relief, below function merges those virtually
- * adjacent memsegs into one region.
+/* By default, vhost kernel module allows 64 regions, but DPDK may
+ * have much more memory regions. Below function will treat each
+ * contiguous memory space reserved by DPDK as one region.
  */
 static struct vhost_memory_kernel *
 prepare_vhost_memory_kernel(void)
 {
 	struct vhost_memory_kernel *vm;
-	struct walk_arg wa;
 
 	vm = malloc(sizeof(struct vhost_memory_kernel) +
 			max_regions *
@@ -112,20 +112,18 @@  prepare_vhost_memory_kernel(void)
 	if (!vm)
 		return NULL;
 
-	wa.region_nr = 0;
-	wa.vm = vm;
+	vm->nregions = 0;
+	vm->padding = 0;
 
 	/*
 	 * The memory lock has already been taken by memory subsystem
 	 * or virtio_user_start_device().
 	 */
-	if (rte_memseg_contig_walk_thread_unsafe(add_memory_region, &wa) < 0) {
+	if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
 		free(vm);
 		return NULL;
 	}
 
-	vm->nregions = wa.region_nr;
-	vm->padding = 0;
 	return vm;
 }
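
Note on sizing: with this change, one region covers the whole VA span
a memseg list reserves (page_sz * memseg_arr.len), whether or not the
pages inside it are currently allocated, so memory hotplugged later
within that span needs no table update. The number of memseg lists
(roughly a few per page size per NUMA socket) stays well below
vhost-kernel's default limit of 64 regions. A hedged sketch of the
calling side, simplified from how vhost_kernel_ioctl() submits the
table in this file:

    /* Build the table and push it to an open vhost fd. Assumes the
     * context of vhost_kernel.c, i.e. prepare_vhost_memory_kernel()
     * and struct vhost_memory_kernel from the patch above.
     */
    static int
    set_mem_table_sketch(int vhostfd)
    {
        struct vhost_memory_kernel *vm;
        int ret;

        vm = prepare_vhost_memory_kernel();
        if (!vm)
            return -1;

        ret = ioctl(vhostfd, VHOST_SET_MEM_TABLE, vm);
        free(vm);
        return ret;
    }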