[v2] common/mlx5: Optimize mlx5 mempool get extmem

Message ID 20231010143800.102459-1-aconole@redhat.com (mailing list archive)
State New
Delegated to: Raslan Darawsheh
Headers
Series [v2] common/mlx5: Optimize mlx5 mempool get extmem |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/intel-Functional success Functional PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-sample-apps-testing success Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS

Commit Message

Aaron Conole Oct. 10, 2023, 2:38 p.m. UTC
  From: John Romein <romein@astron.nl>

This patch reduces the time to allocate and register tens of gigabytes
of GPU memory from hours to seconds, by sorting the heap only once
instead of for each object in the mempool.

Fixes: 690b2a88c2f7 ("common/mlx5: add mempool registration facilities")

Signed-off-by: John Romein <romein@astron.nl>
---
 drivers/common/mlx5/mlx5_common_mr.c | 69 ++++++++--------------------
 1 file changed, 20 insertions(+), 49 deletions(-)
  

Comments

Slava Ovsiienko Nov. 1, 2023, 8:29 a.m. UTC | #1
Hi,

Thank you for this optimizing patch.
My concern is this line:
> +	heap = malloc(mp->size * sizeof(struct mlx5_range));
The pool size can be huge, and it might cause a large memory allocation 
(on the host CPU side).

What is the reason for the "hours" of registration time? Is it the realloc per pool element?
The mp struct has "struct rte_mempool_memhdr_list mem_list" member.
I think we should consider populating this list with data from
"struct rte_pktmbuf_extmem *ext_mem" on pool creation.

Because of this, it seems the rte_mempool_mem_iter() functionality is
completely broken for pools with external memory, and that's why
mlx5 implemented a dedicated branch to handle their registration.

With best regards,
Slava

> -----Original Message-----
> From: Aaron Conole <aconole@redhat.com>
> Sent: Tuesday, October 10, 2023 5:38 PM
> To: dev@dpdk.org
> Cc: John Romein <romein@astron.nl>; Raslan Darawsheh
> <rasland@nvidia.com>; Elena Agostini <eagostini@nvidia.com>; Dmitry
> Kozlyuk <dkozlyuk@nvidia.com>; Matan Azrad <matan@nvidia.com>; Slava
> Ovsiienko <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>;
> Suanming Mou <suanmingm@nvidia.com>
> Subject: [PATCH v2] common/mlx5: Optimize mlx5 mempool get extmem
> 
> From: John Romein <romein@astron.nl>
> 
> This patch reduces the time to allocate and register tens of gigabytes of GPU
> memory from hours to seconds, by sorting the heap only once instead of for
> each object in the mempool.
> 
> Fixes: 690b2a88c2f7 ("common/mlx5: add mempool registration facilities")
> 
> Signed-off-by: John Romein <romein@astron.nl>
> ---
>  drivers/common/mlx5/mlx5_common_mr.c | 69 ++++++++--------------------
>  1 file changed, 20 insertions(+), 49 deletions(-)
> 
> diff --git a/drivers/common/mlx5/mlx5_common_mr.c
> b/drivers/common/mlx5/mlx5_common_mr.c
> index 40ff9153bd..77b66e444b 100644
> --- a/drivers/common/mlx5/mlx5_common_mr.c
> +++ b/drivers/common/mlx5/mlx5_common_mr.c
> @@ -1389,63 +1389,23 @@ mlx5_mempool_get_chunks(struct
> rte_mempool *mp, struct mlx5_range **out,
>  	return 0;
>  }
> 
> -struct mlx5_mempool_get_extmem_data {
> -	struct mlx5_range *heap;
> -	unsigned int heap_size;
> -	int ret;
> -};
> -
>  static void
>  mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
>  			   void *obj, unsigned int obj_idx)
>  {
> -	struct mlx5_mempool_get_extmem_data *data = opaque;
> +	struct mlx5_range *heap = opaque;
>  	struct rte_mbuf *mbuf = obj;
>  	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
> -	struct mlx5_range *seg, *heap;
>  	struct rte_memseg_list *msl;
>  	size_t page_size;
>  	uintptr_t page_start;
> -	unsigned int pos = 0, len = data->heap_size, delta;
> 
>  	RTE_SET_USED(mp);
> -	RTE_SET_USED(obj_idx);
> -	if (data->ret < 0)
> -		return;
> -	/* Binary search for an already visited page. */
> -	while (len > 1) {
> -		delta = len / 2;
> -		if (addr < data->heap[pos + delta].start) {
> -			len = delta;
> -		} else {
> -			pos += delta;
> -			len -= delta;
> -		}
> -	}
> -	if (data->heap != NULL) {
> -		seg = &data->heap[pos];
> -		if (seg->start <= addr && addr < seg->end)
> -			return;
> -	}
> -	/* Determine the page boundaries and remember them. */
> -	heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
> -	if (heap == NULL) {
> -		free(data->heap);
> -		data->heap = NULL;
> -		data->ret = -1;
> -		return;
> -	}
> -	data->heap = heap;
> -	data->heap_size++;
> -	seg = &heap[data->heap_size - 1];
>  	msl = rte_mem_virt2memseg_list((void *)addr);
>  	page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
>  	page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
> -	seg->start = page_start;
> -	seg->end = page_start + page_size;
> -	/* Maintain the heap order. */
> -	qsort(data->heap, data->heap_size, sizeof(heap[0]),
> -	      mlx5_range_compare_start);
> +	heap[obj_idx].start = page_start;
> +	heap[obj_idx].end = page_start + page_size;
>  }
> 
>  /**
> @@ -1457,15 +1417,26 @@ static int
>  mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range
> **out,
>  			unsigned int *out_n)
>  {
> -	struct mlx5_mempool_get_extmem_data data;
> +	unsigned int out_size = 1;
> +	struct mlx5_range *heap;
> 
>  	DRV_LOG(DEBUG, "Recovering external pinned pages of mempool
> %s",
>  		mp->name);
> -	memset(&data, 0, sizeof(data));
> -	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb,
> &data);
> -	*out = data.heap;
> -	*out_n = data.heap_size;
> -	return data.ret;
> +	heap = malloc(mp->size * sizeof(struct mlx5_range));
> +	if (heap == NULL)
> +		return -1;
> +	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, heap);
> +	qsort(heap, mp->size, sizeof(heap[0]), mlx5_range_compare_start);
> +	/* remove duplicates */
> +	for (unsigned int i = 1; i < mp->size; i++)
> +		if (heap[out_size - 1].start != heap[i].start)
> +			heap[out_size++] = heap[i];
> +	heap = realloc(heap, out_size * sizeof(struct mlx5_range));
> +	if (heap == NULL)
> +		return -1;
> +	*out = heap;
> +	*out_n = out_size;
> +	return 0;
>  }
> 
>  /**
> --
> 2.41.0
  
John Romein Nov. 1, 2023, 9:21 p.m. UTC | #2
Dear Slava,

Thank you for looking at the patch.  With the original code, I saw that 
the application spent literally hours in this function during program 
start up, if tens of gigabytes of GPU memory are registered.  This was 
due to qsort being invoked for every new added item (to keep the list 
sorted).  So I tried to write equivalent code that sorts the list only 
once, after all items were added.  At least for our application, this 
works well and is /much/ faster, as the complexity decreased from n^2 
log(n) to n log(n).  But I must admit that I have no idea /what/ is 
being sorted, or why; I only understand this isolated piece of code (or 
at least I think so).  So if you think there are better ways to 
initialize the list, then I am sure you will be absolutely right.  But I 
will not be able to implement this, as I do not understand the full 
context of the code.

Kind Regards,  John

On 01-11-2023 09:29, Slava Ovsiienko wrote:
> Hi,
>
> Thank you for this optimizing patch.
> My concern is this line:
>> +	heap = malloc(mp->size * sizeof(struct mlx5_range));
> The pool size can be huge and it might cause the large memory allocation
> (on host CPU side).
>
> What is the reason causing "hours" of registering? Reallocs per each pool element?
> The mp struct has "struct rte_mempool_memhdr_list mem_list" member.
> I think we should consider populating this list with data from
> "struct rte_pktmbuf_extmem *ext_mem" on pool creation.
>
> Because of it seems the rte_mempool_mem_iter() functionality is
> completely broken for the pools with external memory, and that's why
> mlx5 implemented the dedicated branch to handle their registration.
>
> With best regards,
> Slava
>
>> -----Original Message-----
>> From: Aaron Conole<aconole@redhat.com>
>> Sent: Tuesday, October 10, 2023 5:38 PM
>> To:dev@dpdk.org
>> Cc: John Romein<romein@astron.nl>; Raslan Darawsheh
>> <rasland@nvidia.com>; Elena Agostini<eagostini@nvidia.com>; Dmitry
>> Kozlyuk<dkozlyuk@nvidia.com>; Matan Azrad<matan@nvidia.com>; Slava
>> Ovsiienko<viacheslavo@nvidia.com>; Ori Kam<orika@nvidia.com>;
>> Suanming Mou<suanmingm@nvidia.com>
>> Subject: [PATCH v2] common/mlx5: Optimize mlx5 mempool get extmem
>>
>> From: John Romein<romein@astron.nl>
>>
>> This patch reduces the time to allocate and register tens of gigabytes of GPU
>> memory from hours to seconds, by sorting the heap only once instead of for
>> each object in the mempool.
>>
>> Fixes: 690b2a88c2f7 ("common/mlx5: add mempool registration facilities")
>>
>> Signed-off-by: John Romein<romein@astron.nl>
>> ---
>>   drivers/common/mlx5/mlx5_common_mr.c | 69 ++++++++--------------------
>>   1 file changed, 20 insertions(+), 49 deletions(-)
>>
>> diff --git a/drivers/common/mlx5/mlx5_common_mr.c
>> b/drivers/common/mlx5/mlx5_common_mr.c
>> index 40ff9153bd..77b66e444b 100644
>> --- a/drivers/common/mlx5/mlx5_common_mr.c
>> +++ b/drivers/common/mlx5/mlx5_common_mr.c
>> @@ -1389,63 +1389,23 @@ mlx5_mempool_get_chunks(struct
>> rte_mempool *mp, struct mlx5_range **out,
>>   	return 0;
>>   }
>>
>> -struct mlx5_mempool_get_extmem_data {
>> -	struct mlx5_range *heap;
>> -	unsigned int heap_size;
>> -	int ret;
>> -};
>> -
>>   static void
>>   mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
>>   			   void *obj, unsigned int obj_idx)
>>   {
>> -	struct mlx5_mempool_get_extmem_data *data = opaque;
>> +	struct mlx5_range *heap = opaque;
>>   	struct rte_mbuf *mbuf = obj;
>>   	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
>> -	struct mlx5_range *seg, *heap;
>>   	struct rte_memseg_list *msl;
>>   	size_t page_size;
>>   	uintptr_t page_start;
>> -	unsigned int pos = 0, len = data->heap_size, delta;
>>
>>   	RTE_SET_USED(mp);
>> -	RTE_SET_USED(obj_idx);
>> -	if (data->ret < 0)
>> -		return;
>> -	/* Binary search for an already visited page. */
>> -	while (len > 1) {
>> -		delta = len / 2;
>> -		if (addr < data->heap[pos + delta].start) {
>> -			len = delta;
>> -		} else {
>> -			pos += delta;
>> -			len -= delta;
>> -		}
>> -	}
>> -	if (data->heap != NULL) {
>> -		seg = &data->heap[pos];
>> -		if (seg->start <= addr && addr < seg->end)
>> -			return;
>> -	}
>> -	/* Determine the page boundaries and remember them. */
>> -	heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
>> -	if (heap == NULL) {
>> -		free(data->heap);
>> -		data->heap = NULL;
>> -		data->ret = -1;
>> -		return;
>> -	}
>> -	data->heap = heap;
>> -	data->heap_size++;
>> -	seg = &heap[data->heap_size - 1];
>>   	msl = rte_mem_virt2memseg_list((void *)addr);
>>   	page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
>>   	page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
>> -	seg->start = page_start;
>> -	seg->end = page_start + page_size;
>> -	/* Maintain the heap order. */
>> -	qsort(data->heap, data->heap_size, sizeof(heap[0]),
>> -	      mlx5_range_compare_start);
>> +	heap[obj_idx].start = page_start;
>> +	heap[obj_idx].end = page_start + page_size;
>>   }
>>
>>   /**
>> @@ -1457,15 +1417,26 @@ static int
>>   mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range
>> **out,
>>   			unsigned int *out_n)
>>   {
>> -	struct mlx5_mempool_get_extmem_data data;
>> +	unsigned int out_size = 1;
>> +	struct mlx5_range *heap;
>>
>>   	DRV_LOG(DEBUG, "Recovering external pinned pages of mempool
>> %s",
>>   		mp->name);
>> -	memset(&data, 0, sizeof(data));
>> -	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb,
>> &data);
>> -	*out = data.heap;
>> -	*out_n = data.heap_size;
>> -	return data.ret;
>> +	heap = malloc(mp->size * sizeof(struct mlx5_range));
>> +	if (heap == NULL)
>> +		return -1;
>> +	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, heap);
>> +	qsort(heap, mp->size, sizeof(heap[0]), mlx5_range_compare_start);
>> +	/* remove duplicates */
>> +	for (unsigned int i = 1; i < mp->size; i++)
>> +		if (heap[out_size - 1].start != heap[i].start)
>> +			heap[out_size++] = heap[i];
>> +	heap = realloc(heap, out_size * sizeof(struct mlx5_range));
>> +	if (heap == NULL)
>> +		return -1;
>> +	*out = heap;
>> +	*out_n = out_size;
>> +	return 0;
>>   }
>>
>>   /**
>> --
>> 2.41.0
  

Patch

diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 40ff9153bd..77b66e444b 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -1389,63 +1389,23 @@  mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
 	return 0;
 }
 
-struct mlx5_mempool_get_extmem_data {
-	struct mlx5_range *heap;
-	unsigned int heap_size;
-	int ret;
-};
-
 static void
 mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
 			   void *obj, unsigned int obj_idx)
 {
-	struct mlx5_mempool_get_extmem_data *data = opaque;
+	struct mlx5_range *heap = opaque;
 	struct rte_mbuf *mbuf = obj;
 	uintptr_t addr = (uintptr_t)mbuf->buf_addr;
-	struct mlx5_range *seg, *heap;
 	struct rte_memseg_list *msl;
 	size_t page_size;
 	uintptr_t page_start;
-	unsigned int pos = 0, len = data->heap_size, delta;
 
 	RTE_SET_USED(mp);
-	RTE_SET_USED(obj_idx);
-	if (data->ret < 0)
-		return;
-	/* Binary search for an already visited page. */
-	while (len > 1) {
-		delta = len / 2;
-		if (addr < data->heap[pos + delta].start) {
-			len = delta;
-		} else {
-			pos += delta;
-			len -= delta;
-		}
-	}
-	if (data->heap != NULL) {
-		seg = &data->heap[pos];
-		if (seg->start <= addr && addr < seg->end)
-			return;
-	}
-	/* Determine the page boundaries and remember them. */
-	heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
-	if (heap == NULL) {
-		free(data->heap);
-		data->heap = NULL;
-		data->ret = -1;
-		return;
-	}
-	data->heap = heap;
-	data->heap_size++;
-	seg = &heap[data->heap_size - 1];
 	msl = rte_mem_virt2memseg_list((void *)addr);
 	page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
 	page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
-	seg->start = page_start;
-	seg->end = page_start + page_size;
-	/* Maintain the heap order. */
-	qsort(data->heap, data->heap_size, sizeof(heap[0]),
-	      mlx5_range_compare_start);
+	heap[obj_idx].start = page_start;
+	heap[obj_idx].end = page_start + page_size;
 }
 
 /**
@@ -1457,15 +1417,26 @@  static int
 mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
 			unsigned int *out_n)
 {
-	struct mlx5_mempool_get_extmem_data data;
+	unsigned int out_size = 1;
+	struct mlx5_range *heap;
 
 	DRV_LOG(DEBUG, "Recovering external pinned pages of mempool %s",
 		mp->name);
-	memset(&data, 0, sizeof(data));
-	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, &data);
-	*out = data.heap;
-	*out_n = data.heap_size;
-	return data.ret;
+	heap = malloc(mp->size * sizeof(struct mlx5_range));
+	if (heap == NULL)
+		return -1;
+	rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, heap);
+	qsort(heap, mp->size, sizeof(heap[0]), mlx5_range_compare_start);
+	/* remove duplicates */
+	for (unsigned int i = 1; i < mp->size; i++)
+		if (heap[out_size - 1].start != heap[i].start)
+			heap[out_size++] = heap[i];
+	heap = realloc(heap, out_size * sizeof(struct mlx5_range));
+	if (heap == NULL)
+		return -1;
+	*out = heap;
+	*out_n = out_size;
+	return 0;
 }
 
 /**