[dpdk-dev,2/3] mem: improve memory preallocation on 32-bit

Message ID 9bd2877ea94d32f99ea6d17afb6e5f16f4340649.1524235066.git.anatoly.burakov@intel.com (mailing list archive)
State Superseded, archived
Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Anatoly Burakov April 20, 2018, 2:41 p.m. UTC
  Previously, if we couldn't preallocate VA space on 32-bit for
one page size, we simply bailed out, even though we could've
tried allocating VA space with other page sizes.

For example, if the user had both 1G and 2M pages enabled and had
asked DPDK to allocate memory on both sockets, DPDK would've tried
to allocate VA space for a single 1G page on each socket, failed,
and never tried again, even though it could've reserved the same
1G of VA space as 512x2M pages.

Fix this by retrying with different page sizes when VA space
reservation fails.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/common/eal_common_memory.c | 42 +++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 7 deletions(-)
  
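The control flow of the fix, in isolation, is to treat a failed VA
reservation for one page size as a soft error and fall through to the
next (smaller) page size, only failing hard when no page size yields
any VA space for a socket. A minimal sketch of that pattern follows;
the try_reserve_va() helper and the surrounding structure are
illustrative stand-ins for the real alloc_memseg_list()/alloc_va_space()
pair, not the actual EAL code:

	#include <stdint.h>

	/* hypothetical helper: reserve VA space for one page size,
	 * returning the number of bytes reserved, or 0 on failure */
	extern uint64_t try_reserve_va(uint64_t page_sz, uint64_t max_mem);

	static int
	reserve_socket_mem(const uint64_t *page_sizes, int n_sizes,
			uint64_t max_mem)
	{
		uint64_t reserved = 0;
		int i;

		/* walk page sizes from largest to smallest */
		for (i = 0; i < n_sizes && reserved < max_mem; i++) {
			uint64_t got = try_reserve_va(page_sizes[i],
					max_mem - reserved);

			if (got == 0)
				continue; /* retry with the next page size */
			reserved += got;
		}
		/* only a hard error if no page size worked at all */
		return reserved == 0 ? -1 : 0;
	}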

Patch

diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index c0d4673..d819abe 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -142,6 +142,17 @@  get_mem_amount(uint64_t page_sz, uint64_t max_mem)
 }
 
 static int
+free_memseg_list(struct rte_memseg_list *msl)
+{
+	if (rte_fbarray_destroy(&msl->memseg_arr)) {
+		RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
+		return -1;
+	}
+	memset(msl, 0, sizeof(*msl));
+	return 0;
+}
+
+static int
 alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
 		uint64_t max_mem, int socket_id, int type_msl_idx)
 {
@@ -339,24 +350,41 @@  memseg_primary_init_32(void)
 					return -1;
 				}
 
-				msl = &mcfg->memsegs[msl_idx++];
+				msl = &mcfg->memsegs[msl_idx];
 
 				if (alloc_memseg_list(msl, hugepage_sz,
 						max_pagesz_mem, socket_id,
-						type_msl_idx))
+						type_msl_idx)) {
+					/* failing to allocate a memseg list is
+					 * a serious error.
+					 */
+					RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
 					return -1;
+				}
+
+				if (alloc_va_space(msl)) {
+					/* if we couldn't allocate VA space, we
+					 * can try with smaller page sizes.
+					 */
+					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
+					/* deallocate memseg list */
+					if (free_memseg_list(msl))
+						return -1;
+					break;
+				}
 
 				total_segs += msl->memseg_arr.len;
 				cur_pagesz_mem = total_segs * hugepage_sz;
 				type_msl_idx++;
-
-				if (alloc_va_space(msl)) {
-					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
-					return -1;
-				}
+				msl_idx++;
 			}
 			cur_socket_mem += cur_pagesz_mem;
 		}
+		if (cur_socket_mem == 0) {
+			RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
+				socket_id);
+			return -1;
+		}
 	}
 
 	return 0;