[dpdk-dev,RFC,v2,13/23] eal: make use of dynamic memory allocation for init

Message ID a345b702ab7403f344e4b28674c7c9e78a61030d.1513681966.git.anatoly.burakov@intel.com (mailing list archive)
State Superseded, archived

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  fail      Compilation issues

Commit Message

Burakov, Anatoly Dec. 19, 2017, 11:14 a.m. UTC
  Add a new (non-legacy) memory init path for EAL. It uses the
new dynamic allocation facilities, although it is only run at
startup.

If no -m or --socket-mem switches are specified, the new init
will not allocate anything, whereas if those switches are passed,
the appropriate number of pages is requested, just like for
legacy init.
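
As a rough sketch of what "appropriate" means here (this simplifies
the calc_num_pages_per_socket() logic used in the patch below to a
single hugepage size; the helper name is made up for illustration):

#include <stdint.h>

/* Illustrative only: how many hugepages of size 'hugepage_sz' are
 * needed to satisfy a request of 'socket_mem' bytes on one socket.
 * The real code spreads the request across all configured hugepage
 * sizes and sockets.
 */
static unsigned int
pages_for_socket(uint64_t socket_mem, uint64_t hugepage_sz)
{
	/* round up so at least the requested amount is reserved */
	return (unsigned int)((socket_mem + hugepage_sz - 1) / hugepage_sz);
}

For example, --socket-mem=1024,1024 with 2M hugepages would come out
to 512 pages on each of sockets 0 and 1.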

Since rte_malloc support for dynamic allocation comes in later
patches, running DPDK without the --socket-mem or -m switches will
fail as of this patch.

Also, allocated pages are not guaranteed to be physically
contiguous (they may still happen to be, by accident) unless
IOVA_AS_VA mode is used.
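
To illustrate the last point, code that relies on physical contiguity
of neighbouring pages would now have to verify it explicitly, e.g.
along these lines (a sketch only; rte_mem_virt2iova() is the existing
EAL API, the helper itself is hypothetical):

#include <stdbool.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_memory.h>

/* Hypothetical check: is the hugepage following 'va' in virtual
 * address space also contiguous in IOVA space? Under IOVA_AS_VA
 * mode this always holds; with pages allocated one at a time it
 * may or may not.
 */
static bool
next_page_iova_contig(const void *va, uint64_t hugepage_sz)
{
	rte_iova_t first = rte_mem_virt2iova(va);
	rte_iova_t next = rte_mem_virt2iova(RTE_PTR_ADD(va, hugepage_sz));

	return first != RTE_BAD_IOVA && next == first + hugepage_sz;
}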

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/linuxapp/eal/eal_memory.c | 60 ++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)
  

Patch

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 59f6889..7cc4a55 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -68,6 +68,7 @@ 
 #include <rte_string_fns.h>
 
 #include "eal_private.h"
+#include "eal_memalloc.h"
 #include "eal_internal_cfg.h"
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
@@ -1322,6 +1323,61 @@  eal_legacy_hugepage_init(void)
 	return -1;
 }
 
+static int
+eal_hugepage_init(void) {
+	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+	uint64_t memory[RTE_MAX_NUMA_NODES];
+	int hp_sz_idx, socket_id;
+
+	test_phys_addrs_available();
+
+	memset(used_hp, 0, sizeof(used_hp));
+
+	for (hp_sz_idx = 0;
+			hp_sz_idx < (int) internal_config.num_hugepage_sizes;
+			hp_sz_idx++) {
+		/* initialize used_hp hugepage sizes */
+		struct hugepage_info *hpi;
+		hpi = &internal_config.hugepage_info[hp_sz_idx];
+		used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+	}
+
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
+		memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];
+
+	/* calculate final number of pages */
+	if (calc_num_pages_per_socket(memory,
+			internal_config.hugepage_info, used_hp,
+			internal_config.num_hugepage_sizes) < 0)
+		return -1;
+
+	for (hp_sz_idx = 0;
+			hp_sz_idx < (int) internal_config.num_hugepage_sizes;
+			hp_sz_idx++) {
+		for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
+				socket_id++) {
+			struct hugepage_info *hpi = &used_hp[hp_sz_idx];
+			unsigned int num_pages = hpi->num_pages[socket_id];
+			int num_pages_alloc;
+
+			if (num_pages == 0)
+				continue;
+
+			RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
+				num_pages, hpi->hugepage_sz >> 20, socket_id);
+
+			num_pages_alloc = eal_memalloc_alloc_page_bulk(NULL,
+					num_pages,
+					hpi->hugepage_sz, socket_id,
+					true);
+			if (num_pages_alloc < 0)
+				return -1;
+		}
+	}
+	return 0;
+}
+
 /*
  * uses fstat to report the size of a file on disk
  */
@@ -1533,6 +1589,8 @@  int
 rte_eal_hugepage_init(void) {
 	if (internal_config.legacy_mem)
 		return eal_legacy_hugepage_init();
+	else
+		return eal_hugepage_init();
 	return -1;
 }
 
@@ -1540,6 +1598,8 @@  int
 rte_eal_hugepage_attach(void) {
 	if (internal_config.legacy_mem)
 		return eal_legacy_hugepage_attach();
+	else
+		RTE_LOG(ERR, EAL, "Secondary processes aren't supported yet\n");
 	return -1;
 }