get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied are changed).

put:
Update a patch (full update).
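
Reading a patch needs no credentials; updating one via PATCH or PUT requires
authentication (typically a maintainer account with an API token). A minimal
sketch of driving these endpoints from a script, assuming the Python
"requests" library and a hypothetical token value:

    import requests

    BASE = "https://patches.dpdk.org/api"
    TOKEN = "0123abcd..."  # hypothetical; a real token comes from your Patchwork profile

    # GET: show a patch (read-only, no authentication needed)
    resp = requests.get(f"{BASE}/patches/71525/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH: partial update -- only the supplied fields change
    resp = requests.patch(
        f"{BASE}/patches/71525/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted", "archived": True},
    )
    resp.raise_for_status()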

GET /api/patches/71525/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 71525,
    "url": "http://patches.dpdk.org/api/patches/71525/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200615004354.14380-6-dmitry.kozliuk@gmail.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200615004354.14380-6-dmitry.kozliuk@gmail.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200615004354.14380-6-dmitry.kozliuk@gmail.com",
    "date": "2020-06-15T00:43:47",
    "name": "[v9,05/12] eal/mem: extract common code for dynamic memory allocation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "a9d17bb4f2e39c7f5601dcb5157aa4ddecbfa54f",
    "submitter": {
        "id": 1581,
        "url": "http://patches.dpdk.org/api/people/1581/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dmitry.kozliuk@gmail.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200615004354.14380-6-dmitry.kozliuk@gmail.com/mbox/",
    "series": [
        {
            "id": 10455,
            "url": "http://patches.dpdk.org/api/series/10455/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=10455",
            "date": "2020-06-15T00:43:42",
            "name": "Windows basic memory management",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/10455/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/71525/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/71525/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D5FBBA04A3;\n\tMon, 15 Jun 2020 02:45:04 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 98AA21BC24;\n\tMon, 15 Jun 2020 02:44:24 +0200 (CEST)",
            "from mail-lj1-f196.google.com (mail-lj1-f196.google.com\n [209.85.208.196]) by dpdk.org (Postfix) with ESMTP id E9ED04C9D\n for <dev@dpdk.org>; Mon, 15 Jun 2020 02:44:09 +0200 (CEST)",
            "by mail-lj1-f196.google.com with SMTP id i27so17049059ljb.12\n for <dev@dpdk.org>; Sun, 14 Jun 2020 17:44:09 -0700 (PDT)",
            "from localhost.localdomain (broadband-37-110-65-23.ip.moscow.rt.ru.\n [37.110.65.23])\n by smtp.gmail.com with ESMTPSA id f19sm4176342lfk.24.2020.06.14.17.44.07\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Sun, 14 Jun 2020 17:44:07 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding;\n bh=VYQ5p2nPpBR5DJvILXrUnlOfJmOiXgnGWSaG5+Zoefw=;\n b=mmWrXxUvie4KuuLm7B2kRcnWeGUSKit2s9CsFbe9MBHNJDjyCytCzUT+XsKuzsrR0k\n +J7N0YNM/uxuN99SliAxxRdxZOqkX/NhZjff4WrW/MozhtY8szf4NIowd3NIl25CEer6\n Yh55l0JbzMsjKWuoj45BfPLciGtY+7UHLPQtBP6LlJrNK9QKYn9tT7DNUQuJWZ47bmT0\n EGDTxrTHoCm3u1FuJLEmZjVlICCmL3dAl+yF1Ih19irhO1GuMzaL49An7zk+9Rr8Qim7\n JJoN8gN0lIJ2TA0qC01fK1wCiv6FNx+ZKFwtOHC6CV2yw+bdF+JsXv7STp+fH5Pv9YCr\n BMCQ==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version:content-transfer-encoding;\n bh=VYQ5p2nPpBR5DJvILXrUnlOfJmOiXgnGWSaG5+Zoefw=;\n b=RgMOQs2etD/bVxMUE5XazFGcIc00XEzrG3ZCEZ7jfuhlKHze/eqkI0yJiEZccfSFcL\n +5nk4NugKVGEoviXs7baO5XtKHL6Qrr+aCLJ0EGJT8So5EZ3SAHxKiJifxO+xJO+IYb9\n paWq/0teiKwVoK0OdqUx6FC7zHGftUnxFcJ/J45X17EBfg2F2CbFuapeJnvH22dWaUJT\n akoDbuPNCLMvmLG/srtLJTRYdzvxme17MXq1+TWjAI/YXiUC9XOX0vcIXQoJySUuNyQn\n dseCacPKqWs5/3d161j391MLmuxV+lSmAQ7wM8KpdyQlXULgmkavxhO97zXKZVZsAbe0\n FZpw==",
        "X-Gm-Message-State": "AOAM530k4M8tAGBzFdBsuO8kI3jpBdCUy89YhamjKgkFKgmFzNRRzGWR\n s0UiN5T1FGfz3GkfGJG4VZUos7BEz2K5/w==",
        "X-Google-Smtp-Source": "\n ABdhPJy6QwR6jlTrxYJAwtbbqxQIgo71nSgNTz63hBdS1Ek4iIRUgEn+kqD7gZdDgPqJ/DEjKcuj4g==",
        "X-Received": "by 2002:a05:651c:484:: with SMTP id\n s4mr10797288ljc.381.1592181848330;\n Sun, 14 Jun 2020 17:44:08 -0700 (PDT)",
        "From": "Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>",
        "To": "dev@dpdk.org",
        "Cc": "Dmitry Malloy <dmitrym@microsoft.com>,\n Narcisa Ana Maria Vasile <Narcisa.Vasile@microsoft.com>,\n Fady Bader <fady@mellanox.com>, Tal Shnaiderman <talshn@mellanox.com>,\n Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Bruce Richardson <bruce.richardson@intel.com>",
        "Date": "Mon, 15 Jun 2020 03:43:47 +0300",
        "Message-Id": "<20200615004354.14380-6-dmitry.kozliuk@gmail.com>",
        "X-Mailer": "git-send-email 2.25.4",
        "In-Reply-To": "<20200615004354.14380-1-dmitry.kozliuk@gmail.com>",
        "References": "<20200610142730.31376-1-dmitry.kozliuk@gmail.com>\n <20200615004354.14380-1-dmitry.kozliuk@gmail.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v9 05/12] eal/mem: extract common code for\n\tdynamic memory allocation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Code in Linux EAL that supports dynamic memory allocation (as opposed to\nstatic allocation used by FreeBSD) is not OS-dependent and can be reused\nby Windows EAL. Move such code to a file compiled only for the OS that\nrequire it. Keep Anatoly Burakov maintainer of extracted code.\n\nSigned-off-by: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>\n---\n MAINTAINERS                               |   1 +\n lib/librte_eal/common/eal_common_dynmem.c | 521 +++++++++++++++++++++\n lib/librte_eal/common/eal_private.h       |  43 +-\n lib/librte_eal/common/meson.build         |   4 +\n lib/librte_eal/freebsd/eal_memory.c       |  12 +-\n lib/librte_eal/linux/Makefile             |   1 +\n lib/librte_eal/linux/eal_memory.c         | 523 +---------------------\n 7 files changed, 582 insertions(+), 523 deletions(-)\n create mode 100644 lib/librte_eal/common/eal_common_dynmem.c",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 4d162efd6..241dbc3d7 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -209,6 +209,7 @@ F: lib/librte_eal/include/rte_fbarray.h\n F: lib/librte_eal/include/rte_mem*\n F: lib/librte_eal/include/rte_malloc.h\n F: lib/librte_eal/common/*malloc*\n+F: lib/librte_eal/common/eal_common_dynmem.c\n F: lib/librte_eal/common/eal_common_fbarray.c\n F: lib/librte_eal/common/eal_common_mem*\n F: lib/librte_eal/common/eal_hugepages.h\ndiff --git a/lib/librte_eal/common/eal_common_dynmem.c b/lib/librte_eal/common/eal_common_dynmem.c\nnew file mode 100644\nindex 000000000..6b07672d0\n--- /dev/null\n+++ b/lib/librte_eal/common/eal_common_dynmem.c\n@@ -0,0 +1,521 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2014 Intel Corporation.\n+ * Copyright(c) 2013 6WIND S.A.\n+ */\n+\n+#include <inttypes.h>\n+#include <string.h>\n+\n+#include <rte_log.h>\n+#include <rte_string_fns.h>\n+\n+#include \"eal_internal_cfg.h\"\n+#include \"eal_memalloc.h\"\n+#include \"eal_memcfg.h\"\n+#include \"eal_private.h\"\n+\n+/** @file Functions common to EALs that support dynamic memory allocation. */\n+\n+int\n+eal_dynmem_memseg_lists_init(void)\n+{\n+\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n+\tstruct memtype {\n+\t\tuint64_t page_sz;\n+\t\tint socket_id;\n+\t} *memtypes = NULL;\n+\tint i, hpi_idx, msl_idx, ret = -1; /* fail unless told to succeed */\n+\tstruct rte_memseg_list *msl;\n+\tuint64_t max_mem, max_mem_per_type;\n+\tunsigned int max_seglists_per_type;\n+\tunsigned int n_memtypes, cur_type;\n+\n+\t/* no-huge does not need this at all */\n+\tif (internal_config.no_hugetlbfs)\n+\t\treturn 0;\n+\n+\t/*\n+\t * figuring out amount of memory we're going to have is a long and very\n+\t * involved process. the basic element we're operating with is a memory\n+\t * type, defined as a combination of NUMA node ID and page size (so that\n+\t * e.g. 2 sockets with 2 page sizes yield 4 memory types in total).\n+\t *\n+\t * deciding amount of memory going towards each memory type is a\n+\t * balancing act between maximum segments per type, maximum memory per\n+\t * type, and number of detected NUMA nodes. the goal is to make sure\n+\t * each memory type gets at least one memseg list.\n+\t *\n+\t * the total amount of memory is limited by RTE_MAX_MEM_MB value.\n+\t *\n+\t * the total amount of memory per type is limited by either\n+\t * RTE_MAX_MEM_MB_PER_TYPE, or by RTE_MAX_MEM_MB divided by the number\n+\t * of detected NUMA nodes. additionally, maximum number of segments per\n+\t * type is also limited by RTE_MAX_MEMSEG_PER_TYPE. this is because for\n+\t * smaller page sizes, it can take hundreds of thousands of segments to\n+\t * reach the above specified per-type memory limits.\n+\t *\n+\t * additionally, each type may have multiple memseg lists associated\n+\t * with it, each limited by either RTE_MAX_MEM_MB_PER_LIST for bigger\n+\t * page sizes, or RTE_MAX_MEMSEG_PER_LIST segments for smaller ones.\n+\t *\n+\t * the number of memseg lists per type is decided based on the above\n+\t * limits, and also taking number of detected NUMA nodes, to make sure\n+\t * that we don't run out of memseg lists before we populate all NUMA\n+\t * nodes with memory.\n+\t *\n+\t * we do this in three stages. first, we collect the number of types.\n+\t * then, we figure out memory constraints and populate the list of\n+\t * would-be memseg lists. 
then, we go ahead and allocate the memseg\n+\t * lists.\n+\t */\n+\n+\t/* create space for mem types */\n+\tn_memtypes = internal_config.num_hugepage_sizes * rte_socket_count();\n+\tmemtypes = calloc(n_memtypes, sizeof(*memtypes));\n+\tif (memtypes == NULL) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot allocate space for memory types\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* populate mem types */\n+\tcur_type = 0;\n+\tfor (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;\n+\t\t\thpi_idx++) {\n+\t\tstruct hugepage_info *hpi;\n+\t\tuint64_t hugepage_sz;\n+\n+\t\thpi = &internal_config.hugepage_info[hpi_idx];\n+\t\thugepage_sz = hpi->hugepage_sz;\n+\n+\t\tfor (i = 0; i < (int) rte_socket_count(); i++, cur_type++) {\n+\t\t\tint socket_id = rte_socket_id_by_idx(i);\n+\n+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES\n+\t\t\t/* we can still sort pages by socket in legacy mode */\n+\t\t\tif (!internal_config.legacy_mem && socket_id > 0)\n+\t\t\t\tbreak;\n+#endif\n+\t\t\tmemtypes[cur_type].page_sz = hugepage_sz;\n+\t\t\tmemtypes[cur_type].socket_id = socket_id;\n+\n+\t\t\tRTE_LOG(DEBUG, EAL, \"Detected memory type: \"\n+\t\t\t\t\"socket_id:%u hugepage_sz:%\" PRIu64 \"\\n\",\n+\t\t\t\tsocket_id, hugepage_sz);\n+\t\t}\n+\t}\n+\t/* number of memtypes could have been lower due to no NUMA support */\n+\tn_memtypes = cur_type;\n+\n+\t/* set up limits for types */\n+\tmax_mem = (uint64_t)RTE_MAX_MEM_MB << 20;\n+\tmax_mem_per_type = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20,\n+\t\t\tmax_mem / n_memtypes);\n+\t/*\n+\t * limit maximum number of segment lists per type to ensure there's\n+\t * space for memseg lists for all NUMA nodes with all page sizes\n+\t */\n+\tmax_seglists_per_type = RTE_MAX_MEMSEG_LISTS / n_memtypes;\n+\n+\tif (max_seglists_per_type == 0) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot accommodate all memory types, please increase %s\\n\",\n+\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n+\t\tgoto out;\n+\t}\n+\n+\t/* go through all mem types and create segment lists */\n+\tmsl_idx = 0;\n+\tfor (cur_type = 0; cur_type < n_memtypes; cur_type++) {\n+\t\tunsigned int cur_seglist, n_seglists, n_segs;\n+\t\tunsigned int max_segs_per_type, max_segs_per_list;\n+\t\tstruct memtype *type = &memtypes[cur_type];\n+\t\tuint64_t max_mem_per_list, pagesz;\n+\t\tint socket_id;\n+\n+\t\tpagesz = type->page_sz;\n+\t\tsocket_id = type->socket_id;\n+\n+\t\t/*\n+\t\t * we need to create segment lists for this type. we must take\n+\t\t * into account the following things:\n+\t\t *\n+\t\t * 1. total amount of memory we can use for this memory type\n+\t\t * 2. total amount of memory per memseg list allowed\n+\t\t * 3. number of segments needed to fit the amount of memory\n+\t\t * 4. number of segments allowed per type\n+\t\t * 5. number of segments allowed per memseg list\n+\t\t * 6. 
number of memseg lists we are allowed to take up\n+\t\t */\n+\n+\t\t/* calculate how much segments we will need in total */\n+\t\tmax_segs_per_type = max_mem_per_type / pagesz;\n+\t\t/* limit number of segments to maximum allowed per type */\n+\t\tmax_segs_per_type = RTE_MIN(max_segs_per_type,\n+\t\t\t\t(unsigned int)RTE_MAX_MEMSEG_PER_TYPE);\n+\t\t/* limit number of segments to maximum allowed per list */\n+\t\tmax_segs_per_list = RTE_MIN(max_segs_per_type,\n+\t\t\t\t(unsigned int)RTE_MAX_MEMSEG_PER_LIST);\n+\n+\t\t/* calculate how much memory we can have per segment list */\n+\t\tmax_mem_per_list = RTE_MIN(max_segs_per_list * pagesz,\n+\t\t\t\t(uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20);\n+\n+\t\t/* calculate how many segments each segment list will have */\n+\t\tn_segs = RTE_MIN(max_segs_per_list, max_mem_per_list / pagesz);\n+\n+\t\t/* calculate how many segment lists we can have */\n+\t\tn_seglists = RTE_MIN(max_segs_per_type / n_segs,\n+\t\t\t\tmax_mem_per_type / max_mem_per_list);\n+\n+\t\t/* limit number of segment lists according to our maximum */\n+\t\tn_seglists = RTE_MIN(n_seglists, max_seglists_per_type);\n+\n+\t\tRTE_LOG(DEBUG, EAL, \"Creating %i segment lists: \"\n+\t\t\t\t\"n_segs:%i socket_id:%i hugepage_sz:%\" PRIu64 \"\\n\",\n+\t\t\tn_seglists, n_segs, socket_id, pagesz);\n+\n+\t\t/* create all segment lists */\n+\t\tfor (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) {\n+\t\t\tif (msl_idx >= RTE_MAX_MEMSEG_LISTS) {\n+\t\t\t\tRTE_LOG(ERR, EAL,\n+\t\t\t\t\t\"No more space in memseg lists, please increase %s\\n\",\n+\t\t\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n+\t\t\t\tgoto out;\n+\t\t\t}\n+\t\t\tmsl = &mcfg->memsegs[msl_idx++];\n+\n+\t\t\tif (eal_memseg_list_init(msl, pagesz, n_segs,\n+\t\t\t\t\tsocket_id, cur_seglist, true))\n+\t\t\t\tgoto out;\n+\n+\t\t\tif (eal_memseg_list_alloc(msl, 0)) {\n+\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space for memseg list\\n\");\n+\t\t\t\tgoto out;\n+\t\t\t}\n+\t\t}\n+\t}\n+\t/* we're successful */\n+\tret = 0;\n+out:\n+\tfree(memtypes);\n+\treturn ret;\n+}\n+\n+static int __rte_unused\n+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)\n+{\n+\tstruct hugepage_info *hpi = arg;\n+\n+\tif (msl->page_sz != hpi->hugepage_sz)\n+\t\treturn 0;\n+\n+\thpi->num_pages[msl->socket_id] += msl->memseg_arr.len;\n+\treturn 0;\n+}\n+\n+static int\n+limits_callback(int socket_id, size_t cur_limit, size_t new_len)\n+{\n+\tRTE_SET_USED(socket_id);\n+\tRTE_SET_USED(cur_limit);\n+\tRTE_SET_USED(new_len);\n+\treturn -1;\n+}\n+\n+int\n+eal_dynmem_hugepage_init(void)\n+{\n+\tstruct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];\n+\tuint64_t memory[RTE_MAX_NUMA_NODES];\n+\tint hp_sz_idx, socket_id;\n+\n+\tmemset(used_hp, 0, sizeof(used_hp));\n+\n+\tfor (hp_sz_idx = 0;\n+\t\t\thp_sz_idx < (int) internal_config.num_hugepage_sizes;\n+\t\t\thp_sz_idx++) {\n+#ifndef RTE_ARCH_64\n+\t\tstruct hugepage_info dummy;\n+\t\tunsigned int i;\n+#endif\n+\t\t/* also initialize used_hp hugepage sizes in used_hp */\n+\t\tstruct hugepage_info *hpi;\n+\t\thpi = &internal_config.hugepage_info[hp_sz_idx];\n+\t\tused_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;\n+\n+#ifndef RTE_ARCH_64\n+\t\t/* for 32-bit, limit number of pages on socket to whatever we've\n+\t\t * preallocated, as we cannot allocate more.\n+\t\t */\n+\t\tmemset(&dummy, 0, sizeof(dummy));\n+\t\tdummy.hugepage_sz = hpi->hugepage_sz;\n+\t\tif (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)\n+\t\t\treturn -1;\n+\n+\t\tfor (i = 0; i < RTE_DIM(dummy.num_pages); i++) 
{\n+\t\t\thpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],\n+\t\t\t\t\tdummy.num_pages[i]);\n+\t\t}\n+#endif\n+\t}\n+\n+\t/* make a copy of socket_mem, needed for balanced allocation. */\n+\tfor (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)\n+\t\tmemory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];\n+\n+\t/* calculate final number of pages */\n+\tif (eal_dynmem_calc_num_pages_per_socket(memory,\n+\t\t\tinternal_config.hugepage_info, used_hp,\n+\t\t\tinternal_config.num_hugepage_sizes) < 0)\n+\t\treturn -1;\n+\n+\tfor (hp_sz_idx = 0;\n+\t\t\thp_sz_idx < (int)internal_config.num_hugepage_sizes;\n+\t\t\thp_sz_idx++) {\n+\t\tfor (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;\n+\t\t\t\tsocket_id++) {\n+\t\t\tstruct rte_memseg **pages;\n+\t\t\tstruct hugepage_info *hpi = &used_hp[hp_sz_idx];\n+\t\t\tunsigned int num_pages = hpi->num_pages[socket_id];\n+\t\t\tunsigned int num_pages_alloc;\n+\n+\t\t\tif (num_pages == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\tRTE_LOG(DEBUG, EAL,\n+\t\t\t\t\"Allocating %u pages of size %\" PRIu64 \"M \"\n+\t\t\t\t\"on socket %i\\n\",\n+\t\t\t\tnum_pages, hpi->hugepage_sz >> 20, socket_id);\n+\n+\t\t\t/* we may not be able to allocate all pages in one go,\n+\t\t\t * because we break up our memory map into multiple\n+\t\t\t * memseg lists. therefore, try allocating multiple\n+\t\t\t * times and see if we can get the desired number of\n+\t\t\t * pages from multiple allocations.\n+\t\t\t */\n+\n+\t\t\tnum_pages_alloc = 0;\n+\t\t\tdo {\n+\t\t\t\tint i, cur_pages, needed;\n+\n+\t\t\t\tneeded = num_pages - num_pages_alloc;\n+\n+\t\t\t\tpages = malloc(sizeof(*pages) * needed);\n+\n+\t\t\t\t/* do not request exact number of pages */\n+\t\t\t\tcur_pages = eal_memalloc_alloc_seg_bulk(pages,\n+\t\t\t\t\t\tneeded, hpi->hugepage_sz,\n+\t\t\t\t\t\tsocket_id, false);\n+\t\t\t\tif (cur_pages <= 0) {\n+\t\t\t\t\tfree(pages);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\n+\t\t\t\t/* mark preallocated pages as unfreeable */\n+\t\t\t\tfor (i = 0; i < cur_pages; i++) {\n+\t\t\t\t\tstruct rte_memseg *ms = pages[i];\n+\t\t\t\t\tms->flags |=\n+\t\t\t\t\t\tRTE_MEMSEG_FLAG_DO_NOT_FREE;\n+\t\t\t\t}\n+\t\t\t\tfree(pages);\n+\n+\t\t\t\tnum_pages_alloc += cur_pages;\n+\t\t\t} while (num_pages_alloc != num_pages);\n+\t\t}\n+\t}\n+\n+\t/* if socket limits were specified, set them */\n+\tif (internal_config.force_socket_limits) {\n+\t\tunsigned int i;\n+\t\tfor (i = 0; i < RTE_MAX_NUMA_NODES; i++) {\n+\t\t\tuint64_t limit = internal_config.socket_limit[i];\n+\t\t\tif (limit == 0)\n+\t\t\t\tcontinue;\n+\t\t\tif (rte_mem_alloc_validator_register(\"socket-limit\",\n+\t\t\t\t\tlimits_callback, i, limit))\n+\t\t\t\tRTE_LOG(ERR, EAL, \"Failed to register socket limits validator callback\\n\");\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+__rte_unused /* function is unused on 32-bit builds */\n+static inline uint64_t\n+get_socket_mem_size(int socket)\n+{\n+\tuint64_t size = 0;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < internal_config.num_hugepage_sizes; i++) {\n+\t\tstruct hugepage_info *hpi = &internal_config.hugepage_info[i];\n+\t\tsize += hpi->hugepage_sz * hpi->num_pages[socket];\n+\t}\n+\n+\treturn size;\n+}\n+\n+int\n+eal_dynmem_calc_num_pages_per_socket(\n+\tuint64_t *memory, struct hugepage_info *hp_info,\n+\tstruct hugepage_info *hp_used, unsigned int num_hp_info)\n+{\n+\tunsigned int socket, j, i = 0;\n+\tunsigned int requested, available;\n+\tint total_num_pages = 0;\n+\tuint64_t remaining_mem, cur_mem;\n+\tuint64_t total_mem = internal_config.memory;\n+\n+\tif (num_hp_info == 0)\n+\t\treturn 
-1;\n+\n+\t/* if specific memory amounts per socket weren't requested */\n+\tif (internal_config.force_sockets == 0) {\n+\t\tsize_t total_size;\n+#ifdef RTE_ARCH_64\n+\t\tint cpu_per_socket[RTE_MAX_NUMA_NODES];\n+\t\tsize_t default_size;\n+\t\tunsigned int lcore_id;\n+\n+\t\t/* Compute number of cores per socket */\n+\t\tmemset(cpu_per_socket, 0, sizeof(cpu_per_socket));\n+\t\tRTE_LCORE_FOREACH(lcore_id) {\n+\t\t\tcpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;\n+\t\t}\n+\n+\t\t/*\n+\t\t * Automatically spread requested memory amongst detected\n+\t\t * sockets according to number of cores from CPU mask present\n+\t\t * on each socket.\n+\t\t */\n+\t\ttotal_size = internal_config.memory;\n+\t\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;\n+\t\t\t\tsocket++) {\n+\n+\t\t\t/* Set memory amount per socket */\n+\t\t\tdefault_size = internal_config.memory *\n+\t\t\t\tcpu_per_socket[socket] / rte_lcore_count();\n+\n+\t\t\t/* Limit to maximum available memory on socket */\n+\t\t\tdefault_size = RTE_MIN(\n+\t\t\t\tdefault_size, get_socket_mem_size(socket));\n+\n+\t\t\t/* Update sizes */\n+\t\t\tmemory[socket] = default_size;\n+\t\t\ttotal_size -= default_size;\n+\t\t}\n+\n+\t\t/*\n+\t\t * If some memory is remaining, try to allocate it by getting\n+\t\t * all available memory from sockets, one after the other.\n+\t\t */\n+\t\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;\n+\t\t\t\tsocket++) {\n+\t\t\t/* take whatever is available */\n+\t\t\tdefault_size = RTE_MIN(\n+\t\t\t\tget_socket_mem_size(socket) - memory[socket],\n+\t\t\t\ttotal_size);\n+\n+\t\t\t/* Update sizes */\n+\t\t\tmemory[socket] += default_size;\n+\t\t\ttotal_size -= default_size;\n+\t\t}\n+#else\n+\t\t/* in 32-bit mode, allocate all of the memory only on master\n+\t\t * lcore socket\n+\t\t */\n+\t\ttotal_size = internal_config.memory;\n+\t\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;\n+\t\t\t\tsocket++) {\n+\t\t\tstruct rte_config *cfg = rte_eal_get_configuration();\n+\t\t\tunsigned int master_lcore_socket;\n+\n+\t\t\tmaster_lcore_socket =\n+\t\t\t\trte_lcore_to_socket_id(cfg->master_lcore);\n+\n+\t\t\tif (master_lcore_socket != socket)\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* Update sizes */\n+\t\t\tmemory[socket] = total_size;\n+\t\t\tbreak;\n+\t\t}\n+#endif\n+\t}\n+\n+\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0;\n+\t\t\tsocket++) {\n+\t\t/* skips if the memory on specific socket wasn't requested */\n+\t\tfor (i = 0; i < num_hp_info && memory[socket] != 0; i++) {\n+\t\t\trte_strscpy(hp_used[i].hugedir, hp_info[i].hugedir,\n+\t\t\t\tsizeof(hp_used[i].hugedir));\n+\t\t\thp_used[i].num_pages[socket] = RTE_MIN(\n+\t\t\t\t\tmemory[socket] / hp_info[i].hugepage_sz,\n+\t\t\t\t\thp_info[i].num_pages[socket]);\n+\n+\t\t\tcur_mem = hp_used[i].num_pages[socket] *\n+\t\t\t\t\thp_used[i].hugepage_sz;\n+\n+\t\t\tmemory[socket] -= cur_mem;\n+\t\t\ttotal_mem -= cur_mem;\n+\n+\t\t\ttotal_num_pages += hp_used[i].num_pages[socket];\n+\n+\t\t\t/* check if we have met all memory requests */\n+\t\t\tif (memory[socket] == 0)\n+\t\t\t\tbreak;\n+\n+\t\t\t/* Check if we have any more pages left at this size,\n+\t\t\t * if so, move on to next size.\n+\t\t\t */\n+\t\t\tif (hp_used[i].num_pages[socket] ==\n+\t\t\t\t\thp_info[i].num_pages[socket])\n+\t\t\t\tcontinue;\n+\t\t\t/* At this point we know that there are more pages\n+\t\t\t * available that are bigger than the memory we want,\n+\t\t\t * so lets see if we can get enough from other page\n+\t\t\t * sizes.\n+\t\t\t 
*/\n+\t\t\tremaining_mem = 0;\n+\t\t\tfor (j = i+1; j < num_hp_info; j++)\n+\t\t\t\tremaining_mem += hp_info[j].hugepage_sz *\n+\t\t\t\thp_info[j].num_pages[socket];\n+\n+\t\t\t/* Is there enough other memory?\n+\t\t\t * If not, allocate another page and quit.\n+\t\t\t */\n+\t\t\tif (remaining_mem < memory[socket]) {\n+\t\t\t\tcur_mem = RTE_MIN(\n+\t\t\t\t\tmemory[socket], hp_info[i].hugepage_sz);\n+\t\t\t\tmemory[socket] -= cur_mem;\n+\t\t\t\ttotal_mem -= cur_mem;\n+\t\t\t\thp_used[i].num_pages[socket]++;\n+\t\t\t\ttotal_num_pages++;\n+\t\t\t\tbreak; /* we are done with this socket*/\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* if we didn't satisfy all memory requirements per socket */\n+\t\tif (memory[socket] > 0 &&\n+\t\t\t\tinternal_config.socket_mem[socket] != 0) {\n+\t\t\t/* to prevent icc errors */\n+\t\t\trequested = (unsigned int)(\n+\t\t\t\tinternal_config.socket_mem[socket] / 0x100000);\n+\t\t\tavailable = requested -\n+\t\t\t\t((unsigned int)(memory[socket] / 0x100000));\n+\t\t\tRTE_LOG(ERR, EAL, \"Not enough memory available on \"\n+\t\t\t\t\"socket %u! Requested: %uMB, available: %uMB\\n\",\n+\t\t\t\tsocket, requested, available);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* if we didn't satisfy total memory requirements */\n+\tif (total_mem > 0) {\n+\t\trequested = (unsigned int)(internal_config.memory / 0x100000);\n+\t\tavailable = requested - (unsigned int)(total_mem / 0x100000);\n+\t\tRTE_LOG(ERR, EAL, \"Not enough memory available! \"\n+\t\t\t\"Requested: %uMB, available: %uMB\\n\",\n+\t\t\trequested, available);\n+\t\treturn -1;\n+\t}\n+\treturn total_num_pages;\n+}\ndiff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h\nindex 75521d086..0592fcd69 100644\n--- a/lib/librte_eal/common/eal_private.h\n+++ b/lib/librte_eal/common/eal_private.h\n@@ -13,6 +13,8 @@\n #include <rte_lcore.h>\n #include <rte_memory.h>\n \n+#include \"eal_internal_cfg.h\"\n+\n /**\n  * Structure storing internal configuration (per-lcore)\n  */\n@@ -316,6 +318,45 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);\n void\n eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);\n \n+/**\n+ * Distribute available memory between MSLs.\n+ *\n+ * @return\n+ *  0 on success, (-1) on failure.\n+ */\n+int\n+eal_dynmem_memseg_lists_init(void);\n+\n+/**\n+ * Preallocate hugepages for dynamic allocation.\n+ *\n+ * @return\n+ *  0 on success, (-1) on failure.\n+ */\n+int\n+eal_dynmem_hugepage_init(void);\n+\n+/**\n+ * Given the list of hugepage sizes and the number of pages thereof,\n+ * calculate the best number of pages of each size to fulfill the request\n+ * for RAM on each NUMA node.\n+ *\n+ * @param memory\n+ *  Amounts of memory requested for each NUMA node of RTE_MAX_NUMA_NODES.\n+ * @param hp_info\n+ *  Information about hugepages of different size.\n+ * @param hp_used\n+ *  Receives information about used hugepages of each size.\n+ * @param num_hp_info\n+ *  Number of elements in hp_info and hp_used.\n+ * @return\n+ *  0 on success, (-1) on failure.\n+ */\n+int\n+eal_dynmem_calc_num_pages_per_socket(\n+\t\tuint64_t *memory, struct hugepage_info *hp_info,\n+\t\tstruct hugepage_info *hp_used, unsigned int num_hp_info);\n+\n /**\n  * Get cpu core_id.\n  *\n@@ -595,7 +636,7 @@ void *\n eal_mem_reserve(void *requested_addr, size_t size, int flags);\n \n /**\n- * Free memory obtained by eal_mem_reserve() or eal_mem_alloc().\n+ * Free memory obtained by eal_mem_reserve() and possibly allocated.\n  *\n  * If *virt* and *size* describe a part of the 
reserved region,\n  * only this part of the region is freed (accurately up to the system\ndiff --git a/lib/librte_eal/common/meson.build b/lib/librte_eal/common/meson.build\nindex 55aaeb18e..d91c22220 100644\n--- a/lib/librte_eal/common/meson.build\n+++ b/lib/librte_eal/common/meson.build\n@@ -56,3 +56,7 @@ sources += files(\n \t'rte_reciprocal.c',\n \t'rte_service.c',\n )\n+\n+if is_linux\n+\tsources += files('eal_common_dynmem.c')\n+endif\ndiff --git a/lib/librte_eal/freebsd/eal_memory.c b/lib/librte_eal/freebsd/eal_memory.c\nindex 2eb70c2fe..72a30f21a 100644\n--- a/lib/librte_eal/freebsd/eal_memory.c\n+++ b/lib/librte_eal/freebsd/eal_memory.c\n@@ -315,14 +315,6 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)\n \treturn RTE_ALIGN(area_sz, page_sz);\n }\n \n-static int\n-memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,\n-\t\tint n_segs, int socket_id, int type_msl_idx)\n-{\n-\treturn eal_memseg_list_init(\n-\t\tmsl, page_sz, n_segs, socket_id, type_msl_idx, false);\n-}\n-\n static int\n memseg_list_alloc(struct rte_memseg_list *msl)\n {\n@@ -419,8 +411,8 @@ memseg_primary_init(void)\n \t\t\t\t\tcur_max_mem);\n \t\t\tn_segs = cur_mem / hugepage_sz;\n \n-\t\t\tif (memseg_list_init(msl, hugepage_sz, n_segs,\n-\t\t\t\t\t0, type_msl_idx))\n+\t\t\tif (eal_memseg_list_init(msl, hugepage_sz, n_segs,\n+\t\t\t\t\t0, type_msl_idx, false))\n \t\t\t\treturn -1;\n \n \t\t\ttotal_segs += msl->memseg_arr.len;\ndiff --git a/lib/librte_eal/linux/Makefile b/lib/librte_eal/linux/Makefile\nindex 8febf2212..07ce643ba 100644\n--- a/lib/librte_eal/linux/Makefile\n+++ b/lib/librte_eal/linux/Makefile\n@@ -50,6 +50,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_timer.c\n SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memzone.c\n SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_log.c\n SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_launch.c\n+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_dynmem.c\n SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_mcfg.c\n SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memalloc.c\n SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memory.c\ndiff --git a/lib/librte_eal/linux/eal_memory.c b/lib/librte_eal/linux/eal_memory.c\nindex 9cc39e6fb..5986dab23 100644\n--- a/lib/librte_eal/linux/eal_memory.c\n+++ b/lib/librte_eal/linux/eal_memory.c\n@@ -812,20 +812,6 @@ memseg_list_free(struct rte_memseg_list *msl)\n \treturn 0;\n }\n \n-static int\n-memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,\n-\t\tint n_segs, int socket_id, int type_msl_idx)\n-{\n-\treturn eal_memseg_list_init(\n-\t\tmsl, page_sz, n_segs, socket_id, type_msl_idx, true);\n-}\n-\n-static int\n-memseg_list_alloc(struct rte_memseg_list *msl)\n-{\n-\treturn eal_memseg_list_alloc(msl, 0);\n-}\n-\n /*\n  * Our VA space is not preallocated yet, so preallocate it here. 
We need to know\n  * how many segments there are in order to map all pages into one address space,\n@@ -969,12 +955,12 @@ prealloc_segments(struct hugepage_file *hugepages, int n_pages)\n \t\t\t}\n \n \t\t\t/* now, allocate fbarray itself */\n-\t\t\tif (memseg_list_init(msl, page_sz, n_segs, socket,\n-\t\t\t\t\t\tmsl_idx) < 0)\n+\t\t\tif (eal_memseg_list_init(msl, page_sz, n_segs,\n+\t\t\t\t\tsocket, msl_idx, true) < 0)\n \t\t\t\treturn -1;\n \n \t\t\t/* finally, allocate VA space */\n-\t\t\tif (memseg_list_alloc(msl) < 0) {\n+\t\t\tif (eal_memseg_list_alloc(msl, 0) < 0) {\n \t\t\t\tRTE_LOG(ERR, EAL, \"Cannot preallocate 0x%\"PRIx64\"kB hugepages\\n\",\n \t\t\t\t\tpage_sz >> 10);\n \t\t\t\treturn -1;\n@@ -1048,182 +1034,6 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)\n \treturn 0;\n }\n \n-__rte_unused /* function is unused on 32-bit builds */\n-static inline uint64_t\n-get_socket_mem_size(int socket)\n-{\n-\tuint64_t size = 0;\n-\tunsigned i;\n-\n-\tfor (i = 0; i < internal_config.num_hugepage_sizes; i++){\n-\t\tstruct hugepage_info *hpi = &internal_config.hugepage_info[i];\n-\t\tsize += hpi->hugepage_sz * hpi->num_pages[socket];\n-\t}\n-\n-\treturn size;\n-}\n-\n-/*\n- * This function is a NUMA-aware equivalent of calc_num_pages.\n- * It takes in the list of hugepage sizes and the\n- * number of pages thereof, and calculates the best number of\n- * pages of each size to fulfill the request for <memory> ram\n- */\n-static int\n-calc_num_pages_per_socket(uint64_t * memory,\n-\t\tstruct hugepage_info *hp_info,\n-\t\tstruct hugepage_info *hp_used,\n-\t\tunsigned num_hp_info)\n-{\n-\tunsigned socket, j, i = 0;\n-\tunsigned requested, available;\n-\tint total_num_pages = 0;\n-\tuint64_t remaining_mem, cur_mem;\n-\tuint64_t total_mem = internal_config.memory;\n-\n-\tif (num_hp_info == 0)\n-\t\treturn -1;\n-\n-\t/* if specific memory amounts per socket weren't requested */\n-\tif (internal_config.force_sockets == 0) {\n-\t\tsize_t total_size;\n-#ifdef RTE_ARCH_64\n-\t\tint cpu_per_socket[RTE_MAX_NUMA_NODES];\n-\t\tsize_t default_size;\n-\t\tunsigned lcore_id;\n-\n-\t\t/* Compute number of cores per socket */\n-\t\tmemset(cpu_per_socket, 0, sizeof(cpu_per_socket));\n-\t\tRTE_LCORE_FOREACH(lcore_id) {\n-\t\t\tcpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;\n-\t\t}\n-\n-\t\t/*\n-\t\t * Automatically spread requested memory amongst detected sockets according\n-\t\t * to number of cores from cpu mask present on each socket\n-\t\t */\n-\t\ttotal_size = internal_config.memory;\n-\t\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {\n-\n-\t\t\t/* Set memory amount per socket */\n-\t\t\tdefault_size = (internal_config.memory * cpu_per_socket[socket])\n-\t\t\t\t\t/ rte_lcore_count();\n-\n-\t\t\t/* Limit to maximum available memory on socket */\n-\t\t\tdefault_size = RTE_MIN(default_size, get_socket_mem_size(socket));\n-\n-\t\t\t/* Update sizes */\n-\t\t\tmemory[socket] = default_size;\n-\t\t\ttotal_size -= default_size;\n-\t\t}\n-\n-\t\t/*\n-\t\t * If some memory is remaining, try to allocate it by getting all\n-\t\t * available memory from sockets, one after the other\n-\t\t */\n-\t\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {\n-\t\t\t/* take whatever is available */\n-\t\t\tdefault_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],\n-\t\t\t\t\t       total_size);\n-\n-\t\t\t/* Update sizes */\n-\t\t\tmemory[socket] += default_size;\n-\t\t\ttotal_size -= default_size;\n-\t\t}\n-#else\n-\t\t/* in 32-bit 
mode, allocate all of the memory only on master\n-\t\t * lcore socket\n-\t\t */\n-\t\ttotal_size = internal_config.memory;\n-\t\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;\n-\t\t\t\tsocket++) {\n-\t\t\tstruct rte_config *cfg = rte_eal_get_configuration();\n-\t\t\tunsigned int master_lcore_socket;\n-\n-\t\t\tmaster_lcore_socket =\n-\t\t\t\trte_lcore_to_socket_id(cfg->master_lcore);\n-\n-\t\t\tif (master_lcore_socket != socket)\n-\t\t\t\tcontinue;\n-\n-\t\t\t/* Update sizes */\n-\t\t\tmemory[socket] = total_size;\n-\t\t\tbreak;\n-\t\t}\n-#endif\n-\t}\n-\n-\tfor (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {\n-\t\t/* skips if the memory on specific socket wasn't requested */\n-\t\tfor (i = 0; i < num_hp_info && memory[socket] != 0; i++){\n-\t\t\tstrlcpy(hp_used[i].hugedir, hp_info[i].hugedir,\n-\t\t\t\tsizeof(hp_used[i].hugedir));\n-\t\t\thp_used[i].num_pages[socket] = RTE_MIN(\n-\t\t\t\t\tmemory[socket] / hp_info[i].hugepage_sz,\n-\t\t\t\t\thp_info[i].num_pages[socket]);\n-\n-\t\t\tcur_mem = hp_used[i].num_pages[socket] *\n-\t\t\t\t\thp_used[i].hugepage_sz;\n-\n-\t\t\tmemory[socket] -= cur_mem;\n-\t\t\ttotal_mem -= cur_mem;\n-\n-\t\t\ttotal_num_pages += hp_used[i].num_pages[socket];\n-\n-\t\t\t/* check if we have met all memory requests */\n-\t\t\tif (memory[socket] == 0)\n-\t\t\t\tbreak;\n-\n-\t\t\t/* check if we have any more pages left at this size, if so\n-\t\t\t * move on to next size */\n-\t\t\tif (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])\n-\t\t\t\tcontinue;\n-\t\t\t/* At this point we know that there are more pages available that are\n-\t\t\t * bigger than the memory we want, so lets see if we can get enough\n-\t\t\t * from other page sizes.\n-\t\t\t */\n-\t\t\tremaining_mem = 0;\n-\t\t\tfor (j = i+1; j < num_hp_info; j++)\n-\t\t\t\tremaining_mem += hp_info[j].hugepage_sz *\n-\t\t\t\thp_info[j].num_pages[socket];\n-\n-\t\t\t/* is there enough other memory, if not allocate another page and quit */\n-\t\t\tif (remaining_mem < memory[socket]){\n-\t\t\t\tcur_mem = RTE_MIN(memory[socket],\n-\t\t\t\t\t\thp_info[i].hugepage_sz);\n-\t\t\t\tmemory[socket] -= cur_mem;\n-\t\t\t\ttotal_mem -= cur_mem;\n-\t\t\t\thp_used[i].num_pages[socket]++;\n-\t\t\t\ttotal_num_pages++;\n-\t\t\t\tbreak; /* we are done with this socket*/\n-\t\t\t}\n-\t\t}\n-\t\t/* if we didn't satisfy all memory requirements per socket */\n-\t\tif (memory[socket] > 0 &&\n-\t\t\t\tinternal_config.socket_mem[socket] != 0) {\n-\t\t\t/* to prevent icc errors */\n-\t\t\trequested = (unsigned) (internal_config.socket_mem[socket] /\n-\t\t\t\t\t0x100000);\n-\t\t\tavailable = requested -\n-\t\t\t\t\t((unsigned) (memory[socket] / 0x100000));\n-\t\t\tRTE_LOG(ERR, EAL, \"Not enough memory available on socket %u! \"\n-\t\t\t\t\t\"Requested: %uMB, available: %uMB\\n\", socket,\n-\t\t\t\t\trequested, available);\n-\t\t\treturn -1;\n-\t\t}\n-\t}\n-\n-\t/* if we didn't satisfy total memory requirements */\n-\tif (total_mem > 0) {\n-\t\trequested = (unsigned) (internal_config.memory / 0x100000);\n-\t\tavailable = requested - (unsigned) (total_mem / 0x100000);\n-\t\tRTE_LOG(ERR, EAL, \"Not enough memory available! 
Requested: %uMB,\"\n-\t\t\t\t\" available: %uMB\\n\", requested, available);\n-\t\treturn -1;\n-\t}\n-\treturn total_num_pages;\n-}\n-\n static inline size_t\n eal_get_hugepage_mem_size(void)\n {\n@@ -1529,7 +1339,7 @@ eal_legacy_hugepage_init(void)\n \t\tmemory[i] = internal_config.socket_mem[i];\n \n \t/* calculate final number of pages */\n-\tnr_hugepages = calc_num_pages_per_socket(memory,\n+\tnr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,\n \t\t\tinternal_config.hugepage_info, used_hp,\n \t\t\tinternal_config.num_hugepage_sizes);\n \n@@ -1656,140 +1466,6 @@ eal_legacy_hugepage_init(void)\n \treturn -1;\n }\n \n-static int __rte_unused\n-hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)\n-{\n-\tstruct hugepage_info *hpi = arg;\n-\n-\tif (msl->page_sz != hpi->hugepage_sz)\n-\t\treturn 0;\n-\n-\thpi->num_pages[msl->socket_id] += msl->memseg_arr.len;\n-\treturn 0;\n-}\n-\n-static int\n-limits_callback(int socket_id, size_t cur_limit, size_t new_len)\n-{\n-\tRTE_SET_USED(socket_id);\n-\tRTE_SET_USED(cur_limit);\n-\tRTE_SET_USED(new_len);\n-\treturn -1;\n-}\n-\n-static int\n-eal_hugepage_init(void)\n-{\n-\tstruct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];\n-\tuint64_t memory[RTE_MAX_NUMA_NODES];\n-\tint hp_sz_idx, socket_id;\n-\n-\tmemset(used_hp, 0, sizeof(used_hp));\n-\n-\tfor (hp_sz_idx = 0;\n-\t\t\thp_sz_idx < (int) internal_config.num_hugepage_sizes;\n-\t\t\thp_sz_idx++) {\n-#ifndef RTE_ARCH_64\n-\t\tstruct hugepage_info dummy;\n-\t\tunsigned int i;\n-#endif\n-\t\t/* also initialize used_hp hugepage sizes in used_hp */\n-\t\tstruct hugepage_info *hpi;\n-\t\thpi = &internal_config.hugepage_info[hp_sz_idx];\n-\t\tused_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;\n-\n-#ifndef RTE_ARCH_64\n-\t\t/* for 32-bit, limit number of pages on socket to whatever we've\n-\t\t * preallocated, as we cannot allocate more.\n-\t\t */\n-\t\tmemset(&dummy, 0, sizeof(dummy));\n-\t\tdummy.hugepage_sz = hpi->hugepage_sz;\n-\t\tif (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)\n-\t\t\treturn -1;\n-\n-\t\tfor (i = 0; i < RTE_DIM(dummy.num_pages); i++) {\n-\t\t\thpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],\n-\t\t\t\t\tdummy.num_pages[i]);\n-\t\t}\n-#endif\n-\t}\n-\n-\t/* make a copy of socket_mem, needed for balanced allocation. */\n-\tfor (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)\n-\t\tmemory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];\n-\n-\t/* calculate final number of pages */\n-\tif (calc_num_pages_per_socket(memory,\n-\t\t\tinternal_config.hugepage_info, used_hp,\n-\t\t\tinternal_config.num_hugepage_sizes) < 0)\n-\t\treturn -1;\n-\n-\tfor (hp_sz_idx = 0;\n-\t\t\thp_sz_idx < (int)internal_config.num_hugepage_sizes;\n-\t\t\thp_sz_idx++) {\n-\t\tfor (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;\n-\t\t\t\tsocket_id++) {\n-\t\t\tstruct rte_memseg **pages;\n-\t\t\tstruct hugepage_info *hpi = &used_hp[hp_sz_idx];\n-\t\t\tunsigned int num_pages = hpi->num_pages[socket_id];\n-\t\t\tunsigned int num_pages_alloc;\n-\n-\t\t\tif (num_pages == 0)\n-\t\t\t\tcontinue;\n-\n-\t\t\tRTE_LOG(DEBUG, EAL, \"Allocating %u pages of size %\" PRIu64 \"M on socket %i\\n\",\n-\t\t\t\tnum_pages, hpi->hugepage_sz >> 20, socket_id);\n-\n-\t\t\t/* we may not be able to allocate all pages in one go,\n-\t\t\t * because we break up our memory map into multiple\n-\t\t\t * memseg lists. 
therefore, try allocating multiple\n-\t\t\t * times and see if we can get the desired number of\n-\t\t\t * pages from multiple allocations.\n-\t\t\t */\n-\n-\t\t\tnum_pages_alloc = 0;\n-\t\t\tdo {\n-\t\t\t\tint i, cur_pages, needed;\n-\n-\t\t\t\tneeded = num_pages - num_pages_alloc;\n-\n-\t\t\t\tpages = malloc(sizeof(*pages) * needed);\n-\n-\t\t\t\t/* do not request exact number of pages */\n-\t\t\t\tcur_pages = eal_memalloc_alloc_seg_bulk(pages,\n-\t\t\t\t\t\tneeded, hpi->hugepage_sz,\n-\t\t\t\t\t\tsocket_id, false);\n-\t\t\t\tif (cur_pages <= 0) {\n-\t\t\t\t\tfree(pages);\n-\t\t\t\t\treturn -1;\n-\t\t\t\t}\n-\n-\t\t\t\t/* mark preallocated pages as unfreeable */\n-\t\t\t\tfor (i = 0; i < cur_pages; i++) {\n-\t\t\t\t\tstruct rte_memseg *ms = pages[i];\n-\t\t\t\t\tms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;\n-\t\t\t\t}\n-\t\t\t\tfree(pages);\n-\n-\t\t\t\tnum_pages_alloc += cur_pages;\n-\t\t\t} while (num_pages_alloc != num_pages);\n-\t\t}\n-\t}\n-\t/* if socket limits were specified, set them */\n-\tif (internal_config.force_socket_limits) {\n-\t\tunsigned int i;\n-\t\tfor (i = 0; i < RTE_MAX_NUMA_NODES; i++) {\n-\t\t\tuint64_t limit = internal_config.socket_limit[i];\n-\t\t\tif (limit == 0)\n-\t\t\t\tcontinue;\n-\t\t\tif (rte_mem_alloc_validator_register(\"socket-limit\",\n-\t\t\t\t\tlimits_callback, i, limit))\n-\t\t\t\tRTE_LOG(ERR, EAL, \"Failed to register socket limits validator callback\\n\");\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-\n /*\n  * uses fstat to report the size of a file on disk\n  */\n@@ -1948,7 +1624,7 @@ rte_eal_hugepage_init(void)\n {\n \treturn internal_config.legacy_mem ?\n \t\t\teal_legacy_hugepage_init() :\n-\t\t\teal_hugepage_init();\n+\t\t\teal_dynmem_hugepage_init();\n }\n \n int\n@@ -2127,8 +1803,9 @@ memseg_primary_init_32(void)\n \t\t\t\t\t\tmax_pagesz_mem);\n \t\t\t\tn_segs = cur_mem / hugepage_sz;\n \n-\t\t\t\tif (memseg_list_init(msl, hugepage_sz, n_segs,\n-\t\t\t\t\t\tsocket_id, type_msl_idx)) {\n+\t\t\t\tif (eal_memseg_list_init(msl, hugepage_sz,\n+\t\t\t\t\t\tn_segs, socket_id, type_msl_idx,\n+\t\t\t\t\t\ttrue)) {\n \t\t\t\t\t/* failing to allocate a memseg list is\n \t\t\t\t\t * a serious error.\n \t\t\t\t\t */\n@@ -2136,7 +1813,7 @@ memseg_primary_init_32(void)\n \t\t\t\t\treturn -1;\n \t\t\t\t}\n \n-\t\t\t\tif (memseg_list_alloc(msl)) {\n+\t\t\t\tif (eal_memseg_list_alloc(msl, 0)) {\n \t\t\t\t\t/* if we couldn't allocate VA space, we\n \t\t\t\t\t * can try with smaller page sizes.\n \t\t\t\t\t */\n@@ -2167,185 +1844,7 @@ memseg_primary_init_32(void)\n static int __rte_unused\n memseg_primary_init(void)\n {\n-\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n-\tstruct memtype {\n-\t\tuint64_t page_sz;\n-\t\tint socket_id;\n-\t} *memtypes = NULL;\n-\tint i, hpi_idx, msl_idx, ret = -1; /* fail unless told to succeed */\n-\tstruct rte_memseg_list *msl;\n-\tuint64_t max_mem, max_mem_per_type;\n-\tunsigned int max_seglists_per_type;\n-\tunsigned int n_memtypes, cur_type;\n-\n-\t/* no-huge does not need this at all */\n-\tif (internal_config.no_hugetlbfs)\n-\t\treturn 0;\n-\n-\t/*\n-\t * figuring out amount of memory we're going to have is a long and very\n-\t * involved process. the basic element we're operating with is a memory\n-\t * type, defined as a combination of NUMA node ID and page size (so that\n-\t * e.g. 
2 sockets with 2 page sizes yield 4 memory types in total).\n-\t *\n-\t * deciding amount of memory going towards each memory type is a\n-\t * balancing act between maximum segments per type, maximum memory per\n-\t * type, and number of detected NUMA nodes. the goal is to make sure\n-\t * each memory type gets at least one memseg list.\n-\t *\n-\t * the total amount of memory is limited by RTE_MAX_MEM_MB value.\n-\t *\n-\t * the total amount of memory per type is limited by either\n-\t * RTE_MAX_MEM_MB_PER_TYPE, or by RTE_MAX_MEM_MB divided by the number\n-\t * of detected NUMA nodes. additionally, maximum number of segments per\n-\t * type is also limited by RTE_MAX_MEMSEG_PER_TYPE. this is because for\n-\t * smaller page sizes, it can take hundreds of thousands of segments to\n-\t * reach the above specified per-type memory limits.\n-\t *\n-\t * additionally, each type may have multiple memseg lists associated\n-\t * with it, each limited by either RTE_MAX_MEM_MB_PER_LIST for bigger\n-\t * page sizes, or RTE_MAX_MEMSEG_PER_LIST segments for smaller ones.\n-\t *\n-\t * the number of memseg lists per type is decided based on the above\n-\t * limits, and also taking number of detected NUMA nodes, to make sure\n-\t * that we don't run out of memseg lists before we populate all NUMA\n-\t * nodes with memory.\n-\t *\n-\t * we do this in three stages. first, we collect the number of types.\n-\t * then, we figure out memory constraints and populate the list of\n-\t * would-be memseg lists. then, we go ahead and allocate the memseg\n-\t * lists.\n-\t */\n-\n-\t/* create space for mem types */\n-\tn_memtypes = internal_config.num_hugepage_sizes * rte_socket_count();\n-\tmemtypes = calloc(n_memtypes, sizeof(*memtypes));\n-\tif (memtypes == NULL) {\n-\t\tRTE_LOG(ERR, EAL, \"Cannot allocate space for memory types\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\t/* populate mem types */\n-\tcur_type = 0;\n-\tfor (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;\n-\t\t\thpi_idx++) {\n-\t\tstruct hugepage_info *hpi;\n-\t\tuint64_t hugepage_sz;\n-\n-\t\thpi = &internal_config.hugepage_info[hpi_idx];\n-\t\thugepage_sz = hpi->hugepage_sz;\n-\n-\t\tfor (i = 0; i < (int) rte_socket_count(); i++, cur_type++) {\n-\t\t\tint socket_id = rte_socket_id_by_idx(i);\n-\n-#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES\n-\t\t\t/* we can still sort pages by socket in legacy mode */\n-\t\t\tif (!internal_config.legacy_mem && socket_id > 0)\n-\t\t\t\tbreak;\n-#endif\n-\t\t\tmemtypes[cur_type].page_sz = hugepage_sz;\n-\t\t\tmemtypes[cur_type].socket_id = socket_id;\n-\n-\t\t\tRTE_LOG(DEBUG, EAL, \"Detected memory type: \"\n-\t\t\t\t\"socket_id:%u hugepage_sz:%\" PRIu64 \"\\n\",\n-\t\t\t\tsocket_id, hugepage_sz);\n-\t\t}\n-\t}\n-\t/* number of memtypes could have been lower due to no NUMA support */\n-\tn_memtypes = cur_type;\n-\n-\t/* set up limits for types */\n-\tmax_mem = (uint64_t)RTE_MAX_MEM_MB << 20;\n-\tmax_mem_per_type = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20,\n-\t\t\tmax_mem / n_memtypes);\n-\t/*\n-\t * limit maximum number of segment lists per type to ensure there's\n-\t * space for memseg lists for all NUMA nodes with all page sizes\n-\t */\n-\tmax_seglists_per_type = RTE_MAX_MEMSEG_LISTS / n_memtypes;\n-\n-\tif (max_seglists_per_type == 0) {\n-\t\tRTE_LOG(ERR, EAL, \"Cannot accommodate all memory types, please increase %s\\n\",\n-\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n-\t\tgoto out;\n-\t}\n-\n-\t/* go through all mem types and create segment lists */\n-\tmsl_idx = 0;\n-\tfor (cur_type = 0; cur_type < 
n_memtypes; cur_type++) {\n-\t\tunsigned int cur_seglist, n_seglists, n_segs;\n-\t\tunsigned int max_segs_per_type, max_segs_per_list;\n-\t\tstruct memtype *type = &memtypes[cur_type];\n-\t\tuint64_t max_mem_per_list, pagesz;\n-\t\tint socket_id;\n-\n-\t\tpagesz = type->page_sz;\n-\t\tsocket_id = type->socket_id;\n-\n-\t\t/*\n-\t\t * we need to create segment lists for this type. we must take\n-\t\t * into account the following things:\n-\t\t *\n-\t\t * 1. total amount of memory we can use for this memory type\n-\t\t * 2. total amount of memory per memseg list allowed\n-\t\t * 3. number of segments needed to fit the amount of memory\n-\t\t * 4. number of segments allowed per type\n-\t\t * 5. number of segments allowed per memseg list\n-\t\t * 6. number of memseg lists we are allowed to take up\n-\t\t */\n-\n-\t\t/* calculate how much segments we will need in total */\n-\t\tmax_segs_per_type = max_mem_per_type / pagesz;\n-\t\t/* limit number of segments to maximum allowed per type */\n-\t\tmax_segs_per_type = RTE_MIN(max_segs_per_type,\n-\t\t\t\t(unsigned int)RTE_MAX_MEMSEG_PER_TYPE);\n-\t\t/* limit number of segments to maximum allowed per list */\n-\t\tmax_segs_per_list = RTE_MIN(max_segs_per_type,\n-\t\t\t\t(unsigned int)RTE_MAX_MEMSEG_PER_LIST);\n-\n-\t\t/* calculate how much memory we can have per segment list */\n-\t\tmax_mem_per_list = RTE_MIN(max_segs_per_list * pagesz,\n-\t\t\t\t(uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20);\n-\n-\t\t/* calculate how many segments each segment list will have */\n-\t\tn_segs = RTE_MIN(max_segs_per_list, max_mem_per_list / pagesz);\n-\n-\t\t/* calculate how many segment lists we can have */\n-\t\tn_seglists = RTE_MIN(max_segs_per_type / n_segs,\n-\t\t\t\tmax_mem_per_type / max_mem_per_list);\n-\n-\t\t/* limit number of segment lists according to our maximum */\n-\t\tn_seglists = RTE_MIN(n_seglists, max_seglists_per_type);\n-\n-\t\tRTE_LOG(DEBUG, EAL, \"Creating %i segment lists: \"\n-\t\t\t\t\"n_segs:%i socket_id:%i hugepage_sz:%\" PRIu64 \"\\n\",\n-\t\t\tn_seglists, n_segs, socket_id, pagesz);\n-\n-\t\t/* create all segment lists */\n-\t\tfor (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) {\n-\t\t\tif (msl_idx >= RTE_MAX_MEMSEG_LISTS) {\n-\t\t\t\tRTE_LOG(ERR, EAL,\n-\t\t\t\t\t\"No more space in memseg lists, please increase %s\\n\",\n-\t\t\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n-\t\t\t\tgoto out;\n-\t\t\t}\n-\t\t\tmsl = &mcfg->memsegs[msl_idx++];\n-\n-\t\t\tif (memseg_list_init(msl, pagesz, n_segs,\n-\t\t\t\t\tsocket_id, cur_seglist))\n-\t\t\t\tgoto out;\n-\n-\t\t\tif (memseg_list_alloc(msl)) {\n-\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space for memseg list\\n\");\n-\t\t\t\tgoto out;\n-\t\t\t}\n-\t\t}\n-\t}\n-\t/* we're successful */\n-\tret = 0;\n-out:\n-\tfree(memtypes);\n-\treturn ret;\n+\treturn eal_dynmem_memseg_lists_init();\n }\n \n static int\n@@ -2369,7 +1868,7 @@ memseg_secondary_init(void)\n \t\t}\n \n \t\t/* preallocate VA space */\n-\t\tif (memseg_list_alloc(msl)) {\n+\t\tif (eal_memseg_list_alloc(msl, 0)) {\n \t\t\tRTE_LOG(ERR, EAL, \"Cannot preallocate VA space for hugepage memory\\n\");\n \t\t\treturn -1;\n \t\t}\n",
    "prefixes": [
        "v9",
        "05/12"
    ]
}
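
For consumers of this response, the most useful fields are "state", "check"
(the aggregate CI verdict, "fail" for this patch; per-check details sit behind
the "checks" URL), and "mbox", which serves the patch in a form "git am" can
apply directly. A minimal sketch, again assuming the Python "requests"
library:

    import requests
    import subprocess

    patch = requests.get("https://patches.dpdk.org/api/patches/71525/").json()

    # Aggregate CI verdict for the patch
    if patch["check"] != "success":
        print(f"warning: checks state is {patch['check']!r}")

    # Download the mbox and apply it to the current git tree
    mbox = requests.get(patch["mbox"]).content
    subprocess.run(["git", "am"], input=mbox, check=True)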