get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/100998/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 100998,
    "url": "http://patches.dpdk.org/api/patches/100998/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211011085644.2716490-3-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211011085644.2716490-3-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211011085644.2716490-3-dkozlyuk@nvidia.com",
    "date": "2021-10-11T08:56:43",
    "name": "[v6,2/3] eal: add memory pre-allocation from existing files",
    "commit_ref": null,
    "pull_url": null,
    "state": "not-applicable",
    "archived": true,
    "hash": "457660c701be72f5abea331b3e974f57cbf5d8ae",
    "submitter": {
        "id": 2367,
        "url": "http://patches.dpdk.org/api/people/2367/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@oss.nvidia.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211011085644.2716490-3-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 19509,
            "url": "http://patches.dpdk.org/api/series/19509/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19509",
            "date": "2021-10-11T08:56:41",
            "name": "eal: add memory pre-allocation from existing files",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/19509/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/100998/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/100998/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4A0DDA034F;\n\tMon, 11 Oct 2021 10:57:17 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 978A041109;\n\tMon, 11 Oct 2021 10:57:08 +0200 (CEST)",
            "from AZHDRRW-EX01.nvidia.com (azhdrrw-ex01.nvidia.com\n [20.51.104.162]) by mails.dpdk.org (Postfix) with ESMTP id C0202410F0\n for <dev@dpdk.org>; Mon, 11 Oct 2021 10:57:06 +0200 (CEST)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com (104.47.57.172)\n by mxs.oss.nvidia.com (10.13.234.36) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.2.858.15; Mon, 11 Oct 2021 01:57:06 -0700",
            "from BN8PR04CA0011.namprd04.prod.outlook.com (2603:10b6:408:70::24)\n by BN6PR12MB1332.namprd12.prod.outlook.com (2603:10b6:404:15::10)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4587.18; Mon, 11 Oct\n 2021 08:57:04 +0000",
            "from BN8NAM11FT067.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:70:cafe::1f) by BN8PR04CA0011.outlook.office365.com\n (2603:10b6:408:70::24) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4587.25 via Frontend\n Transport; Mon, 11 Oct 2021 08:57:04 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT067.mail.protection.outlook.com (10.13.177.159) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4587.18 via Frontend Transport; Mon, 11 Oct 2021 08:57:03 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Mon, 11 Oct\n 2021 08:57:01 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=PZdrJzKSsCPkYT8Dcuo1tLFng0gxNWKI0SdlubrjnuKrUAchGKNKB2ItfKD1UGcwIdF0HCZ0JX8LMFNxJIQoaF8QSEEh76xPz+NfVRlj7oRMs7cFWlC4Y9K+u3UY/my6yaYry+ixG24TMr9DSs8S/yKLaytWp7FHKQ3oaFlDBVn72SNJpIU7B8Qc20pff7gBivgUd3c8upVYSM3hkFmJqd7RWUFtEZX8jUBSU43JMiS9q5enhLhIJzlsmoG9v2nr8oAC5ETTz0EGc1f7seTs/zvw9siDv6JyZVXIFLoUUQACG2AK7l01BuZbuqvNo4Ar5SdhlMCARWn1Vuwx6Qy/2g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=RrS/gUz9/i2zn3pDFF9ZrxuKdod83y1YBpZIEt62Qrw=;\n b=hAP9dbjgY66LxLbSjyvuhXicGMV9YyPiu+bE8ST8y75vYU5BTsF8oDMDWihvbDuww1naSAupc+/pyPXnzH6dzAFkDZ2v7P24SxweDhBcZUPq3NU61EJ+ldaRVksmDJeBMIOwSVNEUBjamtyt+3aPbYq3ESiGUxWg63hzT5/drGo/P20l1wCIYER9Q7+oVNt+8zGQaYS23TSb5Hu+X+fJkTs2uCyoXyj5pDKy2rUDJ+QsbmZoH/Lj7jsm2ULNiraj/EQDiQdSDY9z9HElcVzv1RDFlbLWIafvWALirSV4XI007fsqC9uzo3P0YXXq11AkkFnHrEn6/QW21jtRG34RRg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=intel.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=RrS/gUz9/i2zn3pDFF9ZrxuKdod83y1YBpZIEt62Qrw=;\n b=arA69jIERI8ZL5IgcLUrNFBj678HMQOi2naoS54jRh8eSAisxAZ794EVyo38AFvzNdrsKzvF/oanvv8hzo9z0sldFMxNOeaTNNnC2ObV7dNZexqLNYuzSl5O5yLb1gLmJ9pOWU/TQ57/ZRWBAVNB1ih+x6ZkNbNf59U7CC1hC4laXojNsWuFDX+T2lgU7rP+K/Fx3qjHBhYz4h1cFpbAFl6h7HgNN9ji22m7LZRQrhQgeDYAuRuyI6HmwmJWqNV5Okf2d9zR0voz91BZ77/mKMJ7Hw2PmvtFk6xJODaWVUTQQ6UpZEBu7+ToCHNhfLZD3Uix2kQ8rO0bFU7DRWfKPw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; intel.com; dkim=none (message not signed)\n header.d=none;intel.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Dmitry Kozlyuk <dkozlyuk@oss.nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Viacheslav Ovsiienko <viacheslavo@oss.nvidia.com>, Anatoly Burakov\n <anatoly.burakov@intel.com>",
        "Date": "Mon, 11 Oct 2021 11:56:43 +0300",
        "Message-ID": "<20211011085644.2716490-3-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20211011085644.2716490-1-dkozlyuk@nvidia.com>",
        "References": "<20210921081632.858873-1-dkozlyuk@nvidia.com>\n <20211011085644.2716490-1-dkozlyuk@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "d3faa9a3-14cd-4cf2-6384-08d98c951897",
        "X-MS-TrafficTypeDiagnostic": "BN6PR12MB1332:",
        "X-Microsoft-Antispam-PRVS": "\n <BN6PR12MB1332E92933FF3240286A3E8EB9B59@BN6PR12MB1332.namprd12.prod.outlook.com>",
        "X-MS-Exchange-Transport-Forked": "True",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:4502;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n uDnwPII3YmKZq1l5/GBh1xSZjB3kbQ1vnLQpclOqY2AxF1cxioCUveYIEEv0PVgegntybxWN7UNRfPQUDJ9O1yLSj+F1IT6HLU74vErPTu8g6faY7PNjod5Z99Awxy5g7cOnO4BJrEwwAjg1EbUa9+ZxfTYVKfI/+3SRuQdkxf9JIDJlK55bZ5hsB5LrzlRYHDU3MZqo7jeYcFiT/wubiVosbpp5+hsPM8Jmcsu/mkgq5ldOy9YGajcMQor9nb8uftRBionYxNIaQU+2tf/FOmIZ6RvEgOgx2udzZE9udcr75VJAY1CzjgVaCeTzGNWmEkDcpqLD8ndjln71YwzPNRsP34vEYUXh8Lc8UVC7Kpw7/D+Mgs0BpJOVAn39+HdAq/7REi145gss6qT9X0Hk7XqTbyxbrsNhu0J58O6YN4100HeLnfe4Un/QAZzOrxvXM5yAMQJUirIhvw7FJT7sqFa3YcQ6TVkv5SQ84xzXXAze/hRWPfhafBDVyIzcPqQBT4A8i4sx8kgssnw2dOHHgp00oFtuZ7Yzkqp6gcIEArJzyfEF0Ex0FtK0svqAE6+VF+Uktp0qSe/4N+Ge/qNEWlZi5SENleJYS7ZNSfIl74HBHOfmL/hYv36/LbW7pX4u/FFco/sf92mMcvNnxpYuDKYE2Yro843n9+//pROsHwq4eeKJRrhYpX5vmxI6s0T/lNNWoHzmXCYfOQPzdW9JPg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(7636003)(508600001)(36756003)(30864003)(54906003)(5660300002)(356005)(4326008)(55016002)(70206006)(70586007)(26005)(8936002)(8676002)(1076003)(186003)(16526019)(7696005)(6286002)(316002)(6666004)(336012)(426003)(2906002)(107886003)(36860700001)(82310400003)(47076005)(2616005)(86362001)(6916009)(83380400001);\n DIR:OUT; SFP:1101;",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "11 Oct 2021 08:57:03.8457 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n d3faa9a3-14cd-4cf2-6384-08d98c951897",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT067.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR12MB1332",
        "Subject": "[dpdk-dev] [PATCH v6 2/3] eal: add memory pre-allocation from\n existing files",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n\nThe primary DPDK process launch might take a long time if initially\nallocated memory is large. From practice allocation of 1 TB of memory\nover 1 GB hugepages on Linux takes tens of seconds. Fast restart\nis highly desired for some applications and launch delay presents\na problem.\n\nThe primary delay happens in this call trace:\n  rte_eal_init()\n    rte_eal_memory_init()\n      rte_eal_hugepage_init()\n        eal_dynmem_hugepage_init()\n\t  eal_memalloc_alloc_seg_bulk()\n\t    alloc_seg()\n              mmap()\n\nThe largest part of the time spent in mmap() is filling the memory\nwith zeros. Kernel does so to prevent data leakage from a process\nthat was last using the page. However, in a controlled environment\nit may not be the issue, while performance is. (Linux-specific\nMAP_UNINITIALIZED flag allows mapping without clearing, but it is\ndisabled in all popular distributions for the reason above.)\n\nIt is proposed to add a new EAL option: --mem-file FILE1,FILE2,...\nto map hugepages \"as is\" from specified FILEs in hugetlbfs.\nCompared to using external memory for the task, EAL option requires\nno change to application code, while allowing administrator\nto control hugepage sizes and their NUMA affinity.\n\nLimitations of the feature:\n\n* Linux-specific (only Linux maps hugepages from files).\n* Incompatible with --legacy-mem (partially replaces it).\n* Incompatible with --single-file-segments\n  (--mem-file FILEs can contain as many segments as needed).\n* Incompatible with --in-memory (logically).\n\nA warning about possible security implications is printed\nwhen --mem-file is used.\n\nUntil this patch DPDK allocator always cleared memory on freeing,\nso that it did not have to do that on allocation, while new memory\nwas cleared by the kernel. 
When --mem-file is in use, DPDK clears memory\nafter allocation in rte_zmalloc() and does not clean it on freeing.\nEffectively user trades fast startup for occasional allocation slowdown\nwhenever it is absolutely necessary. When memory is recycled, it is\ncleared again, which is suboptimal par se, but saves complication\nof memory management.\n\nSigned-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>\n---\n doc/guides/linux_gsg/linux_eal_parameters.rst |  17 +\n lib/eal/common/eal_common_dynmem.c            |   6 +\n lib/eal/common/eal_common_options.c           |  23 ++\n lib/eal/common/eal_internal_cfg.h             |   4 +\n lib/eal/common/eal_memalloc.h                 |   8 +-\n lib/eal/common/eal_options.h                  |   2 +\n lib/eal/common/malloc_elem.c                  |   5 +\n lib/eal/common/malloc_heap.h                  |   8 +\n lib/eal/common/rte_malloc.c                   |  16 +-\n lib/eal/include/rte_memory.h                  |   4 +-\n lib/eal/linux/eal.c                           |  28 ++\n lib/eal/linux/eal_hugepage_info.c             |   5 +\n lib/eal/linux/eal_memalloc.c                  | 328 +++++++++++++++++-\n 13 files changed, 441 insertions(+), 13 deletions(-)",
    "diff": "diff --git a/doc/guides/linux_gsg/linux_eal_parameters.rst b/doc/guides/linux_gsg/linux_eal_parameters.rst\nindex bd3977cb3d..b465feaea8 100644\n--- a/doc/guides/linux_gsg/linux_eal_parameters.rst\n+++ b/doc/guides/linux_gsg/linux_eal_parameters.rst\n@@ -92,6 +92,23 @@ Memory-related options\n \n     Free hugepages back to system exactly as they were originally allocated.\n \n+*   ``--mem-file <pre-allocated files>``\n+\n+    Use memory from pre-allocated files in ``hugetlbfs`` without clearing it;\n+    when this memory is exhausted, switch to default dynamic allocation.\n+    This speeds up startup compared to ``--legacy-mem`` while also avoiding\n+    later delays for allocating new hugepages. One downside is slowdown\n+    of all zeroed memory allocations. Security warning: an application\n+    can access contents left by previous users of hugepages. Multiple files\n+    can be pre-allocated in ``hugetlbfs`` with different page sizes,\n+    on desired NUMA nodes, using ``mount`` options and ``numactl``:\n+\n+        --mem-file /mnt/huge-1G/node0,/mnt/huge-1G/node1,/mnt/huge-2M/extra\n+\n+    This option is incompatible with ``--legacy-mem``, ``--in-memory``,\n+    and ``--single-file-segments``. 
Primary and secondary processes\n+    must specify exactly the same list of files.\n+\n Other options\n ~~~~~~~~~~~~~\n \ndiff --git a/lib/eal/common/eal_common_dynmem.c b/lib/eal/common/eal_common_dynmem.c\nindex 7c5437ddfa..abcf22f097 100644\n--- a/lib/eal/common/eal_common_dynmem.c\n+++ b/lib/eal/common/eal_common_dynmem.c\n@@ -272,6 +272,12 @@ eal_dynmem_hugepage_init(void)\n \t\t\tinternal_conf->num_hugepage_sizes) < 0)\n \t\treturn -1;\n \n+#ifdef RTE_EXEC_ENV_LINUX\n+\t/* pre-allocate pages from --mem-file option files */\n+\tif (eal_memalloc_memfile_alloc(used_hp) < 0)\n+\t\treturn -1;\n+#endif\n+\n \tfor (hp_sz_idx = 0;\n \t\t\thp_sz_idx < (int)internal_conf->num_hugepage_sizes;\n \t\t\thp_sz_idx++) {\ndiff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c\nindex 1802e3d9e1..1265720484 100644\n--- a/lib/eal/common/eal_common_options.c\n+++ b/lib/eal/common/eal_common_options.c\n@@ -84,6 +84,7 @@ eal_long_options[] = {\n \t{OPT_TRACE_MODE,        1, NULL, OPT_TRACE_MODE_NUM       },\n \t{OPT_MAIN_LCORE,        1, NULL, OPT_MAIN_LCORE_NUM       },\n \t{OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM},\n+\t{OPT_MEM_FILE,          1, NULL, OPT_MEM_FILE_NUM         },\n \t{OPT_NO_HPET,           0, NULL, OPT_NO_HPET_NUM          },\n \t{OPT_NO_HUGE,           0, NULL, OPT_NO_HUGE_NUM          },\n \t{OPT_NO_PCI,            0, NULL, OPT_NO_PCI_NUM           },\n@@ -1879,6 +1880,8 @@ eal_cleanup_config(struct internal_config *internal_cfg)\n \t\tfree(internal_cfg->hugepage_dir);\n \tif (internal_cfg->user_mbuf_pool_ops_name != NULL)\n \t\tfree(internal_cfg->user_mbuf_pool_ops_name);\n+\tif (internal_cfg->mem_file[0])\n+\t\tfree(internal_cfg->mem_file[0]);\n \n \treturn 0;\n }\n@@ -1999,6 +2002,26 @@ eal_check_common_options(struct internal_config *internal_cfg)\n \t\t\t\"amount of reserved memory can be adjusted with \"\n \t\t\t\"-m or --\"OPT_SOCKET_MEM\"\\n\");\n \t}\n+\tif (internal_cfg->mem_file[0] && 
internal_conf->legacy_mem) {\n+\t\tRTE_LOG(ERR, EAL, \"Option --\"OPT_MEM_FILE\" is not compatible \"\n+\t\t\t\t\"with --\"OPT_LEGACY_MEM\"\\n\");\n+\t\treturn -1;\n+\t}\n+\tif (internal_cfg->mem_file[0] && internal_conf->no_hugetlbfs) {\n+\t\tRTE_LOG(ERR, EAL, \"Option --\"OPT_MEM_FILE\" is not compatible \"\n+\t\t\t\t\"with --\"OPT_NO_HUGE\"\\n\");\n+\t\treturn -1;\n+\t}\n+\tif (internal_cfg->mem_file[0] && internal_conf->in_memory) {\n+\t\tRTE_LOG(ERR, EAL, \"Option --\"OPT_MEM_FILE\" is not compatible \"\n+\t\t\t\t\"with --\"OPT_IN_MEMORY\"\\n\");\n+\t\treturn -1;\n+\t}\n+\tif (internal_cfg->mem_file[0] && internal_conf->single_file_segments) {\n+\t\tRTE_LOG(ERR, EAL, \"Option --\"OPT_MEM_FILE\" is not compatible \"\n+\t\t\t\t\"with --\"OPT_SINGLE_FILE_SEGMENTS\"\\n\");\n+\t\treturn -1;\n+\t}\n \n \treturn 0;\n }\ndiff --git a/lib/eal/common/eal_internal_cfg.h b/lib/eal/common/eal_internal_cfg.h\nindex d6c0470eb8..814d5c66e1 100644\n--- a/lib/eal/common/eal_internal_cfg.h\n+++ b/lib/eal/common/eal_internal_cfg.h\n@@ -22,6 +22,9 @@\n #define MAX_HUGEPAGE_SIZES 3  /**< support up to 3 page sizes */\n #endif\n \n+#define MAX_MEMFILE_ITEMS (MAX_HUGEPAGE_SIZES * RTE_MAX_NUMA_NODES)\n+/**< Maximal number of mem-file parameters. 
*/\n+\n /*\n  * internal configuration structure for the number, size and\n  * mount points of hugepages\n@@ -83,6 +86,7 @@ struct internal_config {\n \trte_uuid_t vfio_vf_token;\n \tchar *hugefile_prefix;      /**< the base filename of hugetlbfs files */\n \tchar *hugepage_dir;         /**< specific hugetlbfs directory to use */\n+\tchar *mem_file[MAX_MEMFILE_ITEMS]; /**< pre-allocated memory files */\n \tchar *user_mbuf_pool_ops_name;\n \t\t\t/**< user defined mbuf pool ops name */\n \tunsigned num_hugepage_sizes;      /**< how many sizes on this system */\ndiff --git a/lib/eal/common/eal_memalloc.h b/lib/eal/common/eal_memalloc.h\nindex ebc3a6f6c1..d92c9a167b 100644\n--- a/lib/eal/common/eal_memalloc.h\n+++ b/lib/eal/common/eal_memalloc.h\n@@ -8,7 +8,7 @@\n #include <stdbool.h>\n \n #include <rte_memory.h>\n-\n+#include \"eal_internal_cfg.h\"\n /*\n  * Allocate segment of specified page size.\n  */\n@@ -96,4 +96,10 @@ eal_memalloc_init(void);\n int\n eal_memalloc_cleanup(void);\n \n+int\n+eal_memalloc_memfile_init(void);\n+\n+int\n+eal_memalloc_memfile_alloc(struct hugepage_info *hpa);\n+\n #endif /* EAL_MEMALLOC_H */\ndiff --git a/lib/eal/common/eal_options.h b/lib/eal/common/eal_options.h\nindex 8e4f7202a2..5c012c8125 100644\n--- a/lib/eal/common/eal_options.h\n+++ b/lib/eal/common/eal_options.h\n@@ -87,6 +87,8 @@ enum {\n \tOPT_NO_TELEMETRY_NUM,\n #define OPT_FORCE_MAX_SIMD_BITWIDTH  \"force-max-simd-bitwidth\"\n \tOPT_FORCE_MAX_SIMD_BITWIDTH_NUM,\n+#define OPT_MEM_FILE          \"mem-file\"\n+\tOPT_MEM_FILE_NUM,\n \n \tOPT_LONG_MAX_NUM\n };\ndiff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c\nindex c2c9461f1d..6e71029a3c 100644\n--- a/lib/eal/common/malloc_elem.c\n+++ b/lib/eal/common/malloc_elem.c\n@@ -578,8 +578,13 @@ malloc_elem_free(struct malloc_elem *elem)\n \t/* decrease heap's count of allocated elements */\n \telem->heap->alloc_count--;\n \n+#ifdef MALLOC_DEBUG\n \t/* poison memory */\n \tmemset(ptr, MALLOC_POISON, 
data_len);\n+#else\n+\tif (!malloc_clear_on_alloc())\n+\t\tmemset(ptr, 0, data_len);\n+#endif\n \n \treturn elem;\n }\ndiff --git a/lib/eal/common/malloc_heap.h b/lib/eal/common/malloc_heap.h\nindex 3a6ec6ecf0..cb1b5a5dd5 100644\n--- a/lib/eal/common/malloc_heap.h\n+++ b/lib/eal/common/malloc_heap.h\n@@ -10,6 +10,7 @@\n \n #include <rte_malloc.h>\n #include <rte_spinlock.h>\n+#include \"eal_private.h\"\n \n /* Number of free lists per heap, grouped by size. */\n #define RTE_HEAP_NUM_FREELISTS  13\n@@ -44,6 +45,13 @@ malloc_get_numa_socket(void)\n \treturn socket_id;\n }\n \n+static inline bool\n+malloc_clear_on_alloc(void)\n+{\n+\tconst struct internal_config *cfg = eal_get_internal_configuration();\n+\treturn cfg->mem_file[0] != NULL;\n+}\n+\n void *\n malloc_heap_alloc(const char *type, size_t size, int socket, unsigned int flags,\n \t\tsize_t align, size_t bound, bool contig);\ndiff --git a/lib/eal/common/rte_malloc.c b/lib/eal/common/rte_malloc.c\nindex 9d39e58c08..ce94268aca 100644\n--- a/lib/eal/common/rte_malloc.c\n+++ b/lib/eal/common/rte_malloc.c\n@@ -113,17 +113,23 @@ rte_malloc(const char *type, size_t size, unsigned align)\n void *\n rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)\n {\n+\tbool zero;\n \tvoid *ptr = rte_malloc_socket(type, size, align, socket);\n \n-#ifdef RTE_MALLOC_DEBUG\n \t/*\n \t * If DEBUG is enabled, then freed memory is marked with poison\n-\t * value and set to zero on allocation.\n-\t * If DEBUG is not enabled then  memory is already zeroed.\n+\t * value and must be set to zero on allocation.\n+\t * If DEBUG is not enabled then it is configurable\n+\t * whether memory comes already set to zero by memalloc or on free\n+\t * or it must be set to zero here.\n \t */\n-\tif (ptr != NULL)\n-\t\tmemset(ptr, 0, size);\n+#ifdef RTE_MALLOC_DEBUG\n+\tzero = true;\n+#else\n+\tzero = malloc_clear_on_alloc();\n #endif\n+\tif (ptr != NULL && zero)\n+\t\tmemset(ptr, 0, size);\n \n \trte_eal_trace_mem_zmalloc(type, 
size, align, socket, ptr);\n \treturn ptr;\ndiff --git a/lib/eal/include/rte_memory.h b/lib/eal/include/rte_memory.h\nindex 6d018629ae..579358e29e 100644\n--- a/lib/eal/include/rte_memory.h\n+++ b/lib/eal/include/rte_memory.h\n@@ -40,7 +40,9 @@ extern \"C\" {\n /**\n  * Physical memory segment descriptor.\n  */\n-#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)\n+#define RTE_MEMSEG_FLAG_DO_NOT_FREE   (1 << 0)\n+#define RTE_MEMSEG_FLAG_PRE_ALLOCATED (1 << 1)\n+\n /**< Prevent this segment from being freed back to the OS. */\n struct rte_memseg {\n \trte_iova_t iova;            /**< Start IO address. */\ndiff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c\nindex 3577eaeaa4..d0afcd8326 100644\n--- a/lib/eal/linux/eal.c\n+++ b/lib/eal/linux/eal.c\n@@ -548,6 +548,7 @@ eal_usage(const char *prgname)\n \t       \"  --\"OPT_LEGACY_MEM\"        Legacy memory mode (no dynamic allocation, contiguous segments)\\n\"\n \t       \"  --\"OPT_SINGLE_FILE_SEGMENTS\" Put all hugepage memory in single files\\n\"\n \t       \"  --\"OPT_MATCH_ALLOCATIONS\" Free hugepages exactly as allocated\\n\"\n+\t       \"  --\"OPT_MEM_FILE\"          Comma-separated list of files in hugetlbfs.\\n\"\n \t       \"\\n\");\n \t/* Allow the application to print its usage message too if hook is set */\n \tif (hook) {\n@@ -678,6 +679,22 @@ eal_log_level_parse(int argc, char **argv)\n \toptarg = old_optarg;\n }\n \n+static int\n+eal_parse_memfile_arg(const char *arg, char **mem_file)\n+{\n+\tint ret;\n+\n+\tchar *copy = strdup(arg);\n+\tif (copy == NULL) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot store --\"OPT_MEM_FILE\" names\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tret = rte_strsplit(copy, strlen(copy), mem_file,\n+\t\t\tMAX_MEMFILE_ITEMS, ',');\n+\treturn ret <= 0 ? 
-1 : 0;\n+}\n+\n /* Parse the argument given in the command line of the application */\n static int\n eal_parse_args(int argc, char **argv)\n@@ -819,6 +836,17 @@ eal_parse_args(int argc, char **argv)\n \t\t\tinternal_conf->match_allocations = 1;\n \t\t\tbreak;\n \n+\t\tcase OPT_MEM_FILE_NUM:\n+\t\t\tif (eal_parse_memfile_arg(optarg,\n+\t\t\t\t\tinternal_conf->mem_file) < 0) {\n+\t\t\t\tRTE_LOG(ERR, EAL, \"invalid parameters for --\"\n+\t\t\t\t\t\tOPT_MEM_FILE \"\\n\");\n+\t\t\t\teal_usage(prgname);\n+\t\t\t\tret = -1;\n+\t\t\t\tgoto out;\n+\t\t\t}\n+\t\t\tbreak;\n+\n \t\tdefault:\n \t\t\tif (opt < OPT_LONG_MIN_NUM && isprint(opt)) {\n \t\t\t\tRTE_LOG(ERR, EAL, \"Option %c is not supported \"\ndiff --git a/lib/eal/linux/eal_hugepage_info.c b/lib/eal/linux/eal_hugepage_info.c\nindex 193282e779..dfbb49ada9 100644\n--- a/lib/eal/linux/eal_hugepage_info.c\n+++ b/lib/eal/linux/eal_hugepage_info.c\n@@ -37,6 +37,7 @@\n #include \"eal_hugepages.h\"\n #include \"eal_hugepage_info.h\"\n #include \"eal_filesystem.h\"\n+#include \"eal_memalloc.h\"\n \n static const char sys_dir_path[] = \"/sys/kernel/mm/hugepages\";\n static const char sys_pages_numa_dir_path[] = \"/sys/devices/system/node\";\n@@ -515,6 +516,10 @@ hugepage_info_init(void)\n \tqsort(&internal_conf->hugepage_info[0], num_sizes,\n \t      sizeof(internal_conf->hugepage_info[0]), compare_hpi);\n \n+\t/* add pre-allocated pages with --mem-file option to available ones */\n+\tif (eal_memalloc_memfile_init())\n+\t\treturn -1;\n+\n \t/* now we have all info, check we have at least one valid size */\n \tfor (i = 0; i < num_sizes; i++) {\n \t\t/* pages may no longer all be on socket 0, so check all */\ndiff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c\nindex 0ec8542283..c2b3586204 100644\n--- a/lib/eal/linux/eal_memalloc.c\n+++ b/lib/eal/linux/eal_memalloc.c\n@@ -18,6 +18,7 @@\n #include <unistd.h>\n #include <limits.h>\n #include <fcntl.h>\n+#include <mntent.h>\n #include <sys/ioctl.h>\n #include 
<sys/time.h>\n #include <signal.h>\n@@ -41,6 +42,7 @@\n #include <rte_spinlock.h>\n \n #include \"eal_filesystem.h\"\n+#include \"eal_hugepage_info.h\"\n #include \"eal_internal_cfg.h\"\n #include \"eal_memalloc.h\"\n #include \"eal_memcfg.h\"\n@@ -102,6 +104,19 @@ static struct {\n \tint count; /**< entries used in an array */\n } fd_list[RTE_MAX_MEMSEG_LISTS];\n \n+struct memfile {\n+\tchar *fname;\t\t/**< file name */\n+\tuint64_t hugepage_sz;\t/**< size of a huge page */\n+\tuint32_t num_pages;\t/**< number of pages */\n+\tuint32_t num_allocated;\t/**< number of already allocated pages */\n+\tint socket_id;\t\t/**< Socket ID  */\n+\tint fd;\t\t\t/**< file descriptor */\n+};\n+\n+struct memfile mem_file[MAX_MEMFILE_ITEMS];\n+\n+static int alloc_memfile;\n+\n /** local copy of a memory map, used to synchronize memory hotplug in MP */\n static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];\n \n@@ -542,6 +557,26 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t\t * stage.\n \t\t */\n \t\tmap_offset = 0;\n+\t} else if (alloc_memfile) {\n+\t\tuint32_t mf;\n+\n+\t\tfor (mf = 0; mf < RTE_DIM(mem_file); mf++) {\n+\t\t\tif (alloc_sz == mem_file[mf].hugepage_sz &&\n+\t\t\t    socket_id == mem_file[mf].socket_id &&\n+\t\t\t    mem_file[mf].num_allocated < mem_file[mf].num_pages)\n+\t\t\t\tbreak;\n+\t\t}\n+\t\tif (mf >= RTE_DIM(mem_file)) {\n+\t\t\tRTE_LOG(ERR, EAL,\n+\t\t\t\t\"%s() cannot allocate from memfile\\n\",\n+\t\t\t\t__func__);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tfd = mem_file[mf].fd;\n+\t\tfd_list[list_idx].fds[seg_idx] = fd;\n+\t\tmap_offset = mem_file[mf].num_allocated * alloc_sz;\n+\t\tmmap_flags = MAP_SHARED | MAP_POPULATE | MAP_FIXED;\n+\t\tmem_file[mf].num_allocated++;\n \t} else {\n \t\t/* takes out a read lock on segment or segment list */\n \t\tfd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);\n@@ -683,6 +718,10 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \tif (fd < 0)\n \t\treturn -1;\n \n+\t/* 
don't cleanup pre-allocated files */\n+\tif (alloc_memfile)\n+\t\treturn -1;\n+\n \tif (internal_conf->single_file_segments) {\n \t\tresize_hugefile(fd, map_offset, alloc_sz, false);\n \t\t/* ignore failure, can't make it any worse */\n@@ -712,8 +751,9 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,\n \tconst struct internal_config *internal_conf =\n \t\teal_get_internal_configuration();\n \n-\t/* erase page data */\n-\tmemset(ms->addr, 0, ms->len);\n+\t/* Erase page data unless it's pre-allocated files. */\n+\tif (!alloc_memfile)\n+\t\tmemset(ms->addr, 0, ms->len);\n \n \tif (mmap(ms->addr, ms->len, PROT_NONE,\n \t\t\tMAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==\n@@ -724,8 +764,12 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,\n \n \teal_mem_set_dump(ms->addr, ms->len, false);\n \n-\t/* if we're using anonymous hugepages, nothing to be done */\n-\tif (internal_conf->in_memory && !memfd_create_supported) {\n+\t/*\n+\t * if we're using anonymous hugepages or pre-allocated files,\n+\t * nothing to be done\n+\t */\n+\tif ((internal_conf->in_memory && !memfd_create_supported) ||\n+\t\t\talloc_memfile) {\n \t\tmemset(ms, 0, sizeof(*ms));\n \t\treturn 0;\n \t}\n@@ -838,7 +882,9 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)\n \t * during init, we already hold a write lock, so don't try to take out\n \t * another one.\n \t */\n-\tif (wa->hi->lock_descriptor == -1 && !internal_conf->in_memory) {\n+\tif (wa->hi->lock_descriptor == -1 &&\n+\t    !internal_conf->in_memory &&\n+\t    !alloc_memfile) {\n \t\tdir_fd = open(wa->hi->hugedir, O_RDONLY);\n \t\tif (dir_fd < 0) {\n \t\t\tRTE_LOG(ERR, EAL, \"%s(): Cannot open '%s': %s\\n\",\n@@ -868,7 +914,7 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)\n \t\t\t\tneed, i);\n \n \t\t\t/* if exact number wasn't requested, stop */\n-\t\t\tif (!wa->exact)\n+\t\t\tif (!wa->exact || alloc_memfile)\n \t\t\t\tgoto out;\n \n \t\t\t/* clean up */\n@@ -1120,6 +1166,262 @@ 
eal_memalloc_free_seg(struct rte_memseg *ms)\n \treturn eal_memalloc_free_seg_bulk(&ms, 1);\n }\n \n+static int\n+memfile_fill_socket_id(struct memfile *mf)\n+{\n+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES\n+\tvoid *va;\n+\tint ret;\n+\n+\tva = mmap(NULL, mf->hugepage_sz, PROT_READ | PROT_WRITE,\n+\t\t\tMAP_SHARED | MAP_POPULATE, mf->fd, 0);\n+\tif (va == MAP_FAILED) {\n+\t\tRTE_LOG(ERR, EAL, \"%s(): %s: mmap(): %s\\n\",\n+\t\t\t\t__func__, mf->fname, strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\tret = 0;\n+\tif (check_numa()) {\n+\t\tif (get_mempolicy(&mf->socket_id, NULL, 0, va,\n+\t\t\t\tMPOL_F_NODE | MPOL_F_ADDR) < 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: get_mempolicy(): %s\\n\",\n+\t\t\t\t__func__, mf->fname, strerror(errno));\n+\t\t\tret = -1;\n+\t\t}\n+\t} else\n+\t\tmf->socket_id = 0;\n+\n+\tmunmap(va, mf->hugepage_sz);\n+\treturn ret;\n+#else\n+\tmf->socket_id = 0;\n+\treturn 0;\n+#endif\n+}\n+\n+struct match_memfile_path_arg {\n+\tconst char *path;\n+\tuint64_t file_sz;\n+\tuint64_t hugepage_sz;\n+\tsize_t best_len;\n+};\n+\n+/*\n+ * While it is unlikely for hugetlbfs, mount points can be nested.\n+ * Find the deepest mount point that contains the file.\n+ */\n+static int\n+match_memfile_path(const char *path, uint64_t hugepage_sz, void *cb_arg)\n+{\n+\tstruct match_memfile_path_arg *arg = cb_arg;\n+\tsize_t dir_len = strlen(path);\n+\n+\tif (dir_len < arg->best_len)\n+\t\treturn 0;\n+\tif (strncmp(path, arg->path, dir_len) != 0)\n+\t\treturn 0;\n+\tif (arg->file_sz % hugepage_sz != 0)\n+\t\treturn 0;\n+\n+\targ->hugepage_sz = hugepage_sz;\n+\targ->best_len = dir_len;\n+\treturn 0;\n+}\n+\n+/* Determine hugepage size from the path to a file in hugetlbfs. 
*/\n+static int\n+memfile_fill_hugepage_sz(struct memfile *mf, uint64_t file_sz)\n+{\n+\tchar abspath[PATH_MAX];\n+\tstruct match_memfile_path_arg arg;\n+\n+\tif (realpath(mf->fname, abspath) == NULL) {\n+\t\tRTE_LOG(ERR, EAL, \"%s(): realpath(): %s\\n\",\n+\t\t\t\t__func__, strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\tmemset(&arg, 0, sizeof(arg));\n+\targ.path = abspath;\n+\targ.file_sz = file_sz;\n+\tif (eal_hugepage_mount_walk(match_memfile_path, &arg) == 0 &&\n+\t\t\targ.hugepage_sz != 0) {\n+\t\tmf->hugepage_sz = arg.hugepage_sz;\n+\t\treturn 0;\n+\t}\n+\treturn -1;\n+}\n+\n+int\n+eal_memalloc_memfile_init(void)\n+{\n+\tstruct internal_config *internal_conf =\n+\t\t\teal_get_internal_configuration();\n+\tint err = -1, fd;\n+\tuint32_t i;\n+\n+\tif (internal_conf->mem_file[0] == NULL)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < RTE_DIM(internal_conf->mem_file); i++) {\n+\t\tstruct memfile *mf = &mem_file[i];\n+\t\tuint64_t fsize;\n+\n+\t\tif (internal_conf->mem_file[i] == NULL) {\n+\t\t\terr = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t\tmf->fname = internal_conf->mem_file[i];\n+\t\tfd = open(mf->fname, O_RDWR, 0600);\n+\t\tmf->fd = fd;\n+\t\tif (fd < 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: open(): %s\\n\",\n+\t\t\t\t\t__func__, mf->fname, strerror(errno));\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* take out a read lock and keep it indefinitely */\n+\t\tif (lock(fd, LOCK_SH) != 1) {\n+\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: cannot lock file\\n\",\n+\t\t\t\t\t__func__, mf->fname);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tfsize = get_file_size(fd);\n+\t\tif (!fsize) {\n+\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: zero file length\\n\",\n+\t\t\t\t\t__func__, mf->fname);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (memfile_fill_hugepage_sz(mf, fsize) < 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: cannot detect page size\\n\",\n+\t\t\t\t\t__func__, mf->fname);\n+\t\t\tbreak;\n+\t\t}\n+\t\tmf->num_pages = fsize / mf->hugepage_sz;\n+\n+\t\tif (memfile_fill_socket_id(mf) < 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: 
cannot detect NUMA node\\n\",\n+\t\t\t\t\t__func__, mf->fname);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\t/* check if some problem happened */\n+\tif (err && i < RTE_DIM(internal_conf->mem_file)) {\n+\t\t/* some error occurred, do rollback */\n+\t\tdo {\n+\t\t\tfd = mem_file[i].fd;\n+\t\t\t/* closing fd drops the lock */\n+\t\t\tif (fd >= 0)\n+\t\t\t\tclose(fd);\n+\t\t\tmem_file[i].fd = -1;\n+\t\t} while (i--);\n+\t\treturn -1;\n+\t}\n+\n+\t/* update hugepage_info with pages allocated in files */\n+\tfor (i = 0; i < RTE_DIM(mem_file); i++) {\n+\t\tconst struct memfile *mf = &mem_file[i];\n+\t\tstruct hugepage_info *hpi = NULL;\n+\t\tuint64_t sz;\n+\n+\t\tif (!mf->hugepage_sz)\n+\t\t\tbreak;\n+\n+\t\tfor (sz = 0; sz < internal_conf->num_hugepage_sizes; sz++) {\n+\t\t\thpi = &internal_conf->hugepage_info[sz];\n+\n+\t\t\tif (mf->hugepage_sz == hpi->hugepage_sz) {\n+\t\t\t\thpi->num_pages[mf->socket_id] += mf->num_pages;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* it seems hugepage info is not socket aware yet */\n+\t\tif (hpi != NULL && sz >= internal_conf->num_hugepage_sizes)\n+\t\t\thpi->num_pages[0] += mf->num_pages;\n+\t}\n+\treturn 0;\n+}\n+\n+int\n+eal_memalloc_memfile_alloc(struct hugepage_info *hpa)\n+{\n+\tstruct internal_config *internal_conf =\n+\t\t\teal_get_internal_configuration();\n+\tuint32_t i, sz;\n+\n+\tif (internal_conf->mem_file[0] == NULL ||\n+\t\t\trte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < RTE_DIM(mem_file); i++) {\n+\t\tstruct memfile *mf = &mem_file[i];\n+\t\tuint64_t hugepage_sz = mf->hugepage_sz;\n+\t\tint socket_id = mf->socket_id;\n+\t\tstruct rte_memseg **pages;\n+\n+\t\tif (!hugepage_sz)\n+\t\t\tbreak;\n+\n+\t\twhile (mf->num_allocated < mf->num_pages) {\n+\t\t\tint needed, allocated, j;\n+\t\t\tuint32_t prev;\n+\n+\t\t\tprev = mf->num_allocated;\n+\t\t\tneeded = mf->num_pages - mf->num_allocated;\n+\t\t\tpages = malloc(sizeof(*pages) * needed);\n+\t\t\tif (pages == NULL)\n+\t\t\t\treturn 
-1;\n+\n+\t\t\t/* memalloc is locked, it's safe to switch allocator */\n+\t\t\talloc_memfile = 1;\n+\t\t\tallocated = eal_memalloc_alloc_seg_bulk(pages,\n+\t\t\t\t\tneeded, hugepage_sz, socket_id,\tfalse);\n+\t\t\t/* switch allocator back */\n+\t\t\talloc_memfile = 0;\n+\t\t\tif (allocated <= 0) {\n+\t\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: allocation failed\\n\",\n+\t\t\t\t\t\t__func__, mf->fname);\n+\t\t\t\tfree(pages);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\n+\t\t\t/* mark preallocated pages as unfreeable */\n+\t\t\tfor (j = 0; j < allocated; j++) {\n+\t\t\t\tstruct rte_memseg *ms = pages[j];\n+\n+\t\t\t\tms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE |\n+\t\t\t\t\t     RTE_MEMSEG_FLAG_PRE_ALLOCATED;\n+\t\t\t}\n+\n+\t\t\tfree(pages);\n+\n+\t\t\t/* check whether we allocated from expected file */\n+\t\t\tif (prev + allocated != mf->num_allocated) {\n+\t\t\t\tRTE_LOG(ERR, EAL, \"%s(): %s: incorrect allocation\\n\",\n+\t\t\t\t\t\t__func__, mf->fname);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* reflect we pre-allocated some memory */\n+\t\tfor (sz = 0; sz < internal_conf->num_hugepage_sizes; sz++) {\n+\t\t\tstruct hugepage_info *hpi = &hpa[sz];\n+\n+\t\t\tif (hpi->hugepage_sz != hugepage_sz)\n+\t\t\t\tcontinue;\n+\t\t\thpi->num_pages[socket_id] -=\n+\t\t\t\t\tRTE_MIN(hpi->num_pages[socket_id],\n+\t\t\t\t\t\tmf->num_allocated);\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n static int\n sync_chunk(struct rte_memseg_list *primary_msl,\n \t\tstruct rte_memseg_list *local_msl, struct hugepage_info *hi,\n@@ -1178,6 +1480,14 @@ sync_chunk(struct rte_memseg_list *primary_msl,\n \t\tif (l_ms == NULL || p_ms == NULL)\n \t\t\treturn -1;\n \n+\t\t/*\n+\t\t * Switch allocator for this segment.\n+\t\t * This function is only called during init,\n+\t\t * so don't try to restore allocator on failure.\n+\t\t */\n+\t\tif (p_ms->flags & RTE_MEMSEG_FLAG_PRE_ALLOCATED)\n+\t\t\talloc_memfile = 1;\n+\n \t\tif (used) {\n \t\t\tret = alloc_seg(l_ms, p_ms->addr,\n \t\t\t\t\tp_ms->socket_id, hi,\n@@ -1191,6 
+1501,9 @@ sync_chunk(struct rte_memseg_list *primary_msl,\n \t\t\tif (ret < 0)\n \t\t\t\treturn -1;\n \t\t}\n+\n+\t\t/* Reset the allocator. */\n+\t\talloc_memfile = 0;\n \t}\n \n \t/* if we just allocated memory, notify the application */\n@@ -1392,6 +1705,9 @@ eal_memalloc_sync_with_primary(void)\n \tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n \t\treturn 0;\n \n+\tif (eal_memalloc_memfile_init() < 0)\n+\t\treturn -1;\n+\n \t/* memalloc is locked, so it's safe to call thread-unsafe version */\n \tif (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))\n \t\treturn -1;\n",
    "prefixes": [
        "v6",
        "2/3"
    ]
}