get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
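
The exchange below shows a GET against this endpoint. The same request can be made programmatically; a minimal sketch, assuming Python 3 with the third-party requests package (read access needs no authentication):

import requests

# Fetch the patch shown below and read a few of its fields.
resp = requests.get("http://patches.dpdk.org/api/patches/43045/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"])   # "[v3,8/8] mem: support in-memory mode"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # URL of the patch in mbox format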

GET /api/patches/43045/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 43045,
    "url": "http://patches.dpdk.org/api/patches/43045/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/3e5056f09adc3d4613b354e36531cd8103e9a869.1531485955.git.anatoly.burakov@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<3e5056f09adc3d4613b354e36531cd8103e9a869.1531485955.git.anatoly.burakov@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/3e5056f09adc3d4613b354e36531cd8103e9a869.1531485955.git.anatoly.burakov@intel.com",
    "date": "2018-07-13T12:48:04",
    "name": "[v3,8/8] mem: support in-memory mode",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "9cae3e72de980f2477e5ce2e436eab5951c78e16",
    "submitter": {
        "id": 4,
        "url": "http://patches.dpdk.org/api/people/4/?format=api",
        "name": "Burakov, Anatoly",
        "email": "anatoly.burakov@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/3e5056f09adc3d4613b354e36531cd8103e9a869.1531485955.git.anatoly.burakov@intel.com/mbox/",
    "series": [
        {
            "id": 564,
            "url": "http://patches.dpdk.org/api/series/564/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=564",
            "date": "2018-07-13T12:47:56",
            "name": "Support running DPDK without hugetlbfs mountpoint",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/564/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/43045/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/43045/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4125B3772;\n\tFri, 13 Jul 2018 14:48:17 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id AE8D32C16\n\tfor <dev@dpdk.org>; Fri, 13 Jul 2018 14:48:09 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t13 Jul 2018 05:48:08 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby orsmga004.jf.intel.com with ESMTP; 13 Jul 2018 05:48:06 -0700",
            "from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com\n\t[10.237.217.45])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tw6DCm5QD016851; Fri, 13 Jul 2018 13:48:05 +0100",
            "from sivswdev01.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev01.ir.intel.com with ESMTP id w6DCm5Uq006294;\n\tFri, 13 Jul 2018 13:48:05 +0100",
            "(from aburakov@localhost)\n\tby sivswdev01.ir.intel.com with LOCAL id w6DCm5kk006290;\n\tFri, 13 Jul 2018 13:48:05 +0100"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,347,1526367600\"; d=\"scan'208\";a=\"215740285\"",
        "From": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "ray.kinsella@intel.com, kuralamudhan.ramakrishnan@intel.com,\n\tlouise.m.daly@intel.com, bruce.richardson@intel.com,\n\tferruh.yigit@intel.com, konstantin.ananyev@intel.com, thomas@monjalon.net",
        "Date": "Fri, 13 Jul 2018 13:48:04 +0100",
        "Message-Id": "<3e5056f09adc3d4613b354e36531cd8103e9a869.1531485955.git.anatoly.burakov@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": [
            "<cover.1531485955.git.anatoly.burakov@intel.com>",
            "<cover.1531485955.git.anatoly.burakov@intel.com>"
        ],
        "References": [
            "<cover.1531485955.git.anatoly.burakov@intel.com>",
            "<cover.1531477505.git.anatoly.burakov@intel.com>\n\t<cover.1531485955.git.anatoly.burakov@intel.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH v3 8/8] mem: support in-memory mode",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Implement the final piece of the in-memory mode puzzle - enable running\nDPDK entirely in memory, without creating any files.\n\nTo do it, use mmap with MAP_HUGETLB and size flags to enable DPDK to work\nwithout hugetlbfs mountpoints. In order to enable this, a few things needed\nto be changed.\n\nFirst of all, we need to allow empty hugetlbfs mountpoints in\nhugepage_info, and handle them correctly (by not trying to create any\nfiles and lock any directories).\n\nNext, we need to reorder the mapping sequence, because the page is not\nreally allocated until the page fault, and we cannot get its IOVA\naddress before we trigger the page fault.\n\nFinally, decide at compile time whether we are going to be supporting\nanonymous hugepages or not, because we cannot check for it at runtime.\n\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n\nNotes:\n    RFC->v1:\n    - Drop memfd and instead use mmap() with MAP_HUGETLB. This will drop the\n      kernel requirements down to 3.8, and does not impose any restrictions\n      glibc (as far as i known).\n    \n      Unfortunately, there's a bit of an issue with this approach, because\n      mmap() is stupid and will happily ignore unsupported arguments. This\n      means that if the binary were to be compiled on a 3.8+ kernel but run\n      on a pre-3.8 kernel (such as currently supported minimum of 3.2), then\n      most likely the memory would be allocated using regular pages, causing\n      unthinkable performance degradation. No solution to this problem is\n      currently known to me.\n\n .../linuxapp/eal/eal_hugepage_info.c          |  91 ++++++++----\n lib/librte_eal/linuxapp/eal/eal_memalloc.c    | 140 +++++++++++-------\n lib/librte_eal/linuxapp/eal/eal_memory.c      |   3 +-\n 3 files changed, 149 insertions(+), 85 deletions(-)",
    "diff": "diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c\nindex 7f8e2fd9c..3a7d4b222 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c\n@@ -18,6 +18,8 @@\n #include <sys/queue.h>\n #include <sys/stat.h>\n \n+#include <linux/mman.h> /* for hugetlb-related flags */\n+\n #include <rte_memory.h>\n #include <rte_eal.h>\n #include <rte_launch.h>\n@@ -313,11 +315,49 @@ compare_hpi(const void *a, const void *b)\n \treturn hpi_b->hugepage_sz - hpi_a->hugepage_sz;\n }\n \n+static void\n+calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)\n+{\n+\tuint64_t total_pages = 0;\n+\tunsigned int i;\n+\n+\t/*\n+\t * first, try to put all hugepages into relevant sockets, but\n+\t * if first attempts fails, fall back to collecting all pages\n+\t * in one socket and sorting them later\n+\t */\n+\ttotal_pages = 0;\n+\t/* we also don't want to do this for legacy init */\n+\tif (!internal_config.legacy_mem)\n+\t\tfor (i = 0; i < rte_socket_count(); i++) {\n+\t\t\tint socket = rte_socket_id_by_idx(i);\n+\t\t\tunsigned int num_pages =\n+\t\t\t\t\tget_num_hugepages_on_node(\n+\t\t\t\t\t\tdirent->d_name, socket);\n+\t\t\thpi->num_pages[socket] = num_pages;\n+\t\t\ttotal_pages += num_pages;\n+\t\t}\n+\t/*\n+\t * we failed to sort memory from the get go, so fall\n+\t * back to old way\n+\t */\n+\tif (total_pages == 0) {\n+\t\thpi->num_pages[0] = get_num_hugepages(dirent->d_name);\n+\n+#ifndef RTE_ARCH_64\n+\t\t/* for 32-bit systems, limit number of hugepages to\n+\t\t * 1GB per page size */\n+\t\thpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],\n+\t\t\t\tRTE_PGSIZE_1G / hpi->hugepage_sz);\n+#endif\n+\t}\n+}\n+\n static int\n hugepage_info_init(void)\n {\tconst char dirent_start_text[] = \"hugepages-\";\n \tconst size_t dirent_start_len = sizeof(dirent_start_text) - 1;\n-\tunsigned int i, total_pages, num_sizes = 0;\n+\tunsigned int i, num_sizes = 0;\n \tDIR *dir;\n \tstruct dirent *dirent;\n \n@@ -355,6 +395,22 @@ hugepage_info_init(void)\n \t\t\t\t\t\"%\" PRIu64 \" reserved, but no mounted \"\n \t\t\t\t\t\"hugetlbfs found for that size\\n\",\n \t\t\t\t\tnum_pages, hpi->hugepage_sz);\n+\t\t\t/* if we have kernel support for reserving hugepages\n+\t\t\t * through mmap, and we're in in-memory mode, treat this\n+\t\t\t * page size as valid. 
we cannot be in legacy mode at\n+\t\t\t * this point because we've checked this earlier in the\n+\t\t\t * init process.\n+\t\t\t */\n+#ifdef MAP_HUGE_SHIFT\n+\t\t\tif (internal_config.in_memory) {\n+\t\t\t\tRTE_LOG(DEBUG, EAL, \"In-memory mode enabled, \"\n+\t\t\t\t\t\"hugepages of size %\" PRIu64 \" bytes \"\n+\t\t\t\t\t\"will be allocated anonymously\\n\",\n+\t\t\t\t\thpi->hugepage_sz);\n+\t\t\t\tcalc_num_pages(hpi, dirent);\n+\t\t\t\tnum_sizes++;\n+\t\t\t}\n+#endif\n \t\t\tcontinue;\n \t\t}\n \n@@ -371,35 +427,7 @@ hugepage_info_init(void)\n \t\tif (clear_hugedir(hpi->hugedir) == -1)\n \t\t\tbreak;\n \n-\t\t/*\n-\t\t * first, try to put all hugepages into relevant sockets, but\n-\t\t * if first attempts fails, fall back to collecting all pages\n-\t\t * in one socket and sorting them later\n-\t\t */\n-\t\ttotal_pages = 0;\n-\t\t/* we also don't want to do this for legacy init */\n-\t\tif (!internal_config.legacy_mem)\n-\t\t\tfor (i = 0; i < rte_socket_count(); i++) {\n-\t\t\t\tint socket = rte_socket_id_by_idx(i);\n-\t\t\t\tunsigned int num_pages =\n-\t\t\t\t\t\tget_num_hugepages_on_node(\n-\t\t\t\t\t\t\tdirent->d_name, socket);\n-\t\t\t\thpi->num_pages[socket] = num_pages;\n-\t\t\t\ttotal_pages += num_pages;\n-\t\t\t}\n-\t\t/*\n-\t\t * we failed to sort memory from the get go, so fall\n-\t\t * back to old way\n-\t\t */\n-\t\tif (total_pages == 0)\n-\t\t\thpi->num_pages[0] = get_num_hugepages(dirent->d_name);\n-\n-#ifndef RTE_ARCH_64\n-\t\t/* for 32-bit systems, limit number of hugepages to\n-\t\t * 1GB per page size */\n-\t\thpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],\n-\t\t\t\t\t    RTE_PGSIZE_1G / hpi->hugepage_sz);\n-#endif\n+\t\tcalc_num_pages(hpi, dirent);\n \n \t\tnum_sizes++;\n \t}\n@@ -423,8 +451,7 @@ hugepage_info_init(void)\n \n \t\tfor (j = 0; j < RTE_MAX_NUMA_NODES; j++)\n \t\t\tnum_pages += hpi->num_pages[j];\n-\t\tif (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0 &&\n-\t\t\t\tnum_pages > 0)\n+\t\tif (num_pages > 0)\n \t\t\treturn 0;\n \t}\n \ndiff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c\nindex d610923b8..79443c56a 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_memalloc.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c\n@@ -28,6 +28,7 @@\n #include <numaif.h>\n #endif\n #include <linux/falloc.h>\n+#include <linux/mman.h> /* for hugetlb-related mmap flags */\n \n #include <rte_common.h>\n #include <rte_log.h>\n@@ -41,6 +42,15 @@\n #include \"eal_memalloc.h\"\n #include \"eal_private.h\"\n \n+const int anonymous_hugepages_supported =\n+#ifdef MAP_HUGE_SHIFT\n+\t\t1;\n+#define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT\n+#else\n+\t\t0;\n+#define RTE_MAP_HUGE_SHIFT 26\n+#endif\n+\n /*\n  * not all kernel version support fallocate on hugetlbfs, so fall back to\n  * ftruncate and disallow deallocation if fallocate is not supported.\n@@ -461,6 +471,8 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \tint cur_socket_id = 0;\n #endif\n \tuint64_t map_offset;\n+\trte_iova_t iova;\n+\tvoid *va;\n \tchar path[PATH_MAX];\n \tint ret = 0;\n \tint fd;\n@@ -468,43 +480,66 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \tint flags;\n \tvoid *new_addr;\n \n-\t/* takes out a read lock on segment or segment list */\n-\tfd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);\n-\tif (fd < 0) {\n-\t\tRTE_LOG(ERR, EAL, \"Couldn't get fd on hugepage file\\n\");\n-\t\treturn -1;\n-\t}\n-\n \talloc_sz = hi->hugepage_sz;\n-\tif (internal_config.single_file_segments) {\n-\t\tmap_offset = seg_idx * 
alloc_sz;\n-\t\tret = resize_hugefile(fd, path, list_idx, seg_idx, map_offset,\n-\t\t\t\talloc_sz, true);\n-\t\tif (ret < 0)\n-\t\t\tgoto resized;\n-\t} else {\n+\tif (internal_config.in_memory && anonymous_hugepages_supported) {\n+\t\tint log2, flags;\n+\n+\t\tlog2 = rte_log2_u32(alloc_sz);\n+\t\t/* as per mmap() manpage, all page sizes are log2 of page size\n+\t\t * shifted by MAP_HUGE_SHIFT\n+\t\t */\n+\t\tflags = (log2 << RTE_MAP_HUGE_SHIFT) | MAP_HUGETLB | MAP_FIXED |\n+\t\t\t\tMAP_PRIVATE | MAP_ANONYMOUS;\n+\t\tfd = -1;\n+\t\tva = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, flags, -1, 0);\n+\n+\t\t/* single-file segments codepath will never be active because\n+\t\t * in-memory mode is incompatible with it and it's stopped at\n+\t\t * EAL initialization stage, however the compiler doesn't know\n+\t\t * that and complains about map_offset being used uninitialized\n+\t\t * on failure codepaths while having in-memory mode enabled. so,\n+\t\t * assign a value here.\n+\t\t */\n \t\tmap_offset = 0;\n-\t\tif (ftruncate(fd, alloc_sz) < 0) {\n-\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): ftruncate() failed: %s\\n\",\n-\t\t\t\t__func__, strerror(errno));\n-\t\t\tgoto resized;\n+\t} else {\n+\t\t/* takes out a read lock on segment or segment list */\n+\t\tfd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);\n+\t\tif (fd < 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Couldn't get fd on hugepage file\\n\");\n+\t\t\treturn -1;\n \t\t}\n-\t\tif (internal_config.hugepage_unlink) {\n-\t\t\tif (unlink(path)) {\n-\t\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): unlink() failed: %s\\n\",\n+\n+\t\tif (internal_config.single_file_segments) {\n+\t\t\tmap_offset = seg_idx * alloc_sz;\n+\t\t\tret = resize_hugefile(fd, path, list_idx, seg_idx,\n+\t\t\t\t\tmap_offset, alloc_sz, true);\n+\t\t\tif (ret < 0)\n+\t\t\t\tgoto resized;\n+\t\t} else {\n+\t\t\tmap_offset = 0;\n+\t\t\tif (ftruncate(fd, alloc_sz) < 0) {\n+\t\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): ftruncate() failed: %s\\n\",\n \t\t\t\t\t__func__, strerror(errno));\n \t\t\t\tgoto resized;\n \t\t\t}\n+\t\t\tif (internal_config.hugepage_unlink) {\n+\t\t\t\tif (unlink(path)) {\n+\t\t\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): unlink() failed: %s\\n\",\n+\t\t\t\t\t\t__func__, strerror(errno));\n+\t\t\t\t\tgoto resized;\n+\t\t\t\t}\n+\t\t\t}\n \t\t}\n+\n+\t\t/*\n+\t\t * map the segment, and populate page tables, the kernel fills\n+\t\t * this segment with zeros if it's a new page.\n+\t\t */\n+\t\tva = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,\n+\t\t\t\tMAP_SHARED | MAP_POPULATE | MAP_FIXED, fd,\n+\t\t\t\tmap_offset);\n \t}\n \n-\t/*\n-\t * map the segment, and populate page tables, the kernel fills this\n-\t * segment with zeros if it's a new page.\n-\t */\n-\tvoid *va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,\n-\t\t\tMAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, map_offset);\n-\n \tif (va == MAP_FAILED) {\n \t\tRTE_LOG(DEBUG, EAL, \"%s(): mmap() failed: %s\\n\", __func__,\n \t\t\tstrerror(errno));\n@@ -519,24 +554,6 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t\tgoto resized;\n \t}\n \n-\trte_iova_t iova = rte_mem_virt2iova(addr);\n-\tif (iova == RTE_BAD_PHYS_ADDR) {\n-\t\tRTE_LOG(DEBUG, EAL, \"%s(): can't get IOVA addr\\n\",\n-\t\t\t__func__);\n-\t\tgoto mapped;\n-\t}\n-\n-#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES\n-\tmove_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);\n-\n-\tif (cur_socket_id != socket_id) {\n-\t\tRTE_LOG(DEBUG, EAL,\n-\t\t\t\t\"%s(): allocation happened on wrong socket (wanted %d, got %d)\\n\",\n-\t\t\t__func__, socket_id, cur_socket_id);\n-\t\tgoto 
mapped;\n-\t}\n-#endif\n-\n \t/* In linux, hugetlb limitations, like cgroup, are\n \t * enforced at fault time instead of mmap(), even\n \t * with the option of MAP_POPULATE. Kernel will send\n@@ -549,9 +566,6 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t\t\t(unsigned int)(alloc_sz >> 20));\n \t\tgoto mapped;\n \t}\n-\t/* for non-single file segments, we can close fd here */\n-\tif (!internal_config.single_file_segments)\n-\t\tclose(fd);\n \n \t/* we need to trigger a write to the page to enforce page fault and\n \t * ensure that page is accessible to us, but we can't overwrite value\n@@ -560,6 +574,28 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t */\n \t*(volatile int *)addr = *(volatile int *)addr;\n \n+\tiova = rte_mem_virt2iova(addr);\n+\tif (iova == RTE_BAD_PHYS_ADDR) {\n+\t\tRTE_LOG(DEBUG, EAL, \"%s(): can't get IOVA addr\\n\",\n+\t\t\t__func__);\n+\t\tgoto mapped;\n+\t}\n+\n+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES\n+\tmove_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);\n+\n+\tif (cur_socket_id != socket_id) {\n+\t\tRTE_LOG(DEBUG, EAL,\n+\t\t\t\t\"%s(): allocation happened on wrong socket (wanted %d, got %d)\\n\",\n+\t\t\t__func__, socket_id, cur_socket_id);\n+\t\tgoto mapped;\n+\t}\n+#endif\n+\t/* for non-single file segments that aren't in-memory, we can close fd\n+\t * here */\n+\tif (!internal_config.single_file_segments && !internal_config.in_memory)\n+\t\tclose(fd);\n+\n \tms->addr = addr;\n \tms->hugepage_sz = alloc_sz;\n \tms->len = alloc_sz;\n@@ -588,6 +624,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t\tRTE_LOG(CRIT, EAL, \"Can't mmap holes in our virtual address space\\n\");\n \t}\n resized:\n+\t/* in-memory mode will never be single-file-segments mode */\n \tif (internal_config.single_file_segments) {\n \t\tresize_hugefile(fd, path, list_idx, seg_idx, map_offset,\n \t\t\t\talloc_sz, false);\n@@ -595,6 +632,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t} else {\n \t\t/* only remove file if we can take out a write lock */\n \t\tif (internal_config.hugepage_unlink == 0 &&\n+\t\t\t\tinternal_config.in_memory == 0 &&\n \t\t\t\tlock(fd, LOCK_EX) == 1)\n \t\t\tunlink(path);\n \t\tclose(fd);\n@@ -705,7 +743,7 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)\n \t * during init, we already hold a write lock, so don't try to take out\n \t * another one.\n \t */\n-\tif (wa->hi->lock_descriptor == -1) {\n+\tif (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {\n \t\tdir_fd = open(wa->hi->hugedir, O_RDONLY);\n \t\tif (dir_fd < 0) {\n \t\t\tRTE_LOG(ERR, EAL, \"%s(): Cannot open '%s': %s\\n\",\n@@ -809,7 +847,7 @@ free_seg_walk(const struct rte_memseg_list *msl, void *arg)\n \t * during init, we already hold a write lock, so don't try to take out\n \t * another one.\n \t */\n-\tif (wa->hi->lock_descriptor == -1) {\n+\tif (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {\n \t\tdir_fd = open(wa->hi->hugedir, O_RDONLY);\n \t\tif (dir_fd < 0) {\n \t\t\tRTE_LOG(ERR, EAL, \"%s(): Cannot open '%s': %s\\n\",\ndiff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c\nindex ddfa8b133..dbf19499e 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_memory.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c\n@@ -1088,8 +1088,7 @@ get_socket_mem_size(int socket)\n \n \tfor (i = 0; i < internal_config.num_hugepage_sizes; i++){\n \t\tstruct hugepage_info *hpi = &internal_config.hugepage_info[i];\n-\t\tif (strnlen(hpi->hugedir, 
sizeof(hpi->hugedir)) != 0)\n-\t\t\tsize += hpi->hugepage_sz * hpi->num_pages[socket];\n+\t\tsize += hpi->hugepage_sz * hpi->num_pages[socket];\n \t}\n \n \treturn size;\n",
    "prefixes": [
        "v3",
        "8/8"
    ]
}
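
The "mbox" field above points at the raw patch in mbox format, ready to be applied to a git tree. A minimal sketch of downloading it, again assuming the requests package; the local filename is arbitrary:

import requests

# Download the raw patch in mbox format, as linked in the "mbox" field above.
mbox_url = "http://patches.dpdk.org/project/dpdk/patch/3e5056f09adc3d4613b354e36531cd8103e9a869.1531485955.git.anatoly.burakov@intel.com/mbox/"
resp = requests.get(mbox_url)
resp.raise_for_status()
with open("43045.mbox", "wb") as f:
    f.write(resp.content)

The saved file can then be applied to a DPDK checkout with "git am 43045.mbox".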
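
The PATCH and PUT verbs in the Allow header accept updates to maintainer-writable fields such as "state", "delegate", "archived" and "commit_ref". A hedged sketch, assuming a Patchwork API token with maintainer rights on the project; YOUR_API_TOKEN is a placeholder:

import requests

# Partially update the patch via HTTP PATCH, e.g. toggle its archived flag.
# Patchwork's REST API authenticates with an "Authorization: Token ..." header.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/43045/",
    headers={"Authorization": "Token YOUR_API_TOKEN"},
    json={"archived": False},
)
resp.raise_for_status()
print(resp.json()["archived"])

A PUT works the same way, but it is a full update: all writable fields are replaced rather than only those supplied.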