get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update; all writable fields must be supplied).
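
For illustration, a minimal client sketch using Python and the requests library is shown below. The token value, the assumption that updates require authentication, and the example set of writable fields (state, delegate, archived) reflect a typical Patchwork deployment rather than anything documented on this page; treat it as a starting point, not a definitive client. The GET request that follows shows the actual response format.

import requests

API = "http://patches.dpdk.org/api"
# Assumption: write operations need a Patchwork API token for your account.
HEADERS = {"Authorization": "Token <your-api-token>"}

# get: read the patch (no authentication required)
patch = requests.get(f"{API}/patches/44270/").json()
print(patch["name"], patch["state"])

# patch: partial update - send only the fields you want to change
r = requests.patch(f"{API}/patches/44270/", headers=HEADERS,
                   json={"state": "accepted"})
r.raise_for_status()

# put: behaves like PATCH but expects the full set of writable fields
# (e.g. state, delegate, archived) in the request body.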

GET /api/patches/44270/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 44270,
    "url": "http://patches.dpdk.org/api/patches/44270/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/11b27e424ea385b19874d315a18277dc44444ee0.1536072550.git.anatoly.burakov@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<11b27e424ea385b19874d315a18277dc44444ee0.1536072550.git.anatoly.burakov@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/11b27e424ea385b19874d315a18277dc44444ee0.1536072550.git.anatoly.burakov@intel.com",
    "date": "2018-09-04T15:02:02",
    "name": "[v2,9/9] mem: support using memfd segments for in-memory mode",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5b247ff255588e99577b7b35352b57a5e2b86fc5",
    "submitter": {
        "id": 4,
        "url": "http://patches.dpdk.org/api/people/4/?format=api",
        "name": "Burakov, Anatoly",
        "email": "anatoly.burakov@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/11b27e424ea385b19874d315a18277dc44444ee0.1536072550.git.anatoly.burakov@intel.com/mbox/",
    "series": [
        {
            "id": 1176,
            "url": "http://patches.dpdk.org/api/series/1176/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1176",
            "date": "2018-09-04T15:01:53",
            "name": "Improve running DPDK without hugetlbfs mounpoint",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/1176/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/44270/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/44270/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 097834F94;\n\tTue,  4 Sep 2018 17:02:16 +0200 (CEST)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n\tby dpdk.org (Postfix) with ESMTP id B3DF42C4F\n\tfor <dev@dpdk.org>; Tue,  4 Sep 2018 17:02:11 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n\tby fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t04 Sep 2018 08:02:10 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby orsmga004.jf.intel.com with ESMTP; 04 Sep 2018 08:02:04 -0700",
            "from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com\n\t[10.237.217.45])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tw84F23uj020033; Tue, 4 Sep 2018 16:02:03 +0100",
            "from sivswdev01.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev01.ir.intel.com with ESMTP id w84F23rk008602;\n\tTue, 4 Sep 2018 16:02:03 +0100",
            "(from aburakov@localhost)\n\tby sivswdev01.ir.intel.com with LOCAL id w84F23r1008598;\n\tTue, 4 Sep 2018 16:02:03 +0100"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.53,329,1531810800\"; d=\"scan'208\";a=\"230109391\"",
        "From": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "tiwei.bie@intel.com, ray.kinsella@intel.com, zhihong.wang@intel.com,\n\tmaxime.coquelin@redhat.com, kuralamudhan.ramakrishnan@intel.com",
        "Date": "Tue,  4 Sep 2018 16:02:02 +0100",
        "Message-Id": "<11b27e424ea385b19874d315a18277dc44444ee0.1536072550.git.anatoly.burakov@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": [
            "<cover.1536072550.git.anatoly.burakov@intel.com>",
            "<cover.1536072550.git.anatoly.burakov@intel.com>"
        ],
        "References": [
            "<cover.1536072550.git.anatoly.burakov@intel.com>",
            "<cover.1535041359.git.anatoly.burakov@intel.com>\n\t<cover.1536072550.git.anatoly.burakov@intel.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH v2 9/9] mem: support using memfd segments for\n\tin-memory mode",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Enable using memfd-created segments if supported by the system.\n\nThis will allow having real fd's for pages but without hugetlbfs\nmounts, which will enable in-memory mode to be used with virtio.\n\nThe implementation is mostly piggy-backing on existing real-fd\ncode, except that we no longer need to unlink any files or track\nper-page locks in single-file segments mode, because in-memory\nmode does not support secondary processes anyway.\n\nWe move some checks from EAL command-line parsing code to memalloc\nbecause it is now possible to use single-file segments mode with\nin-memory mode, but only if memfd is supported.\n\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n lib/librte_eal/common/eal_common_options.c |   6 +-\n lib/librte_eal/linuxapp/eal/eal_memalloc.c | 265 ++++++++++++++++++---\n 2 files changed, 235 insertions(+), 36 deletions(-)",
    "diff": "diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c\nindex 873099acc..ddd624110 100644\n--- a/lib/librte_eal/common/eal_common_options.c\n+++ b/lib/librte_eal/common/eal_common_options.c\n@@ -1384,10 +1384,10 @@ eal_check_common_options(struct internal_config *internal_cfg)\n \t\t\t\" is only supported in non-legacy memory mode\\n\");\n \t}\n \tif (internal_cfg->single_file_segments &&\n-\t\t\tinternal_cfg->hugepage_unlink) {\n+\t\t\tinternal_cfg->hugepage_unlink &&\n+\t\t\t!internal_cfg->in_memory) {\n \t\tRTE_LOG(ERR, EAL, \"Option --\"OPT_SINGLE_FILE_SEGMENTS\" is \"\n-\t\t\t\"not compatible with neither --\"OPT_IN_MEMORY\" nor \"\n-\t\t\t\"--\"OPT_HUGE_UNLINK\"\\n\");\n+\t\t\t\"not compatible with --\"OPT_HUGE_UNLINK\"\\n\");\n \t\treturn -1;\n \t}\n \tif (internal_cfg->legacy_mem &&\ndiff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c\nindex 66e1d87b6..0422cbd8d 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_memalloc.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c\n@@ -52,6 +52,23 @@ const int anonymous_hugepages_supported =\n #define RTE_MAP_HUGE_SHIFT 26\n #endif\n \n+/*\n+ * we don't actually care if memfd itself is supported - we only need to check\n+ * if memfd supports hugetlbfs, as that already implies memfd support.\n+ *\n+ * also, this is not a constant, because while we may be *compiled* with memfd\n+ * hugetlbfs support, we might not be *running* on a system that supports memfd\n+ * and/or memfd with hugetlbfs, so we need to be able to adjust this flag at\n+ * runtime, and fall back to anonymous memory.\n+ */\n+int memfd_create_supported =\n+#ifdef MFD_HUGETLB\n+#define MEMFD_SUPPORTED\n+\t\t1;\n+#else\n+\t\t0;\n+#endif\n+\n /*\n  * not all kernel version support fallocate on hugetlbfs, so fall back to\n  * ftruncate and disallow deallocation if fallocate is not supported.\n@@ -191,6 +208,31 @@ get_file_size(int fd)\n \treturn st.st_size;\n }\n \n+static inline uint32_t\n+bsf64(uint64_t v)\n+{\n+\treturn (uint32_t)__builtin_ctzll(v);\n+}\n+\n+static inline uint32_t\n+log2_u64(uint64_t v)\n+{\n+\tif (v == 0)\n+\t\treturn 0;\n+\tv = rte_align64pow2(v);\n+\treturn bsf64(v);\n+}\n+\n+static int\n+pagesz_flags(uint64_t page_sz)\n+{\n+\t/* as per mmap() manpage, all page sizes are log2 of page size\n+\t * shifted by MAP_HUGE_SHIFT\n+\t */\n+\tint log2 = log2_u64(page_sz);\n+\treturn log2 << RTE_MAP_HUGE_SHIFT;\n+}\n+\n /* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */\n static int lock(int fd, int type)\n {\n@@ -287,12 +329,64 @@ static int unlock_segment(int list_idx, int seg_idx)\n \treturn 0;\n }\n \n+static int\n+get_seg_memfd(struct hugepage_info *hi __rte_unused,\n+\t\tunsigned int list_idx __rte_unused,\n+\t\tunsigned int seg_idx __rte_unused)\n+{\n+#ifdef MEMFD_SUPPORTED\n+\tint fd;\n+\tchar segname[250]; /* as per manpage, limit is 249 bytes plus null */\n+\n+\tif (internal_config.single_file_segments) {\n+\t\tfd = fd_list[list_idx].memseg_list_fd;\n+\n+\t\tif (fd < 0) {\n+\t\t\tint flags = MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);\n+\n+\t\t\tsnprintf(segname, sizeof(segname), \"seg_%i\", list_idx);\n+\t\t\tfd = memfd_create(segname, flags);\n+\t\t\tif (fd < 0) {\n+\t\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): memfd create failed: %s\\n\",\n+\t\t\t\t\t__func__, strerror(errno));\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tfd_list[list_idx].memseg_list_fd = fd;\n+\t\t}\n+\t} else {\n+\t\tfd = fd_list[list_idx].fds[seg_idx];\n+\n+\t\tif (fd < 0) 
{\n+\t\t\tint flags = MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);\n+\n+\t\t\tsnprintf(segname, sizeof(segname), \"seg_%i-%i\",\n+\t\t\t\t\tlist_idx, seg_idx);\n+\t\t\tfd = memfd_create(segname, flags);\n+\t\t\tif (fd < 0) {\n+\t\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): memfd create failed: %s\\n\",\n+\t\t\t\t\t__func__, strerror(errno));\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tfd_list[list_idx].fds[seg_idx] = fd;\n+\t\t}\n+\t}\n+\treturn fd;\n+#endif\n+\treturn -1;\n+}\n+\n static int\n get_seg_fd(char *path, int buflen, struct hugepage_info *hi,\n \t\tunsigned int list_idx, unsigned int seg_idx)\n {\n \tint fd;\n \n+\t/* for in-memory mode, we only make it here when we're sure we support\n+\t * memfd, and this is a special case.\n+\t */\n+\tif (internal_config.in_memory)\n+\t\treturn get_seg_memfd(hi, list_idx, seg_idx);\n+\n \tif (internal_config.single_file_segments) {\n \t\t/* create a hugepage file path */\n \t\teal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);\n@@ -347,6 +441,33 @@ resize_hugefile(int fd, char *path, int list_idx, int seg_idx,\n \t\tuint64_t fa_offset, uint64_t page_sz, bool grow)\n {\n \tbool again = false;\n+\n+\t/* in-memory mode is a special case, because we don't need to perform\n+\t * any locking, and we can be sure that fallocate() is supported.\n+\t */\n+\tif (internal_config.in_memory) {\n+\t\tint flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |\n+\t\t\t\tFALLOC_FL_KEEP_SIZE;\n+\t\tint ret;\n+\n+\t\t/* grow or shrink the file */\n+\t\tret = fallocate(fd, flags, fa_offset, page_sz);\n+\n+\t\tif (ret < 0) {\n+\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): fallocate() failed: %s\\n\",\n+\t\t\t\t\t__func__,\n+\t\t\t\t\tstrerror(errno));\n+\t\t\treturn -1;\n+\t\t}\n+\t\t/* increase/decrease total segment count */\n+\t\tfd_list[list_idx].count += (grow ? 
1 : -1);\n+\t\tif (!grow && fd_list[list_idx].count == 0) {\n+\t\t\tclose(fd_list[list_idx].memseg_list_fd);\n+\t\t\tfd_list[list_idx].memseg_list_fd = -1;\n+\t\t}\n+\t\treturn 0;\n+\t}\n+\n \tdo {\n \t\tif (fallocate_supported == 0) {\n \t\t\t/* we cannot deallocate memory if fallocate() is not\n@@ -496,26 +617,34 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \tvoid *new_addr;\n \n \talloc_sz = hi->hugepage_sz;\n-\tif (!internal_config.single_file_segments &&\n-\t\t\tinternal_config.in_memory &&\n-\t\t\tanonymous_hugepages_supported) {\n-\t\tint log2, flags;\n-\n-\t\tlog2 = rte_log2_u32(alloc_sz);\n-\t\t/* as per mmap() manpage, all page sizes are log2 of page size\n-\t\t * shifted by MAP_HUGE_SHIFT\n-\t\t */\n-\t\tflags = (log2 << RTE_MAP_HUGE_SHIFT) | MAP_HUGETLB | MAP_FIXED |\n+\n+\t/* these are checked at init, but code analyzers don't know that */\n+\tif (internal_config.in_memory && !anonymous_hugepages_supported) {\n+\t\tRTE_LOG(ERR, EAL, \"Anonymous hugepages not supported, in-memory mode cannot allocate memory\\n\");\n+\t\treturn -1;\n+\t}\n+\tif (internal_config.in_memory && !memfd_create_supported &&\n+\t\t\tinternal_config.single_file_segments) {\n+\t\tRTE_LOG(ERR, EAL, \"Single-file segments are not supported without memfd support\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* in-memory without memfd is a special case */\n+\tint mmap_flags;\n+\n+\tif (internal_config.in_memory && !memfd_create_supported) {\n+\t\tint pagesz_flag, flags;\n+\n+\t\tpagesz_flag = pagesz_flags(alloc_sz);\n+\t\tflags = pagesz_flag | MAP_HUGETLB | MAP_FIXED |\n \t\t\t\tMAP_PRIVATE | MAP_ANONYMOUS;\n \t\tfd = -1;\n-\t\tva = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, flags, -1, 0);\n+\t\tmmap_flags = flags;\n \n-\t\t/* single-file segments codepath will never be active because\n-\t\t * in-memory mode is incompatible with it and it's stopped at\n-\t\t * EAL initialization stage, however the compiler doesn't know\n-\t\t * that and complains about map_offset being used uninitialized\n-\t\t * on failure codepaths while having in-memory mode enabled. 
so,\n-\t\t * assign a value here.\n+\t\t/* single-file segments codepath will never be active\n+\t\t * here because in-memory mode is incompatible with the\n+\t\t * fallback path, and it's stopped at EAL initialization\n+\t\t * stage.\n \t\t */\n \t\tmap_offset = 0;\n \t} else {\n@@ -539,7 +668,8 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t\t\t\t\t__func__, strerror(errno));\n \t\t\t\tgoto resized;\n \t\t\t}\n-\t\t\tif (internal_config.hugepage_unlink) {\n+\t\t\tif (internal_config.hugepage_unlink &&\n+\t\t\t\t\t!internal_config.in_memory) {\n \t\t\t\tif (unlink(path)) {\n \t\t\t\t\tRTE_LOG(DEBUG, EAL, \"%s(): unlink() failed: %s\\n\",\n \t\t\t\t\t\t__func__, strerror(errno));\n@@ -547,16 +677,16 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,\n \t\t\t\t}\n \t\t\t}\n \t\t}\n-\n-\t\t/*\n-\t\t * map the segment, and populate page tables, the kernel fills\n-\t\t * this segment with zeros if it's a new page.\n-\t\t */\n-\t\tva = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,\n-\t\t\t\tMAP_SHARED | MAP_POPULATE | MAP_FIXED, fd,\n-\t\t\t\tmap_offset);\n+\t\tmmap_flags = MAP_SHARED | MAP_POPULATE | MAP_FIXED;\n \t}\n \n+\t/*\n+\t * map the segment, and populate page tables, the kernel fills\n+\t * this segment with zeros if it's a new page.\n+\t */\n+\tva = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, mmap_flags, fd,\n+\t\t\tmap_offset);\n+\n \tif (va == MAP_FAILED) {\n \t\tRTE_LOG(DEBUG, EAL, \"%s(): mmap() failed: %s\\n\", __func__,\n \t\t\tstrerror(errno));\n@@ -663,7 +793,8 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,\n {\n \tuint64_t map_offset;\n \tchar path[PATH_MAX];\n-\tint fd, ret;\n+\tint fd, ret = 0;\n+\tbool exit_early;\n \n \t/* erase page data */\n \tmemset(ms->addr, 0, ms->len);\n@@ -675,8 +806,17 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,\n \t\treturn -1;\n \t}\n \n+\texit_early = false;\n+\n+\t/* if we're using anonymous hugepages, nothing to be done */\n+\tif (internal_config.in_memory && !memfd_create_supported)\n+\t\texit_early = true;\n+\n \t/* if we've already unlinked the page, nothing needs to be done */\n-\tif (internal_config.hugepage_unlink) {\n+\tif (!internal_config.in_memory && internal_config.hugepage_unlink)\n+\t\texit_early = true;\n+\n+\tif (exit_early) {\n \t\tmemset(ms, 0, sizeof(*ms));\n \t\treturn 0;\n \t}\n@@ -699,11 +839,13 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,\n \t\t/* if we're able to take out a write lock, we're the last one\n \t\t * holding onto this page.\n \t\t */\n-\t\tret = lock(fd, LOCK_EX);\n-\t\tif (ret >= 0) {\n-\t\t\t/* no one else is using this page */\n-\t\t\tif (ret == 1)\n-\t\t\t\tunlink(path);\n+\t\tif (!internal_config.in_memory) {\n+\t\t\tret = lock(fd, LOCK_EX);\n+\t\t\tif (ret >= 0) {\n+\t\t\t\t/* no one else is using this page */\n+\t\t\t\tif (ret == 1)\n+\t\t\t\t\tunlink(path);\n+\t\t\t}\n \t\t}\n \t\t/* closing fd will drop the lock */\n \t\tclose(fd);\n@@ -1406,6 +1548,35 @@ eal_memalloc_get_seg_fd(int list_idx, int seg_idx)\n \treturn fd;\n }\n \n+static int\n+test_memfd_create(void)\n+{\n+#ifdef MEMFD_SUPPORTED\n+\tunsigned int i;\n+\tfor (i = 0; i < internal_config.num_hugepage_sizes; i++) {\n+\t\tuint64_t pagesz = internal_config.hugepage_info[i].hugepage_sz;\n+\t\tint pagesz_flag = pagesz_flags(pagesz);\n+\t\tint flags;\n+\n+\t\tflags = pagesz_flag | MFD_HUGETLB;\n+\t\tint fd = memfd_create(\"test\", flags);\n+\t\tif (fd < 0) {\n+\t\t\t/* we failed - let memalloc know this isn't working */\n+\t\t\tif (errno == EINVAL) 
{\n+\t\t\t\tmemfd_create_supported = 0;\n+\t\t\t\treturn 0; /* not supported */\n+\t\t\t}\n+\n+\t\t\t/* we got other error - something's wrong */\n+\t\t\treturn -1; /* error */\n+\t\t}\n+\t\tclose(fd);\n+\t\treturn 1; /* supported */\n+\t}\n+#endif\n+\treturn 0; /* not supported */\n+}\n+\n int\n eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)\n {\n@@ -1433,6 +1604,34 @@ eal_memalloc_init(void)\n \tif (rte_eal_process_type() == RTE_PROC_SECONDARY)\n \t\tif (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)\n \t\t\treturn -1;\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY &&\n+\t\t\tinternal_config.in_memory) {\n+\t\tint mfd_res = test_memfd_create();\n+\n+\t\tif (mfd_res < 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Unable to check if memfd is supported\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (mfd_res == 1)\n+\t\t\tRTE_LOG(DEBUG, EAL, \"Using memfd for anonymous memory\\n\");\n+\t\telse\n+\t\t\tRTE_LOG(INFO, EAL, \"Using memfd is not supported, falling back to anonymous hugepages\\n\");\n+\n+\t\t/* we only support single-file segments mode with in-memory mode\n+\t\t * if we support hugetlbfs with memfd_create. this code will\n+\t\t * test if we do.\n+\t\t */\n+\t\tif (internal_config.single_file_segments &&\n+\t\t\t\tmfd_res != 1) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Single-file segments mode cannot be used without memfd support\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\t/* this cannot ever happen but better safe than sorry */\n+\t\tif (!anonymous_hugepages_supported) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Using anonymous memory is not supported\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n \n \t/* initialize all of the fd lists */\n \tif (rte_memseg_list_walk(fd_list_create_walk, NULL))\n",
    "prefixes": [
        "v2",
        "9/9"
    ]
}
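
Since the payload above is plain JSON, downstream tooling only needs the fields it already shows: "mbox" for the raw patch email, "series" for the parent series, and "check" for the aggregate CI result. A rough sketch in Python using the requests library (the local file name is only an example):

import requests

resp = requests.get("http://patches.dpdk.org/api/patches/44270/")
resp.raise_for_status()
patch = resp.json()

# Summarise fields taken directly from the response shown above.
print(f"{patch['name']} by {patch['submitter']['name']}")
print(f"state={patch['state']}, check={patch['check']}, series={patch['series'][0]['name']}")

# The "mbox" field points at the raw email; save it so it can be applied with `git am`.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("44270.mbox", "wb") as out:
    out.write(mbox.content)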