get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
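A minimal sketch of reading this endpoint from a script, assuming Python with the third-party requests library is available; read access to the Patchwork API is typically unauthenticated, and the URL matches the GET request shown below. The field names used here ("name", "state") appear in the JSON response that follows.

# Hedged example, not part of the API response: fetch patch 35719 and print
# two of the fields shown in the GET response below.
import requests

resp = requests.get("https://patches.dpdk.org/api/patches/35719/")
resp.raise_for_status()          # raise on a non-2xx status
patch = resp.json()
print(patch["name"], patch["state"])

The raw HTTP exchange for the same request looks like this: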

GET /api/patches/35719/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 35719,
    "url": "https://patches.dpdk.org/api/patches/35719/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/b32aabe8ac819b23944daaa36af196794a0e9ed5.1520428025.git.anatoly.burakov@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<b32aabe8ac819b23944daaa36af196794a0e9ed5.1520428025.git.anatoly.burakov@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/b32aabe8ac819b23944daaa36af196794a0e9ed5.1520428025.git.anatoly.burakov@intel.com",
    "date": "2018-03-07T16:56:29",
    "name": "[dpdk-dev,v2,01/41] eal: move get_virtual_area out of linuxapp eal_memory.c",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "605562b20589506af38a1bf745165c91df0141df",
    "submitter": {
        "id": 4,
        "url": "https://patches.dpdk.org/api/people/4/?format=api",
        "name": "Burakov, Anatoly",
        "email": "anatoly.burakov@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/b32aabe8ac819b23944daaa36af196794a0e9ed5.1520428025.git.anatoly.burakov@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/35719/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/35719/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 59E1A5F57;\n\tWed,  7 Mar 2018 17:57:16 +0100 (CET)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id 64EE24C8F\n\tfor <dev@dpdk.org>; Wed,  7 Mar 2018 17:57:14 +0100 (CET)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t07 Mar 2018 08:57:13 -0800",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby fmsmga004.fm.intel.com with ESMTP; 07 Mar 2018 08:57:10 -0800",
            "from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com\n\t[10.237.217.45])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tw27Gv9M1032343; Wed, 7 Mar 2018 16:57:09 GMT",
            "from sivswdev01.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev01.ir.intel.com with ESMTP id w27Gv9pg006617;\n\tWed, 7 Mar 2018 16:57:09 GMT",
            "(from aburakov@localhost)\n\tby sivswdev01.ir.intel.com with LOCAL id w27Gv9BF006613;\n\tWed, 7 Mar 2018 16:57:09 GMT"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.47,436,1515484800\"; d=\"scan'208\";a=\"35368066\"",
        "From": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "keith.wiles@intel.com, jianfeng.tan@intel.com, andras.kovacs@ericsson.com,\n\tlaszlo.vadkeri@ericsson.com, benjamin.walker@intel.com,\n\tbruce.richardson@intel.com, thomas@monjalon.net,\n\tkonstantin.ananyev@intel.com, kuralamudhan.ramakrishnan@intel.com,\n\tlouise.m.daly@intel.com, nelio.laranjeiro@6wind.com,\n\tyskoh@mellanox.com, pepperjo@japf.ch, jerin.jacob@caviumnetworks.com, \n\themant.agrawal@nxp.com, olivier.matz@6wind.com",
        "Date": "Wed,  7 Mar 2018 16:56:29 +0000",
        "Message-Id": "<b32aabe8ac819b23944daaa36af196794a0e9ed5.1520428025.git.anatoly.burakov@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": [
            "<cover.1520428025.git.anatoly.burakov@intel.com>",
            "<cover.1520428025.git.anatoly.burakov@intel.com>"
        ],
        "References": [
            "<cover.1520428025.git.anatoly.burakov@intel.com>",
            "<cover.1520083504.git.anatoly.burakov@intel.com>\n\t<cover.1520428025.git.anatoly.burakov@intel.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH v2 01/41] eal: move get_virtual_area out of\n\tlinuxapp eal_memory.c",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Move get_virtual_area out of linuxapp EAL memory and make it\ncommon to EAL, so that other code could reserve virtual areas\nas well.\n\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n lib/librte_eal/common/eal_common_memory.c | 101 ++++++++++++++++++++++\n lib/librte_eal/common/eal_private.h       |  33 +++++++\n lib/librte_eal/linuxapp/eal/eal_memory.c  | 137 ++++++------------------------\n 3 files changed, 161 insertions(+), 110 deletions(-)",
    "diff": "diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c\nindex 852f3bb..042881b 100644\n--- a/lib/librte_eal/common/eal_common_memory.c\n+++ b/lib/librte_eal/common/eal_common_memory.c\n@@ -2,10 +2,12 @@\n  * Copyright(c) 2010-2014 Intel Corporation\n  */\n \n+#include <errno.h>\n #include <stdio.h>\n #include <stdint.h>\n #include <stdlib.h>\n #include <stdarg.h>\n+#include <string.h>\n #include <unistd.h>\n #include <inttypes.h>\n #include <sys/mman.h>\n@@ -14,12 +16,111 @@\n #include <rte_memory.h>\n #include <rte_eal.h>\n #include <rte_eal_memconfig.h>\n+#include <rte_errno.h>\n #include <rte_log.h>\n \n #include \"eal_private.h\"\n #include \"eal_internal_cfg.h\"\n \n /*\n+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the\n+ * pointer to the mmap'd area and keep *size unmodified. Else, retry\n+ * with a smaller zone: decrease *size by hugepage_sz until it reaches\n+ * 0. In this case, return NULL. Note: this function returns an address\n+ * which is a multiple of hugepage size.\n+ */\n+\n+static uint64_t baseaddr_offset;\n+static uint64_t system_page_sz;\n+\n+void *\n+eal_get_virtual_area(void *requested_addr, uint64_t *size,\n+\t\tuint64_t page_sz, int flags, int mmap_flags)\n+{\n+\tbool addr_is_hint, allow_shrink, unmap, no_align;\n+\tuint64_t map_sz;\n+\tvoid *mapped_addr, *aligned_addr;\n+\n+\tif (system_page_sz == 0)\n+\t\tsystem_page_sz = sysconf(_SC_PAGESIZE);\n+\n+\tmmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;\n+\n+\tRTE_LOG(DEBUG, EAL, \"Ask a virtual area of 0x%zx bytes\\n\", *size);\n+\n+\taddr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;\n+\tallow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;\n+\tunmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;\n+\n+\tif (requested_addr == NULL && internal_config.base_virtaddr != 0) {\n+\t\trequested_addr = (void *) (internal_config.base_virtaddr +\n+\t\t\t\tbaseaddr_offset);\n+\t\trequested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);\n+\t\taddr_is_hint = true;\n+\t}\n+\n+\t/* if requested address is not aligned by page size, or if requested\n+\t * address is NULL, add page size to requested length as we may get an\n+\t * address that's aligned by system page size, which can be smaller than\n+\t * our requested page size. additionally, we shouldn't try to align if\n+\t * system page size is the same as requested page size.\n+\t */\n+\tno_align = (requested_addr != NULL &&\n+\t\t((uintptr_t)requested_addr & (page_sz - 1)) == 0) ||\n+\t\tpage_sz == system_page_sz;\n+\n+\tdo {\n+\t\tmap_sz = no_align ? *size : *size + page_sz;\n+\n+\t\tmapped_addr = mmap(requested_addr, map_sz, PROT_READ,\n+\t\t\t\tmmap_flags, -1, 0);\n+\t\tif (mapped_addr == MAP_FAILED && allow_shrink)\n+\t\t\t*size -= page_sz;\n+\t} while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);\n+\n+\t/* align resulting address - if map failed, we will ignore the value\n+\t * anyway, so no need to add additional checks.\n+\t */\n+\taligned_addr = no_align ? 
mapped_addr :\n+\t\t\tRTE_PTR_ALIGN(mapped_addr, page_sz);\n+\n+\tif (*size == 0) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot get a virtual area of any size: %s\\n\",\n+\t\t\tstrerror(errno));\n+\t\trte_errno = errno;\n+\t\treturn NULL;\n+\t} else if (mapped_addr == MAP_FAILED) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot get a virtual area: %s\\n\",\n+\t\t\tstrerror(errno));\n+\t\t/* pass errno up the call chain */\n+\t\trte_errno = errno;\n+\t\treturn NULL;\n+\t} else if (requested_addr != NULL && !addr_is_hint &&\n+\t\t\taligned_addr != requested_addr) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot get a virtual area at requested address: %p (got %p)\\n\",\n+\t\t\trequested_addr, aligned_addr);\n+\t\tmunmap(mapped_addr, map_sz);\n+\t\trte_errno = EADDRNOTAVAIL;\n+\t\treturn NULL;\n+\t} else if (requested_addr != NULL && addr_is_hint &&\n+\t\t\taligned_addr != requested_addr) {\n+\t\tRTE_LOG(WARNING, EAL, \"WARNING! Base virtual address hint (%p != %p) not respected!\\n\",\n+\t\t\trequested_addr, aligned_addr);\n+\t\tRTE_LOG(WARNING, EAL, \"   This may cause issues with mapping memory into secondary processes\\n\");\n+\t}\n+\n+\tif (unmap)\n+\t\tmunmap(mapped_addr, map_sz);\n+\n+\tRTE_LOG(DEBUG, EAL, \"Virtual area found at %p (size = 0x%zx)\\n\",\n+\t\taligned_addr, *size);\n+\n+\tbaseaddr_offset += *size;\n+\n+\treturn aligned_addr;\n+}\n+\n+/*\n  * Return a pointer to a read-only table of struct rte_physmem_desc\n  * elements, containing the layout of all addressable physical\n  * memory. The last element of the table contains a NULL address.\ndiff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h\nindex 0b28770..96cebb7 100644\n--- a/lib/librte_eal/common/eal_private.h\n+++ b/lib/librte_eal/common/eal_private.h\n@@ -127,6 +127,39 @@ int rte_eal_alarm_init(void);\n int rte_eal_check_module(const char *module_name);\n \n /**\n+ * Get virtual area of specified size from the OS.\n+ *\n+ * This function is private to the EAL.\n+ *\n+ * @param requested_addr\n+ *   Address where to request address space.\n+ * @param size\n+ *   Size of requested area.\n+ * @param page_sz\n+ *   Page size on which to align requested virtual area.\n+ * @param flags\n+ *   EAL_VIRTUAL_AREA_* flags.\n+ * @param mmap_flags\n+ *   Extra flags passed directly to mmap().\n+ *\n+ * @return\n+ *   Virtual area address if successful.\n+ *   NULL if unsuccessful.\n+ */\n+\n+#define EAL_VIRTUAL_AREA_ADDR_IS_HINT (1 << 0)\n+/**< don't fail if cannot get exact requested address. */\n+#define EAL_VIRTUAL_AREA_ALLOW_SHRINK (1 << 1)\n+/**< try getting smaller sized (decrement by page size) virtual areas if cannot\n+ * get area of requested size.\n+ */\n+#define EAL_VIRTUAL_AREA_UNMAP (1 << 2)\n+/**< immediately unmap reserved virtual area. 
*/\n+void *\n+eal_get_virtual_area(void *requested_addr, uint64_t *size,\n+\t\tuint64_t page_sz, int flags, int mmap_flags);\n+\n+/**\n  * Get cpu core_id.\n  *\n  * This function is private to the EAL.\ndiff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c\nindex 38853b7..5c11d77 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_memory.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c\n@@ -28,6 +28,7 @@\n #include <numaif.h>\n #endif\n \n+#include <rte_errno.h>\n #include <rte_log.h>\n #include <rte_memory.h>\n #include <rte_launch.h>\n@@ -57,8 +58,6 @@\n  * zone as well as a physical contiguous zone.\n  */\n \n-static uint64_t baseaddr_offset;\n-\n static bool phys_addrs_available = true;\n \n #define RANDOMIZE_VA_SPACE_FILE \"/proc/sys/kernel/randomize_va_space\"\n@@ -221,82 +220,6 @@ aslr_enabled(void)\n \t}\n }\n \n-/*\n- * Try to mmap *size bytes in /dev/zero. If it is successful, return the\n- * pointer to the mmap'd area and keep *size unmodified. Else, retry\n- * with a smaller zone: decrease *size by hugepage_sz until it reaches\n- * 0. In this case, return NULL. Note: this function returns an address\n- * which is a multiple of hugepage size.\n- */\n-static void *\n-get_virtual_area(size_t *size, size_t hugepage_sz)\n-{\n-\tvoid *addr;\n-\tvoid *addr_hint;\n-\tint fd;\n-\tlong aligned_addr;\n-\n-\tif (internal_config.base_virtaddr != 0) {\n-\t\tint page_size = sysconf(_SC_PAGE_SIZE);\n-\t\taddr_hint = (void *) (uintptr_t)\n-\t\t\t(internal_config.base_virtaddr + baseaddr_offset);\n-\t\taddr_hint = RTE_PTR_ALIGN_FLOOR(addr_hint, page_size);\n-\t} else {\n-\t\taddr_hint = NULL;\n-\t}\n-\n-\tRTE_LOG(DEBUG, EAL, \"Ask a virtual area of 0x%zx bytes\\n\", *size);\n-\n-\n-\tfd = open(\"/dev/zero\", O_RDONLY);\n-\tif (fd < 0){\n-\t\tRTE_LOG(ERR, EAL, \"Cannot open /dev/zero\\n\");\n-\t\treturn NULL;\n-\t}\n-\tdo {\n-\t\taddr = mmap(addr_hint, (*size) + hugepage_sz, PROT_READ,\n-#ifdef RTE_ARCH_PPC_64\n-\t\t\t\tMAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,\n-#else\n-\t\t\t\tMAP_PRIVATE,\n-#endif\n-\t\t\t\tfd, 0);\n-\t\tif (addr == MAP_FAILED) {\n-\t\t\t*size -= hugepage_sz;\n-\t\t} else if (addr_hint != NULL && addr != addr_hint) {\n-\t\t\tRTE_LOG(WARNING, EAL, \"WARNING! Base virtual address \"\n-\t\t\t\t\"hint (%p != %p) not respected!\\n\",\n-\t\t\t\taddr_hint, addr);\n-\t\t\tRTE_LOG(WARNING, EAL, \"   This may cause issues with \"\n-\t\t\t\t\"mapping memory into secondary processes\\n\");\n-\t\t}\n-\t} while (addr == MAP_FAILED && *size > 0);\n-\n-\tif (addr == MAP_FAILED) {\n-\t\tclose(fd);\n-\t\tRTE_LOG(ERR, EAL, \"Cannot get a virtual area: %s\\n\",\n-\t\t\tstrerror(errno));\n-\t\treturn NULL;\n-\t}\n-\n-\tmunmap(addr, (*size) + hugepage_sz);\n-\tclose(fd);\n-\n-\t/* align addr to a huge page size boundary */\n-\taligned_addr = (long)addr;\n-\taligned_addr += (hugepage_sz - 1);\n-\taligned_addr &= (~(hugepage_sz - 1));\n-\taddr = (void *)(aligned_addr);\n-\n-\tRTE_LOG(DEBUG, EAL, \"Virtual area found at %p (size = 0x%zx)\\n\",\n-\t\taddr, *size);\n-\n-\t/* increment offset */\n-\tbaseaddr_offset += *size;\n-\n-\treturn addr;\n-}\n-\n static sigjmp_buf huge_jmpenv;\n \n static void huge_sigbus_handler(int signo __rte_unused)\n@@ -445,7 +368,16 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,\n \t\t\t/* get the biggest virtual memory area up to\n \t\t\t * vma_len. If it fails, vma_addr is NULL, so\n \t\t\t * let the kernel provide the address. 
*/\n-\t\t\tvma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);\n+\t\t\tvma_addr = eal_get_virtual_area(NULL, &vma_len,\n+\t\t\t\t\thpi->hugepage_sz,\n+\t\t\t\t\tEAL_VIRTUAL_AREA_ALLOW_SHRINK |\n+\t\t\t\t\tEAL_VIRTUAL_AREA_UNMAP,\n+#ifdef RTE_ARCH_PPC_64\n+\t\t\t\t\tMAP_HUGETLB\n+#else\n+\t\t\t\t\t0\n+#endif\n+\t\t\t\t\t);\n \t\t\tif (vma_addr == NULL)\n \t\t\t\tvma_len = hugepage_sz;\n \t\t}\n@@ -1339,7 +1271,7 @@ rte_eal_hugepage_attach(void)\n \tunsigned i, s = 0; /* s used to track the segment number */\n \tunsigned max_seg = RTE_MAX_MEMSEG;\n \toff_t size = 0;\n-\tint fd, fd_zero = -1, fd_hugepage = -1;\n+\tint fd, fd_hugepage = -1;\n \n \tif (aslr_enabled() > 0) {\n \t\tRTE_LOG(WARNING, EAL, \"WARNING: Address Space Layout Randomization \"\n@@ -1350,11 +1282,6 @@ rte_eal_hugepage_attach(void)\n \n \ttest_phys_addrs_available();\n \n-\tfd_zero = open(\"/dev/zero\", O_RDONLY);\n-\tif (fd_zero < 0) {\n-\t\tRTE_LOG(ERR, EAL, \"Could not open /dev/zero\\n\");\n-\t\tgoto error;\n-\t}\n \tfd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);\n \tif (fd_hugepage < 0) {\n \t\tRTE_LOG(ERR, EAL, \"Could not open %s\\n\", eal_hugepage_info_path());\n@@ -1364,6 +1291,8 @@ rte_eal_hugepage_attach(void)\n \t/* map all segments into memory to make sure we get the addrs */\n \tfor (s = 0; s < RTE_MAX_MEMSEG; ++s) {\n \t\tvoid *base_addr;\n+\t\tuint64_t mmap_sz;\n+\t\tint mmap_flags = 0;\n \n \t\t/*\n \t\t * the first memory segment with len==0 is the one that\n@@ -1372,35 +1301,26 @@ rte_eal_hugepage_attach(void)\n \t\tif (mcfg->memseg[s].len == 0)\n \t\t\tbreak;\n \n-\t\t/*\n-\t\t * fdzero is mmapped to get a contiguous block of virtual\n-\t\t * addresses of the appropriate memseg size.\n-\t\t * use mmap to get identical addresses as the primary process.\n+\t\t/* get identical addresses as the primary process.\n \t\t */\n-\t\tbase_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,\n-\t\t\t\t PROT_READ,\n #ifdef RTE_ARCH_PPC_64\n-\t\t\t\t MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,\n-#else\n-\t\t\t\t MAP_PRIVATE,\n+\t\tmmap_flags |= MAP_HUGETLB;\n #endif\n-\t\t\t\t fd_zero, 0);\n-\t\tif (base_addr == MAP_FAILED ||\n-\t\t    base_addr != mcfg->memseg[s].addr) {\n+\t\tmmap_sz = mcfg->memseg[s].len;\n+\t\tbase_addr = eal_get_virtual_area(mcfg->memseg[s].addr,\n+\t\t\t\t&mmap_sz, mcfg->memseg[s].hugepage_sz, 0,\n+\t\t\t\tmmap_flags);\n+\t\tif (base_addr == NULL) {\n \t\t\tmax_seg = s;\n-\t\t\tif (base_addr != MAP_FAILED) {\n-\t\t\t\t/* errno is stale, don't use */\n-\t\t\t\tRTE_LOG(ERR, EAL, \"Could not mmap %llu bytes \"\n-\t\t\t\t\t\"in /dev/zero at [%p], got [%p] - \"\n-\t\t\t\t\t\"please use '--base-virtaddr' option\\n\",\n+\t\t\tif (rte_errno == EADDRNOTAVAIL) {\n+\t\t\t\tRTE_LOG(ERR, EAL, \"Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\\n\",\n \t\t\t\t\t(unsigned long long)mcfg->memseg[s].len,\n-\t\t\t\t\tmcfg->memseg[s].addr, base_addr);\n-\t\t\t\tmunmap(base_addr, mcfg->memseg[s].len);\n+\t\t\t\t\tmcfg->memseg[s].addr);\n \t\t\t} else {\n-\t\t\t\tRTE_LOG(ERR, EAL, \"Could not mmap %llu bytes \"\n-\t\t\t\t\t\"in /dev/zero at [%p]: '%s'\\n\",\n+\t\t\t\tRTE_LOG(ERR, EAL, \"Could not mmap %llu bytes at [%p]: '%s'\\n\",\n \t\t\t\t\t(unsigned long long)mcfg->memseg[s].len,\n-\t\t\t\t\tmcfg->memseg[s].addr, strerror(errno));\n+\t\t\t\t\tmcfg->memseg[s].addr,\n+\t\t\t\t\trte_strerror(rte_errno));\n \t\t\t}\n \t\t\tif (aslr_enabled() > 0) {\n \t\t\t\tRTE_LOG(ERR, EAL, \"It is recommended to \"\n@@ -1465,7 +1385,6 @@ rte_eal_hugepage_attach(void)\n \t}\n \t/* unmap the 
hugepage config file, since we are done using it */\n \tmunmap(hp, size);\n-\tclose(fd_zero);\n \tclose(fd_hugepage);\n \treturn 0;\n \n@@ -1474,8 +1393,6 @@ rte_eal_hugepage_attach(void)\n \t\tmunmap(mcfg->memseg[i].addr, mcfg->memseg[i].len);\n \tif (hp != NULL && hp != MAP_FAILED)\n \t\tmunmap(hp, size);\n-\tif (fd_zero >= 0)\n-\t\tclose(fd_zero);\n \tif (fd_hugepage >= 0)\n \t\tclose(fd_hugepage);\n \treturn -1;\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "01/41"
    ]
}
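The PUT and PATCH methods listed in the Allow header update a patch. A hedged sketch follows, assuming the caller holds a Patchwork API token with maintainer or delegate rights for the project; the "Authorization: Token" header is the usual Patchwork/DRF token scheme, the token string is a placeholder, "state" is the field shown in the GET response above, and "accepted" is just an example state value.

# Hedged example: change the patch state with a partial update (PATCH).
# The token below is a placeholder and must be replaced with a real
# Patchwork API token that has write access to the dpdk project.
import requests

resp = requests.patch(
    "https://patches.dpdk.org/api/patches/35719/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "accepted"},
)
resp.raise_for_status()
print(resp.json()["state"])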