[dpdk-dev,1/3] eal/linux: add function for checking hugepages within device supported address range
Commit Message
- This is needed to avoid problems with devices that cannot address all
the physically available memory.
Signed-off-by: Alejandro Lucero <alejandro.lucero@netronome.com>
---
lib/librte_eal/common/include/rte_memory.h | 6 ++++++
lib/librte_eal/linuxapp/eal/eal_memory.c | 27 +++++++++++++++++++++++++++
2 files changed, 33 insertions(+)
Comments
On Thu, 12 May 2016 15:33:58 +0100
"Alejandro.Lucero" <alejandro.lucero@netronome.com> wrote:
> - This is needed for avoiding problems with devices not being able to address
> all the physical available memory.
WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#14:
- This is needed for avoiding problems with devices not being able to address
WARNING:LONG_LINE: line over 80 characters
#57: FILE: lib/librte_eal/linuxapp/eal/eal_memory.c:1048:
+ const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
ERROR:SPACING: spaces required around that '=' (ctx:WxV)
#59: FILE: lib/librte_eal/linuxapp/eal/eal_memory.c:1050:
+ int i =0;
^
WARNING:LONG_LINE_STRING: line over 80 characters
#63: FILE: lib/librte_eal/linuxapp/eal/eal_memory.c:1054:
+ RTE_LOG(DEBUG, EAL, "Checking page with address %"PRIx64" and device"
WARNING:LONG_LINE_STRING: line over 80 characters
#66: FILE: lib/librte_eal/linuxapp/eal/eal_memory.c:1057:
+ RTE_LOG(ERR, EAL, "Allocated hugepages are out of device address"
total: 1 errors, 4 warnings, 45 lines checked
Regards
Jan
>
> Signed-off-by: Alejandro Lucero <alejandro.lucero@netronome.com>
>
> ---
> lib/librte_eal/common/include/rte_memory.h | 6 ++++++
> lib/librte_eal/linuxapp/eal/eal_memory.c | 27 +++++++++++++++++++++++++++
> 2 files changed, 33 insertions(+)
>
> diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
> index f8dbece..67b0b28 100644
> --- a/lib/librte_eal/common/include/rte_memory.h
> +++ b/lib/librte_eal/common/include/rte_memory.h
> @@ -256,6 +256,12 @@ rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
> }
> #endif
>
> +/**
> + * Check hugepages are within the supported
> + * device address space range.
> + */
> +int rte_eal_hugepage_check_address_mask(uint64_t dma_mask);
> +
> #ifdef __cplusplus
> }
> #endif
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index 5b9132c..2cd046d 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -1037,6 +1037,33 @@ calc_num_pages_per_socket(uint64_t * memory,
> }
>
> /*
> + * Some devices have addressing limitations. A PMD will indirectly call this
> + * function raising an error if any hugepage is out of address range supported.
> + * As hugepages are ordered by physical address, there is nothing to do as
> + * any other hugepage available will be out of range as well.
> + */
> +int
> +rte_eal_hugepage_check_address_mask(uint64_t dma_mask)
> +{
> + const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
> + phys_addr_t physaddr;
> + int i =0;
> +
> + while (i < RTE_MAX_MEMSEG && mcfg->memseg[i].len > 0) {
> + physaddr = mcfg->memseg[i].phys_addr + mcfg->memseg[i].len;
> + RTE_LOG(DEBUG, EAL, "Checking page with address %"PRIx64" and device"
> + " mask 0x%"PRIx64"\n", physaddr, dma_mask);
> + if (physaddr & ~dma_mask) {
> + RTE_LOG(ERR, EAL, "Allocated hugepages are out of device address"
> + " range.");
> + return -1;
> + }
> + i++;
> + }
> + return 0;
> +}
> +
> +/*
> * Prepare physical memory mapping: fill configuration structure with
> * these infos, return 0 on success.
> * 1. map N huge pages in separate files in hugetlbfs
@@ -256,6 +256,12 @@ rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
}
#endif
+/**
+ * Check that all allocated hugepages are within the supported
+ * device address space range.
+ *
+ * @param dma_mask
+ *   Bit mask covering the addressable bits of the device.
+ * @return
+ *   0 if all hugepages are addressable, -1 otherwise.
+ */
+int rte_eal_hugepage_check_address_mask(uint64_t dma_mask);
+
+
#ifdef __cplusplus
}
#endif
@@ -1037,6 +1037,33 @@ calc_num_pages_per_socket(uint64_t * memory,
}
/*
+ * Some devices have addressing limitations. A PMD will indirectly call this
+ * function raising an error if any hugepage is out of address range supported.
+ * As hugepages are ordered by physical address, there is nothing to do as
+ * any other hugepage available will be out of range as well.
+ */
+int
+rte_eal_hugepage_check_address_mask(uint64_t dma_mask)
+{
+ const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ phys_addr_t physaddr;
+ int i =0;
+
+ while (i < RTE_MAX_MEMSEG && mcfg->memseg[i].len > 0) {
+ physaddr = mcfg->memseg[i].phys_addr + mcfg->memseg[i].len;
+ RTE_LOG(DEBUG, EAL, "Checking page with address %"PRIx64" and device"
+ " mask 0x%"PRIx64"\n", physaddr, dma_mask);
+ if (physaddr & ~dma_mask) {
+ RTE_LOG(ERR, EAL, "Allocated hugepages are out of device address"
+ " range.");
+ return -1;
+ }
+ i++;
+ }
+ return 0;
+}
+
+/*
* Prepare physical memory mapping: fill configuration structure with
* these infos, return 0 on success.
* 1. map N huge pages in separate files in hugetlbfs