[v2] mem: fix undefined behavior in NUMA code
Checks
Commit Message
When NUMA-aware hugepages config option is set, we rely on
libnuma to tell the kernel to allocate hugepages on a specific
NUMA node. However, we allocate node mask before we check if
NUMA is available in the first place, which, according to
the manpage [1], causes undefined behaviour.
Fix by only using nodemask when we have NUMA available.
[1] https://linux.die.net/man/3/numa_alloc_onnode
Bugzilla ID: 20
Fixes: 1b72605d2416 ("mem: balanced allocation of hugepages")
Cc: i.maximets@samsung.com
Cc: stable@dpdk.org
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/linuxapp/eal/eal_memory.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
Comments
On 20.09.2018 15:50, Anatoly Burakov wrote:
> When NUMA-aware hugepages config option is set, we rely on
> libnuma to tell the kernel to allocate hugepages on a specific
> NUMA node. However, we allocate node mask before we check if
> NUMA is available in the first place, which, according to
> the manpage [1], causes undefined behaviour.
>
> Fix by only using nodemask when we have NUMA available.
>
> [1] https://linux.die.net/man/3/numa_alloc_onnode
>
> Bugzilla ID: 20
>
> Fixes: 1b72605d2416 ("mem: balanced allocation of hugepages")
> Cc: i.maximets@samsung.com
> Cc: stable@dpdk.org
>
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
> ---
> lib/librte_eal/linuxapp/eal/eal_memory.c | 5 +++--
> 1 file changed, 3 insertions(+), 2 deletions(-)
>
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index dbf19499e..1a2a84a65 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -263,7 +263,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
> int node_id = -1;
> int essential_prev = 0;
> int oldpolicy;
> - struct bitmask *oldmask = numa_allocate_nodemask();
> + struct bitmask *oldmask = NULL;
> bool have_numa = true;
> unsigned long maxnode = 0;
>
> @@ -275,6 +275,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
>
> if (have_numa) {
> RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
> + oldmask = numa_allocate_nodemask();
> if (get_mempolicy(&oldpolicy, oldmask->maskp,
> oldmask->size + 1, 0, 0) < 0) {
> RTE_LOG(ERR, EAL,
> @@ -401,8 +402,8 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
> strerror(errno));
> numa_set_localalloc();
> }
> + numa_free_cpumask(oldmask);
> }
> - numa_free_cpumask(oldmask);
There will be an 'oldmask' leak in case no 'socket-mem' is requested.
> #endif
> return i;
> }
>
On 21-Sep-18 7:47 AM, Ilya Maximets wrote:
> On 20.09.2018 15:50, Anatoly Burakov wrote:
>> When NUMA-aware hugepages config option is set, we rely on
>> libnuma to tell the kernel to allocate hugepages on a specific
>> NUMA node. However, we allocate node mask before we check if
>> NUMA is available in the first place, which, according to
>> the manpage [1], causes undefined behaviour.
>>
>> Fix by only using nodemask when we have NUMA available.
>>
>> [1] https://linux.die.net/man/3/numa_alloc_onnode
>>
>> Bugzilla ID: 20
>>
>> Fixes: 1b72605d2416 ("mem: balanced allocation of hugepages")
>> Cc: i.maximets@samsung.com
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
>> ---
>> lib/librte_eal/linuxapp/eal/eal_memory.c | 5 +++--
>> 1 file changed, 3 insertions(+), 2 deletions(-)
>>
>> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
>> index dbf19499e..1a2a84a65 100644
>> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
>> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
>> @@ -263,7 +263,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
>> int node_id = -1;
>> int essential_prev = 0;
>> int oldpolicy;
>> - struct bitmask *oldmask = numa_allocate_nodemask();
>> + struct bitmask *oldmask = NULL;
>> bool have_numa = true;
>> unsigned long maxnode = 0;
>>
>> @@ -275,6 +275,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
>>
>> if (have_numa) {
>> RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
>> + oldmask = numa_allocate_nodemask();
>> if (get_mempolicy(&oldpolicy, oldmask->maskp,
>> oldmask->size + 1, 0, 0) < 0) {
>> RTE_LOG(ERR, EAL,
>> @@ -401,8 +402,8 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
>> strerror(errno));
>> numa_set_localalloc();
>> }
>> + numa_free_cpumask(oldmask);
>> }
>> - numa_free_cpumask(oldmask);
>
> There will be an 'oldmask' leak in case no 'socket-mem' is requested.
Oh, right, you're correct!
>
>> #endif
>> return i;
>> }
>>
>
@@ -263,7 +263,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
int node_id = -1;
int essential_prev = 0;
int oldpolicy;
- struct bitmask *oldmask = numa_allocate_nodemask();
+ struct bitmask *oldmask = NULL;
bool have_numa = true;
unsigned long maxnode = 0;
@@ -275,6 +275,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
if (have_numa) {
RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ oldmask = numa_allocate_nodemask();
if (get_mempolicy(&oldpolicy, oldmask->maskp,
oldmask->size + 1, 0, 0) < 0) {
RTE_LOG(ERR, EAL,
@@ -401,8 +402,8 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
strerror(errno));
numa_set_localalloc();
}
+ numa_free_cpumask(oldmask);
}
- numa_free_cpumask(oldmask);
#endif
return i;
}