[v4,4/5] malloc: codeql fixes

Message ID 20230710170800.12478-5-stephen@networkplumber.org (mailing list archive)
State New
Delegated to: Thomas Monjalon
Series fixes for problems found by codeql analyzer

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Stephen Hemminger July 10, 2023, 5:07 p.m. UTC
  From: Sinan Kaya <okaya@kernel.org>

In malloc_heap_add_memory, the result of the call to
malloc_elem_join_adjacent_free may be NULL but is dereferenced
without a check.

In alloc_pages_on_heap, the result of the call to
rte_mem_virt2memseg_list may be NULL but is dereferenced without
a check.

In eal_memalloc_is_contig, the result of the call to rte_fbarray_get
may be NULL but is dereferenced without a check.

In malloc_elem_find_max_iova_contig, the result of the call to
rte_mem_virt2memseg may be NULL but is dereferenced without a check.

In malloc_heap_free, the result of the call to malloc_elem_free
may be NULL but is dereferenced without a check.

In malloc_elem_alloc, the result of the call to elem_start_pt
may be NULL but is dereferenced without a check.
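
All of these follow the same defensive pattern: check the helper's
return value and bail out on NULL instead of dereferencing it. A
minimal sketch of the idea (the wrapper function and its name are
illustrative only, not part of this patch):

#include <rte_memory.h>

/* Illustrative helper: look up the IOVA of a virtual address,
 * returning RTE_BAD_IOVA instead of crashing when the memseg
 * lookup fails.
 */
static rte_iova_t
lookup_iova(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(addr, msl);	/* may return NULL */
	if (ms == NULL)
		return RTE_BAD_IOVA;	/* propagate failure to the caller */

	return ms->iova;	/* safe to dereference after the check */
}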

Signed-off-by: Sinan Kaya <okaya@kernel.org>
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 lib/eal/common/eal_common_memalloc.c |  5 ++++-
 lib/eal/common/malloc_elem.c         | 14 +++++++++++---
 lib/eal/common/malloc_heap.c         |  9 ++++++++-
 3 files changed, 23 insertions(+), 5 deletions(-)
  

Patch

diff --git a/lib/eal/common/eal_common_memalloc.c b/lib/eal/common/eal_common_memalloc.c
index ab04479c1cc5..24506f8447d7 100644
--- a/lib/eal/common/eal_common_memalloc.c
+++ b/lib/eal/common/eal_common_memalloc.c
@@ -126,6 +126,9 @@  eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
 
 		/* skip first iteration */
 		ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
+		if (ms == NULL)
+			return false;
+
 		cur = ms->iova;
 		expected = cur + pgsz;
 
@@ -137,7 +140,7 @@  eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
 				cur_seg++, expected += pgsz) {
 			ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);
 
-			if (ms->iova != expected)
+			if (ms == NULL || ms->iova != expected)
 				return false;
 		}
 	}
diff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c
index 619c040aa3e8..443ae26d283a 100644
--- a/lib/eal/common/malloc_elem.c
+++ b/lib/eal/common/malloc_elem.c
@@ -63,6 +63,8 @@  malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
 
 	cur_page = RTE_PTR_ALIGN_FLOOR(contig_seg_start, page_sz);
 	ms = rte_mem_virt2memseg(cur_page, elem->msl);
+	if (ms == NULL)
+		return 0;
 
 	/* do first iteration outside the loop */
 	page_end = RTE_PTR_ADD(cur_page, page_sz);
@@ -91,9 +93,12 @@  malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
 			 * we're not blowing past data end.
 			 */
 			ms = rte_mem_virt2memseg(contig_seg_start, elem->msl);
-			cur_page = ms->addr;
-			/* don't trigger another recalculation */
-			expected_iova = ms->iova;
+			if (ms != NULL) {
+				cur_page = ms->addr;
+
+				/* don't trigger another recalculation */
+				expected_iova = ms->iova;
+			}
 			continue;
 		}
 		/* cur_seg_end ends on a page boundary or on data end. if we're
@@ -430,6 +435,9 @@  malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
 {
 	struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound,
 			contig);
+	if (new_elem == NULL)
+		return NULL;
+
 	const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
 	const size_t trailer_size = elem->size - old_elem_size - size -
 		MALLOC_ELEM_OVERHEAD;
diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index 6b6cf9174cd3..0abaaa8c57f8 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -97,6 +97,8 @@  malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
 	malloc_elem_insert(elem);
 
 	elem = malloc_elem_join_adjacent_free(elem);
+	if (elem == NULL)
+		return NULL;
 
 	malloc_elem_free_list_insert(elem);
 
@@ -321,6 +323,8 @@  alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
 
 	map_addr = ms[0]->addr;
 	msl = rte_mem_virt2memseg_list(map_addr);
+	if (msl == NULL)
+		return NULL;
 
 	/* check if we wanted contiguous memory but didn't get it */
 	if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
@@ -897,6 +901,9 @@  malloc_heap_free(struct malloc_elem *elem)
 	/* anything after this is a bonus */
 	ret = 0;
 
+	if (elem == NULL)
+		goto free_unlock;
+
 	/* ...of which we can't avail if we are in legacy mode, or if this is an
 	 * externally allocated segment.
 	 */
@@ -935,7 +942,7 @@  malloc_heap_free(struct malloc_elem *elem)
 		const struct rte_memseg *tmp =
 				rte_mem_virt2memseg(aligned_start, msl);
 
-		if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
+		if ((tmp != NULL) && (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE)) {
 			/* this is an unfreeable segment, so move start */
 			aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
 		}