@@ -263,6 +263,11 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
addr, mem_sz);
+ if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+		RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list\n");
+ return -1;
+ }
+
return 0;
}
@@ -1050,6 +1055,8 @@ rte_eal_memory_detach(void)
RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
rte_strerror(rte_errno));
+ eal_memseg_list_unmap_asan_shadow(msl);
+
/*
* we are detaching the fbarray rather than destroying because
* other processes might still reference this fbarray, and we
@@ -300,6 +300,41 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
void
eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ * Memory segment list.
+ * @return
+ * 0 on success, (-1) on failure.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline int
+eal_memseg_list_map_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+ return 0;
+}
+#endif
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ * Memory segment list.
+ */
+#ifdef RTE_MALLOC_ASAN
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline void
+eal_memseg_list_unmap_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+}
+#endif
+
/**
* Distribute available memory between MSLs.
*
@@ -511,6 +511,21 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
grow, dirty);
}
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+	/* We need to trigger a write to the page to force a page fault, but we
+	 * can't overwrite a value that is already there, so read the old value
+	 * and write it back. The kernel populates the page with zeroes initially.
+	 *
+	 * Disable ASan instrumentation here because if the segment is already
+	 * allocated by another process and is marked as free in the shadow,
+	 * accessing this address would trigger an ASan error.
+	 */
+ *(volatile int *)addr = *(volatile int *)addr;
+}
+
static int
alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
struct hugepage_info *hi, unsigned int list_idx,
@@ -636,12 +651,8 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
goto mapped;
}
- /* we need to trigger a write to the page to enforce page fault and
- * ensure that page is accessible to us, but we can't overwrite value
- * that is already there, so read the old value, and write itback.
- * kernel populates the page with zeroes initially.
- */
- *(volatile int *)addr = *(volatile int *)addr;
+	/* force a page fault to make sure the page is accessible to us */
+ page_fault(addr);
iova = rte_mem_virt2iova(addr);
if (iova == RTE_BAD_PHYS_ADDR) {
@@ -41,6 +41,7 @@
#include "eal_filesystem.h"
#include "eal_hugepages.h"
#include "eal_options.h"
+#include "malloc_elem.h"
#define PFN_MASK_SIZE 8
@@ -1469,6 +1470,8 @@ eal_legacy_hugepage_init(void)
if (msl->memseg_arr.count > 0)
continue;
/* this is an unused list, deallocate it */
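+		/* release the ASan shadow mapping before unmapping the VA */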
+ eal_memseg_list_unmap_asan_shadow(msl);
mem_sz = msl->len;
munmap(msl->base_va, mem_sz);
msl->base_va = NULL;
@@ -1956,3 +1958,113 @@ rte_eal_memseg_init(void)
#endif
memseg_secondary_init();
}
+
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl)
+{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+ void *addr;
+ void *shadow_addr;
+ size_t shadow_sz;
+ int shm_oflag;
+ char shm_path[PATH_MAX];
+ int shm_fd;
+ int ret = 0;
+
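+	/* only memseg lists that back a malloc heap have a shadow mapping */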
+ if (!msl->heap)
+ return 0;
+
+ /* these options imply no secondary process support */
+ if (internal_conf->hugepage_file.unlink_before_mapping ||
+ internal_conf->no_shconf || internal_conf->no_hugetlbfs) {
+ RTE_ASSERT(rte_eal_process_type() != RTE_PROC_SECONDARY);
+ return 0;
+ }
+
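+	/* each ASan shadow byte covers 2^ASAN_SHADOW_SCALE bytes of
+	 * application memory, hence the shadow size below
+	 */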
+ shadow_addr = ASAN_MEM_TO_SHADOW(msl->base_va);
+ shadow_sz = msl->len >> ASAN_SHADOW_SCALE;
+
+ snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+ eal_get_hugefile_prefix(), msl->memseg_arr.name);
+
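+	/* the primary process creates and sizes the shared file;
+	 * secondary processes attach to the existing one
+	 */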
+ shm_oflag = O_RDWR;
+ if (internal_conf->process_type == RTE_PROC_PRIMARY)
+ shm_oflag |= O_CREAT | O_TRUNC;
+
+ shm_fd = shm_open(shm_path, shm_oflag, 0600);
+ if (shm_fd == -1) {
+ RTE_LOG(DEBUG, EAL, "shadow shm_open() failed: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+ ret = ftruncate(shm_fd, shadow_sz);
+ if (ret == -1) {
+ RTE_LOG(DEBUG, EAL, "shadow ftruncate() failed: %s\n",
+ strerror(errno));
+ goto out;
+ }
+ }
+
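+	/* map the shared file at the fixed shadow address so that every
+	 * process sees the same shadow state for this memseg list
+	 */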
+ addr = mmap(shadow_addr, shadow_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, shm_fd, 0);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(DEBUG, EAL, "shadow mmap() failed: %s\n",
+ strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ if (addr != shadow_addr) {
+ RTE_LOG(DEBUG, EAL, "wrong shadow mmap() address\n");
+ munmap(addr, shadow_sz);
+ ret = -1;
+ }
+out:
+ close(shm_fd);
+ if (ret != 0) {
+ if (internal_conf->process_type == RTE_PROC_PRIMARY)
+ shm_unlink(shm_path);
+ }
+
+ return ret;
+}
+
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl)
+{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ if (!msl->heap || internal_conf->hugepage_file.unlink_before_mapping ||
+ internal_conf->no_shconf || internal_conf->no_hugetlbfs)
+ return;
+
+ if (munmap(ASAN_MEM_TO_SHADOW(msl->base_va),
+ msl->len >> ASAN_SHADOW_SCALE) != 0)
+ RTE_LOG(ERR, EAL, "Could not unmap asan shadow memory: %s\n",
+ strerror(errno));
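+	/* only the primary process removes the backing file */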
+ if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+ char shm_path[PATH_MAX];
+
+ snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+ eal_get_hugefile_prefix(),
+ msl->memseg_arr.name);
+ shm_unlink(shm_path);
+ }
+}
+#endif
@@ -23,3 +23,8 @@ deps += ['kvargs', 'telemetry']
if has_libnuma
dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
endif
+
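+# shm_open()/shm_unlink() used for the ASan shadow file live in librt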
+if dpdk_conf.has('RTE_MALLOC_ASAN')
+ ext_deps += cc.find_library('rt')
+endif