[v1,5/6] eal/linux: allow hugepage file reuse

Message ID 20220117081406.482210-1-dkozlyuk@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Headers
Series Fast restart with many hugepages |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Dmitry Kozlyuk Jan. 17, 2022, 8:14 a.m. UTC
  Linux EAL ensured that mapped hugepages are clean
by always mapping from newly created files:
existing hugepage backing files were always removed.
In this case, the kernel clears the page to prevent data leaks,
because the mapped memory may contain leftover data
from the previous process that was using this memory.
Clearing takes the bulk of the time spent in mmap(2),
increasing EAL initialization time.

Introduce a mode to keep existing files and reuse them
in order to speed up initial memory allocation in EAL.
Hugepages mapped from such files may contain data
left by the previous process that used this memory,
so RTE_MEMSEG_FLAG_DIRTY is set for their segments.
If multiple hugepages are mapped from the same file:
1. When fallocate(2) is used, all memory mapped from this file
   is considered dirty, because it is unknown
   which parts of the file are holes.
2. When ftruncate(2) is used, memory mapped from this file
   is considered dirty unless the file is extended
   to create a new mapping, which implies clean memory.

Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
---
Coverity complains that "path" may be uninitialized in get_seg_fd()
at line 327, but it is always initialized with eal_get_hugefile_path()
at lines 309-316.

 lib/eal/common/eal_internal_cfg.h |   2 +
 lib/eal/linux/eal_hugepage_info.c | 118 +++++++++++++++++----
 lib/eal/linux/eal_memalloc.c      | 164 ++++++++++++++++++------------
 3 files changed, 200 insertions(+), 84 deletions(-)
  

Comments

Thomas Monjalon Jan. 17, 2022, 2:24 p.m. UTC | #1
17/01/2022 09:14, Dmitry Kozlyuk:
> Linux EAL ensured that mapped hugepages are clean
> by always mapping from newly created files:
> existing hugepage backing files were always removed.
> In this case, the kernel clears the page to prevent data leaks,
> because the mapped memory may contain leftover data
> from the previous process that was using this memory.
> Clearing takes the bulk of the time spent in mmap(2),
> increasing EAL initialization time.
> 
> Introduce a mode to keep existing files and reuse them
> in order to speed up initial memory allocation in EAL.
> Hugepages mapped from such files may contain data
> left by the previous process that used this memory,
> so RTE_MEMSEG_FLAG_DIRTY is set for their segments.
> If multiple hugepages are mapped from the same file:
> 1. When fallocate(2) is used, all memory mapped from this file
>    is considered dirty, because it is unknown
>    which parts of the file are holes.
> 2. When ftruncate(2) is used, memory mapped from this file
>    is considered dirty unless the file is extended
>    to create a new mapping, which implies clean memory.
[...]
>  struct hugepage_file_discipline {
>  	/** Unlink files before mapping them to leave no trace in hugetlbfs. */
>  	bool unlink_before_mapping;
> +	/** Reuse existing files, never delete or re-create them. */
> +	bool keep_existing;
>  };

That's a bit confusing to mix "unlink" and "keep".
I would prefer focusing on what is done, i.e. unlink when.
I like "unlink_before_mapping" because it is a real action.
The other action should be "unlink_existing" or "unlink_before_creating".
  

Patch

diff --git a/lib/eal/common/eal_internal_cfg.h b/lib/eal/common/eal_internal_cfg.h
index b5e6942578..3685aa7c52 100644
--- a/lib/eal/common/eal_internal_cfg.h
+++ b/lib/eal/common/eal_internal_cfg.h
@@ -44,6 +44,8 @@  struct simd_bitwidth {
 struct hugepage_file_discipline {
 	/** Unlink files before mapping them to leave no trace in hugetlbfs. */
 	bool unlink_before_mapping;
+	/** Reuse existing files, never delete or re-create them. */
+	bool keep_existing;
 };
 
 /**
diff --git a/lib/eal/linux/eal_hugepage_info.c b/lib/eal/linux/eal_hugepage_info.c
index 9fb0e968db..6607fe5906 100644
--- a/lib/eal/linux/eal_hugepage_info.c
+++ b/lib/eal/linux/eal_hugepage_info.c
@@ -84,7 +84,7 @@  static int get_hp_sysfs_value(const char *subdir, const char *file, unsigned lon
 /* this function is only called from eal_hugepage_info_init which itself
  * is only called from a primary process */
 static uint32_t
-get_num_hugepages(const char *subdir, size_t sz)
+get_num_hugepages(const char *subdir, size_t sz, unsigned int reusable_pages)
 {
 	unsigned long resv_pages, num_pages, over_pages, surplus_pages;
 	const char *nr_hp_file = "free_hugepages";
@@ -116,7 +116,7 @@  get_num_hugepages(const char *subdir, size_t sz)
 	else
 		over_pages = 0;
 
-	if (num_pages == 0 && over_pages == 0)
+	if (num_pages == 0 && over_pages == 0 && reusable_pages == 0)
 		RTE_LOG(WARNING, EAL, "No available %zu kB hugepages reported\n",
 				sz >> 10);
 
@@ -124,6 +124,10 @@  get_num_hugepages(const char *subdir, size_t sz)
 	if (num_pages < over_pages) /* overflow */
 		num_pages = UINT32_MAX;
 
+	num_pages += reusable_pages;
+	if (num_pages < reusable_pages) /* overflow */
+		num_pages = UINT32_MAX;
+
 	/* we want to return a uint32_t and more than this looks suspicious
 	 * anyway ... */
 	if (num_pages > UINT32_MAX)
@@ -297,20 +301,28 @@  get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len)
 	return -1;
 }
 
+struct walk_hugedir_data {
+	int dir_fd;
+	int file_fd;
+	const char *file_name;
+	void *user_data;
+};
+
+typedef void (walk_hugedir_t)(const struct walk_hugedir_data *whd);
+
 /*
- * Clear the hugepage directory of whatever hugepage files
- * there are. Checks if the file is locked (i.e.
- * if it's in use by another DPDK process).
+ * Search the hugepage directory for whatever hugepage files there are.
+ * Check if the file is in use by another DPDK process.
+ * If not, execute a callback on it.
  */
 static int
-clear_hugedir(const char * hugedir)
+walk_hugedir(const char *hugedir, walk_hugedir_t *cb, void *user_data)
 {
 	DIR *dir;
 	struct dirent *dirent;
 	int dir_fd, fd, lck_result;
 	const char filter[] = "*map_*"; /* matches hugepage files */
 
-	/* open directory */
 	dir = opendir(hugedir);
 	if (!dir) {
 		RTE_LOG(ERR, EAL, "Unable to open hugepage directory %s\n",
@@ -326,7 +338,7 @@  clear_hugedir(const char * hugedir)
 		goto error;
 	}
 
-	while(dirent != NULL){
+	while (dirent != NULL) {
 		/* skip files that don't match the hugepage pattern */
 		if (fnmatch(filter, dirent->d_name, 0) > 0) {
 			dirent = readdir(dir);
@@ -345,9 +357,15 @@  clear_hugedir(const char * hugedir)
 		/* non-blocking lock */
 		lck_result = flock(fd, LOCK_EX | LOCK_NB);
 
-		/* if lock succeeds, remove the file */
+		/* if lock succeeds, execute callback */
 		if (lck_result != -1)
-			unlinkat(dir_fd, dirent->d_name, 0);
+			cb(&(struct walk_hugedir_data){
+				.dir_fd = dir_fd,
+				.file_fd = fd,
+				.file_name = dirent->d_name,
+				.user_data = user_data,
+			});
+
 		close (fd);
 		dirent = readdir(dir);
 	}
@@ -359,12 +377,48 @@  clear_hugedir(const char * hugedir)
 	if (dir)
 		closedir(dir);
 
-	RTE_LOG(ERR, EAL, "Error while clearing hugepage dir: %s\n",
+	RTE_LOG(ERR, EAL, "Error while walking hugepage dir: %s\n",
 		strerror(errno));
 
 	return -1;
 }
 
+static void
+clear_hugedir_cb(const struct walk_hugedir_data *whd)
+{
+	unlinkat(whd->dir_fd, whd->file_name, 0);
+}
+
+/* Remove hugepage files not used by other DPDK processes from a directory. */
+static int
+clear_hugedir(const char *hugedir)
+{
+	return walk_hugedir(hugedir, clear_hugedir_cb, NULL);
+}
+
+static void
+inspect_hugedir_cb(const struct walk_hugedir_data *whd)
+{
+	uint64_t *total_size = whd->user_data;
+	struct stat st;
+
+	if (fstat(whd->file_fd, &st) < 0)
+		RTE_LOG(DEBUG, EAL, "%s(): stat(\"%s\") failed: %s\n",
+				__func__, whd->file_name, strerror(errno));
+	else
+		(*total_size) += st.st_size;
+}
+
+/*
+ * Count the total size in bytes of all files in the directory
+ * not mapped by any other DPDK process.
+ */
+static int
+inspect_hugedir(const char *hugedir, uint64_t *total_size)
+{
+	return walk_hugedir(hugedir, inspect_hugedir_cb, total_size);
+}
+
 static int
 compare_hpi(const void *a, const void *b)
 {
@@ -375,7 +429,8 @@  compare_hpi(const void *a, const void *b)
 }
 
 static void
-calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
+calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent,
+		unsigned int reusable_pages)
 {
 	uint64_t total_pages = 0;
 	unsigned int i;
@@ -388,8 +443,15 @@  calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
 	 * in one socket and sorting them later
 	 */
 	total_pages = 0;
-	/* we also don't want to do this for legacy init */
-	if (!internal_conf->legacy_mem)
+
+	/*
+	 * We also don't want to do this for legacy init.
+	 * When there are hugepage files to reuse it is unknown
+	 * what NUMA node the pages are on.
+	 * This could be determined by mapping,
+	 * but it is precisely what hugepage file reuse is trying to avoid.
+	 */
+	if (!internal_conf->legacy_mem && reusable_pages == 0)
 		for (i = 0; i < rte_socket_count(); i++) {
 			int socket = rte_socket_id_by_idx(i);
 			unsigned int num_pages =
@@ -405,7 +467,7 @@  calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
 	 */
 	if (total_pages == 0) {
 		hpi->num_pages[0] = get_num_hugepages(dirent->d_name,
-				hpi->hugepage_sz);
+				hpi->hugepage_sz, reusable_pages);
 
 #ifndef RTE_ARCH_64
 		/* for 32-bit systems, limit number of hugepages to
@@ -421,6 +483,8 @@  hugepage_info_init(void)
 {	const char dirent_start_text[] = "hugepages-";
 	const size_t dirent_start_len = sizeof(dirent_start_text) - 1;
 	unsigned int i, num_sizes = 0;
+	uint64_t reusable_bytes;
+	unsigned int reusable_pages;
 	DIR *dir;
 	struct dirent *dirent;
 	struct internal_config *internal_conf =
@@ -454,7 +518,7 @@  hugepage_info_init(void)
 			uint32_t num_pages;
 
 			num_pages = get_num_hugepages(dirent->d_name,
-					hpi->hugepage_sz);
+					hpi->hugepage_sz, 0);
 			if (num_pages > 0)
 				RTE_LOG(NOTICE, EAL,
 					"%" PRIu32 " hugepages of size "
@@ -473,7 +537,7 @@  hugepage_info_init(void)
 					"hugepages of size %" PRIu64 " bytes "
 					"will be allocated anonymously\n",
 					hpi->hugepage_sz);
-				calc_num_pages(hpi, dirent);
+				calc_num_pages(hpi, dirent, 0);
 				num_sizes++;
 			}
 #endif
@@ -489,11 +553,23 @@  hugepage_info_init(void)
 				"Failed to lock hugepage directory!\n");
 			break;
 		}
-		/* clear out the hugepages dir from unused pages */
-		if (clear_hugedir(hpi->hugedir) == -1)
-			break;
 
-		calc_num_pages(hpi, dirent);
+		/*
+		 * Check for existing hugepage files and either remove them
+		 * or count how many of them can be reused.
+		 */
+		reusable_pages = 0;
+		if (internal_conf->hugepage_file.keep_existing) {
+			reusable_bytes = 0;
+			if (inspect_hugedir(hpi->hugedir,
+					&reusable_bytes) < 0)
+				break;
+			RTE_ASSERT(reusable_bytes % hpi->hugepage_sz == 0);
+			reusable_pages = reusable_bytes / hpi->hugepage_sz;
+		} else if (clear_hugedir(hpi->hugedir) < 0) {
+			break;
+		}
+		calc_num_pages(hpi, dirent, reusable_pages);
 
 		num_sizes++;
 	}
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index abbe605e49..e4cd10b195 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -287,12 +287,19 @@  get_seg_memfd(struct hugepage_info *hi __rte_unused,
 
 static int
 get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
-		unsigned int list_idx, unsigned int seg_idx)
+		unsigned int list_idx, unsigned int seg_idx,
+		bool *dirty)
 {
 	int fd;
+	int *out_fd;
+	struct stat st;
+	int ret;
 	const struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
+	if (dirty != NULL)
+		*dirty = false;
+
 	/* for in-memory mode, we only make it here when we're sure we support
 	 * memfd, and this is a special case.
 	 */
@@ -300,66 +307,69 @@  get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
 		return get_seg_memfd(hi, list_idx, seg_idx);
 
 	if (internal_conf->single_file_segments) {
-		/* create a hugepage file path */
+		out_fd = &fd_list[list_idx].memseg_list_fd;
 		eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
-
-		fd = fd_list[list_idx].memseg_list_fd;
-
-		if (fd < 0) {
-			fd = open(path, O_CREAT | O_RDWR, 0600);
-			if (fd < 0) {
-				RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
-					__func__, strerror(errno));
-				return -1;
-			}
-			/* take out a read lock and keep it indefinitely */
-			if (lock(fd, LOCK_SH) < 0) {
-				RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
-					__func__, strerror(errno));
-				close(fd);
-				return -1;
-			}
-			fd_list[list_idx].memseg_list_fd = fd;
-		}
 	} else {
-		/* create a hugepage file path */
+		out_fd = &fd_list[list_idx].fds[seg_idx];
 		eal_get_hugefile_path(path, buflen, hi->hugedir,
 				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+	}
+	fd = *out_fd;
+	if (fd >= 0)
+		return fd;
 
-		fd = fd_list[list_idx].fds[seg_idx];
-
-		if (fd < 0) {
-			/* A primary process is the only one creating these
-			 * files. If there is a leftover that was not cleaned
-			 * by clear_hugedir(), we must *now* make sure to drop
-			 * the file or we will remap old stuff while the rest
-			 * of the code is built on the assumption that a new
-			 * page is clean.
-			 */
-			if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
-					unlink(path) == -1 &&
-					errno != ENOENT) {
-				RTE_LOG(DEBUG, EAL, "%s(): could not remove '%s': %s\n",
-					__func__, path, strerror(errno));
-				return -1;
-			}
+	/*
+	 * There is no TOCTOU between stat() and unlink()/open()
+	 * because the hugepage directory is locked.
+	 */
+	ret = stat(path, &st);
+	if (ret < 0 && errno != ENOENT) {
+		RTE_LOG(DEBUG, EAL, "%s(): stat() for '%s' failed: %s\n",
+			__func__, path, strerror(errno));
+		return -1;
+	}
+	if (internal_conf->hugepage_file.keep_existing && ret == 0 &&
+			dirty != NULL)
+		*dirty = true;
 
-			fd = open(path, O_CREAT | O_RDWR, 0600);
-			if (fd < 0) {
-				RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
-					__func__, strerror(errno));
-				return -1;
-			}
-			/* take out a read lock */
-			if (lock(fd, LOCK_SH) < 0) {
-				RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
-					__func__, strerror(errno));
-				close(fd);
-				return -1;
-			}
-			fd_list[list_idx].fds[seg_idx] = fd;
+	/*
+	 * The kernel clears a hugepage only when it is mapped
+	 * from a particular file for the first time.
+	 * If the file already exists, the old content will be mapped.
+	 * If the memory manager assumes all mapped pages to be clean,
+	 * the file must be removed and created anew.
+	 * Otherwise, the primary caller must be notified
+	 * that mapped pages will be dirty
+	 * (secondary callers receive the segment state from the primary one).
+	 * When multiple hugepages are mapped from the same file,
+	 * whether they will be dirty depends on the part that is mapped.
+	 */
+	if (!internal_conf->single_file_segments &&
+			rte_eal_process_type() == RTE_PROC_PRIMARY &&
+			ret == 0) {
+		/* coverity[toctou] */
+		if (unlink(path) < 0) {
+			RTE_LOG(DEBUG, EAL, "%s(): could not remove '%s': %s\n",
+				__func__, path, strerror(errno));
+			return -1;
 		}
 	}
+
+	/* coverity[toctou] */
+	fd = open(path, O_CREAT | O_RDWR, 0600);
+	if (fd < 0) {
+		RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
+			__func__, strerror(errno));
+		return -1;
+	}
+	/* take out a read lock */
+	if (lock(fd, LOCK_SH) < 0) {
+		RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
+			__func__, strerror(errno));
+		close(fd);
+		return -1;
+	}
+	*out_fd = fd;
 	return fd;
 }
 
@@ -385,8 +395,10 @@  resize_hugefile_in_memory(int fd, uint64_t fa_offset,
 
 static int
 resize_hugefile_in_filesystem(int fd, uint64_t fa_offset, uint64_t page_sz,
-		bool grow)
+		bool grow, bool *dirty)
 {
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
 	bool again = false;
 
 	do {
@@ -405,6 +417,8 @@  resize_hugefile_in_filesystem(int fd, uint64_t fa_offset, uint64_t page_sz,
 			uint64_t cur_size = get_file_size(fd);
 
 			/* fallocate isn't supported, fall back to ftruncate */
+			if (dirty != NULL)
+				*dirty = new_size <= cur_size;
 			if (new_size > cur_size &&
 					ftruncate(fd, new_size) < 0) {
 				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
@@ -447,8 +461,17 @@  resize_hugefile_in_filesystem(int fd, uint64_t fa_offset, uint64_t page_sz,
 						strerror(errno));
 					return -1;
 				}
-			} else
+			} else {
 				fallocate_supported = 1;
+				/*
+				 * It is unknown which portions of an existing
+				 * hugepage file were allocated previously,
+				 * so all pages within the file are considered
+				 * dirty, unless the file is a fresh one.
+				 */
+				if (dirty != NULL)
+					*dirty &= internal_conf->hugepage_file.keep_existing;
+			}
 		}
 	} while (again);
 
@@ -475,7 +498,8 @@  close_hugefile(int fd, char *path, int list_idx)
 }
 
 static int
-resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow)
+resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
+		bool *dirty)
 {
 	/* in-memory mode is a special case, because we can be sure that
 	 * fallocate() is supported.
@@ -483,12 +507,15 @@  resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow)
 	const struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
-	if (internal_conf->in_memory)
+	if (internal_conf->in_memory) {
+		if (dirty != NULL)
+			*dirty = false;
 		return resize_hugefile_in_memory(fd, fa_offset,
 				page_sz, grow);
+	}
 
 	return resize_hugefile_in_filesystem(fd, fa_offset, page_sz,
-				grow);
+			grow, dirty);
 }
 
 static int
@@ -505,6 +532,7 @@  alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 	char path[PATH_MAX];
 	int ret = 0;
 	int fd;
+	bool dirty;
 	size_t alloc_sz;
 	int flags;
 	void *new_addr;
@@ -534,6 +562,7 @@  alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 
 		pagesz_flag = pagesz_flags(alloc_sz);
 		fd = -1;
+		dirty = false;
 		mmap_flags = in_memory_flags | pagesz_flag;
 
 		/* single-file segments codepath will never be active
@@ -544,7 +573,8 @@  alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		map_offset = 0;
 	} else {
 		/* takes out a read lock on segment or segment list */
-		fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
+		fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx,
+				&dirty);
 		if (fd < 0) {
 			RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
 			return -1;
@@ -552,7 +582,8 @@  alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 
 		if (internal_conf->single_file_segments) {
 			map_offset = seg_idx * alloc_sz;
-			ret = resize_hugefile(fd, map_offset, alloc_sz, true);
+			ret = resize_hugefile(fd, map_offset, alloc_sz, true,
+					&dirty);
 			if (ret < 0)
 				goto resized;
 
@@ -662,6 +693,7 @@  alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 	ms->nrank = rte_memory_get_nrank();
 	ms->iova = iova;
 	ms->socket_id = socket_id;
+	ms->flags = dirty ? RTE_MEMSEG_FLAG_DIRTY : 0;
 
 	return 0;
 
@@ -689,7 +721,7 @@  alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		return -1;
 
 	if (internal_conf->single_file_segments) {
-		resize_hugefile(fd, map_offset, alloc_sz, false);
+		resize_hugefile(fd, map_offset, alloc_sz, false, NULL);
 		/* ignore failure, can't make it any worse */
 
 		/* if refcount is at zero, close the file */
@@ -739,13 +771,13 @@  free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
 	 * segment and thus drop the lock on original fd, but hugepage dir is
 	 * now locked so we can take out another one without races.
 	 */
-	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
+	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx, NULL);
 	if (fd < 0)
 		return -1;
 
 	if (internal_conf->single_file_segments) {
 		map_offset = seg_idx * ms->len;
-		if (resize_hugefile(fd, map_offset, ms->len, false))
+		if (resize_hugefile(fd, map_offset, ms->len, false, NULL))
 			return -1;
 
 		if (--(fd_list[list_idx].count) == 0)
@@ -1743,6 +1775,12 @@  eal_memalloc_init(void)
 			RTE_LOG(ERR, EAL, "Using anonymous memory is not supported\n");
 			return -1;
 		}
+		/* safety net, should be impossible to configure */
+		if (internal_conf->hugepage_file.unlink_before_mapping &&
+				internal_conf->hugepage_file.keep_existing) {
+			RTE_LOG(ERR, EAL, "Unable both to keep existing hugepage files and to unlink them.\n");
+			return -1;
+		}
 	}
 
 	/* initialize all of the fd lists */