[v5,2/3] mempool: use generic memory management

Message ID 20200705114629.2152-3-fady@mellanox.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series build mempool on Windows |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail apply issues

Commit Message

Fady Bader July 5, 2020, 11:46 a.m. UTC
  Using generic memory management calls instead of Unix memory management
calls for mempool.

Signed-off-by: Fady Bader <fady@mellanox.com>
---
 lib/librte_mempool/rte_mempool.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
  

Comments

Andrew Rybchenko July 5, 2020, 11:50 a.m. UTC | #1
On 7/5/20 2:46 PM, Fady Bader wrote:
> Using generic memory management calls instead of Unix memory management
> calls for mempool.
> 
> Signed-off-by: Fady Bader <fady@mellanox.com>

Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
  
Dmitry Kozlyuk July 5, 2020, 12:08 p.m. UTC | #2
On Sun,  5 Jul 2020 14:46:28 +0300, Fady Bader wrote:
[snip]
>  /* populate the mempool with an anonymous mapping */
> @@ -740,20 +741,20 @@ rte_mempool_populate_anon(struct rte_mempool *mp)
>  	}
>  
>  	/* get chunk of virtually continuous memory */
> -	addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
> -		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
> -	if (addr == MAP_FAILED) {
> +	addr = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
> +		RTE_MAP_SHARED | RTE_MAP_ANONYMOUS, -1, 0);
> +	if (addr == NULL) {
>  		rte_errno = errno;

rte_errno is set by rte_mem_map(); using errno here is incorrect on Windows.

>  		return 0;
>  	}
>  	/* can't use MMAP_LOCKED, it does not exist on BSD */
> -	if (mlock(addr, size) < 0) {
> +	if (rte_mem_lock(addr, size) < 0) {
>  		rte_errno = errno;

Ditto.

[snip]
  
Fady Bader July 5, 2020, 12:25 p.m. UTC | #3
> -----Original Message-----
> From: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>
> Sent: Sunday, July 5, 2020 3:08 PM
> To: Fady Bader <fady@mellanox.com>
> Cc: dev@dpdk.org; Thomas Monjalon <thomas@monjalon.net>; Tasnim Bashar
> <tbashar@mellanox.com>; Tal Shnaiderman <talshn@mellanox.com>; Yohad Tor
> <yohadt@mellanox.com>; harini.ramakrishnan@microsoft.com;
> ocardona@microsoft.com; pallavi.kadam@intel.com; ranjit.menon@intel.com;
> olivier.matz@6wind.com; arybchenko@solarflare.com; mdr@ashroe.eu;
> nhorman@tuxdriver.com
> Subject: Re: [PATCH v5 2/3] mempool: use generic memory management
> 
> On Sun,  5 Jul 2020 14:46:28 +0300, Fady Bader wrote:
> [snip]
> >  /* populate the mempool with an anonymous mapping */ @@ -740,20
> > +741,20 @@ rte_mempool_populate_anon(struct rte_mempool *mp)
> >  	}
> >
> >  	/* get chunk of virtually continuous memory */
> > -	addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
> > -		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
> > -	if (addr == MAP_FAILED) {
> > +	addr = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
> > +		RTE_MAP_SHARED | RTE_MAP_ANONYMOUS, -1, 0);
> > +	if (addr == NULL) {
> >  		rte_errno = errno;
> 
> rte_errno is set by rte_mem_map(), using errno here is incorrect on Windows.

Ok, I'll send a new version soon.

> 
> >  		return 0;
> >  	}
> >  	/* can't use MMAP_LOCKED, it does not exist on BSD */
> > -	if (mlock(addr, size) < 0) {
> > +	if (rte_mem_lock(addr, size) < 0) {
> >  		rte_errno = errno;
> 
> Ditto.
> 
> [snip]
> 
> --
> Dmitry Kozlyuk
  

Patch

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 0bde995b52..1f346dcb87 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -12,7 +12,6 @@ 
 #include <inttypes.h>
 #include <errno.h>
 #include <sys/queue.h>
-#include <sys/mman.h>
 
 #include <rte_common.h>
 #include <rte_log.h>
@@ -32,6 +31,8 @@ 
 #include <rte_spinlock.h>
 #include <rte_tailq.h>
 #include <rte_function_versioning.h>
+#include <rte_eal_paging.h>
+
 
 #include "rte_mempool.h"
 #include "rte_mempool_trace.h"
@@ -148,7 +149,7 @@  get_min_page_size(int socket_id)
 
 	rte_memseg_list_walk(find_min_pagesz, &wa);
 
-	return wa.min == SIZE_MAX ? (size_t) getpagesize() : wa.min;
+	return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
 }
 
 
@@ -526,7 +527,7 @@  rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
 	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
 		*pg_sz = get_min_page_size(mp->socket_id);
 	else
-		*pg_sz = getpagesize();
+		*pg_sz = rte_mem_page_size();
 
 	rte_mempool_trace_get_page_size(mp, *pg_sz);
 	return 0;
@@ -686,7 +687,7 @@  get_anon_size(const struct rte_mempool *mp)
 	size_t min_chunk_size;
 	size_t align;
 
-	pg_sz = getpagesize();
+	pg_sz = rte_mem_page_size();
 	pg_shift = rte_bsf32(pg_sz);
 	size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
 					     &min_chunk_size, &align);
@@ -710,7 +711,7 @@  rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
 	if (size < 0)
 		return;
 
-	munmap(opaque, size);
+	rte_mem_unmap(opaque, size);
 }
 
 /* populate the mempool with an anonymous mapping */
@@ -740,20 +741,20 @@  rte_mempool_populate_anon(struct rte_mempool *mp)
 	}
 
 	/* get chunk of virtually continuous memory */
-	addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
-		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
-	if (addr == MAP_FAILED) {
+	addr = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
+		RTE_MAP_SHARED | RTE_MAP_ANONYMOUS, -1, 0);
+	if (addr == NULL) {
 		rte_errno = errno;
 		return 0;
 	}
 	/* can't use MMAP_LOCKED, it does not exist on BSD */
-	if (mlock(addr, size) < 0) {
+	if (rte_mem_lock(addr, size) < 0) {
 		rte_errno = errno;
-		munmap(addr, size);
+		rte_mem_unmap(addr, size);
 		return 0;
 	}
 
-	ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
+	ret = rte_mempool_populate_virt(mp, addr, size, rte_mem_page_size(),
 		rte_mempool_memchunk_anon_free, addr);
 	if (ret == 0) /* should not happen */
 		ret = -ENOBUFS;