[RFC,3/4] mempool: introduce function to get mempool page size
Commit Message
In rte_mempool_populate_default(), we determine the page size,
which is needed for calc_size and the allocation of memory.
Move this into a function and export it; it will be used in the
next commit.
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++++++++++---------------
lib/librte_mempool/rte_mempool.h | 6 +++++
2 files changed, 37 insertions(+), 19 deletions(-)
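As a usage note, here is a minimal sketch of how a caller could use the
newly exported helper (the caller name is hypothetical and not part of
this patch; rte_bsf32() comes from rte_common.h):

#include <stddef.h>
#include <rte_common.h>
#include <rte_mempool.h>

/* Hypothetical caller: derive the page shift used for chunk sizing.
 * rte_mempool_get_page_size() stores 0 in *pg_sz when objects do not
 * need to respect page boundaries.
 */
static int
example_get_pg_shift(struct rte_mempool *mp, size_t *pg_shift)
{
	size_t pg_sz;
	int ret;

	ret = rte_mempool_get_page_size(mp, &pg_sz);
	if (ret < 0)
		return ret;

	*pg_shift = (pg_sz == 0) ? 0 : rte_bsf32(pg_sz);
	return 0;
}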
Comments
On 7/19/19 4:38 PM, Olivier Matz wrote:
> In rte_mempool_populate_default(), we determine the page size,
> which is needed for calc_size and the allocation of memory.
>
> Move this into a function and export it; it will be used in the
> next commit.
The major change here is taking page sizes into account even in the
RTE_IOVA_VA case. As I understand it, the main problem initially is
that we should respect page boundaries even in the RTE_IOVA_VA case.
It looks like we can have that even without removing the contiguous
allocation attempt.
> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> ---
> lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++++++++++---------------
> lib/librte_mempool/rte_mempool.h | 6 +++++
> 2 files changed, 37 insertions(+), 19 deletions(-)
>
> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> index 335032dc8..7def0ba68 100644
> --- a/lib/librte_mempool/rte_mempool.c
> +++ b/lib/librte_mempool/rte_mempool.c
> @@ -414,6 +414,32 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
> return ret;
> }
>
> +int
> +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
> +{
> + bool need_iova_contig_obj;
> + bool alloc_in_ext_mem;
> + int ret;
> +
> + /* check if we can retrieve a valid socket ID */
> + ret = rte_malloc_heap_socket_is_external(mp->socket_id);
> + if (ret < 0)
> + return -EINVAL;
> + alloc_in_ext_mem = (ret == 1);
> + need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
> +
> + if (!need_iova_contig_obj)
> + *pg_sz = 0;
> + else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
> + *pg_sz = get_min_page_size(mp->socket_id);
> + else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
> + *pg_sz = get_min_page_size(mp->socket_id);
> + else
> + *pg_sz = getpagesize();
> +
> + return 0;
> +}
> +
> /* Default function to populate the mempool: allocate memory in memzones,
> * and populate them. Return the number of objects added, or a negative
> * value on error.
> @@ -425,12 +451,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
> char mz_name[RTE_MEMZONE_NAMESIZE];
> const struct rte_memzone *mz;
> ssize_t mem_size;
> - size_t align, pg_sz, pg_shift;
> + size_t align, pg_sz, pg_shift = 0;
> rte_iova_t iova;
> unsigned mz_id, n;
> int ret;
> bool need_iova_contig_obj;
> - bool alloc_in_ext_mem;
>
> ret = mempool_ops_alloc_once(mp);
> if (ret != 0)
> @@ -482,26 +507,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
> * synonymous with IOVA contiguousness will not hold.
> */
>
> - /* check if we can retrieve a valid socket ID */
> - ret = rte_malloc_heap_socket_is_external(mp->socket_id);
> - if (ret < 0)
> - return -EINVAL;
> - alloc_in_ext_mem = (ret == 1);
> need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
> + ret = rte_mempool_get_page_size(mp, &pg_sz);
> + if (ret < 0)
> + return ret;
>
> - if (!need_iova_contig_obj) {
> - pg_sz = 0;
> - pg_shift = 0;
> - } else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
> - pg_sz = 0;
> - pg_shift = 0;
> - } else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
> - pg_sz = get_min_page_size(mp->socket_id);
> - pg_shift = rte_bsf32(pg_sz);
> - } else {
> - pg_sz = getpagesize();
> + if (pg_sz != 0)
> pg_shift = rte_bsf32(pg_sz);
> - }
>
> for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
> size_t min_chunk_size;
> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index 7bc10e699..00b927989 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -1692,6 +1692,12 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
> void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
> void *arg);
>
> +/**
> + * @internal Get page size used for mempool object allocation.
> + */
> +int
> +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
> +
> #ifdef __cplusplus
> }
> #endif
>
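To illustrate the boundary concern with a sketch (illustrative only,
not part of the patch): in RTE_IOVA_VA mode the IOVA space follows the
VA space, yet an object straddling two pages may still have to be
avoided, which is what the page size reported by the helper is used
for. A predicate expressing that constraint could look like:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustration: true if the object [addr, addr + obj_size) crosses a
 * boundary of pg_sz bytes (pg_sz must be a power of two).
 * pg_sz == 0 means no boundary constraint applies.
 */
static bool
obj_crosses_page(uintptr_t addr, size_t obj_size, size_t pg_sz)
{
	if (pg_sz == 0 || obj_size == 0)
		return false;
	return (addr & ~((uintptr_t)pg_sz - 1)) !=
	       ((addr + obj_size - 1) & ~((uintptr_t)pg_sz - 1));
}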
On Wed, Aug 07, 2019 at 06:21:41PM +0300, Andrew Rybchenko wrote:
> On 7/19/19 4:38 PM, Olivier Matz wrote:
> > In rte_mempool_populate_default(), we determine the page size,
> > which is needed for calc_size and the allocation of memory.
> >
> > Move this into a function and export it; it will be used in the
> > next commit.
>
> The major change here is taking page sizes into account even in the
> RTE_IOVA_VA case.
The patch should not change any functional behavior. I've fixed it in
the new version.
> As I understand it, the main problem initially is
> that we should respect page boundaries even in the RTE_IOVA_VA case.
> It looks like we can have that even without removing the contiguous
> allocation attempt.
That's true, but what would be the advantage? Removing it makes
the default populate function simpler, and helps shorten the
~50 lines of explanation in the comments :)
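For reference, a functionally equivalent helper would keep returning a
page size of 0 in the RTE_IOVA_VA case. A sketch of how that branch
presumably looks after the fix (the new version is not shown in this
thread, so this is an assumption):

	if (!need_iova_contig_obj)
		*pg_sz = 0; /* no page-boundary constraint at all */
	else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
		*pg_sz = 0; /* assumption: restores the original behavior */
	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
		*pg_sz = get_min_page_size(mp->socket_id);
	else
		*pg_sz = getpagesize();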
>
> > Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> > ---
> > lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++++++++++---------------
> > lib/librte_mempool/rte_mempool.h | 6 +++++
> > 2 files changed, 37 insertions(+), 19 deletions(-)
> >
> > diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> > index 335032dc8..7def0ba68 100644
> > --- a/lib/librte_mempool/rte_mempool.c
> > +++ b/lib/librte_mempool/rte_mempool.c
> > @@ -414,6 +414,32 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
> > return ret;
> > }
> >
> > +int
> > +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
> > +{
> > + bool need_iova_contig_obj;
> > + bool alloc_in_ext_mem;
> > + int ret;
> > +
> > + /* check if we can retrieve a valid socket ID */
> > + ret = rte_malloc_heap_socket_is_external(mp->socket_id);
> > + if (ret < 0)
> > + return -EINVAL;
> > + alloc_in_ext_mem = (ret == 1);
> > + need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
> > +
> > + if (!need_iova_contig_obj)
> > + *pg_sz = 0;
> > + else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
> > + *pg_sz = get_min_page_size(mp->socket_id);
> > + else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
> > + *pg_sz = get_min_page_size(mp->socket_id);
> > + else
> > + *pg_sz = getpagesize();
> > +
> > + return 0;
> > +}
> > +
> > /* Default function to populate the mempool: allocate memory in memzones,
> > * and populate them. Return the number of objects added, or a negative
> > * value on error.
> > @@ -425,12 +451,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
> > char mz_name[RTE_MEMZONE_NAMESIZE];
> > const struct rte_memzone *mz;
> > ssize_t mem_size;
> > - size_t align, pg_sz, pg_shift;
> > + size_t align, pg_sz, pg_shift = 0;
> > rte_iova_t iova;
> > unsigned mz_id, n;
> > int ret;
> > bool need_iova_contig_obj;
> > - bool alloc_in_ext_mem;
> >
> > ret = mempool_ops_alloc_once(mp);
> > if (ret != 0)
> > @@ -482,26 +507,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
> > * synonymous with IOVA contiguousness will not hold.
> > */
> >
> > - /* check if we can retrieve a valid socket ID */
> > - ret = rte_malloc_heap_socket_is_external(mp->socket_id);
> > - if (ret < 0)
> > - return -EINVAL;
> > - alloc_in_ext_mem = (ret == 1);
> > need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
> > + ret = rte_mempool_get_page_size(mp, &pg_sz);
> > + if (ret < 0)
> > + return ret;
> >
> > - if (!need_iova_contig_obj) {
> > - pg_sz = 0;
> > - pg_shift = 0;
> > - } else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
> > - pg_sz = 0;
> > - pg_shift = 0;
> > - } else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
> > - pg_sz = get_min_page_size(mp->socket_id);
> > - pg_shift = rte_bsf32(pg_sz);
> > - } else {
> > - pg_sz = getpagesize();
> > + if (pg_sz != 0)
> > pg_shift = rte_bsf32(pg_sz);
> > - }
> >
> > for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
> > size_t min_chunk_size;
> > diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> > index 7bc10e699..00b927989 100644
> > --- a/lib/librte_mempool/rte_mempool.h
> > +++ b/lib/librte_mempool/rte_mempool.h
> > @@ -1692,6 +1692,12 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
> > void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
> > void *arg);
> >
> > +/**
> > + * @internal Get page size used for mempool object allocation.
> > + */
> > +int
> > +rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
> > +
> > #ifdef __cplusplus
> > }
> > #endif
> >
>
@@ -414,6 +414,32 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
return ret;
}

+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
+{
+ bool need_iova_contig_obj;
+ bool alloc_in_ext_mem;
+ int ret;
+
+ /* check if we can retrieve a valid socket ID */
+ ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+ if (ret < 0)
+ return -EINVAL;
+ alloc_in_ext_mem = (ret == 1);
+ need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+
+ if (!need_iova_contig_obj)
+ *pg_sz = 0;
+ else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
+ *pg_sz = get_min_page_size(mp->socket_id);
+ else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
+ *pg_sz = get_min_page_size(mp->socket_id);
+ else
+ *pg_sz = getpagesize();
+
+ return 0;
+}
+
/* Default function to populate the mempool: allocate memory in memzones,
* and populate them. Return the number of objects added, or a negative
* value on error.
@@ -425,12 +451,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
ssize_t mem_size;
- size_t align, pg_sz, pg_shift;
+ size_t align, pg_sz, pg_shift = 0;
rte_iova_t iova;
unsigned mz_id, n;
int ret;
bool need_iova_contig_obj;
- bool alloc_in_ext_mem;

ret = mempool_ops_alloc_once(mp);
if (ret != 0)
@@ -482,26 +507,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
* synonymous with IOVA contiguousness will not hold.
*/

- /* check if we can retrieve a valid socket ID */
- ret = rte_malloc_heap_socket_is_external(mp->socket_id);
- if (ret < 0)
- return -EINVAL;
- alloc_in_ext_mem = (ret == 1);
need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ ret = rte_mempool_get_page_size(mp, &pg_sz);
+ if (ret < 0)
+ return ret;

- if (!need_iova_contig_obj) {
- pg_sz = 0;
- pg_shift = 0;
- } else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
- pg_sz = 0;
- pg_shift = 0;
- } else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
- pg_sz = get_min_page_size(mp->socket_id);
- pg_shift = rte_bsf32(pg_sz);
- } else {
- pg_sz = getpagesize();
+ if (pg_sz != 0)
pg_shift = rte_bsf32(pg_sz);
- }

for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
size_t min_chunk_size;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 7bc10e699..00b927989 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1692,6 +1692,12 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
void *arg);

+/**
+ * @internal Get page size used for mempool object allocation.
+ */
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
+
#ifdef __cplusplus
}
#endif
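As a closing usage note, here is a simplified sketch of how the
populate loop consumes the pg_sz/pg_shift pair together with the
existing rte_mempool_ops_calc_mem_size() API (n is the number of
objects left to populate; error handling trimmed):

	size_t pg_sz, pg_shift = 0;
	size_t min_chunk_size, align;
	ssize_t mem_size;

	if (rte_mempool_get_page_size(mp, &pg_sz) < 0)
		return -EINVAL;
	if (pg_sz != 0)
		pg_shift = rte_bsf32(pg_sz);

	/* ask the mempool driver how much memory is needed for n
	 * objects, with chunks grouped by pages of 2^pg_shift bytes */
	mem_size = rte_mempool_ops_calc_mem_size(mp, n, pg_shift,
						 &min_chunk_size, &align);
	if (mem_size < 0)
		return (int)mem_size;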