[3/5] mempool: remove optimistic IOVA-contiguous allocation

Message ID 20191028140122.9592-4-olivier.matz@6wind.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series mempool: avoid objects allocations across pages

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     Compilation issues

Commit Message

Olivier Matz Oct. 28, 2019, 2:01 p.m. UTC
  The previous commit reduced the amount of memory required when
populating the mempool with non-iova-contiguous memory.

Since there is no big advantage to having a fully iova-contiguous mempool
when it is not explicitly requested, remove this code; it simplifies the
populate function.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mempool/rte_mempool.c | 47 ++++++--------------------------
 1 file changed, 8 insertions(+), 39 deletions(-)
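
For quick reference, here is a condensed sketch of the reservation loop as
it looks after this patch (boiled down from the diff at the bottom of this
page; error handling and the zero-size fallback reservation are omitted):

	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
		size_t min_chunk_size;

		/* how much memory is needed for n objects, and how large
		 * the minimum IOVA-contiguous chunk must be
		 */
		mem_size = rte_mempool_ops_calc_mem_size(
			mp, n, pg_shift, &min_chunk_size, &align);

		/* request IOVA-contiguous memory only when the driver
		 * needs the whole chunk to be contiguous
		 */
		if (min_chunk_size == (size_t)mem_size)
			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;

		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
				mp->socket_id, mz_flags, align);

		/* ... populate the mempool from mz ... */
	}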
  

Comments

Andrew Rybchenko Oct. 29, 2019, 10:25 a.m. UTC | #1
On 10/28/19 5:01 PM, Olivier Matz wrote:
> The previous commit reduced the amount of required memory when
> populating the mempool with non iova-contiguous memory.
>
> Since there is no big advantage to have a fully iova-contiguous mempool
> if it is not explicitly asked, remove this code, it simplifies the
> populate function.
>
> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>

One comment below, other than that
Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>

> ---
>   lib/librte_mempool/rte_mempool.c | 47 ++++++--------------------------
>   1 file changed, 8 insertions(+), 39 deletions(-)
>
> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> index 3275e48c9..5e1f202dc 100644
> --- a/lib/librte_mempool/rte_mempool.c
> +++ b/lib/librte_mempool/rte_mempool.c

[snip]

> @@ -531,36 +521,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)
>   			goto fail;
>   		}
>   
> -		flags = mz_flags;
> -
>   		/* if we're trying to reserve contiguous memory, add appropriate
>   		 * memzone flag.
>   		 */
> -		if (try_iova_contig_mempool)
> -			flags |= RTE_MEMZONE_IOVA_CONTIG;
> +		if (min_chunk_size == (size_t)mem_size)
> +			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
>   
>   		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
> -				mp->socket_id, flags, align);
> -
> -		/* if we were trying to allocate contiguous memory, failed and
> -		 * minimum required contiguous chunk fits minimum page, adjust
> -		 * memzone size to the page size, and try again.
> -		 */
> -		if (mz == NULL && try_iova_contig_mempool &&
> -				min_chunk_size <= pg_sz) {
> -			try_iova_contig_mempool = false;
> -			flags &= ~RTE_MEMZONE_IOVA_CONTIG;
> -
> -			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
> -					pg_shift, &min_chunk_size, &align);
> -			if (mem_size < 0) {
> -				ret = mem_size;
> -				goto fail;
> -			}
> +				mp->socket_id, mz_flags, align);
>   
> -			mz = rte_memzone_reserve_aligned(mz_name, mem_size,
> -				mp->socket_id, flags, align);
> -		}
>   		/* don't try reserving with 0 size if we were asked to reserve
>   		 * IOVA-contiguous memory.
>   		 */

[snip]

> @@ -587,7 +556,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
>   		else
>   			iova = RTE_BAD_IOVA;
>   
> -		if (try_iova_contig_mempool || pg_sz == 0)
> +		if (pg_sz == 0)

I think (mz_flags & RTE_MEMZONE_IOVA_CONTIG) is lost here.

>   			ret = rte_mempool_populate_iova(mp, mz->addr,
>   				iova, mz->len,
>   				rte_mempool_memchunk_mz_free,
  
Olivier Matz Oct. 29, 2019, 5:20 p.m. UTC | #2
On Tue, Oct 29, 2019 at 01:25:10PM +0300, Andrew Rybchenko wrote:
> On 10/28/19 5:01 PM, Olivier Matz wrote:
> > The previous commit reduced the amount of required memory when
> > populating the mempool with non iova-contiguous memory.
> > 
> > Since there is no big advantage to have a fully iova-contiguous mempool
> > if it is not explicitly asked, remove this code, it simplifies the
> > populate function.
> > 
> > Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> 
> One comment below, other than that
> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
> 
> > ---
> >   lib/librte_mempool/rte_mempool.c | 47 ++++++--------------------------
> >   1 file changed, 8 insertions(+), 39 deletions(-)
> > 
> > diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> > index 3275e48c9..5e1f202dc 100644
> > --- a/lib/librte_mempool/rte_mempool.c
> > +++ b/lib/librte_mempool/rte_mempool.c
> 
> [snip]
> 
> > @@ -531,36 +521,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)
> >   			goto fail;
> >   		}
> > -		flags = mz_flags;
> > -
> >   		/* if we're trying to reserve contiguous memory, add appropriate
> >   		 * memzone flag.
> >   		 */
> > -		if (try_iova_contig_mempool)
> > -			flags |= RTE_MEMZONE_IOVA_CONTIG;
> > +		if (min_chunk_size == (size_t)mem_size)
> > +			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
> >   		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
> > -				mp->socket_id, flags, align);
> > -
> > -		/* if we were trying to allocate contiguous memory, failed and
> > -		 * minimum required contiguous chunk fits minimum page, adjust
> > -		 * memzone size to the page size, and try again.
> > -		 */
> > -		if (mz == NULL && try_iova_contig_mempool &&
> > -				min_chunk_size <= pg_sz) {
> > -			try_iova_contig_mempool = false;
> > -			flags &= ~RTE_MEMZONE_IOVA_CONTIG;
> > -
> > -			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
> > -					pg_shift, &min_chunk_size, &align);
> > -			if (mem_size < 0) {
> > -				ret = mem_size;
> > -				goto fail;
> > -			}
> > +				mp->socket_id, mz_flags, align);
> > -			mz = rte_memzone_reserve_aligned(mz_name, mem_size,
> > -				mp->socket_id, flags, align);
> > -		}
> >   		/* don't try reserving with 0 size if we were asked to reserve
> >   		 * IOVA-contiguous memory.
> >   		 */
> 
> [snip]
> 
> > @@ -587,7 +556,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
> >   		else
> >   			iova = RTE_BAD_IOVA;
> > -		if (try_iova_contig_mempool || pg_sz == 0)
> > +		if (pg_sz == 0)
> 
> I think (mz_flags & RTE_MEMZONE_IOVA_CONTIG) is lost here.

Do you mean we should do instead
	if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
		populate_iova()
	else
		populate_virt()
?

I would say yes, but it should be removed in patch 5/5.
In the end, we don't want objects to span pages, even
with RTE_MEMZONE_IOVA_CONTIG.
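
Spelled out in C, that proposed condition would look like this (a sketch
using the names from the quoted diff; "opaque" stands for the free-callback
argument that is elided in the quote, and patch 5/5 reworks this spot
anyway):

	if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
		ret = rte_mempool_populate_iova(mp, mz->addr, iova,
				mz->len, rte_mempool_memchunk_mz_free,
				opaque);
	else
		ret = rte_mempool_populate_virt(mp, mz->addr, mz->len,
				pg_sz, rte_mempool_memchunk_mz_free,
				opaque);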

> 
> >   			ret = rte_mempool_populate_iova(mp, mz->addr,
> >   				iova, mz->len,
> >   				rte_mempool_memchunk_mz_free,
>
  
Andrew Rybchenko Oct. 30, 2019, 7:36 a.m. UTC | #3
On 10/29/19 8:20 PM, Olivier Matz wrote:
> On Tue, Oct 29, 2019 at 01:25:10PM +0300, Andrew Rybchenko wrote:
>> On 10/28/19 5:01 PM, Olivier Matz wrote:
>>> The previous commit reduced the amount of required memory when
>>> populating the mempool with non iova-contiguous memory.
>>>
>>> Since there is no big advantage to have a fully iova-contiguous mempool
>>> if it is not explicitly asked, remove this code, it simplifies the
>>> populate function.
>>>
>>> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
>> One comment below, other than that
>> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
>>
>>> ---
>>>    lib/librte_mempool/rte_mempool.c | 47 ++++++--------------------------
>>>    1 file changed, 8 insertions(+), 39 deletions(-)
>>>
>>> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
>>> index 3275e48c9..5e1f202dc 100644
>>> --- a/lib/librte_mempool/rte_mempool.c
>>> +++ b/lib/librte_mempool/rte_mempool.c
>> [snip]
>>
>>> @@ -531,36 +521,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)
>>>    			goto fail;
>>>    		}
>>> -		flags = mz_flags;
>>> -
>>>    		/* if we're trying to reserve contiguous memory, add appropriate
>>>    		 * memzone flag.
>>>    		 */
>>> -		if (try_iova_contig_mempool)
>>> -			flags |= RTE_MEMZONE_IOVA_CONTIG;
>>> +		if (min_chunk_size == (size_t)mem_size)
>>> +			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
>>>    		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
>>> -				mp->socket_id, flags, align);
>>> -
>>> -		/* if we were trying to allocate contiguous memory, failed and
>>> -		 * minimum required contiguous chunk fits minimum page, adjust
>>> -		 * memzone size to the page size, and try again.
>>> -		 */
>>> -		if (mz == NULL && try_iova_contig_mempool &&
>>> -				min_chunk_size <= pg_sz) {
>>> -			try_iova_contig_mempool = false;
>>> -			flags &= ~RTE_MEMZONE_IOVA_CONTIG;
>>> -
>>> -			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
>>> -					pg_shift, &min_chunk_size, &align);
>>> -			if (mem_size < 0) {
>>> -				ret = mem_size;
>>> -				goto fail;
>>> -			}
>>> +				mp->socket_id, mz_flags, align);
>>> -			mz = rte_memzone_reserve_aligned(mz_name, mem_size,
>>> -				mp->socket_id, flags, align);
>>> -		}
>>>    		/* don't try reserving with 0 size if we were asked to reserve
>>>    		 * IOVA-contiguous memory.
>>>    		 */
>> [snip]
>>
>>> @@ -587,7 +556,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
>>>    		else
>>>    			iova = RTE_BAD_IOVA;
>>> -		if (try_iova_contig_mempool || pg_sz == 0)
>>> +		if (pg_sz == 0)
>> I think (mz_flags & RTE_MEMZONE_IOVA_CONTIG) is lost here.
> Do you mean we should do instead
> 	if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
> 		populate_iova()
> 	else
> 		populate_virt()
> ?

Yes.

> I would say yes, but it should be removed in patch 5/5.
> At the end, we don't want objects to be across pages, even
> with RTE_MEMZONE_IOVA_CONTIG.
>
>>>    			ret = rte_mempool_populate_iova(mp, mz->addr,
>>>    				iova, mz->len,
>>>    				rte_mempool_memchunk_mz_free,
  
Andrew Rybchenko Oct. 30, 2019, 7:44 a.m. UTC | #4
On 10/30/19 10:36 AM, Andrew Rybchenko wrote:
> On 10/29/19 8:20 PM, Olivier Matz wrote:
>> On Tue, Oct 29, 2019 at 01:25:10PM +0300, Andrew Rybchenko wrote:
>>> On 10/28/19 5:01 PM, Olivier Matz wrote:
>>>> The previous commit reduced the amount of required memory when
>>>> populating the mempool with non iova-contiguous memory.
>>>>
>>>> Since there is no big advantage to have a fully iova-contiguous 
>>>> mempool
>>>> if it is not explicitly asked, remove this code, it simplifies the
>>>> populate function.
>>>>
>>>> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
>>> One comment below, other than that
>>> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
>>>
>>>> ---
>>>>    lib/librte_mempool/rte_mempool.c | 47 
>>>> ++++++--------------------------
>>>>    1 file changed, 8 insertions(+), 39 deletions(-)
>>>>
>>>> diff --git a/lib/librte_mempool/rte_mempool.c 
>>>> b/lib/librte_mempool/rte_mempool.c
>>>> index 3275e48c9..5e1f202dc 100644
>>>> --- a/lib/librte_mempool/rte_mempool.c
>>>> +++ b/lib/librte_mempool/rte_mempool.c
>>> [snip]
>>>
>>>> @@ -531,36 +521,15 @@ rte_mempool_populate_default(struct 
>>>> rte_mempool *mp)
>>>>                goto fail;
>>>>            }
>>>> -        flags = mz_flags;
>>>> -
>>>>            /* if we're trying to reserve contiguous memory, add 
>>>> appropriate
>>>>             * memzone flag.
>>>>             */
>>>> -        if (try_iova_contig_mempool)
>>>> -            flags |= RTE_MEMZONE_IOVA_CONTIG;
>>>> +        if (min_chunk_size == (size_t)mem_size)
>>>> +            mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
>>>>            mz = rte_memzone_reserve_aligned(mz_name, mem_size,
>>>> -                mp->socket_id, flags, align);
>>>> -
>>>> -        /* if we were trying to allocate contiguous memory, failed 
>>>> and
>>>> -         * minimum required contiguous chunk fits minimum page, 
>>>> adjust
>>>> -         * memzone size to the page size, and try again.
>>>> -         */
>>>> -        if (mz == NULL && try_iova_contig_mempool &&
>>>> -                min_chunk_size <= pg_sz) {
>>>> -            try_iova_contig_mempool = false;
>>>> -            flags &= ~RTE_MEMZONE_IOVA_CONTIG;
>>>> -
>>>> -            mem_size = rte_mempool_ops_calc_mem_size(mp, n,
>>>> -                    pg_shift, &min_chunk_size, &align);
>>>> -            if (mem_size < 0) {
>>>> -                ret = mem_size;
>>>> -                goto fail;
>>>> -            }
>>>> +                mp->socket_id, mz_flags, align);
>>>> -            mz = rte_memzone_reserve_aligned(mz_name, mem_size,
>>>> -                mp->socket_id, flags, align);
>>>> -        }
>>>>            /* don't try reserving with 0 size if we were asked to 
>>>> reserve
>>>>             * IOVA-contiguous memory.
>>>>             */
>>> [snip]
>>>
>>>> @@ -587,7 +556,7 @@ rte_mempool_populate_default(struct rte_mempool 
>>>> *mp)
>>>>            else
>>>>                iova = RTE_BAD_IOVA;
>>>> -        if (try_iova_contig_mempool || pg_sz == 0)
>>>> +        if (pg_sz == 0)
>>> I think (mz_flags & RTE_MEMZONE_IOVA_CONTIG) is lost here.
>> Do you mean we should do instead
>>     if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
>>         populate_iova()
>>     else
>>         populate_virt()
>> ?
>
> Yes.
>
>> I would say yes, but it should be removed in patch 5/5.
>> At the end, we don't want objects to be across pages, even
>> with RTE_MEMZONE_IOVA_CONTIG.

Thinking more about it, I don't understand why. If we know that
the memzone is IOVA-contiguous, why should we not populate
it that way? It does not prevent patch 5/5 from doing its job.

>>>>                ret = rte_mempool_populate_iova(mp, mz->addr,
>>>>                    iova, mz->len,
>>>>                    rte_mempool_memchunk_mz_free,
  
Olivier Matz Oct. 30, 2019, 10:38 a.m. UTC | #5
On Wed, Oct 30, 2019 at 10:44:04AM +0300, Andrew Rybchenko wrote:
> On 10/30/19 10:36 AM, Andrew Rybchenko wrote:
> > On 10/29/19 8:20 PM, Olivier Matz wrote:
> > > On Tue, Oct 29, 2019 at 01:25:10PM +0300, Andrew Rybchenko wrote:
> > > > On 10/28/19 5:01 PM, Olivier Matz wrote:
> > > > > The previous commit reduced the amount of required memory when
> > > > > populating the mempool with non iova-contiguous memory.
> > > > > 
> > > > > Since there is no big advantage to have a fully
> > > > > iova-contiguous mempool
> > > > > if it is not explicitly asked, remove this code, it simplifies the
> > > > > populate function.
> > > > > 
> > > > > Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> > > > One comment below, other than that
> > > > Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
> > > > 
> > > > > ---
> > > > >    lib/librte_mempool/rte_mempool.c | 47
> > > > > ++++++--------------------------
> > > > >    1 file changed, 8 insertions(+), 39 deletions(-)
> > > > > 
> > > > > diff --git a/lib/librte_mempool/rte_mempool.c
> > > > > b/lib/librte_mempool/rte_mempool.c
> > > > > index 3275e48c9..5e1f202dc 100644
> > > > > --- a/lib/librte_mempool/rte_mempool.c
> > > > > +++ b/lib/librte_mempool/rte_mempool.c
> > > > [snip]
> > > > 
> > > > > @@ -531,36 +521,15 @@ rte_mempool_populate_default(struct
> > > > > rte_mempool *mp)
> > > > >                goto fail;
> > > > >            }
> > > > > -        flags = mz_flags;
> > > > > -
> > > > >            /* if we're trying to reserve contiguous memory,
> > > > > add appropriate
> > > > >             * memzone flag.
> > > > >             */
> > > > > -        if (try_iova_contig_mempool)
> > > > > -            flags |= RTE_MEMZONE_IOVA_CONTIG;
> > > > > +        if (min_chunk_size == (size_t)mem_size)
> > > > > +            mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
> > > > >            mz = rte_memzone_reserve_aligned(mz_name, mem_size,
> > > > > -                mp->socket_id, flags, align);
> > > > > -
> > > > > -        /* if we were trying to allocate contiguous memory,
> > > > > failed and
> > > > > -         * minimum required contiguous chunk fits minimum
> > > > > page, adjust
> > > > > -         * memzone size to the page size, and try again.
> > > > > -         */
> > > > > -        if (mz == NULL && try_iova_contig_mempool &&
> > > > > -                min_chunk_size <= pg_sz) {
> > > > > -            try_iova_contig_mempool = false;
> > > > > -            flags &= ~RTE_MEMZONE_IOVA_CONTIG;
> > > > > -
> > > > > -            mem_size = rte_mempool_ops_calc_mem_size(mp, n,
> > > > > -                    pg_shift, &min_chunk_size, &align);
> > > > > -            if (mem_size < 0) {
> > > > > -                ret = mem_size;
> > > > > -                goto fail;
> > > > > -            }
> > > > > +                mp->socket_id, mz_flags, align);
> > > > > -            mz = rte_memzone_reserve_aligned(mz_name, mem_size,
> > > > > -                mp->socket_id, flags, align);
> > > > > -        }
> > > > >            /* don't try reserving with 0 size if we were
> > > > > asked to reserve
> > > > >             * IOVA-contiguous memory.
> > > > >             */
> > > > [snip]
> > > > 
> > > > > @@ -587,7 +556,7 @@ rte_mempool_populate_default(struct
> > > > > rte_mempool *mp)
> > > > >            else
> > > > >                iova = RTE_BAD_IOVA;
> > > > > -        if (try_iova_contig_mempool || pg_sz == 0)
> > > > > +        if (pg_sz == 0)
> > > > I think (mz_flags & RTE_MEMZONE_IOVA_CONTIG) is lost here.
> > > Do you mean we should do instead
> > >     if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
> > >         populate_iova()
> > >     else
> > >         populate_virt()
> > > ?
> > 
> > Yes.
> > 
> > > I would say yes, but it should be removed in patch 5/5.
> > > At the end, we don't want objects to be across pages, even
> > > with RTE_MEMZONE_IOVA_CONTIG.
> 
> Thinking more about it I don't understand why. If we know that
> the memzone is IOVA contiguous, why we should not populate
> it this way. It does not prevent patch 5/5 to do the job.

You are right. The page-crossing check is done in the ops->populate,
so it is also done by populate_iova().
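
For illustration, the per-object check behind that guarantee can be
sketched like this (obj_crosses_page() is a hypothetical helper written
for this example, not a DPDK API):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* true if an object of elt_sz bytes placed at vaddr would span a
	 * boundary of pg_sz bytes (pg_sz is a power of two, 0 means no
	 * page limit)
	 */
	static bool
	obj_crosses_page(uintptr_t vaddr, size_t elt_sz, size_t pg_sz)
	{
		if (pg_sz == 0)
			return false;
		return (vaddr & ~(pg_sz - 1)) !=
		       ((vaddr + elt_sz - 1) & ~(pg_sz - 1));
	}

An ops->populate implementation that skips to the next page start whenever
this returns true never places an object across a page, regardless of
whether it was reached through populate_iova() or populate_virt().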

> > > > >                ret = rte_mempool_populate_iova(mp, mz->addr,
> > > > >                    iova, mz->len,
> > > > >                    rte_mempool_memchunk_mz_free,
>
  

Patch

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 3275e48c9..5e1f202dc 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -427,7 +427,6 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 	unsigned mz_id, n;
 	int ret;
 	bool need_iova_contig_obj;
-	bool try_iova_contig_mempool;
 	bool alloc_in_ext_mem;
 
 	ret = mempool_ops_alloc_once(mp);
@@ -480,9 +479,7 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 	 * We also have to take into account the fact that memory that we're
 	 * going to allocate from can belong to an externally allocated memory
 	 * area, in which case the assumption of IOVA as VA mode being
-	 * synonymous with IOVA contiguousness will not hold. We should also try
-	 * to go for contiguous memory even if we're in no-huge mode, because
-	 * external memory may in fact be IOVA-contiguous.
+	 * synonymous with IOVA contiguousness will not hold.
 	 */
 
 	/* check if we can retrieve a valid socket ID */
@@ -491,7 +488,6 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 		return -EINVAL;
 	alloc_in_ext_mem = (ret == 1);
 	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
-	try_iova_contig_mempool = false;
 
 	if (!need_iova_contig_obj) {
 		pg_sz = 0;
@@ -500,7 +496,6 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 		pg_sz = 0;
 		pg_shift = 0;
 	} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
-		try_iova_contig_mempool = true;
 		pg_sz = get_min_page_size(mp->socket_id);
 		pg_shift = rte_bsf32(pg_sz);
 	} else {
@@ -510,14 +505,9 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
 		size_t min_chunk_size;
-		unsigned int flags;
 
-		if (try_iova_contig_mempool || pg_sz == 0)
-			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
-					0, &min_chunk_size, &align);
-		else
-			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
-					pg_shift, &min_chunk_size, &align);
+		mem_size = rte_mempool_ops_calc_mem_size(
+			mp, n, pg_shift, &min_chunk_size, &align);
 
 		if (mem_size < 0) {
 			ret = mem_size;
@@ -531,36 +521,15 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 			goto fail;
 		}
 
-		flags = mz_flags;
-
 		/* if we're trying to reserve contiguous memory, add appropriate
 		 * memzone flag.
 		 */
-		if (try_iova_contig_mempool)
-			flags |= RTE_MEMZONE_IOVA_CONTIG;
+		if (min_chunk_size == (size_t)mem_size)
+			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
 
 		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
-				mp->socket_id, flags, align);
-
-		/* if we were trying to allocate contiguous memory, failed and
-		 * minimum required contiguous chunk fits minimum page, adjust
-		 * memzone size to the page size, and try again.
-		 */
-		if (mz == NULL && try_iova_contig_mempool &&
-				min_chunk_size <= pg_sz) {
-			try_iova_contig_mempool = false;
-			flags &= ~RTE_MEMZONE_IOVA_CONTIG;
-
-			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
-					pg_shift, &min_chunk_size, &align);
-			if (mem_size < 0) {
-				ret = mem_size;
-				goto fail;
-			}
+				mp->socket_id, mz_flags, align);
 
-			mz = rte_memzone_reserve_aligned(mz_name, mem_size,
-				mp->socket_id, flags, align);
-		}
 		/* don't try reserving with 0 size if we were asked to reserve
 		 * IOVA-contiguous memory.
 		 */
@@ -569,7 +538,7 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 			 * have
 			 */
 			mz = rte_memzone_reserve_aligned(mz_name, 0,
-					mp->socket_id, flags, align);
+					mp->socket_id, mz_flags, align);
 		}
 		if (mz == NULL) {
 			ret = -rte_errno;
@@ -587,7 +556,7 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 		else
 			iova = RTE_BAD_IOVA;
 
-		if (try_iova_contig_mempool || pg_sz == 0)
+		if (pg_sz == 0)
 			ret = rte_mempool_populate_iova(mp, mz->addr,
 				iova, mz->len,
 				rte_mempool_memchunk_mz_free,