From patchwork Fri Jul 19 13:38:42 2019
X-Patchwork-Submitter: Olivier Matz
X-Patchwork-Id: 56782
From: Olivier Matz
To: Vamsi Krishna Attunuru, dev@dpdk.org
Cc: Andrew Rybchenko, Thomas Monjalon, Anatoly Burakov,
 Jerin Jacob Kollanukkaran, Kokkilagadda, Ferruh Yigit
Date: Fri, 19 Jul 2019 15:38:42 +0200
Message-Id: <20190719133845.32432-2-olivier.matz@6wind.com>
In-Reply-To: <20190719133845.32432-1-olivier.matz@6wind.com>
References: <20190719133845.32432-1-olivier.matz@6wind.com>
Subject: [dpdk-dev] [RFC 1/4] mempool: clarify default populate function

No functional change. Clarify the populate function to make the next
commit easier to understand.

Rename the variables:
- to avoid negation in the name
- to have more understandable names

Remove a useless variable (no_pageshift is equivalent to pg_sz == 0).

Remove the duplicate assignment of the "external" variable.

Signed-off-by: Olivier Matz
Reviewed-by: Andrew Rybchenko
---
 lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++++++-------------------
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 7260ce0be..0f29e8712 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -429,24 +429,18 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	rte_iova_t iova;
 	unsigned mz_id, n;
 	int ret;
-	bool no_contig, try_contig, no_pageshift, external;
+	bool need_iova_contig_obj;
+	bool try_iova_contig_mempool;
+	bool alloc_in_ext_mem;
 
 	ret = mempool_ops_alloc_once(mp);
 	if (ret != 0)
 		return ret;
 
-	/* check if we can retrieve a valid socket ID */
-	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
-	if (ret < 0)
-		return -EINVAL;
-	external = ret;
-
 	/* mempool must not be populated */
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG;
-
 	/*
 	 * the following section calculates page shift and page size values.
 	 *
@@ -496,16 +490,23 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * to go for contiguous memory even if we're in no-huge mode, because
 	 * external memory may in fact be IOVA-contiguous.
 	 */
-	external = rte_malloc_heap_socket_is_external(mp->socket_id) == 1;
-	no_pageshift = no_contig ||
-	    (!external && rte_eal_iova_mode() == RTE_IOVA_VA);
-	try_contig = !no_contig && !no_pageshift &&
-	    (rte_eal_has_hugepages() || external);
 
-	if (no_pageshift) {
+	/* check if we can retrieve a valid socket ID */
+	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+	if (ret < 0)
+		return -EINVAL;
+	alloc_in_ext_mem = (ret == 1);
+	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+	try_iova_contig_mempool = false;
+
+	if (!need_iova_contig_obj) {
+		pg_sz = 0;
+		pg_shift = 0;
+	} else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
 		pg_sz = 0;
 		pg_shift = 0;
-	} else if (try_contig) {
+	} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
+		try_iova_contig_mempool = true;
 		pg_sz = get_min_page_size(mp->socket_id);
 		pg_shift = rte_bsf32(pg_sz);
 	} else {
@@ -517,7 +518,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		size_t min_chunk_size;
 		unsigned int flags;
 
-		if (try_contig || no_pageshift)
+		if (try_iova_contig_mempool || pg_sz == 0)
 			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
 				0, &min_chunk_size, &align);
 		else
@@ -541,7 +542,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		/* if we're trying to reserve contiguous memory, add appropriate
 		 * memzone flag.
 		 */
-		if (try_contig)
+		if (try_iova_contig_mempool)
 			flags |= RTE_MEMZONE_IOVA_CONTIG;
 
 		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
@@ -551,8 +552,9 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		 * minimum required contiguous chunk fits minimum page, adjust
 		 * memzone size to the page size, and try again.
 		 */
-		if (mz == NULL && try_contig && min_chunk_size <= pg_sz) {
-			try_contig = false;
+		if (mz == NULL && try_iova_contig_mempool &&
+				min_chunk_size <= pg_sz) {
+			try_iova_contig_mempool = false;
 			flags &= ~RTE_MEMZONE_IOVA_CONTIG;
 
 			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
@@ -587,12 +589,12 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 			goto fail;
 		}
 
-		if (no_contig)
-			iova = RTE_BAD_IOVA;
-		else
+		if (need_iova_contig_obj)
 			iova = mz->iova;
+		else
+			iova = RTE_BAD_IOVA;
 
-		if (no_pageshift || try_contig)
+		if (try_iova_contig_mempool || pg_sz == 0)
			ret = rte_mempool_populate_iova(mp, mz->addr,
				iova, mz->len,
				rte_mempool_memchunk_mz_free,
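Note: the renamed flags may be easier to follow in isolation. The sketch below
condenses the three cases of the refactored block into a standalone helper; the
function name and the boolean parameters standing in for the EAL queries
(rte_malloc_heap_socket_is_external(), rte_eal_iova_mode(),
rte_eal_has_hugepages()) are invented for illustration and are not part of the
patch.

#include <stdbool.h>
#include <stddef.h>

/* Condensed view of rte_mempool_populate_default() after RFC 1/4. */
static void
decide_alloc_mode(bool need_iova_contig_obj, bool alloc_in_ext_mem,
		bool iova_is_va, bool has_hugepages,
		size_t min_page_sz, size_t sys_page_sz,
		size_t *pg_sz, bool *try_iova_contig_mempool)
{
	*try_iova_contig_mempool = false;

	if (!need_iova_contig_obj)
		*pg_sz = 0;              /* objects need not be IOVA-contiguous */
	else if (!alloc_in_ext_mem && iova_is_va)
		*pg_sz = 0;              /* VA == IOVA: any chunk is contiguous */
	else if (has_hugepages || alloc_in_ext_mem) {
		*try_iova_contig_mempool = true; /* ask for one contiguous memzone */
		*pg_sz = min_page_sz;
	} else
		*pg_sz = sys_page_sz;    /* no-huge mode: populate page by page */
}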
From patchwork Fri Jul 19 13:38:43 2019
X-Patchwork-Submitter: Olivier Matz
X-Patchwork-Id: 56783
From: Olivier Matz
To: Vamsi Krishna Attunuru, dev@dpdk.org
Cc: Andrew Rybchenko, Thomas Monjalon, Anatoly Burakov,
 Jerin Jacob Kollanukkaran, Kokkilagadda, Ferruh Yigit
Date: Fri, 19 Jul 2019 15:38:43 +0200
Message-Id: <20190719133845.32432-3-olivier.matz@6wind.com>
In-Reply-To: <20190719133845.32432-1-olivier.matz@6wind.com>
References: <20190719133845.32432-1-olivier.matz@6wind.com>
Subject: [dpdk-dev] [RFC 2/4] mempool: unalign size when calculating required mem amount

The size returned by rte_mempool_op_calc_mem_size_default() is aligned
to the specified page size. This means that with big pages, the returned
amount is more than what we really need to populate the mempool.

This problem is mitigated by the allocation method of
rte_mempool_populate_default(): in some conditions (when
try_iova_contig_mempool=true), it first tries to allocate all object
memory in an IOVA-contiguous area, without the alignment constraint. If
that fails, it falls back to the big aligned allocation, which can
itself fall back to several smaller allocations.

This commit changes rte_mempool_op_calc_mem_size_default() to return the
unaligned amount of memory (the alignment constraint is still returned
via the *align argument), and removes the optimistic contiguous
allocation done when try_iova_contig_mempool=true.

This makes the amount of allocated memory more predictable: it is more
than the optimistic contiguous allocation, but less than the big aligned
allocation.

This opens the door for the next commits, which will try to prevent
objects from being located across pages.

Signed-off-by: Olivier Matz
---
 lib/librte_mempool/rte_mempool.c     | 44 ++++--------------------------------
 lib/librte_mempool/rte_mempool.h     |  2 +-
 lib/librte_mempool/rte_mempool_ops.c |  4 +++-
 3 files changed, 9 insertions(+), 41 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 0f29e8712..335032dc8 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -430,7 +430,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	unsigned mz_id, n;
 	int ret;
 	bool need_iova_contig_obj;
-	bool try_iova_contig_mempool;
 	bool alloc_in_ext_mem;
 
 	ret = mempool_ops_alloc_once(mp);
@@ -477,18 +476,10 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * wasting some space this way, but it's much nicer than looping around
 	 * trying to reserve each and every page size.
 	 *
-	 * However, since size calculation will produce page-aligned sizes, it
-	 * makes sense to first try and see if we can reserve the entire memzone
-	 * in one contiguous chunk as well (otherwise we might end up wasting a
-	 * 1G page on a 10MB memzone). If we fail to get enough contiguous
-	 * memory, then we'll go and reserve space page-by-page.
-	 *
 	 * We also have to take into account the fact that memory that we're
 	 * going to allocate from can belong to an externally allocated memory
 	 * area, in which case the assumption of IOVA as VA mode being
-	 * synonymous with IOVA contiguousness will not hold. We should also try
-	 * to go for contiguous memory even if we're in no-huge mode, because
-	 * external memory may in fact be IOVA-contiguous.
+	 * synonymous with IOVA contiguousness will not hold.
 	 */
 
 	/* check if we can retrieve a valid socket ID */
@@ -497,7 +488,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		return -EINVAL;
 	alloc_in_ext_mem = (ret == 1);
 	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
-	try_iova_contig_mempool = false;
 
 	if (!need_iova_contig_obj) {
 		pg_sz = 0;
@@ -506,7 +496,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		pg_sz = 0;
 		pg_shift = 0;
 	} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
-		try_iova_contig_mempool = true;
 		pg_sz = get_min_page_size(mp->socket_id);
 		pg_shift = rte_bsf32(pg_sz);
 	} else {
@@ -518,12 +507,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		size_t min_chunk_size;
 		unsigned int flags;
 
-		if (try_iova_contig_mempool || pg_sz == 0)
-			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
-				0, &min_chunk_size, &align);
-		else
-			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
-				pg_shift, &min_chunk_size, &align);
+		mem_size = rte_mempool_ops_calc_mem_size(
+			mp, n, pg_shift, &min_chunk_size, &align);
 
 		if (mem_size < 0) {
 			ret = mem_size;
@@ -542,31 +527,12 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		/* if we're trying to reserve contiguous memory, add appropriate
 		 * memzone flag.
 		 */
-		if (try_iova_contig_mempool)
+		if (min_chunk_size == (size_t)mem_size)
 			flags |= RTE_MEMZONE_IOVA_CONTIG;
 
 		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
 				mp->socket_id, flags, align);
 
-		/* if we were trying to allocate contiguous memory, failed and
-		 * minimum required contiguous chunk fits minimum page, adjust
-		 * memzone size to the page size, and try again.
-		 */
-		if (mz == NULL && try_iova_contig_mempool &&
-				min_chunk_size <= pg_sz) {
-			try_iova_contig_mempool = false;
-			flags &= ~RTE_MEMZONE_IOVA_CONTIG;
-
-			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
-					pg_shift, &min_chunk_size, &align);
-			if (mem_size < 0) {
-				ret = mem_size;
-				goto fail;
-			}
-
-			mz = rte_memzone_reserve_aligned(mz_name, mem_size,
-				mp->socket_id, flags, align);
-		}
 		/* don't try reserving with 0 size if we were asked to reserve
 		 * IOVA-contiguous memory.
 		 */
@@ -594,7 +560,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		else
 			iova = RTE_BAD_IOVA;
 
-		if (try_iova_contig_mempool || pg_sz == 0)
 			ret = rte_mempool_populate_iova(mp, mz->addr,
+		if (pg_sz == 0)
 			ret = rte_mempool_populate_iova(mp, mz->addr,
 				iova, mz->len,
 				rte_mempool_memchunk_mz_free,
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8053f7a04..7bc10e699 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -458,7 +458,7 @@ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
  * @param[out] align
  *   Location for required memory chunk alignment.
  * @return
- *   Required memory size aligned at page boundary.
+ *   Required memory size.
  */
 typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
 		uint32_t obj_num, uint32_t pg_shift,
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index e02eb702c..22c5251eb 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -100,7 +100,9 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp)
 	return ops->get_count(mp);
 }
 
-/* wrapper to notify new memory area to external mempool */
+/* wrapper to calculate the memory size required to store given number
+ * of objects
+ */
 ssize_t
 rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
 				uint32_t obj_num, uint32_t pg_shift,
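Note: a small standalone example of the over-allocation described in the
commit message above. The object count and element size are made up, and this
is not the DPDK calculation itself; it only shows how rounding a ~10 MB
requirement up to a 1 GB hugepage reserves a full gigabyte.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t pg_sz = 1ULL << 30;            /* one 1 GB hugepage */
	uint64_t obj_num = 4096, elt_sz = 2560; /* hypothetical mempool, ~10 MB of objects */
	uint64_t needed = obj_num * elt_sz;
	uint64_t aligned = (needed + pg_sz - 1) & ~(pg_sz - 1); /* page-aligned size */

	/* prints 10485760 vs 1073741824 */
	printf("unaligned: %" PRIu64 " bytes, page-aligned: %" PRIu64 " bytes\n",
	       needed, aligned);
	return 0;
}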
From patchwork Fri Jul 19 13:38:44 2019
X-Patchwork-Submitter: Olivier Matz
X-Patchwork-Id: 56781
From: Olivier Matz
To: Vamsi Krishna Attunuru, dev@dpdk.org
Cc: Andrew Rybchenko, Thomas Monjalon, Anatoly Burakov,
 Jerin Jacob Kollanukkaran, Kokkilagadda, Ferruh Yigit
Date: Fri, 19 Jul 2019 15:38:44 +0200
Message-Id: <20190719133845.32432-4-olivier.matz@6wind.com>
In-Reply-To: <20190719133845.32432-1-olivier.matz@6wind.com>
References: <20190719133845.32432-1-olivier.matz@6wind.com>
Subject: [dpdk-dev] [RFC 3/4] mempool: introduce function to get mempool page size

In rte_mempool_populate_default(), we determine the page size, which is
needed for calc_size and the allocation of memory. Move this logic into
a function and export it, as it will be used in the next commit.

Signed-off-by: Olivier Matz
---
 lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++++++++++---------------
 lib/librte_mempool/rte_mempool.h |  6 +++++
 2 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 335032dc8..7def0ba68 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -414,6 +414,32 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	return ret;
 }
 
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
+{
+	bool need_iova_contig_obj;
+	bool alloc_in_ext_mem;
+	int ret;
+
+	/* check if we can retrieve a valid socket ID */
+	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+	if (ret < 0)
+		return -EINVAL;
+	alloc_in_ext_mem = (ret == 1);
+	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+
+	if (!need_iova_contig_obj)
+		*pg_sz = 0;
+	else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
+		*pg_sz = get_min_page_size(mp->socket_id);
+	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
+		*pg_sz = get_min_page_size(mp->socket_id);
+	else
+		*pg_sz = getpagesize();
+
+	return 0;
+}
+
 /* Default function to populate the mempool: allocate memory in memzones,
  * and populate them. Return the number of objects added, or a negative
  * value on error.
@@ -425,12 +451,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
 	ssize_t mem_size;
-	size_t align, pg_sz, pg_shift;
+	size_t align, pg_sz, pg_shift = 0;
 	rte_iova_t iova;
 	unsigned mz_id, n;
 	int ret;
 	bool need_iova_contig_obj;
-	bool alloc_in_ext_mem;
 
 	ret = mempool_ops_alloc_once(mp);
 	if (ret != 0)
@@ -482,26 +507,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * synonymous with IOVA contiguousness will not hold.
 	 */
 
-	/* check if we can retrieve a valid socket ID */
-	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
-	if (ret < 0)
-		return -EINVAL;
-	alloc_in_ext_mem = (ret == 1);
 	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+	ret = rte_mempool_get_page_size(mp, &pg_sz);
+	if (ret < 0)
+		return ret;
 
-	if (!need_iova_contig_obj) {
-		pg_sz = 0;
-		pg_shift = 0;
-	} else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
-		pg_sz = 0;
-		pg_shift = 0;
-	} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
-		pg_sz = get_min_page_size(mp->socket_id);
-		pg_shift = rte_bsf32(pg_sz);
-	} else {
-		pg_sz = getpagesize();
+	if (pg_sz != 0)
 		pg_shift = rte_bsf32(pg_sz);
-	}
 
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
 		size_t min_chunk_size;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 7bc10e699..00b927989 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1692,6 +1692,12 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
 		      void *arg);
 
+/**
+ * @internal Get page size used for mempool object allocation.
+ */
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
+
 #ifdef __cplusplus
 }
 #endif
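Note: a minimal sketch of how a caller would use the new helper, modeled on
the populate path above. The wrapper function is hypothetical, and since
rte_mempool_get_page_size() is tagged @internal, this is only meant to
illustrate the contract inside the mempool library, not an application API.

#include <rte_common.h>
#include <rte_mempool.h>

/* Hypothetical caller: derive chunk sizing constraints for a mempool. */
static int
get_chunk_constraints(struct rte_mempool *mp, size_t *pg_sz, size_t *pg_shift)
{
	int ret;

	ret = rte_mempool_get_page_size(mp, pg_sz);
	if (ret < 0)
		return ret; /* e.g. invalid socket id */

	/* pg_sz == 0 means "no page-size constraint": either objects need not
	 * be IOVA-contiguous, or VA-contiguous memory is already IOVA-contiguous.
	 */
	*pg_shift = (*pg_sz == 0) ? 0 : rte_bsf32(*pg_sz);
	return 0;
}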
From patchwork Fri Jul 19 13:38:45 2019
X-Patchwork-Submitter: Olivier Matz
X-Patchwork-Id: 56784
From: Olivier Matz
To: Vamsi Krishna Attunuru, dev@dpdk.org
Cc: Andrew Rybchenko, Thomas Monjalon, Anatoly Burakov,
 Jerin Jacob Kollanukkaran, Kokkilagadda, Ferruh Yigit
Date: Fri, 19 Jul 2019 15:38:45 +0200
Message-Id: <20190719133845.32432-5-olivier.matz@6wind.com>
In-Reply-To: <20190719133845.32432-1-olivier.matz@6wind.com>
References: <20190719133845.32432-1-olivier.matz@6wind.com>
Subject: [dpdk-dev] [RFC 4/4] mempool: prevent objects from being across pages

When using IOVA-contiguous memory and objects smaller than the page
size, ensure that objects are not located across several pages.

Signed-off-by: Vamsi Krishna Attunuru
Signed-off-by: Olivier Matz
---
 lib/librte_mempool/rte_mempool_ops_default.c | 39 ++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c
index 4e2bfc82d..2bbd67367 100644
--- a/lib/librte_mempool/rte_mempool_ops_default.c
+++ b/lib/librte_mempool/rte_mempool_ops_default.c
@@ -45,19 +45,54 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
 	return mem_size;
 }
 
+/* Returns -1 if object falls on a page boundary, else returns 0 */
+static inline int
+mempool_check_obj_bounds(void *obj, uint64_t pg_sz, size_t elt_sz)
+{
+	uintptr_t page_end, elt_addr = (uintptr_t)obj;
+	uint32_t pg_shift;
+	uint64_t page_mask;
+
+	if (pg_sz == 0)
+		return 0;
+	if (elt_sz > pg_sz)
+		return 0;
+
+	pg_shift = rte_bsf32(pg_sz);
+	page_mask = ~((1ull << pg_shift) - 1);
+	page_end = (elt_addr & page_mask) + pg_sz;
+
+	if (elt_addr + elt_sz > page_end)
+		return -1;
+
+	return 0;
+}
+
 int
 rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
 		void *vaddr, rte_iova_t iova, size_t len,
 		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
-	size_t total_elt_sz;
+	size_t total_elt_sz, pg_sz;
 	size_t off;
 	unsigned int i;
 	void *obj;
 
+	rte_mempool_get_page_size(mp, &pg_sz);
+
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
-	for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
+	for (off = 0, i = 0; i < max_objs; i++) {
+		/* align offset to next page start if required */
+		if (mempool_check_obj_bounds((char *)vaddr + off,
+					pg_sz, total_elt_sz) < 0) {
+			off += RTE_PTR_ALIGN_CEIL((char *)vaddr + off, pg_sz) -
+				((char *)vaddr + off);
+		}
+
+		if (off + total_elt_sz > len)
+			break;
+
 		off += mp->header_size;
 		obj = (char *)vaddr + off;
 		obj_cb(mp, obj_cb_arg, obj,
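Note: the capture of the last hunk is cut off above. As a closing
illustration, here is a standalone re-implementation of the boundary check
introduced by this patch, with a tiny self-test. The function name, addresses
and sizes are invented for the example, and the page mask is computed directly
from pg_sz (assumed to be a power of two) instead of going through rte_bsf32()
as the patch does.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Same arithmetic as mempool_check_obj_bounds() in the patch: return -1 if
 * [obj, obj + elt_sz) would cross a pg_sz boundary, 0 otherwise. */
static int
check_obj_bounds(uint64_t obj, uint64_t pg_sz, size_t elt_sz)
{
	uint64_t page_mask, page_end;

	if (pg_sz == 0 || elt_sz > pg_sz)
		return 0; /* no constraint, or object larger than a page */

	page_mask = ~(pg_sz - 1); /* pg_sz is a power of two */
	page_end = (obj & page_mask) + pg_sz;

	return (obj + elt_sz > page_end) ? -1 : 0;
}

int
main(void)
{
	/* 4 KB pages, 2560-byte objects (made-up values) */
	assert(check_obj_bounds(0x100000, 4096, 2560) == 0);  /* fits in the page */
	assert(check_obj_bounds(0x100a00, 4096, 2560) == -1); /* would cross 0x101000 */
	return 0;
}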