[v2,3/3,20.05] mempool: return 0 if area is too small on populate

Message ID 20200117145754.11682-4-olivier.matz@6wind.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series mempool: fix mempool virt populate with small chunks

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/travis-robot warning Travis build: failed
ci/Intel-compilation fail apply issues

Commit Message

Olivier Matz Jan. 17, 2020, 2:57 p.m. UTC
  Change rte_mempool_populate_iova() and rte_mempool_populate_virt() to
return 0 instead of -EINVAL when there is not enough room to store one
object, so that applications can distinguish this specific case.

As this is an ABI change, use symbol versioning to preserve old
behavior for binary applications.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
 examples/ntb/ntb_fwd.c                     |  2 +-
 lib/librte_mempool/meson.build             |  1 +
 lib/librte_mempool/rte_mempool.c           | 76 ++++++++++++++++++----
 lib/librte_mempool/rte_mempool.h           | 14 ++--
 lib/librte_mempool/rte_mempool_version.map |  7 ++
 5 files changed, 84 insertions(+), 16 deletions(-)
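
The series changes the populate return convention as described in the commit
message above. A minimal caller-side sketch of how an application might
distinguish the three outcomes follows; the helper name, the NULL free
callback and the printf reporting are illustrative assumptions, not part of
the patch.

#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>

/* Hypothetical helper: try to populate 'mp' from one virtually contiguous
 * area and report what the new return convention means. */
static int
populate_one_area(struct rte_mempool *mp, char *addr, size_t len, size_t pg_sz)
{
	int ret;

	/* NULL free_cb: the caller keeps ownership of the area. */
	ret = rte_mempool_populate_virt(mp, addr, len, pg_sz, NULL, NULL);
	if (ret > 0)            /* success: number of objects added */
		printf("added %d objects from %zu bytes\n", ret, len);
	else if (ret == 0)      /* new in this series: area too small for one object */
		printf("area of %zu bytes is too small, retry with a larger one\n", len);
	else                    /* e.g. -ENOSPC or -ENOMEM */
		printf("populate failed: %s\n", rte_strerror(-ret));

	return ret;
}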
  

Comments

David Marchand Jan. 17, 2020, 8:32 p.m. UTC | #1
On Fri, Jan 17, 2020 at 3:58 PM Olivier Matz <olivier.matz@6wind.com> wrote:
>
> Change rte_mempool_populate_iova() and rte_mempool_populate_virt() to
> return 0 instead of -EINVAL when there is not enough room to store one
> object, so that applications can distinguish this specific case.
>
> As this is an ABI change, use symbol versioning to preserve old
> behavior for binary applications.
>
> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> ---
>  examples/ntb/ntb_fwd.c                     |  2 +-
>  lib/librte_mempool/meson.build             |  1 +
>  lib/librte_mempool/rte_mempool.c           | 76 ++++++++++++++++++----
>  lib/librte_mempool/rte_mempool.h           | 14 ++--
>  lib/librte_mempool/rte_mempool_version.map |  7 ++
>  5 files changed, 84 insertions(+), 16 deletions(-)
>
> diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
> index c914256dd..99a43688c 100644
> --- a/examples/ntb/ntb_fwd.c
> +++ b/examples/ntb/ntb_fwd.c
> @@ -1313,7 +1313,7 @@ ntb_mbuf_pool_create(uint16_t mbuf_seg_size, uint32_t nb_mbuf,
>                                         mz->len - ntb_info.ntb_hdr_size,
>                                         ntb_mempool_mz_free,
>                                         (void *)(uintptr_t)mz);
> -               if (ret < 0) {
> +               if (ret <= 0) {
>                         rte_memzone_free(mz);
>                         rte_mempool_free(mp);
>                         return NULL;
> diff --git a/lib/librte_mempool/meson.build b/lib/librte_mempool/meson.build
> index f8710b61b..4fe267cd8 100644
> --- a/lib/librte_mempool/meson.build
> +++ b/lib/librte_mempool/meson.build
> @@ -18,3 +18,4 @@ deps += ['ring']
>
>  # memseg walk is not yet part of stable API
>  allow_experimental_apis = true
> +use_function_versioning = true
> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> index b434d9f17..0ebb21eec 100644
> --- a/lib/librte_mempool/rte_mempool.c
> +++ b/lib/librte_mempool/rte_mempool.c
> @@ -31,6 +31,7 @@
>  #include <rte_string_fns.h>
>  #include <rte_spinlock.h>
>  #include <rte_tailq.h>
> +#include <rte_function_versioning.h>
>
>  #include "rte_mempool.h"
>
> @@ -293,12 +294,17 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
>         return 0;
>  }
>
> +__vsym int
> +rte_mempool_populate_iova_v20_0_1(struct rte_mempool *mp, char *vaddr,
> +       rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
> +       void *opaque);
> +

For 20.05, this should be 20.0.2.
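
If that version bump is applied, the declaration and default binding for one
of the two symbols would presumably become something like the sketch below
(the _v20_0_2 names and the 20.0.2 string follow the reviewer's suggestion
and are not part of the posted patch), with the new version.map node renamed
to DPDK_20.0.2 accordingly:

__vsym int
rte_mempool_populate_iova_v20_0_2(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/* ... same body as the _v20_0_1 implementation shown in the diff ... */

BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v20_0_2, 20.0.2);
MAP_STATIC_SYMBOL(
	int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
				rte_iova_t iova, size_t len,
				rte_mempool_memchunk_free_cb_t *free_cb,
				void *opaque),
	rte_mempool_populate_iova_v20_0_2);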



>  /* Add objects in the pool, using a physically contiguous memory
>   * zone. Return the number of objects added, or a negative value
>   * on error.
>   */
> -static int
> -__rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
> +__vsym int
> +rte_mempool_populate_iova_v20_0_1(struct rte_mempool *mp, char *vaddr,
>         rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
>         void *opaque)
>  {
> @@ -355,21 +361,34 @@ __rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
>         rte_free(memhdr);
>         return ret;
>  }
> +BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v20_0_1, 20.0.1);
> +MAP_STATIC_SYMBOL(
> +       int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
> +                               rte_iova_t iova, size_t len,
> +                               rte_mempool_memchunk_free_cb_t *free_cb,
> +                               void *opaque),
> +       rte_mempool_populate_iova_v20_0_1);
> +
> +__vsym int
> +rte_mempool_populate_iova_v20_0(struct rte_mempool *mp, char *vaddr,
> +       rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
> +       void *opaque);
>
> -int
> -rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
> +__vsym int
> +rte_mempool_populate_iova_v20_0(struct rte_mempool *mp, char *vaddr,
>         rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
>         void *opaque)
>  {
>         int ret;
>
> -       ret = __rte_mempool_populate_iova(mp, vaddr, iova, len, free_cb,
> +       ret = rte_mempool_populate_iova_v20_0_1(mp, vaddr, iova, len, free_cb,
>                                         opaque);
>         if (ret == 0)
>                 ret = -EINVAL;
>
>         return ret;
>  }
> +VERSION_SYMBOL(rte_mempool_populate_iova, _v20_0, 20.0);
>
>  static rte_iova_t
>  get_iova(void *addr)
> @@ -384,11 +403,16 @@ get_iova(void *addr)
>         return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
>  }
>
> +__vsym int
> +rte_mempool_populate_virt_v20_0_1(struct rte_mempool *mp, char *addr,
> +       size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
> +       void *opaque);
> +
>  /* Populate the mempool with a virtual area. Return the number of
>   * objects added, or a negative value on error.
>   */
> -int
> -rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
> +__vsym int
> +rte_mempool_populate_virt_v20_0_1(struct rte_mempool *mp, char *addr,
>         size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
>         void *opaque)
>  {
> @@ -421,7 +445,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
>                                 break;
>                 }
>
> -               ret = __rte_mempool_populate_iova(mp, addr + off, iova,
> +               ret = rte_mempool_populate_iova_v20_0_1(mp, addr + off, iova,
>                         phys_len, free_cb, opaque);
>                 if (ret == 0)
>                         continue;
> @@ -432,15 +456,41 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
>                 cnt += ret;
>         }
>
> -       if (cnt == 0)
> -               return -EINVAL;
> -
>         return cnt;
>
>   fail:
>         rte_mempool_free_memchunks(mp);
>         return ret;
>  }
> +BIND_DEFAULT_SYMBOL(rte_mempool_populate_virt, _v20_0_1, 20.0.1);
> +MAP_STATIC_SYMBOL(
> +       int rte_mempool_populate_virt(struct rte_mempool *mp,
> +                               char *addr, size_t len, size_t pg_sz,
> +                               rte_mempool_memchunk_free_cb_t *free_cb,
> +                               void *opaque),
> +       rte_mempool_populate_virt_v20_0_1);
> +
> +__vsym int
> +rte_mempool_populate_virt_v20_0(struct rte_mempool *mp, char *addr,
> +       size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
> +       void *opaque);
> +
> +__vsym int
> +rte_mempool_populate_virt_v20_0(struct rte_mempool *mp, char *addr,
> +       size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
> +       void *opaque)
> +{
> +       int ret;
> +
> +       ret = rte_mempool_populate_virt_v20_0_1(mp, addr, len, pg_sz,
> +                                               free_cb, opaque);
> +
> +       if (ret == 0)
> +               ret = -EINVAL;
> +
> +       return ret;
> +}
> +VERSION_SYMBOL(rte_mempool_populate_virt, _v20_0, 20.0);
>
>  /* Get the minimal page size used in a mempool before populating it. */
>  int
> @@ -601,6 +651,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
>                                 mz->len, pg_sz,
>                                 rte_mempool_memchunk_mz_free,
>                                 (void *)(uintptr_t)mz);
> +               if (ret == 0) /* should not happen */
> +                       ret = -ENOBUFS;
>                 if (ret < 0) {
>                         rte_memzone_free(mz);
>                         goto fail;
> @@ -692,6 +744,8 @@ rte_mempool_populate_anon(struct rte_mempool *mp)
>
>         ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
>                 rte_mempool_memchunk_anon_free, addr);
> +       if (ret == 0) /* should not happen */
> +               ret = -ENOBUFS;
>         if (ret < 0) {
>                 rte_errno = -ret;
>                 goto fail;
> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index 0a1dc6059..5a106a432 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -1106,9 +1106,12 @@ rte_mempool_free(struct rte_mempool *mp);
>   * @param opaque
>   *   An opaque argument passed to free_cb.
>   * @return
> - *   The number of objects added on success.
> + *   The number of objects added on success (strictly positive).
>   *   On error, the chunk is not added in the memory list of the
> - *   mempool and a negative errno is returned.
> + *   mempool and one of the following codes is returned:
> + *     (0): not enough room in chunk for one object.
> + *     (-ENOSPC): mempool is already populated.
> + *     (-ENOMEM): allocation failure.
>   */
>  int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
>         rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
> @@ -1133,9 +1136,12 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
>   * @param opaque
>   *   An opaque argument passed to free_cb.
>   * @return
> - *   The number of objects added on success.
> + *   The number of objects added on success (strictly positive).
>   *   On error, the chunk is not added in the memory list of the
> - *   mempool and a negative errno is returned.
> + *   mempool and one of the following codes is returned:
> + *     (0): not enough room in chunk for one object.
> + *     (-ENOSPC): mempool is already populated.
> + *     (-ENOMEM): allocation failure.
>   */
>  int
>  rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
> diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
> index d002dfc46..34d977e47 100644
> --- a/lib/librte_mempool/rte_mempool_version.map
> +++ b/lib/librte_mempool/rte_mempool_version.map
> @@ -35,6 +35,13 @@ DPDK_20.0 {
>         local: *;
>  };
>
> +DPDK_20.0.1 {
> +       global:
> +
> +       rte_mempool_populate_iova;
> +       rte_mempool_populate_virt;
> +} DPDK_20.0;
> +
>  EXPERIMENTAL {
>         global:
>
> --
> 2.20.1
>
  

Patch

diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c
index c914256dd..99a43688c 100644
--- a/examples/ntb/ntb_fwd.c
+++ b/examples/ntb/ntb_fwd.c
@@ -1313,7 +1313,7 @@  ntb_mbuf_pool_create(uint16_t mbuf_seg_size, uint32_t nb_mbuf,
 					mz->len - ntb_info.ntb_hdr_size,
 					ntb_mempool_mz_free,
 					(void *)(uintptr_t)mz);
-		if (ret < 0) {
+		if (ret <= 0) {
 			rte_memzone_free(mz);
 			rte_mempool_free(mp);
 			return NULL;
diff --git a/lib/librte_mempool/meson.build b/lib/librte_mempool/meson.build
index f8710b61b..4fe267cd8 100644
--- a/lib/librte_mempool/meson.build
+++ b/lib/librte_mempool/meson.build
@@ -18,3 +18,4 @@  deps += ['ring']
 
 # memseg walk is not yet part of stable API
 allow_experimental_apis = true
+use_function_versioning = true
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index b434d9f17..0ebb21eec 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -31,6 +31,7 @@ 
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
 #include <rte_tailq.h>
+#include <rte_function_versioning.h>
 
 #include "rte_mempool.h"
 
@@ -293,12 +294,17 @@  mempool_ops_alloc_once(struct rte_mempool *mp)
 	return 0;
 }
 
+__vsym int
+rte_mempool_populate_iova_v20_0_1(struct rte_mempool *mp, char *vaddr,
+	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
 /* Add objects in the pool, using a physically contiguous memory
  * zone. Return the number of objects added, or a negative value
  * on error.
  */
-static int
-__rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+__vsym int
+rte_mempool_populate_iova_v20_0_1(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
@@ -355,21 +361,34 @@  __rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	rte_free(memhdr);
 	return ret;
 }
+BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v20_0_1, 20.0.1);
+MAP_STATIC_SYMBOL(
+	int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+				rte_iova_t iova, size_t len,
+				rte_mempool_memchunk_free_cb_t *free_cb,
+				void *opaque),
+	rte_mempool_populate_iova_v20_0_1);
+
+__vsym int
+rte_mempool_populate_iova_v20_0(struct rte_mempool *mp, char *vaddr,
+	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
 
-int
-rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+__vsym int
+rte_mempool_populate_iova_v20_0(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
 	int ret;
 
-	ret = __rte_mempool_populate_iova(mp, vaddr, iova, len, free_cb,
+	ret = rte_mempool_populate_iova_v20_0_1(mp, vaddr, iova, len, free_cb,
 					opaque);
 	if (ret == 0)
 		ret = -EINVAL;
 
 	return ret;
 }
+VERSION_SYMBOL(rte_mempool_populate_iova, _v20_0, 20.0);
 
 static rte_iova_t
 get_iova(void *addr)
@@ -384,11 +403,16 @@  get_iova(void *addr)
 	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
 }
 
+__vsym int
+rte_mempool_populate_virt_v20_0_1(struct rte_mempool *mp, char *addr,
+	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
 /* Populate the mempool with a virtual area. Return the number of
  * objects added, or a negative value on error.
  */
-int
-rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+__vsym int
+rte_mempool_populate_virt_v20_0_1(struct rte_mempool *mp, char *addr,
 	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
@@ -421,7 +445,7 @@  rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 				break;
 		}
 
-		ret = __rte_mempool_populate_iova(mp, addr + off, iova,
+		ret = rte_mempool_populate_iova_v20_0_1(mp, addr + off, iova,
 			phys_len, free_cb, opaque);
 		if (ret == 0)
 			continue;
@@ -432,15 +456,41 @@  rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 		cnt += ret;
 	}
 
-	if (cnt == 0)
-		return -EINVAL;
-
 	return cnt;
 
  fail:
 	rte_mempool_free_memchunks(mp);
 	return ret;
 }
+BIND_DEFAULT_SYMBOL(rte_mempool_populate_virt, _v20_0_1, 20.0.1);
+MAP_STATIC_SYMBOL(
+	int rte_mempool_populate_virt(struct rte_mempool *mp,
+				char *addr, size_t len, size_t pg_sz,
+				rte_mempool_memchunk_free_cb_t *free_cb,
+				void *opaque),
+	rte_mempool_populate_virt_v20_0_1);
+
+__vsym int
+rte_mempool_populate_virt_v20_0(struct rte_mempool *mp, char *addr,
+	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
+__vsym int
+rte_mempool_populate_virt_v20_0(struct rte_mempool *mp, char *addr,
+	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque)
+{
+	int ret;
+
+	ret = rte_mempool_populate_virt_v20_0_1(mp, addr, len, pg_sz,
+						free_cb, opaque);
+
+	if (ret == 0)
+		ret = -EINVAL;
+
+	return ret;
+}
+VERSION_SYMBOL(rte_mempool_populate_virt, _v20_0, 20.0);
 
 /* Get the minimal page size used in a mempool before populating it. */
 int
@@ -601,6 +651,8 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 				mz->len, pg_sz,
 				rte_mempool_memchunk_mz_free,
 				(void *)(uintptr_t)mz);
+		if (ret == 0) /* should not happen */
+			ret = -ENOBUFS;
 		if (ret < 0) {
 			rte_memzone_free(mz);
 			goto fail;
@@ -692,6 +744,8 @@  rte_mempool_populate_anon(struct rte_mempool *mp)
 
 	ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
 		rte_mempool_memchunk_anon_free, addr);
+	if (ret == 0) /* should not happen */
+		ret = -ENOBUFS;
 	if (ret < 0) {
 		rte_errno = -ret;
 		goto fail;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 0a1dc6059..5a106a432 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1106,9 +1106,12 @@  rte_mempool_free(struct rte_mempool *mp);
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool and one of the following codes is returned:
+ *     (0): not enough room in chunk for one object.
+ *     (-ENOSPC): mempool is already populated.
+ *     (-ENOMEM): allocation failure.
  */
 int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
@@ -1133,9 +1136,12 @@  int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool and one of the following codes is returned:
+ *     (0): not enough room in chunk for one object.
+ *     (-ENOSPC): mempool is already populated.
+ *     (-ENOMEM): allocation failure.
  */
 int
 rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index d002dfc46..34d977e47 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -35,6 +35,13 @@  DPDK_20.0 {
 	local: *;
 };
 
+DPDK_20.0.1 {
+	global:
+
+	rte_mempool_populate_iova;
+	rte_mempool_populate_virt;
+} DPDK_20.0;
+
 EXPERIMENTAL {
 	global: