[1/4] mempool: add event callbacks

Message ID 20210818090755.2419483-2-dkozlyuk@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series net/mlx5: implicit mempool registration

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Dmitry Kozlyuk Aug. 18, 2021, 9:07 a.m. UTC
Performance of MLX5 PMDs of different classes can benefit if the PMD knows
in advance which memory it will need to handle, before the first mbuf is
sent to it. It is impractical, however, to consider all allocated memory
for this purpose. Most often mbuf memory comes from mempools, which can
come and go. The PMD can enumerate existing mempools at device start, but
it also needs to track mempool creation and destruction after forwarding
starts and before an mbuf from a new mempool is sent to the device.

Add an internal API to register callbacks for mempool life cycle events,
currently RTE_MEMPOOL_EVENT_CREATE and RTE_MEMPOOL_EVENT_DESTROY:
* rte_mempool_event_callback_register()
* rte_mempool_event_callback_unregister()

Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 lib/mempool/rte_mempool.c | 153 ++++++++++++++++++++++++++++++++++++--
 lib/mempool/rte_mempool.h |  56 ++++++++++++++
 lib/mempool/version.map   |   8 ++
 3 files changed, 212 insertions(+), 5 deletions(-)
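
For illustration, a minimal sketch of the pattern described above: enumerate
existing mempools at device start with rte_mempool_walk(), then rely on the new
callbacks for pools created or destroyed later. Only
rte_mempool_event_callback_register()/_unregister(), the RTE_MEMPOOL_EVENT_*
values and rte_mempool_walk() are real API here; struct my_priv and the
my_priv_*()/my_dev_*() helpers are hypothetical placeholders.

#include <errno.h>

#include <rte_errno.h>
#include <rte_mempool.h>

/* Hypothetical driver context and memory-registration helpers. */
struct my_priv;
void my_priv_register_mp(struct my_priv *priv, struct rte_mempool *mp);
void my_priv_unregister_mp(struct my_priv *priv, struct rte_mempool *mp);

/* Register memory of one existing mempool found at start time. */
static void
my_walk_cb(struct rte_mempool *mp, void *arg)
{
        my_priv_register_mp(arg, mp);
}

/* Track mempools that appear or disappear while the port is running. */
static void
my_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
                    void *arg)
{
        struct my_priv *priv = arg;

        if (event == RTE_MEMPOOL_EVENT_CREATE)
                my_priv_register_mp(priv, mp);
        else if (event == RTE_MEMPOOL_EVENT_DESTROY)
                my_priv_unregister_mp(priv, mp);
}

static int
my_dev_start(struct my_priv *priv)
{
        int ret;

        /* Subscribe first so that pools created concurrently are not missed;
         * my_priv_register_mp() is assumed to tolerate duplicates.
         */
        ret = rte_mempool_event_callback_register(my_mempool_event_cb, priv);
        if (ret != 0 && rte_errno != EEXIST)
                return ret;
        /* Then pick up the mempools that already exist. */
        rte_mempool_walk(my_walk_cb, priv);
        return 0;
}

static void
my_dev_close(struct my_priv *priv)
{
        /* Unregister with the exact same (callback, arg) pair. */
        rte_mempool_event_callback_unregister(my_mempool_event_cb, priv);
}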
  

Comments

Jerin Jacob Oct. 12, 2021, 3:12 a.m. UTC | #1
On Wed, Aug 18, 2021 at 2:38 PM Dmitry Kozlyuk <dkozlyuk@nvidia.com> wrote:
>
> Performance of MLX5 PMD of different classes can benefit if PMD knows
> which memory it will need to handle in advance, before the first mbuf
> is sent to the PMD. It is impractical, however, to consider
> all allocated memory for this purpose. Most often mbuf memory comes
> from mempools that can come and go. PMD can enumerate existing mempools
> on device start, but it also needs to track mempool creation
> and destruction after the forwarding starts but before an mbuf
> from the new mempool is sent to the device.
>
> Add an internal API to register callback for mempool life cycle events,
> currently RTE_MEMPOOL_EVENT_CREATE and RTE_MEMPOOL_EVENT_DESTROY:
> * rte_mempool_event_callback_register()
> * rte_mempool_event_callback_unregister()
>
> Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>

Acked-by: Jerin Jacob <jerinj@marvell.com>




Patch

diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 59a588425b..0ec56ad278 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -42,6 +42,18 @@  static struct rte_tailq_elem rte_mempool_tailq = {
 };
 EAL_REGISTER_TAILQ(rte_mempool_tailq)
 
+TAILQ_HEAD(mempool_callback_list, rte_tailq_entry);
+
+static struct rte_tailq_elem callback_tailq = {
+	.name = "RTE_MEMPOOL_CALLBACK",
+};
+EAL_REGISTER_TAILQ(callback_tailq)
+
+/* Invoke all registered mempool event callbacks. */
+static void
+mempool_event_callback_invoke(enum rte_mempool_event event,
+			      struct rte_mempool *mp);
+
 #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
 #define CALC_CACHE_FLUSHTHRESH(c)	\
 	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
@@ -722,6 +734,7 @@  rte_mempool_free(struct rte_mempool *mp)
 	}
 	rte_mcfg_tailq_write_unlock();
 
+	mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_DESTROY, mp);
 	rte_mempool_trace_free(mp);
 	rte_mempool_free_memchunks(mp);
 	rte_mempool_ops_free(mp);
@@ -778,10 +791,10 @@  rte_mempool_cache_free(struct rte_mempool_cache *cache)
 }
 
 /* create an empty mempool */
-struct rte_mempool *
-rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
-	unsigned cache_size, unsigned private_data_size,
-	int socket_id, unsigned flags)
+static struct rte_mempool *
+mempool_create_empty(const char *name, unsigned int n,
+	unsigned int elt_size, unsigned int cache_size,
+	unsigned int private_data_size, int socket_id, unsigned int flags)
 {
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	struct rte_mempool_list *mempool_list;
@@ -915,6 +928,19 @@  rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	return NULL;
 }
 
+struct rte_mempool *
+rte_mempool_create_empty(const char *name, unsigned int n,
+	unsigned int elt_size, unsigned int cache_size,
+	unsigned int private_data_size, int socket_id, unsigned int flags)
+{
+	struct rte_mempool *mp;
+
+	mp = mempool_create_empty(name, n, elt_size, cache_size,
+		private_data_size, socket_id, flags);
+	mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_CREATE, mp);
+	return mp;
+}
+
 /* create the mempool */
 struct rte_mempool *
 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
@@ -926,7 +952,7 @@  rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	int ret;
 	struct rte_mempool *mp;
 
-	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+	mp = mempool_create_empty(name, n, elt_size, cache_size,
 		private_data_size, socket_id, flags);
 	if (mp == NULL)
 		return NULL;
@@ -958,6 +984,8 @@  rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	if (obj_init)
 		rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
 
+	mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_CREATE, mp);
+
 	rte_mempool_trace_create(name, n, elt_size, cache_size,
 		private_data_size, mp_init, mp_init_arg, obj_init,
 		obj_init_arg, flags, mp);
@@ -1343,3 +1371,118 @@  void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
 
 	rte_mcfg_mempool_read_unlock();
 }
+
+struct mempool_callback {
+	rte_mempool_event_callback *func;
+	void *arg;
+};
+
+static void
+mempool_event_callback_invoke(enum rte_mempool_event event,
+			      struct rte_mempool *mp)
+{
+	struct mempool_callback_list *list;
+	struct rte_tailq_entry *te;
+	void *tmp_te;
+
+	rte_mcfg_tailq_read_lock();
+	list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);
+	TAILQ_FOREACH_SAFE(te, list, next, tmp_te) {
+		struct mempool_callback *cb = te->data;
+		rte_mcfg_tailq_read_unlock();
+		cb->func(event, mp, cb->arg);
+		rte_mcfg_tailq_read_lock();
+	}
+	rte_mcfg_tailq_read_unlock();
+}
+
+int
+rte_mempool_event_callback_register(rte_mempool_event_callback *func,
+				    void *arg)
+{
+	struct mempool_callback_list *list;
+	struct rte_tailq_entry *te = NULL;
+	struct mempool_callback *cb;
+	void *tmp_te;
+	int ret;
+
+	rte_mcfg_mempool_read_lock();
+	rte_mcfg_tailq_write_lock();
+
+	list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);
+	TAILQ_FOREACH_SAFE(te, list, next, tmp_te) {
+		struct mempool_callback *cb =
+					(struct mempool_callback *)te->data;
+		if (cb->func == func && cb->arg == arg) {
+			ret = -EEXIST;
+			goto exit;
+		}
+	}
+
+	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, MEMPOOL,
+			"Cannot allocate event callback tailq entry!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	cb = rte_malloc("MEMPOOL_EVENT_CALLBACK", sizeof(*cb), 0);
+	if (cb == NULL) {
+		RTE_LOG(ERR, MEMPOOL,
+			"Cannot allocate event callback!\n");
+		rte_free(te);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	cb->func = func;
+	cb->arg = arg;
+	te->data = cb;
+	TAILQ_INSERT_TAIL(list, te, next);
+	ret = 0;
+
+exit:
+	rte_mcfg_tailq_write_unlock();
+	rte_mcfg_mempool_read_unlock();
+	rte_errno = -ret;
+	return ret;
+}
+
+int
+rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
+				      void *arg)
+{
+	struct mempool_callback_list *list;
+	struct rte_tailq_entry *te = NULL;
+	struct mempool_callback *cb;
+	int ret;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		rte_errno = EPERM;
+		return -1;
+	}
+
+	rte_mcfg_mempool_read_lock();
+	rte_mcfg_tailq_write_lock();
+	ret = -ENOENT;
+	list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);
+	TAILQ_FOREACH(te, list, next) {
+		cb = (struct mempool_callback *)te->data;
+		if (cb->func == func && cb->arg == arg)
+			break;
+	}
+	if (te != NULL) {
+		TAILQ_REMOVE(list, te, next);
+		ret = 0;
+	}
+	rte_mcfg_tailq_write_unlock();
+	rte_mcfg_mempool_read_unlock();
+
+	if (ret == 0) {
+		rte_free(te);
+		rte_free(cb);
+	}
+	rte_errno = -ret;
+	return ret;
+}
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index 4235d6f0bf..1e9b8f0229 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -1775,6 +1775,62 @@  void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
 int
 rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
 
+/**
+ * Mempool event type.
+ * @internal
+ */
+enum rte_mempool_event {
+	/** Occurs after a successful mempool creation. */
+	RTE_MEMPOOL_EVENT_CREATE = 0,
+	/** Occurs before destruction of a mempool begins. */
+	RTE_MEMPOOL_EVENT_DESTROY = 1,
+};
+
+/**
+ * @internal
+ * Mempool event callback.
+ */
+typedef void (rte_mempool_event_callback)(
+		enum rte_mempool_event event,
+		struct rte_mempool *mp,
+		void *arg);
+
+/**
+ * @internal
+ * Register a callback invoked on mempool life cycle event.
+ * Callbacks will be invoked in the process that creates the mempool.
+ *
+ * @param cb
+ *   Callback function.
+ * @param cb_arg
+ *   User data.
+ *
+ * @return
+ *   0 on success, negative on failure and rte_errno is set.
+ */
+__rte_internal
+int
+rte_mempool_event_callback_register(rte_mempool_event_callback *cb,
+				    void *cb_arg);
+
+/**
+ * @internal
+ * Unregister a callback added with rte_mempool_event_callback_register().
+ * @p cb and @p arg must exactly match registration parameters.
+ *
+ * @param cb
+ *   Callback function.
+ * @param cb_arg
+ *   User data.
+ *
+ * @return
+ *   0 on success, negative on failure and rte_errno is set.
+ */
+__rte_internal
+int
+rte_mempool_event_callback_unregister(rte_mempool_event_callback *cb,
+				      void *cb_arg);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/mempool/version.map b/lib/mempool/version.map
index 9f77da6fff..1b7d7c5456 100644
--- a/lib/mempool/version.map
+++ b/lib/mempool/version.map
@@ -64,3 +64,11 @@  EXPERIMENTAL {
 	__rte_mempool_trace_ops_free;
 	__rte_mempool_trace_set_ops_byname;
 };
+
+INTERNAL {
+	global:
+
+	# added in 21.11
+	rte_mempool_event_callback_register;
+	rte_mempool_event_callback_unregister;
+};
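
To make the event semantics concrete, a small test-style sketch (assuming EAL
is already initialized and memory is available, e.g. in an isolated unit test):
CREATE is reported once a pool has been successfully created, DESTROY before
rte_mempool_free() starts tearing it down. The function and pool names are made
up for this sketch.

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_mempool.h>

static unsigned int n_create, n_destroy;

/* Count the events delivered to this process. */
static void
count_mempool_events(enum rte_mempool_event event, struct rte_mempool *mp,
                     void *arg)
{
        RTE_SET_USED(mp);
        RTE_SET_USED(arg);
        if (event == RTE_MEMPOOL_EVENT_CREATE)
                n_create++;
        else if (event == RTE_MEMPOOL_EVENT_DESTROY)
                n_destroy++;
}

static int
check_mempool_event_flow(void)
{
        struct rte_mempool *mp;

        if (rte_mempool_event_callback_register(count_mempool_events,
                                                NULL) != 0)
                return -1;

        /* CREATE fires once the pool is fully created and populated. */
        mp = rte_mempool_create("ev_test", 64, 128, 0, 0,
                                NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
        if (mp == NULL || n_create != 1)
                return -1;

        /* DESTROY fires before rte_mempool_free() tears the pool down. */
        rte_mempool_free(mp);
        if (n_destroy != 1)
                return -1;

        return rte_mempool_event_callback_unregister(count_mempool_events,
                                                     NULL);
}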