[dpdk-dev,2/5] memool: add stack (lifo) based external mempool handler

Message ID 1453829155-1366-3-git-send-email-david.hunt@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon

Commit Message

Hunt, David Jan. 26, 2016, 5:25 p.m. UTC
  adds a simple stack based mempool handler

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool_perf.c           |   1 -
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 167 +++++++++++++++++++++++++++++++++
 3 files changed, 168 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c
  

Comments

Olivier Matz Feb. 4, 2016, 3:02 p.m. UTC | #1
Hi,

> [PATCH 2/5] memool: add stack (lifo) based external mempool handler

typo in the patch title: memool -> mempool


On 01/26/2016 06:25 PM, David Hunt wrote:
> adds a simple stack based mempool handler
>
> Signed-off-by: David Hunt <david.hunt@intel.com>

What is the purpose of this mempool handler?

Is it an example, or is it something that could be useful for
DPDK applications?

If it's just an example, I think we could move this code
into app/test.

> --- a/app/test/test_mempool_perf.c
> +++ b/app/test/test_mempool_perf.c
> @@ -52,7 +52,6 @@
>   #include <rte_lcore.h>
>   #include <rte_atomic.h>
>   #include <rte_branch_prediction.h>
> -#include <rte_ring.h>
>   #include <rte_mempool.h>
>   #include <rte_spinlock.h>
>   #include <rte_malloc.h>

Is this change related?



> +struct rte_mempool_common_stack {
> +	/* Spinlock to protect access */
> +	rte_spinlock_t sl;
> +
> +	uint32_t size;
> +	uint32_t len;
> +	void *objs[];
> +
> +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> +#endif

There is nothing inside the #ifdef
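If there is nothing to add under the debug build for now, the empty
#ifdef/#endif could simply be dropped, e.g. (just a sketch):

	struct rte_mempool_common_stack {
		/* Spinlock to protect access */
		rte_spinlock_t sl;

		uint32_t size;
		uint32_t len;
		void *objs[];
	};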


> +static void *
> +common_stack_alloc(struct rte_mempool *mp,
> +		const char *name, unsigned n, int socket_id, unsigned flags)
> +{
> +	struct rte_mempool_common_stack *s;
> +	char stack_name[RTE_RING_NAMESIZE];
> +
> +	int size = sizeof(*s) + (n+16)*sizeof(void *);
> +
> +	flags = flags;
> +
> +	/* Allocate our local memory structure */
> +	snprintf(stack_name, sizeof(stack_name), "%s-common-stack", name);
> +	s = rte_zmalloc_socket(stack_name,
> +					size, RTE_CACHE_LINE_SIZE, socket_id);
> +	if (s == NULL) {
> +		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
> +		return NULL;
> +	}
> +
> +	/* And the spinlock we use to protect access */
> +	rte_spinlock_init(&s->sl);
> +
> +	s->size = n;
> +	mp->rt_pool = (void *) s;
> +	mp->handler_idx = rte_get_mempool_handler("stack");
> +
> +	return (void *) s;
> +}

The explicit casts could be removed I think.
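For example (untested sketch): since an object pointer converts to and
from void * implicitly in C, the assignments could simply read:

	s->size = n;
	mp->rt_pool = s;
	mp->handler_idx = rte_get_mempool_handler("stack");

	return s;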


> +
> +static int common_stack_put(void *p, void * const *obj_table,
> +		unsigned n)
> +{
> +	struct rte_mempool_common_stack *s =
> +				(struct rte_mempool_common_stack *)p;

indent issue (same in get() and count())
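For instance (sketch only), a single continuation tab would match the
rest of the file:

	struct rte_mempool_common_stack *s =
		(struct rte_mempool_common_stack *)p;

(or the cast could be dropped entirely and the local assigned directly
from p, as above).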

> +	void **cache_objs;
> +	unsigned index;
> +
> +	/* Acquire lock */
> +	rte_spinlock_lock(&s->sl);
> +	cache_objs = &s->objs[s->len];
> +
> +	/* Is there sufficient space in the stack ? */
> +	if ((s->len + n) > s->size) {
> +		rte_spinlock_unlock(&s->sl);
> +		return -ENOENT;
> +	}

I think this cannot happen as there is a check in the get().
I wonder if a rte_panic() wouldn't be better here.
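Something along these lines, for instance (untested sketch, assuming
<rte_debug.h> is included for rte_panic()):

	/* Putting back more objects than were taken would be a bug in
	 * the handler, not a runtime condition, so abort loudly. */
	if ((s->len + n) > s->size)
		rte_panic("Stack mempool handler: put overflow\n");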



Regards,
Olivier
  

Patch

diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 091c1df..c5a1d2a 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -52,7 +52,6 @@ 
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_spinlock.h>
 #include <rte_malloc.h>
diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index 7c81ef6..d795b48 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -43,6 +43,7 @@  LIBABIVER := 1
 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_default.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_dom0_mempool.c
 endif
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..c7d232e
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,167 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <string.h>
+
+#include "rte_mempool_internal.h"
+
+struct rte_mempool_common_stack {
+	/* Spinlock to protect access */
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#endif
+};
+
+static void *
+common_stack_alloc(struct rte_mempool *mp,
+		const char *name, unsigned n, int socket_id, unsigned flags)
+{
+	struct rte_mempool_common_stack *s;
+	char stack_name[RTE_RING_NAMESIZE];
+
+	int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+	flags = flags;
+
+	/* Allocate our local memory structure */
+	snprintf(stack_name, sizeof(stack_name), "%s-common-stack", name);
+	s = rte_zmalloc_socket(stack_name,
+					size, RTE_CACHE_LINE_SIZE, socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return NULL;
+	}
+
+	/* And the spinlock we use to protect access */
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->rt_pool = (void *) s;
+	mp->handler_idx = rte_get_mempool_handler("stack");
+
+	return (void *) s;
+}
+
+static int common_stack_put(void *p, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack *s =
+				(struct rte_mempool_common_stack *)p;
+	void **cache_objs;
+	unsigned index;
+
+	/* Acquire lock */
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if ((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int common_stack_get(void *p, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack *s =
+					(struct rte_mempool_common_stack *)p;
+	void **cache_objs;
+	unsigned index, len;
+
+	/* Acquire lock */
+	rte_spinlock_lock(&s->sl);
+
+	if (unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n;
+					++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned common_stack_get_count(void *p)
+{
+	struct rte_mempool_common_stack *s =
+					(struct rte_mempool_common_stack *)p;
+
+	return s->len;
+}
+
+static int
+common_stack_free(struct rte_mempool *mp)
+{
+	struct rte_mempool_common_stack *s;
+
+	s = mp->rt_pool;
+
+	rte_free(s);
+
+	return 0;
+}
+
+static struct rte_mempool_handler handler_stack = {
+	.name = "stack",
+	.alloc = common_stack_alloc,
+	.put = common_stack_put,
+	.get = common_stack_get,
+	.get_count = common_stack_get_count,
+	.free = common_stack_free
+};
+
+REGISTER_MEMPOOL_HANDLER(handler_stack);