[dpdk-dev,1/2] mempool: add stack (lifo) mempool handler

Message ID 1462472982-49782-2-git-send-email-david.hunt@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon

Commit Message

Hunt, David May 5, 2016, 6:29 p.m. UTC
  This is a mempool handler that is useful for pipelined applications,
where the per-lcore mempool cache doesn't really help: for example,
where we have one core doing RX (and alloc) and another core doing TX
(and free). In such a case the mempool ring simply cycles through all
the mbufs, resulting in an LLC miss on every mbuf allocated when the
number of mbufs is large. A stack recycles buffers more effectively in
this case.
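
For illustration, a minimal sketch of how an application could opt in
to this handler, using the rte_mempool_create_empty() /
rte_mempool_set_handler() flow from this series (pool name, NUM_MBUFS
and ELT_SIZE are placeholders):

	struct rte_mempool *mp;

	/* Create an empty pool and select the stack handler before
	 * the pool is populated with objects. */
	mp = rte_mempool_create_empty("stack_pool", NUM_MBUFS, ELT_SIZE,
			0 /* cache_size */, 0 /* priv_size */,
			rte_socket_id(), 0 /* flags */);
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "cannot create mempool\n");

	/* Freed objects now go on top of a LIFO and are handed back
	 * out first, so a small working set stays warm in cache. */
	rte_mempool_set_handler(mp, "stack");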

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 154 +++++++++++++++++++++++++++++++++
 2 files changed, 155 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c
  

Comments

Stephen Hemminger May 5, 2016, 9:28 p.m. UTC | #1
Overall, this is ok, but why can't it be the default?
Lots of little things should be cleaned up


> +struct rte_mempool_common_stack
> +{
> +	/* Spinlock to protect access */
> +	rte_spinlock_t sl;
> +
> +	uint32_t size;
> +	uint32_t len;
> +	void *objs[];
> +
> +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> +#endif

Useless; remove it.

> +};
> +
> +static void *
> +common_stack_alloc(struct rte_mempool *mp)
> +{
> +	struct rte_mempool_common_stack *s;
> +	char stack_name[RTE_RING_NAMESIZE];
> +	unsigned n = mp->size;
> +	int size = sizeof(*s) + (n+16)*sizeof(void*);
> +
> +	/* Allocate our local memory structure */
> +	snprintf(stack_name, sizeof(stack_name), "%s-common-stack", mp->name);
> +	s = rte_zmalloc_socket(stack_name,
The name passed to zmalloc is ignored in the current code; why bother making it unique?

> +			size,
> +			RTE_CACHE_LINE_SIZE,
> +			mp->socket_id);
> +	if (s == NULL) {
> +		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
> +		return NULL;
> +	}
> +
> +	/* And the spinlock we use to protect access */
> +	rte_spinlock_init(&s->sl);
> +
> +	s->size = n;
> +	mp->pool = (void *) s;
Since pool is void *, no need for a cast here.

> +	rte_mempool_set_handler(mp, "stack");
> +
> +	return (void *) s;
> +}
> +
> +static int common_stack_put(void *p, void * const *obj_table,
> +		unsigned n)
> +{
> +	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
> +	void **cache_objs;
> +	unsigned index;
> +
> +	/* Acquire lock */
Useless, obvious comment; about as good as the classic /* Add one to i */.
>
> +static int common_stack_get(void *p, void **obj_table,
> +		unsigned n)
> +{
> +	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;

Unnecessary cast; in C, a void * can be assigned to any pointer type.
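
For reference, a tiny standalone example of the point: unlike C++, C
converts void * to any object pointer type implicitly, so the cast is
pure noise.

	#include <stdlib.h>

	struct foo { int x; };

	int main(void)
	{
		void *p = malloc(sizeof(struct foo));
		struct foo *f = p;	/* implicit conversion, no cast */
		free(f);
		return 0;
	}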

> +	void **cache_objs;
> +	unsigned index, len;
> +
> +	/* Acquire lock */
Yet another useless comment.

> +	rte_spinlock_lock(&s->sl);
> +
> +	if(unlikely(n > s->len)) {
> +		rte_spinlock_unlock(&s->sl);
> +		return -ENOENT;
> +	}
> +
> +	cache_objs = s->objs;
> +
> +	for (index = 0, len = s->len - 1; index < n; ++index, len--, obj_table++)
> +		*obj_table = cache_objs[len];
> +
> +	s->len -= n;
> +	rte_spinlock_unlock(&s->sl);
> +	return n;
> +}
> +
> +static unsigned common_stack_get_count(void *p)
> +{
> +	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;

Another useless cast.

> +	return s->len;
> +}
> +
> +static void
> +common_stack_free(void *p)
> +{
> +	rte_free((struct rte_mempool_common_stack *)p);
Yet another useless cast.

> +}
> +
> +static struct rte_mempool_handler handler_stack = {
For security, any data structure with function pointers should be const.
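
A minimal sketch of the suggested one-word change (assuming the
MEMPOOL_REGISTER_HANDLER macro accepts a const-qualified table):

	/* const puts the function-pointer table in read-only memory,
	 * so a stray or malicious write cannot redirect the callbacks. */
	static const struct rte_mempool_handler handler_stack = {
		.name = "stack",
		.alloc = common_stack_alloc,
		.free = common_stack_free,
		.put = common_stack_put,
		.get = common_stack_get,
		.get_count = common_stack_get_count
	};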

> +	.name = "stack",
> +	.alloc = common_stack_alloc,
> +	.free = common_stack_free,
> +	.put = common_stack_put,
> +	.get = common_stack_get,
> +	.get_count = common_stack_get_count
> +};
> +
> +MEMPOOL_REGISTER_HANDLER(handler_stack);
  
Hunt, David May 19, 2016, 3:21 p.m. UTC | #2
On 5/5/2016 10:28 PM, Stephen Hemminger wrote:
> Overall, this is ok, but why can't it be the default?

Backward compatibility would probably be the main reason: keeping the
ring as the default has the least impact when existing applications
are recompiled.

> Lots of little things should be cleaned up

I've submitted a v2, and addressed all your issues. Thanks for the review.

Regards,
Dave.


--snip--
  

Patch

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index f19366e..5aa9ef8 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@  LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_handler.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_default.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..1874781
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,154 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <string.h>
+
+struct rte_mempool_common_stack
+{
+	/* Spinlock to protect access */
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#endif
+};
+
+static void *
+common_stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_common_stack *s;
+	char stack_name[RTE_RING_NAMESIZE];
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void*);
+
+	/* Allocate our local memory structure */
+	snprintf(stack_name, sizeof(stack_name), "%s-common-stack", mp->name);
+	s = rte_zmalloc_socket(stack_name,
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return NULL;
+	}
+
+	/* And the spinlock we use to protect access */
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool = (void *) s;
+	rte_mempool_set_handler(mp, "stack");
+
+	return (void *) s;
+}
+
+static int common_stack_put(void *p, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
+	void **cache_objs;
+	unsigned index;
+
+	/* Acquire lock */
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int common_stack_get(void *p, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
+	void **cache_objs;
+	unsigned index, len;
+
+	/* Acquire lock */
+	rte_spinlock_lock(&s->sl);
+
+	if(unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n; ++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned common_stack_get_count(void *p)
+{
+	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
+	return s->len;
+}
+
+static void
+common_stack_free(void *p)
+{
+	rte_free((struct rte_mempool_common_stack *)p);
+}
+
+static struct rte_mempool_handler handler_stack = {
+	.name = "stack",
+	.alloc = common_stack_alloc,
+	.free = common_stack_free,
+	.put = common_stack_put,
+	.get = common_stack_get,
+	.get_count = common_stack_get_count
+};
+
+MEMPOOL_REGISTER_HANDLER(handler_stack);