@@ -161,7 +161,6 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
n_get_bulk);
if (unlikely(ret < 0)) {
rte_mempool_dump(stdout, mp);
- rte_ring_dump(stdout, mp->ring);
/* in this case, objects are lost... */
return -1;
}
@@ -42,6 +42,9 @@ LIBABIVER := 1
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_handler.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_default.c
+
ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_dom0_mempool.c
endif
@@ -59,10 +59,11 @@
#include <rte_spinlock.h>
#include "rte_mempool.h"
+#include "rte_mempool_handler.h"
TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
-static struct rte_tailq_elem rte_mempool_tailq = {
+struct rte_tailq_elem rte_mempool_tailq = {
.name = "RTE_MEMPOOL",
};
EAL_REGISTER_TAILQ(rte_mempool_tailq)
@@ -149,7 +150,7 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
obj_init(mp, obj_init_arg, obj, obj_idx);
/* enqueue in ring */
- rte_ring_sp_enqueue(mp->ring, obj);
+ rte_mempool_ext_put_bulk(mp, &obj, 1);
}
uint32_t
@@ -420,117 +421,76 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
}
/*
+ * Common mempool create function.
* Create the mempool over already allocated chunk of memory.
* That external memory buffer can consists of physically disjoint pages.
* Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
- * */
-struct rte_mempool *
-rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ * which will call rte_mempool_ext_alloc to allocate the object memory.
+ * If it is an internal mempool handler, it will allocate space for the mempool
+ * and its elements as one big chunk of physically contiguous memory.
+ * If it is an external mempool handler, it will allocate space for the mempool
+ * and call rte_mempool_ext_alloc for the object memory.
+ */
+static struct rte_mempool *
+mempool_create(const char *name,
+ unsigned num_elt, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+ int socket_id, unsigned flags,
+ void *vaddr, const phys_addr_t paddr[],
+ uint32_t pg_num, uint32_t pg_shift,
+ const char *handler_name)
{
- char mz_name[RTE_MEMZONE_NAMESIZE];
- char rg_name[RTE_RING_NAMESIZE];
+ const struct rte_memzone *mz;
struct rte_mempool_list *mempool_list;
struct rte_mempool *mp = NULL;
struct rte_tailq_entry *te = NULL;
- struct rte_ring *r = NULL;
- const struct rte_memzone *mz;
- size_t mempool_size;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
- int rg_flags = 0;
- void *obj;
struct rte_mempool_objsz objsz;
- void *startaddr;
+ void *startaddr = NULL;
int page_size = getpagesize();
-
- /* compilation-time checks */
- RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
- RTE_CACHE_LINE_MASK) != 0);
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif
+ void *obj = NULL;
+ size_t mempool_size;
mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
/* asked cache too big */
if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
- CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- /* check that we have both VA and PA */
- if (vaddr != NULL && paddr == NULL) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- /* Check that pg_num and pg_shift parameters are valid. */
- if (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) {
+ CALC_CACHE_FLUSHTHRESH(cache_size) > num_elt) {
rte_errno = EINVAL;
return NULL;
}
- /* "no cache align" imply "no spread" */
- if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
- flags |= MEMPOOL_F_NO_SPREAD;
+ if (flags & MEMPOOL_F_INT_HANDLER) {
+ /* Check that pg_num and pg_shift parameters are valid. */
+ if (pg_num < RTE_DIM(mp->elt_pa) ||
+ pg_shift > MEMPOOL_PG_SHIFT_MAX) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
- /* ring flags */
- if (flags & MEMPOOL_F_SP_PUT)
- rg_flags |= RING_F_SP_ENQ;
- if (flags & MEMPOOL_F_SC_GET)
- rg_flags |= RING_F_SC_DEQ;
+ /* "no cache align" imply "no spread" */
+ if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ flags |= MEMPOOL_F_NO_SPREAD;
- /* calculate mempool object sizes. */
- if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
- rte_errno = EINVAL;
- return NULL;
+ /* calculate mempool object sizes. */
+ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
}
rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);
- /* allocate the ring that will be used to store objects */
- /* Ring functions will return appropriate errors if we are
- * running as a secondary process etc., so no checks made
- * in this function for that condition */
- snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name);
- r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
- if (r == NULL)
- goto exit_unlock;
-
/*
* reserve a memory zone for this mempool: private data is
* cache-aligned
*/
- private_data_size = (private_data_size +
- RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
+ private_data_size = RTE_ALIGN_CEIL(private_data_size,
+ RTE_MEMPOOL_ALIGN);
- if (! rte_eal_has_hugepages()) {
- /*
- * expand private data size to a whole page, so that the
- * first pool element will start on a new standard page
- */
- int head = sizeof(struct rte_mempool);
- int new_size = (private_data_size + head) % page_size;
- if (new_size) {
- private_data_size += page_size - new_size;
- }
- }
/* try to allocate tailq entry */
te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
@@ -539,23 +499,51 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
goto exit_unlock;
}
- /*
- * If user provided an external memory buffer, then use it to
- * store mempool objects. Otherwise reserve a memzone that is large
- * enough to hold mempool header and metadata plus mempool objects.
- */
- mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
- mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
- if (vaddr == NULL)
- mempool_size += (size_t)objsz.total_size * n;
+ if (flags & MEMPOOL_F_INT_HANDLER) {
+ if (!rte_eal_has_hugepages()) {
+ /*
+ * expand private data size to a whole page, so that the
+ * first pool element will start on a new standard page
+ */
+ int head = sizeof(struct rte_mempool);
+ int new_size = (private_data_size + head) % page_size;
+
+ if (new_size)
+ private_data_size += page_size - new_size;
+ }
+
- if (! rte_eal_has_hugepages()) {
/*
- * we want the memory pool to start on a page boundary,
- * because pool elements crossing page boundaries would
- * result in discontiguous physical addresses
+ * If user provided an external memory buffer, then use it to
+ * store mempool objects. Otherwise reserve a memzone that is
+ * large enough to hold mempool header and metadata plus
+ * mempool objects
*/
- mempool_size += page_size;
+ mempool_size =
+ MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
+ mempool_size =
+ RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
+ if (vaddr == NULL)
+ mempool_size += (size_t)objsz.total_size * num_elt;
+
+ if (!rte_eal_has_hugepages()) {
+ /*
+ * we want the memory pool to start on a page boundary,
+ * because pool elements crossing page boundaries would
+ * result in discontiguous physical addresses
+ */
+ mempool_size += page_size;
+ }
+ } else {
+ /*
+ * If user provided an external memory buffer, then use it to
+ * store mempool objects. Otherwise reserve a memzone that is
+ * large enough to hold mempool header and metadata plus
+ * mempool objects
+ */
+ mempool_size = sizeof(*mp) + private_data_size;
+ mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
}
snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
@@ -564,16 +552,22 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
if (mz == NULL)
goto exit_unlock;
- if (rte_eal_has_hugepages()) {
- startaddr = (void*)mz->addr;
- } else {
- /* align memory pool start address on a page boundary */
- unsigned long addr = (unsigned long)mz->addr;
- if (addr & (page_size - 1)) {
- addr += page_size;
- addr &= ~(page_size - 1);
+ if (flags & MEMPOOL_F_INT_HANDLER) {
+ if (rte_eal_has_hugepages()) {
+ startaddr = (void *)mz->addr;
+ } else {
+ /* align memory pool start address on a page boundary */
+ unsigned long addr = (unsigned long)mz->addr;
+
+ if (addr & (page_size - 1)) {
+ addr += page_size;
+ addr &= ~(page_size - 1);
+ }
+ startaddr = (void *)addr;
}
- startaddr = (void*)addr;
+ } else {
+ startaddr = (void *)mz->addr;
}
/* init the mempool structure */
@@ -581,8 +575,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
memset(mp, 0, sizeof(*mp));
snprintf(mp->name, sizeof(mp->name), "%s", name);
mp->phys_addr = mz->phys_addr;
- mp->ring = r;
- mp->size = n;
+ mp->size = num_elt;
mp->flags = flags;
mp->elt_size = objsz.elt_size;
mp->header_size = objsz.header_size;
@@ -591,35 +584,52 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
mp->private_data_size = private_data_size;
- /* calculate address of the first element for continuous mempool. */
- obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
- private_data_size;
- obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
-
- /* populate address translation fields. */
- mp->pg_num = pg_num;
- mp->pg_shift = pg_shift;
- mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
+ mp->handler_idx = rte_get_mempool_handler_idx(handler_name);
+ if (mp->handler_idx < 0) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot find mempool handler by name!\n");
+ goto exit_unlock;
+ }
- /* mempool elements allocated together with mempool */
- if (vaddr == NULL) {
- mp->elt_va_start = (uintptr_t)obj;
- mp->elt_pa[0] = mp->phys_addr +
- (mp->elt_va_start - (uintptr_t)mp);
+ if (flags & MEMPOOL_F_INT_HANDLER) {
+ /* calculate address of first element for continuous mempool. */
+ obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
+ private_data_size;
+ obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
+
+ /* populate address translation fields. */
+ mp->pg_num = pg_num;
+ mp->pg_shift = pg_shift;
+ mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
+
+ /* mempool elements allocated together with mempool */
+ if (vaddr == NULL) {
+ mp->elt_va_start = (uintptr_t)obj;
+ mp->elt_pa[0] = mp->phys_addr +
+ (mp->elt_va_start - (uintptr_t)mp);
+ /* mempool elements in a separate chunk of memory. */
+ } else {
+ mp->elt_va_start = (uintptr_t)vaddr;
+ memcpy(mp->elt_pa, paddr,
+ sizeof(mp->elt_pa[0]) * pg_num);
+ }
- /* mempool elements in a separate chunk of memory. */
- } else {
- mp->elt_va_start = (uintptr_t)vaddr;
- memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
+ mp->elt_va_end = mp->elt_va_start;
}
- mp->elt_va_end = mp->elt_va_start;
+ /* Parameters are set up; call the mempool handler's alloc function */
+ mp->pool =
+ rte_mempool_ext_alloc(mp, name, num_elt, socket_id, flags);
+ if (mp->pool == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Failed to alloc mempool!\n");
+ goto exit_unlock;
+ }
/* call the initializer */
if (mp_init)
mp_init(mp, mp_init_arg);
- mempool_populate(mp, n, 1, obj_init, obj_init_arg);
+ if (obj_init)
+ mempool_populate(mp, num_elt, 1, obj_init, obj_init_arg);
te->data = (void *) mp;
@@ -632,19 +642,83 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
exit_unlock:
rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
- rte_ring_free(r);
rte_free(te);
return NULL;
}
+/* Create the mempool over already allocated chunk of memory */
+struct rte_mempool *
+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags, void *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+{
+ struct rte_mempool *mp = NULL;
+ char handler_name[RTE_MEMPOOL_NAMESIZE];
+
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
+ RTE_CACHE_LINE_MASK) != 0);
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+
+ /* check that we have both VA and PA */
+ if (vaddr != NULL && paddr == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /*
+ * Since there are 4 single/multi producer/consumer combinations of the
+ * ring handler, examine the flags to select the correct handler name,
+ * which in turn gives the index into the handler table. Other handlers,
+ * such as a stack-based one, are selected explicitly by name through
+ * rte_mempool_create_ext.
+ */
+ if ((flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) ==
+ (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET))
+ snprintf(handler_name, sizeof(handler_name), "%s", "ring_sp_sc");
+ else if (flags & MEMPOOL_F_SP_PUT)
+ snprintf(handler_name, sizeof(handler_name), "%s", "ring_sp_mc");
+ else if (flags & MEMPOOL_F_SC_GET)
+ snprintf(handler_name, sizeof(handler_name), "%s", "ring_mp_sc");
+ else
+ snprintf(handler_name, sizeof(handler_name), "%s", "ring_mp_mc");
+
+ flags |= MEMPOOL_F_INT_HANDLER;
+
+ mp = mempool_create(name,
+ n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id,
+ flags,
+ vaddr, paddr,
+ pg_num, pg_shift,
+ handler_name);
+
+ return mp;
+}
+
/* Return the number of entries in the mempool */
unsigned
rte_mempool_count(const struct rte_mempool *mp)
{
unsigned count;
- count = rte_ring_count(mp->ring);
+ count = rte_mempool_ext_get_count(mp);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
{
@@ -800,7 +874,6 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
fprintf(f, " flags=%x\n", mp->flags);
- fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring);
fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr);
fprintf(f, " size=%"PRIu32"\n", mp->size);
fprintf(f, " header_size=%"PRIu32"\n", mp->header_size);
@@ -823,7 +896,7 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
mp->size);
cache_count = rte_mempool_dump_cache(f, mp);
- common_count = rte_ring_count(mp->ring);
+ common_count = rte_mempool_ext_get_count(mp);
if ((cache_count + common_count) > mp->size)
common_count = mp->size - cache_count;
fprintf(f, " common_pool_count=%u\n", common_count);
@@ -917,3 +990,30 @@ void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
}
+
+/* create the mempool using an external mempool manager */
+struct rte_mempool *
+rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags,
+ const char *handler_name)
+{
+ struct rte_mempool *mp = NULL;
+
+ mp = mempool_create(name,
+ n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags,
+ NULL, NULL, /* vaddr, paddr */
+ 0, 0, /* pg_num, pg_shift, */
+ handler_name);
+
+ return mp;
+}
@@ -175,12 +175,93 @@ struct rte_mempool_objtlr {
#endif
};
+/* Handler functions for external mempool support */
+typedef void *(*rte_mempool_alloc_t)(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags);
+typedef int (*rte_mempool_put_t)(void *p,
+ void * const *obj_table, unsigned n);
+typedef int (*rte_mempool_get_t)(void *p, void **obj_table,
+ unsigned n);
+typedef unsigned (*rte_mempool_get_count)(void *p);
+typedef int (*rte_mempool_free_t)(struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for external mempool manager alloc callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param name
+ * Name of the memory pool.
+ * @param n
+ * Number of objects in the mempool.
+ * @param socket_id
+ * socket id on which to allocate.
+ * @param flags
+ *   General flags to the allocate function (MEMPOOL_F_* flags).
+ * @return
+ *   Pointer to the opaque pool data returned by the handler on success;
+ *   NULL on error.
+ */
+void *
+rte_mempool_ext_alloc(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags);
+
+/**
+ * @internal wrapper for external mempool manager get callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to get
+ * @return
+ * - >=0: Success; got n number of objects
+ * - <0: Error; code of handler get function.
+ */
+int
+rte_mempool_ext_get_bulk(struct rte_mempool *mp, void **obj_table,
+ unsigned n);
+
+/**
+ * @internal wrapper for external mempool manager put callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to put
+ * @return
+ * - >=0: Success; number of objects supplied.
+ * - <0: Error; code of handler put function.
+ */
+int
+rte_mempool_ext_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n);
+
+/**
+ * @internal wrapper for external mempool manager get_count callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ */
+unsigned
+rte_mempool_ext_get_count(const struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for external mempool manager free callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @return
+ *   The return value of the handler free callback.
+ */
+int
+rte_mempool_ext_free(struct rte_mempool *mp);
+
/**
* The RTE mempool structure.
*/
struct rte_mempool {
char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
- struct rte_ring *ring; /**< Ring to store objects. */
phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
int flags; /**< Flags of the mempool. */
uint32_t size; /**< Size of the mempool. */
@@ -194,6 +275,18 @@ struct rte_mempool {
unsigned private_data_size; /**< Size of private data. */
+ void *pool; /**< Opaque pointer to the common pool data from the handler. */
+
+ /*
+ * Index into the array of structs containing callback fn pointers.
+ * We're using an index here rather than pointers to the callbacks
+ * to facilitate any secondary processes that may want to use
+ * this mempool. Any function pointers stored in the mempool
+ * directly would not be valid for secondary processes.
+ */
+ int16_t handler_idx;
+
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/** Per-lcore local cache. */
struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
@@ -223,6 +316,8 @@ struct rte_mempool {
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
+#define MEMPOOL_F_INT_HANDLER 0x0020 /**< Using internal mempool handler */
+
/**
* @internal When debug is enabled, store some statistics.
@@ -728,7 +823,6 @@ rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
-
/**
* Dump the status of the mempool to the console.
*
@@ -753,7 +847,7 @@ void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
*/
static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, int is_mp)
+ unsigned n, __rte_unused int is_mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
struct rte_mempool_cache *cache;
@@ -793,10 +887,15 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
cache->len += n;
- if (cache->len >= flushthresh) {
- rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
+ if (unlikely(cache->len >= flushthresh)) {
+ rte_mempool_ext_put_bulk(mp, &cache->objs[cache_size],
cache->len - cache_size);
cache->len = cache_size;
+ /*
+ * Increment stats counter to tell us how many pool puts
+ * happened
+ */
+ __MEMPOOL_STAT_ADD(mp, put_pool, n);
}
return;
@@ -804,22 +903,10 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
ring_enqueue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
- /* push remaining objects in ring */
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- if (is_mp) {
- if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
- rte_panic("cannot put objects in mempool\n");
- }
- else {
- if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
- rte_panic("cannot put objects in mempool\n");
- }
-#else
- if (is_mp)
- rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
- else
- rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
-#endif
+ /* Increment stats counter to tell us how many pool puts happened */
+ __MEMPOOL_STAT_ADD(mp, put_pool, n);
+
+ rte_mempool_ext_put_bulk(mp, obj_table, n);
}
@@ -943,7 +1030,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
*/
static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
- unsigned n, int is_mc)
+ unsigned n, __rte_unused int is_mc)
{
int ret;
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
@@ -967,7 +1054,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
uint32_t req = n + (cache_size - cache->len);
/* How many do we require i.e. number to fill the cache + the request */
- ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+ ret = rte_mempool_ext_get_bulk(mp,
+ &cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
/*
* In the offchance that we are buffer constrained,
@@ -995,10 +1083,7 @@ ring_dequeue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* get remaining objects from ring */
- if (is_mc)
- ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
- else
- ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+ ret = rte_mempool_ext_get_bulk(mp, obj_table, n);
if (ret < 0)
__MEMPOOL_STAT_ADD(mp, get_fail, n);
@@ -1401,6 +1486,80 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
void *arg);
+/**
+ * Function to get the name of a mempool handler
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The name of the mempool handler
+ */
+char *rte_mempool_get_handler_name(struct rte_mempool *mp);
+
+/**
+ * Create a new mempool named *name* in memory.
+ *
+ * This function uses an externally defined alloc callback to allocate memory.
+ * Its size is set to n elements.
+ * All elements of the mempool are allocated separately from the mempool header.
+ *
+ * @param name
+ * The name of the mempool.
+ * @param n
+ * The number of elements in the mempool. The optimum size (in terms of
+ * memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param elt_size
+ *   The size of each element.
+ * @param cache_size
+ * If cache_size is non-zero, the rte_mempool library will try to
+ * limit the accesses to the common lockless pool, by maintaining a
+ * per-lcore object cache. This argument must be lower or equal to
+ * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
+ * cache_size to have "n modulo cache_size == 0": if this is
+ * not the case, some elements will always stay in the pool and will
+ * never be used. The access to the per-lcore table is of course
+ * faster than the multi-producer/consumer pool. The cache can be
+ * disabled if the cache_size argument is set to 0; it can be useful to
+ * avoid losing objects in cache. Note that even if not used, the
+ * memory space for cache is always reserved in a mempool structure,
+ * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ * The size of the private data appended after the mempool
+ * structure. This is useful for storing some private data after the
+ * mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ * A function pointer that is called for initialization of the pool,
+ * before object initialization. The user can initialize the private
+ * data in this function if needed. This parameter can be NULL if
+ * not needed.
+ * @param mp_init_arg
+ * An opaque pointer to data that can be used in the mempool
+ * constructor function.
+ * @param obj_init
+ * A function pointer that is called for each object at
+ * initialization of the pool. The user can set some meta data in
+ * objects if needed. This parameter can be NULL if not needed.
+ * The obj_init() function takes the mempool pointer, the init_arg,
+ * the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ * An opaque pointer to data that can be used as an argument for
+ * each call to the object constructor function.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ *   General flags to the allocate function (MEMPOOL_F_* flags).
+ * @param handler_name
+ *   The name of a registered mempool handler to be used for this mempool,
+ *   e.g. one of the default ring handlers such as "ring_mp_mc".
+ * @return
+ * The pointer to the new allocated mempool, on success. NULL on error
+ */
+struct rte_mempool *
+rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags,
+ const char *handler_name);
+
#ifdef __cplusplus
}
#endif
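For illustration only (not part of the patch), a minimal call sketch for the new rte_mempool_create_ext API, assuming the default "ring_mp_mc" handler registered in rte_mempool_default.c below; the pool name and sizes are arbitrary:

/* Hypothetical usage sketch, not part of the patch. */
#include <rte_mempool.h>

static struct rte_mempool *example_pool;

static int
example_pool_init(void)
{
	example_pool = rte_mempool_create_ext("example_pool",
			8191,			/* n: number of elements */
			2048,			/* elt_size */
			256,			/* per-lcore cache size */
			0,			/* private_data_size */
			NULL, NULL,		/* mp_init, mp_init_arg */
			NULL, NULL,		/* obj_init, obj_init_arg */
			SOCKET_ID_ANY, 0,	/* socket_id, flags */
			"ring_mp_mc");		/* registered handler name */
	if (example_pool == NULL)
		return -1;
	return 0;
}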
new file mode 100644
@@ -0,0 +1,136 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <string.h>
+
+#include "rte_mempool.h"
+#include "rte_mempool_handler.h"
+
+static int
+common_ring_mp_put(void *p, void * const *obj_table, unsigned n)
+{
+ return rte_ring_mp_enqueue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static int
+common_ring_sp_put(void *p, void * const *obj_table, unsigned n)
+{
+ return rte_ring_sp_enqueue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static int
+common_ring_mc_get(void *p, void **obj_table, unsigned n)
+{
+ return rte_ring_mc_dequeue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static int
+common_ring_sc_get(void *p, void **obj_table, unsigned n)
+{
+ return rte_ring_sc_dequeue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static unsigned
+common_ring_get_count(void *p)
+{
+ return rte_ring_count((struct rte_ring *)p);
+}
+
+
+static void *
+common_ring_alloc(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags)
+{
+ struct rte_ring *r;
+ char rg_name[RTE_RING_NAMESIZE];
+ int rg_flags = 0;
+
+ if (flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ /* allocate the ring that will be used to store objects */
+ /* Ring functions will return appropriate errors if we are
+ * running as a secondary process etc., so no checks made
+ * in this function for that condition */
+ snprintf(rg_name, sizeof(rg_name), "%s-ring", name);
+ r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
+ if (r == NULL)
+ return NULL;
+
+ mp->pool = r;
+
+ return r;
+}
+
+static struct rte_mempool_handler handler_mp_mc = {
+ .name = "ring_mp_mc",
+ .alloc = common_ring_alloc,
+ .put = common_ring_mp_put,
+ .get = common_ring_mc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+static struct rte_mempool_handler handler_sp_sc = {
+ .name = "ring_sp_sc",
+ .alloc = common_ring_alloc,
+ .put = common_ring_sp_put,
+ .get = common_ring_sc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+static struct rte_mempool_handler handler_mp_sc = {
+ .name = "ring_mp_sc",
+ .alloc = common_ring_alloc,
+ .put = common_ring_mp_put,
+ .get = common_ring_sc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+static struct rte_mempool_handler handler_sp_mc = {
+ .name = "ring_sp_mc",
+ .alloc = common_ring_alloc,
+ .put = common_ring_sp_put,
+ .get = common_ring_mc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+
+REGISTER_MEMPOOL_HANDLER(handler_mp_mc);
+REGISTER_MEMPOOL_HANDLER(handler_sp_sc);
+REGISTER_MEMPOOL_HANDLER(handler_mp_sc);
+REGISTER_MEMPOOL_HANDLER(handler_sp_mc);
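To show how a third-party manager would plug into the same jump table, here is a hedged sketch of a hypothetical spinlock-protected LIFO ("stack") handler. All "stack_" names are invented for this illustration; only struct rte_mempool_handler, the callback typedefs and REGISTER_MEMPOOL_HANDLER come from the patch, and object provisioning/population is omitted for brevity. Like common_ring_alloc above, the alloc callback stores its opaque pool pointer in mp->pool.

/* Illustrative sketch only, not part of the patch. */
#include <errno.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_mempool.h>
#include "rte_mempool_handler.h"

struct stack_pool {
	rte_spinlock_t sl;	/* protects len and objs[] */
	unsigned len;		/* number of objects currently stored */
	unsigned size;		/* capacity */
	void *objs[];		/* object pointers, LIFO order */
};

static void *
stack_alloc(struct rte_mempool *mp, const char *name,
	unsigned n, int socket_id, unsigned flags)
{
	struct stack_pool *s;

	(void)name;
	(void)flags;
	s = rte_zmalloc_socket("stack_pool", sizeof(*s) + n * sizeof(void *),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (s == NULL)
		return NULL;
	rte_spinlock_init(&s->sl);
	s->size = n;
	mp->pool = s;
	return s;
}

static int
stack_put(void *p, void * const *obj_table, unsigned n)
{
	struct stack_pool *s = p;
	unsigned i;

	rte_spinlock_lock(&s->sl);
	if (s->len + n > s->size) {
		rte_spinlock_unlock(&s->sl);
		return -ENOBUFS;
	}
	for (i = 0; i < n; i++)
		s->objs[s->len++] = obj_table[i];
	rte_spinlock_unlock(&s->sl);
	return 0;
}

static int
stack_get(void *p, void **obj_table, unsigned n)
{
	struct stack_pool *s = p;
	unsigned i;

	rte_spinlock_lock(&s->sl);
	if (n > s->len) {
		rte_spinlock_unlock(&s->sl);
		return -ENOENT;
	}
	for (i = 0; i < n; i++)
		obj_table[i] = s->objs[--s->len];
	rte_spinlock_unlock(&s->sl);
	return 0;
}

static unsigned
stack_get_count(void *p)
{
	struct stack_pool *s = p;

	return s->len;
}

static struct rte_mempool_handler handler_stack = {
	.name = "stack",
	.alloc = stack_alloc,
	.put = stack_put,
	.get = stack_get,
	.get_count = stack_get_count,
	.free = NULL
};

REGISTER_MEMPOOL_HANDLER(handler_stack);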
new file mode 100644
@@ -0,0 +1,140 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <string.h>
+
+#include "rte_mempool.h"
+#include "rte_mempool_handler.h"
+
+/*
+ * Indirect jump table to support external memory pools
+ */
+struct rte_mempool_handler_list mempool_handler_list = {
+ .sl = RTE_SPINLOCK_INITIALIZER,
+ .num_handlers = 0
+};
+
+/*
+ * Return the name of the handler used by this mempool
+ */
+char *
+rte_mempool_get_handler_name(struct rte_mempool *mp)
+{
+ return mempool_handler_list.handler[mp->handler_idx].name;
+}
+
+int16_t
+rte_mempool_register_handler(struct rte_mempool_handler *h)
+{
+ int16_t handler_idx;
+
+ /* serialize access to the handler list while registering */
+ rte_spinlock_lock(&mempool_handler_list.sl);
+
+ /* Check whether jump table has space */
+ if (mempool_handler_list.num_handlers >= RTE_MEMPOOL_MAX_HANDLER_IDX) {
+ rte_spinlock_unlock(&mempool_handler_list.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Maximum number of mempool handlers exceeded\n");
+ return -1;
+ }
+
+ if ((h->put == NULL) || (h->get == NULL) ||
+ (h->get_count == NULL)) {
+ rte_spinlock_unlock(&mempool_handler_list.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Missing callback while registering mempool handler\n");
+ return -1;
+ }
+
+ /* add new handler index */
+ handler_idx = mempool_handler_list.num_handlers++;
+
+ snprintf(mempool_handler_list.handler[handler_idx].name,
+ RTE_MEMPOOL_NAMESIZE, "%s", h->name);
+ mempool_handler_list.handler[handler_idx].alloc = h->alloc;
+ mempool_handler_list.handler[handler_idx].put = h->put;
+ mempool_handler_list.handler[handler_idx].get = h->get;
+ mempool_handler_list.handler[handler_idx].get_count = h->get_count;
+ mempool_handler_list.handler[handler_idx].free = h->free;
+
+ rte_spinlock_unlock(&mempool_handler_list.sl);
+
+ return handler_idx;
+}
+
+int16_t
+rte_get_mempool_handler_idx(const char *name)
+{
+ int16_t i;
+
+ for (i = 0; i < mempool_handler_list.num_handlers; i++) {
+ if (!strcmp(name, mempool_handler_list.handler[i].name))
+ return i;
+ }
+ return -1;
+}
+
+void *
+rte_mempool_ext_alloc(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags)
+{
+ if (mempool_handler_list.handler[mp->handler_idx].alloc) {
+ return (mempool_handler_list.handler[mp->handler_idx].alloc)
+ (mp, name, n, socket_id, flags);
+ }
+ return NULL;
+}
+
+int
+rte_mempool_ext_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return (mempool_handler_list.handler[mp->handler_idx].get)
+ (mp->pool, obj_table, n);
+}
+
+int
+rte_mempool_ext_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return (mempool_handler_list.handler[mp->handler_idx].put)
+ (mp->pool, obj_table, n);
+}
+
+unsigned
+rte_mempool_ext_get_count(const struct rte_mempool *mp)
+{
+ return (mempool_handler_list.handler[mp->handler_idx].get_count)
+ (mp->pool);
+}
new file mode 100644
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMPOOL_INTERNAL_H_
+#define _RTE_MEMPOOL_INTERNAL_H_
+
+#include <rte_spinlock.h>
+#include <rte_mempool.h>
+
+#define RTE_MEMPOOL_MAX_HANDLER_IDX 16
+
+struct rte_mempool_handler {
+ char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool handler */
+
+ rte_mempool_alloc_t alloc;       /**< Allocate the external pool. */
+
+ rte_mempool_get_count get_count; /**< Return the number of available objects. */
+
+ rte_mempool_free_t free;         /**< Free the external pool. */
+
+ rte_mempool_put_t put;           /**< Put objects into the external pool. */
+
+ rte_mempool_get_t get;           /**< Get objects from the external pool. */
+} __rte_cache_aligned;
+
+struct rte_mempool_handler_list {
+ rte_spinlock_t sl; /**< Spinlock for add/delete. */
+
+ int32_t num_handlers; /**< Number of handlers that are valid. */
+
+ /* storage for all possible handlers */
+ struct rte_mempool_handler handler[RTE_MEMPOOL_MAX_HANDLER_IDX];
+};
+
+int16_t rte_mempool_register_handler(struct rte_mempool_handler *h);
+int16_t rte_get_mempool_handler_idx(const char *name);
+
+#define REGISTER_MEMPOOL_HANDLER(h) \
+static int16_t __attribute__((used)) testfn_##h(void);\
+int16_t __attribute__((constructor, used)) testfn_##h(void)\
+{\
+ return rte_mempool_register_handler(&h);\
+}
+
+#endif /* _RTE_MEMPOOL_INTERNAL_H_ */
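For clarity, REGISTER_MEMPOOL_HANDLER(handler_mp_mc) as used in rte_mempool_default.c expands to roughly the following, so each handler registers itself through a constructor before main() runs:

static int16_t __attribute__((used)) testfn_handler_mp_mc(void);
int16_t __attribute__((constructor, used)) testfn_handler_mp_mc(void)
{
	return rte_mempool_register_handler(&handler_mp_mc);
}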
@@ -6,6 +6,7 @@ DPDK_2.0 {
rte_mempool_calc_obj_size;
rte_mempool_count;
rte_mempool_create;
+ rte_mempool_create_ext;
rte_mempool_dump;
rte_mempool_list_dump;
rte_mempool_lookup;