[dpdk-dev,1/6] mempool: add external mempool manager support
Commit Message
Adds the new rte_mempool_create_ext API and a callback mechanism for
external mempool handlers.
Modifies the existing rte_mempool_create to set handler_idx to the
relevant mempool handler based on the handler name:
ring_sp_sc
ring_mp_mc
ring_sp_mc
ring_mp_sc
v2: merges the duplicated code in rte_mempool_xmem_create and
rte_mempool_create_ext into one common function. The old functions
now call the new common function with the relevant parameters.
Signed-off-by: David Hunt <david.hunt@intel.com>
---
app/test/test_mempool_perf.c | 1 -
lib/librte_mempool/Makefile | 2 +
lib/librte_mempool/rte_mempool.c | 383 ++++++++++++++++++-----------
lib/librte_mempool/rte_mempool.h | 200 ++++++++++++---
lib/librte_mempool/rte_mempool_default.c | 236 ++++++++++++++++++
lib/librte_mempool/rte_mempool_internal.h | 75 ++++++
lib/librte_mempool/rte_mempool_version.map | 1 +
7 files changed, 717 insertions(+), 181 deletions(-)
create mode 100644 lib/librte_mempool/rte_mempool_default.c
create mode 100644 lib/librte_mempool/rte_mempool_internal.h
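For illustration, the new API would be invoked roughly as follows. This is a
minimal usage sketch based on the rte_mempool_create_ext prototype added to
rte_mempool.h by this patch; the pool name, element count/size and the
"ring_mp_mc" handler choice are placeholder values, and the constructor
callbacks are left NULL:

#include <rte_mempool.h>

static struct rte_mempool *
create_example_pool(int socket_id)
{
	/* pool of 8191 (2^13 - 1) objects of 2048 bytes, backed by the
	 * default multi-producer/multi-consumer ring handler */
	return rte_mempool_create_ext("example_pool",
			8191, 2048,
			256, 0,            /* cache_size, private_data_size */
			NULL, NULL,        /* mp_init, mp_init_arg */
			NULL, NULL,        /* obj_init, obj_init_arg */
			socket_id, 0,      /* flags */
			"ring_mp_mc");     /* handler name */
}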
Comments
Hello David,
(I wanted to reply to the 0/6 patch but I couldn't find it anywhere in
mbox format, nor on gmane, which is strange.)
I've had a quick look at both versions of the patch series. I've got only
one question at the moment. Would it be possible to somehow integrate
calls to the dma_map/unmap_* interface of the kernel? I think this could
be implemented through the vfio iommu group zero (I hope so).
The issue is that without being able to explicitly flush buffers before
a DMA transmission, it is not easily possible to use the mempool
manager on most ARMv7 chips. It can be done by calling
dma_alloc_coherent and passing this memory into the mempool manager, but
such memory is very slow (it is non-cacheable).
Regards
Jan
On Tue, 16 Feb 2016 14:48:10 +0000
David Hunt <david.hunt@intel.com> wrote:
> Adds the new rte_mempool_create_ext api and callback mechanism for
> external mempool handlers
>
> Modifies the existing rte_mempool_create to set up the handler_idx to
> the relevant mempool handler based on the handler name:
> ring_sp_sc
> ring_mp_mc
> ring_sp_mc
> ring_mp_sc
>
> v2: merges the duplicated code in rte_mempool_xmem_create and
> rte_mempool_create_ext into one common function. The old functions
> now call the new common function with the relevant parameters.
>
> Signed-off-by: David Hunt <david.hunt@intel.com>
>
> ---
> app/test/test_mempool_perf.c | 1 -
> lib/librte_mempool/Makefile | 2 +
> lib/librte_mempool/rte_mempool.c | 383 ++++++++++++++++++-----------
> lib/librte_mempool/rte_mempool.h | 200 ++++++++++++---
> lib/librte_mempool/rte_mempool_default.c | 236 ++++++++++++++++++
> lib/librte_mempool/rte_mempool_internal.h | 75 ++++++
> lib/librte_mempool/rte_mempool_version.map | 1 +
> 7 files changed, 717 insertions(+), 181 deletions(-)
> create mode 100644 lib/librte_mempool/rte_mempool_default.c
> create mode 100644 lib/librte_mempool/rte_mempool_internal.h
>
> diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
> index cdc02a0..091c1df 100644
> --- a/app/test/test_mempool_perf.c
> +++ b/app/test/test_mempool_perf.c
> @@ -161,7 +161,6 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
> n_get_bulk);
> if (unlikely(ret < 0)) {
> rte_mempool_dump(stdout, mp);
> - rte_ring_dump(stdout, mp->ring);
> /* in this case, objects are lost... */
> return -1;
> }
> diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
> index a6898ef..aeaffd1 100644
> --- a/lib/librte_mempool/Makefile
> +++ b/lib/librte_mempool/Makefile
> @@ -42,6 +42,8 @@ LIBABIVER := 1
>
> # all source are stored in SRCS-y
> SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
> +SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_default.c
> +
> ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
> SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_dom0_mempool.c
> endif
> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> index aff5f6d..a577a3e 100644
> --- a/lib/librte_mempool/rte_mempool.c
> +++ b/lib/librte_mempool/rte_mempool.c
> @@ -59,10 +59,11 @@
> #include <rte_spinlock.h>
>
> #include "rte_mempool.h"
> +#include "rte_mempool_internal.h"
>
> TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
>
> -static struct rte_tailq_elem rte_mempool_tailq = {
> +struct rte_tailq_elem rte_mempool_tailq = {
> .name = "RTE_MEMPOOL",
> };
> EAL_REGISTER_TAILQ(rte_mempool_tailq)
> @@ -149,7 +150,7 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
> obj_init(mp, obj_init_arg, obj, obj_idx);
>
> /* enqueue in ring */
> - rte_ring_sp_enqueue(mp->ring, obj);
> + rte_mempool_ext_put_bulk(mp, &obj, 1);
> }
>
> uint32_t
> @@ -375,26 +376,6 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
> return usz;
> }
>
> -#ifndef RTE_LIBRTE_XEN_DOM0
> -/* stub if DOM0 support not configured */
> -struct rte_mempool *
> -rte_dom0_mempool_create(const char *name __rte_unused,
> - unsigned n __rte_unused,
> - unsigned elt_size __rte_unused,
> - unsigned cache_size __rte_unused,
> - unsigned private_data_size __rte_unused,
> - rte_mempool_ctor_t *mp_init __rte_unused,
> - void *mp_init_arg __rte_unused,
> - rte_mempool_obj_ctor_t *obj_init __rte_unused,
> - void *obj_init_arg __rte_unused,
> - int socket_id __rte_unused,
> - unsigned flags __rte_unused)
> -{
> - rte_errno = EINVAL;
> - return NULL;
> -}
> -#endif
> -
> /* create the mempool */
> struct rte_mempool *
> rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
> @@ -420,117 +401,76 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
> }
>
> /*
> + * Common mempool create function.
> * Create the mempool over already allocated chunk of memory.
> * That external memory buffer can consists of physically disjoint pages.
> * Setting vaddr to NULL, makes mempool to fallback to original behaviour
> - * and allocate space for mempool and it's elements as one big chunk of
> - * physically continuos memory.
> - * */
> -struct rte_mempool *
> -rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
> + * which will call rte_mempool_ext_alloc to allocate the object memory.
> + * If it is an intenal mempool handler, it will allocate space for mempool
> + * and it's elements as one big chunk of physically continuous memory.
> + * If it is an external mempool handler, it will allocate space for mempool
> + * and call the rte_mempool_ext_alloc for the object memory.
> + */
> +static struct rte_mempool *
> +mempool_create(const char *name,
> + unsigned num_elt, unsigned elt_size,
> unsigned cache_size, unsigned private_data_size,
> rte_mempool_ctor_t *mp_init, void *mp_init_arg,
> rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
> - int socket_id, unsigned flags, void *vaddr,
> - const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
> + int socket_id, unsigned flags,
> + void *vaddr, const phys_addr_t paddr[],
> + uint32_t pg_num, uint32_t pg_shift,
> + const char *handler_name)
> {
> - char mz_name[RTE_MEMZONE_NAMESIZE];
> - char rg_name[RTE_RING_NAMESIZE];
> + const struct rte_memzone *mz;
> struct rte_mempool_list *mempool_list;
> struct rte_mempool *mp = NULL;
> struct rte_tailq_entry *te;
> - struct rte_ring *r;
> - const struct rte_memzone *mz;
> - size_t mempool_size;
> + char mz_name[RTE_MEMZONE_NAMESIZE];
> int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
> - int rg_flags = 0;
> - void *obj;
> struct rte_mempool_objsz objsz;
> - void *startaddr;
> + void *startaddr = NULL;
> int page_size = getpagesize();
> -
> - /* compilation-time checks */
> - RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
> - RTE_CACHE_LINE_MASK) != 0);
> -#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> - RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
> - RTE_CACHE_LINE_MASK) != 0);
> - RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
> - RTE_CACHE_LINE_MASK) != 0);
> -#endif
> -#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> - RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
> - RTE_CACHE_LINE_MASK) != 0);
> - RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
> - RTE_CACHE_LINE_MASK) != 0);
> -#endif
> + void *obj = NULL;
> + size_t mempool_size;
>
> mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
>
> /* asked cache too big */
> if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
> - CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
> + CALC_CACHE_FLUSHTHRESH(cache_size) > num_elt) {
> rte_errno = EINVAL;
> return NULL;
> }
>
> - /* check that we have both VA and PA */
> - if (vaddr != NULL && paddr == NULL) {
> - rte_errno = EINVAL;
> - return NULL;
> - }
> -
> - /* Check that pg_num and pg_shift parameters are valid. */
> - if (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) {
> - rte_errno = EINVAL;
> - return NULL;
> - }
> -
> - /* "no cache align" imply "no spread" */
> - if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
> - flags |= MEMPOOL_F_NO_SPREAD;
> + if (flags && MEMPOOL_F_INT_HANDLER) {
> + /* Check that pg_num and pg_shift parameters are valid. */
> + if (pg_num < RTE_DIM(mp->elt_pa) ||
> + pg_shift > MEMPOOL_PG_SHIFT_MAX) {
> + rte_errno = EINVAL;
> + return NULL;
> + }
>
> - /* ring flags */
> - if (flags & MEMPOOL_F_SP_PUT)
> - rg_flags |= RING_F_SP_ENQ;
> - if (flags & MEMPOOL_F_SC_GET)
> - rg_flags |= RING_F_SC_DEQ;
> + /* "no cache align" imply "no spread" */
> + if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
> + flags |= MEMPOOL_F_NO_SPREAD;
>
> - /* calculate mempool object sizes. */
> - if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
> - rte_errno = EINVAL;
> - return NULL;
> + /* calculate mempool object sizes. */
> + if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
> + rte_errno = EINVAL;
> + return NULL;
> + }
> }
>
> rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);
>
> - /* allocate the ring that will be used to store objects */
> - /* Ring functions will return appropriate errors if we are
> - * running as a secondary process etc., so no checks made
> - * in this function for that condition */
> - snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name);
> - r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
> - if (r == NULL)
> - goto exit;
> -
> /*
> * reserve a memory zone for this mempool: private data is
> * cache-aligned
> */
> - private_data_size = (private_data_size +
> - RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
> + private_data_size = RTE_ALIGN_CEIL(private_data_size,
> + RTE_MEMPOOL_ALIGN);
>
> - if (! rte_eal_has_hugepages()) {
> - /*
> - * expand private data size to a whole page, so that the
> - * first pool element will start on a new standard page
> - */
> - int head = sizeof(struct rte_mempool);
> - int new_size = (private_data_size + head) % page_size;
> - if (new_size) {
> - private_data_size += page_size - new_size;
> - }
> - }
>
> /* try to allocate tailq entry */
> te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
> @@ -539,23 +479,51 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
> goto exit;
> }
>
> - /*
> - * If user provided an external memory buffer, then use it to
> - * store mempool objects. Otherwise reserve a memzone that is large
> - * enough to hold mempool header and metadata plus mempool objects.
> - */
> - mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
> - mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
> - if (vaddr == NULL)
> - mempool_size += (size_t)objsz.total_size * n;
> + if (flags && MEMPOOL_F_INT_HANDLER) {
>
> - if (! rte_eal_has_hugepages()) {
> + if (!rte_eal_has_hugepages()) {
> + /*
> + * expand private data size to a whole page, so that the
> + * first pool element will start on a new standard page
> + */
> + int head = sizeof(struct rte_mempool);
> + int new_size = (private_data_size + head) % page_size;
> +
> + if (new_size)
> + private_data_size += page_size - new_size;
> + }
> +
> +
> + /*
> + * If user provided an external memory buffer, then use it to
> + * store mempool objects. Otherwise reserve a memzone that is
> + * large enough to hold mempool header and metadata plus
> + * mempool objects
> + */
> + mempool_size =
> + MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
> + mempool_size =
> + RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
> + if (vaddr == NULL)
> + mempool_size += (size_t)objsz.total_size * num_elt;
> +
> + if (!rte_eal_has_hugepages()) {
> + /*
> + * we want the memory pool to start on a page boundary,
> + * because pool elements crossing page boundaries would
> + * result in discontiguous physical addresses
> + */
> + mempool_size += page_size;
> + }
> + } else {
> /*
> - * we want the memory pool to start on a page boundary,
> - * because pool elements crossing page boundaries would
> - * result in discontiguous physical addresses
> + * If user provided an external memory buffer, then use it to
> + * store mempool objects. Otherwise reserve a memzone that is
> + * large enough to hold mempool header and metadata plus
> + * mempool objects
> */
> - mempool_size += page_size;
> + mempool_size = sizeof(*mp) + private_data_size;
> + mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
> }
>
> snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
> @@ -563,24 +531,29 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
> mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
>
> /*
> - * no more memory: in this case we loose previously reserved
> - * space for the ring as we cannot free it
> + * no more memory
> */
> if (mz == NULL) {
> rte_free(te);
> goto exit;
> }
>
> - if (rte_eal_has_hugepages()) {
> - startaddr = (void*)mz->addr;
> - } else {
> - /* align memory pool start address on a page boundary */
> - unsigned long addr = (unsigned long)mz->addr;
> - if (addr & (page_size - 1)) {
> - addr += page_size;
> - addr &= ~(page_size - 1);
> + if (flags && MEMPOOL_F_INT_HANDLER) {
> +
> + if (rte_eal_has_hugepages()) {
> + startaddr = (void *)mz->addr;
> + } else {
> + /* align memory pool start address on a page boundary */
> + unsigned long addr = (unsigned long)mz->addr;
> +
> + if (addr & (page_size - 1)) {
> + addr += page_size;
> + addr &= ~(page_size - 1);
> + }
> + startaddr = (void *)addr;
> }
> - startaddr = (void*)addr;
> + } else {
> + startaddr = (void *)mz->addr;
> }
>
> /* init the mempool structure */
> @@ -588,8 +561,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
> memset(mp, 0, sizeof(*mp));
> snprintf(mp->name, sizeof(mp->name), "%s", name);
> mp->phys_addr = mz->phys_addr;
> - mp->ring = r;
> - mp->size = n;
> + mp->size = num_elt;
> mp->flags = flags;
> mp->elt_size = objsz.elt_size;
> mp->header_size = objsz.header_size;
> @@ -598,35 +570,54 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
> mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
> mp->private_data_size = private_data_size;
>
> - /* calculate address of the first element for continuous mempool. */
> - obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
> - private_data_size;
> - obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
> -
> - /* populate address translation fields. */
> - mp->pg_num = pg_num;
> - mp->pg_shift = pg_shift;
> - mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
> + mp->handler_idx = rte_get_mempool_handler_idx(handler_name);
> + if (mp->handler_idx < 0) {
> + RTE_LOG(ERR, MEMPOOL, "Cannot find mempool handler by name!\n");
> + rte_free(te);
> + goto exit;
> + }
>
> - /* mempool elements allocated together with mempool */
> - if (vaddr == NULL) {
> - mp->elt_va_start = (uintptr_t)obj;
> - mp->elt_pa[0] = mp->phys_addr +
> - (mp->elt_va_start - (uintptr_t)mp);
> + if (flags && MEMPOOL_F_INT_HANDLER) {
> + /* calculate address of first element for continuous mempool. */
> + obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
> + private_data_size;
> + obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
> +
> + /* populate address translation fields. */
> + mp->pg_num = pg_num;
> + mp->pg_shift = pg_shift;
> + mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
> +
> + /* mempool elements allocated together with mempool */
> + if (vaddr == NULL) {
> + mp->elt_va_start = (uintptr_t)obj;
> + mp->elt_pa[0] = mp->phys_addr +
> + (mp->elt_va_start - (uintptr_t)mp);
> + /* mempool elements in a separate chunk of memory. */
> + } else {
> + mp->elt_va_start = (uintptr_t)vaddr;
> + memcpy(mp->elt_pa, paddr,
> + sizeof(mp->elt_pa[0]) * pg_num);
> + }
>
> - /* mempool elements in a separate chunk of memory. */
> - } else {
> - mp->elt_va_start = (uintptr_t)vaddr;
> - memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
> + mp->elt_va_end = mp->elt_va_start;
> }
>
> - mp->elt_va_end = mp->elt_va_start;
> + /* Parameters are setup. Call the mempool handler alloc */
> + mp->rt_pool =
> + rte_mempool_ext_alloc(mp, name, num_elt, socket_id, flags);
> + if (mp->rt_pool == NULL) {
> + RTE_LOG(ERR, MEMPOOL, "Failed to alloc mempool!\n");
> + rte_free(te);
> + goto exit;
> + }
>
> /* call the initializer */
> if (mp_init)
> mp_init(mp, mp_init_arg);
>
> - mempool_populate(mp, n, 1, obj_init, obj_init_arg);
> + if (obj_init)
> + mempool_populate(mp, num_elt, 1, obj_init, obj_init_arg);
>
> te->data = (void *) mp;
>
> @@ -640,13 +631,79 @@ exit:
> return mp;
> }
>
> +/* Create the mempool over already allocated chunk of memory */
> +struct rte_mempool *
> +rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
> + unsigned cache_size, unsigned private_data_size,
> + rte_mempool_ctor_t *mp_init, void *mp_init_arg,
> + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
> + int socket_id, unsigned flags, void *vaddr,
> + const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
> +{
> + struct rte_mempool *mp = NULL;
> + char handler_name[RTE_MEMPOOL_NAMESIZE];
> +
> +
> + /* compilation-time checks */
> + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
> + RTE_CACHE_LINE_MASK) != 0);
> +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
> + RTE_CACHE_LINE_MASK) != 0);
> + RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
> + RTE_CACHE_LINE_MASK) != 0);
> +#endif
> +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
> + RTE_CACHE_LINE_MASK) != 0);
> + RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
> + RTE_CACHE_LINE_MASK) != 0);
> +#endif
> +
> +
> + /* check that we have both VA and PA */
> + if (vaddr != NULL && paddr == NULL) {
> + rte_errno = EINVAL;
> + return NULL;
> + }
> +
> + /*
> + * Since we have 4 combinations of the SP/SC/MP/MC, and stack,
> + * examine the
> + * flags to set the correct index into the handler table.
> + */
> + if (flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET))
> + sprintf(handler_name, "%s", "ring_sp_sc");
> + else if (flags & MEMPOOL_F_SP_PUT)
> + sprintf(handler_name, "%s", "ring_sp_mc");
> + else if (flags & MEMPOOL_F_SC_GET)
> + sprintf(handler_name, "%s", "ring_mp_sc");
> + else
> + sprintf(handler_name, "%s", "ring_mp_mc");
> +
> + flags |= MEMPOOL_F_INT_HANDLER;
> +
> + mp = mempool_create(name,
> + n, elt_size,
> + cache_size, private_data_size,
> + mp_init, mp_init_arg,
> + obj_init, obj_init_arg,
> + socket_id,
> + flags,
> + vaddr, paddr,
> + pg_num, pg_shift,
> + handler_name);
> +
> + return mp;
> +}
> +
> /* Return the number of entries in the mempool */
> unsigned
> rte_mempool_count(const struct rte_mempool *mp)
> {
> unsigned count;
>
> - count = rte_ring_count(mp->ring);
> + count = rte_mempool_ext_get_count(mp);
>
> #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> {
> @@ -802,7 +859,6 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
>
> fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
> fprintf(f, " flags=%x\n", mp->flags);
> - fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring);
> fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr);
> fprintf(f, " size=%"PRIu32"\n", mp->size);
> fprintf(f, " header_size=%"PRIu32"\n", mp->header_size);
> @@ -825,7 +881,7 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
> mp->size);
>
> cache_count = rte_mempool_dump_cache(f, mp);
> - common_count = rte_ring_count(mp->ring);
> + common_count = rte_mempool_ext_get_count(mp);
> if ((cache_count + common_count) > mp->size)
> common_count = mp->size - cache_count;
> fprintf(f, " common_pool_count=%u\n", common_count);
> @@ -919,3 +975,30 @@ void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
>
> rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
> }
> +
> +
> +/* create the mempool using an external mempool manager */
> +struct rte_mempool *
> +rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,
> + unsigned cache_size, unsigned private_data_size,
> + rte_mempool_ctor_t *mp_init, void *mp_init_arg,
> + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
> + int socket_id, unsigned flags,
> + const char *handler_name)
> +{
> + struct rte_mempool *mp = NULL;
> +
> + mp = mempool_create(name,
> + n, elt_size,
> + cache_size, private_data_size,
> + mp_init, mp_init_arg,
> + obj_init, obj_init_arg,
> + socket_id, flags,
> + NULL, NULL, /* vaddr, paddr */
> + 0, 0, /* pg_num, pg_shift, */
> + handler_name);
> +
> + return mp;
> +
> +
> +}
> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index 9745bf0..3705fbd 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -88,6 +88,8 @@ extern "C" {
> struct rte_mempool_debug_stats {
> uint64_t put_bulk; /**< Number of puts. */
> uint64_t put_objs; /**< Number of objects successfully put. */
> + uint64_t put_pool_bulk; /**< Number of puts into pool. */
> + uint64_t put_pool_objs; /**< Number of objects into pool. */
> uint64_t get_success_bulk; /**< Successful allocation number. */
> uint64_t get_success_objs; /**< Objects successfully allocated. */
> uint64_t get_fail_bulk; /**< Failed allocation number. */
> @@ -175,12 +177,85 @@ struct rte_mempool_objtlr {
> #endif
> };
>
> +/* Handler functions for external mempool support */
> +typedef void *(*rte_mempool_alloc_t)(struct rte_mempool *mp,
> + const char *name, unsigned n, int socket_id, unsigned flags);
> +typedef int (*rte_mempool_put_t)(void *p,
> + void * const *obj_table, unsigned n);
> +typedef int (*rte_mempool_get_t)(void *p, void **obj_table,
> + unsigned n);
> +typedef unsigned (*rte_mempool_get_count)(void *p);
> +typedef int (*rte_mempool_free_t)(struct rte_mempool *mp);
> +
> +/**
> + * @internal wrapper for external mempool manager alloc callback.
> + *
> + * @param mp
> + * Pointer to the memory pool.
> + * @param name
> + * Name of the memory pool.
> + * @param n
> + * Number of objects in the mempool.
> + * @param socket_id
> + * socket id on which to allocate.
> + * @param flags
> + * general flags to allocate function (MEMPOOL_F_* flags)
> + */
> +void *
> +rte_mempool_ext_alloc(struct rte_mempool *mp,
> + const char *name, unsigned n, int socket_id, unsigned flags);
> +
> +/**
> + * @internal wrapper for external mempool manager get callback.
> + *
> + * @param mp
> + * Pointer to the memory pool.
> + * @param obj_table
> + * Pointer to a table of void * pointers (objects).
> + * @param n
> + * Number of objects to get
> + */
> +int
> +rte_mempool_ext_get_bulk(struct rte_mempool *mp, void **obj_table,
> + unsigned n);
> +
> +/**
> + * @internal wrapper for external mempool manager put callback.
> + *
> + * @param mp
> + * Pointer to the memory pool.
> + * @param obj_table
> + * Pointer to a table of void * pointers (objects).
> + * @param n
> + * Number of objects to put
> + */
> +int
> +rte_mempool_ext_put_bulk(struct rte_mempool *mp, void * const *obj_table,
> + unsigned n);
> +
> +/**
> + * @internal wrapper for external mempool manager get_count callback.
> + *
> + * @param mp
> + * Pointer to the memory pool.
> + */
> +int
> +rte_mempool_ext_get_count(const struct rte_mempool *mp);
> +
> +/**
> + * @internal wrapper for external mempool manager free callback.
> + *
> + * @param mp
> + * Pointer to the memory pool.
> + */
> +int
> +rte_mempool_ext_free(struct rte_mempool *mp);
> +
> /**
> * The RTE mempool structure.
> */
> struct rte_mempool {
> char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
> - struct rte_ring *ring; /**< Ring to store objects. */
> phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
> int flags; /**< Flags of the mempool. */
> uint32_t size; /**< Size of the mempool. */
> @@ -194,6 +269,11 @@ struct rte_mempool {
>
> unsigned private_data_size; /**< Size of private data. */
>
> + /* Common pool data structure pointer */
> + void *rt_pool;
> +
> + int16_t handler_idx;
> +
> #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> /** Per-lcore local cache. */
> struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
> @@ -223,6 +303,8 @@ struct rte_mempool {
> #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
> #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
> #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
> +#define MEMPOOL_F_INT_HANDLER 0x0020 /**< Using internal mempool handler */
> +
>
> /**
> * @internal When debug is enabled, store some statistics.
> @@ -753,7 +835,7 @@ void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
> */
> static inline void __attribute__((always_inline))
> __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
> - unsigned n, int is_mp)
> + unsigned n, __attribute__((unused)) int is_mp)
> {
> #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> struct rte_mempool_cache *cache;
> @@ -769,8 +851,7 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
>
> #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> /* cache is not enabled or single producer or non-EAL thread */
> - if (unlikely(cache_size == 0 || is_mp == 0 ||
> - lcore_id >= RTE_MAX_LCORE))
> + if (unlikely(cache_size == 0 || lcore_id >= RTE_MAX_LCORE))
> goto ring_enqueue;
>
> /* Go straight to ring if put would overflow mem allocated for cache */
> @@ -793,8 +874,8 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
>
> cache->len += n;
>
> - if (cache->len >= flushthresh) {
> - rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
> + if (unlikely(cache->len >= flushthresh)) {
> + rte_mempool_ext_put_bulk(mp, &cache->objs[cache_size],
> cache->len - cache_size);
> cache->len = cache_size;
> }
> @@ -804,22 +885,10 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
> ring_enqueue:
> #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
>
> - /* push remaining objects in ring */
> -#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> - if (is_mp) {
> - if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
> - rte_panic("cannot put objects in mempool\n");
> - }
> - else {
> - if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
> - rte_panic("cannot put objects in mempool\n");
> - }
> -#else
> - if (is_mp)
> - rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
> - else
> - rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
> -#endif
> + /* Increment stats counter to tell us how many pool puts happened */
> + __MEMPOOL_STAT_ADD(mp, put_pool, n);
> +
> + rte_mempool_ext_put_bulk(mp, obj_table, n);
> }
>
>
> @@ -943,7 +1012,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
> */
> static inline int __attribute__((always_inline))
> __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
> - unsigned n, int is_mc)
> + unsigned n, __attribute__((unused))int is_mc)
> {
> int ret;
> #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> @@ -954,8 +1023,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
> uint32_t cache_size = mp->cache_size;
>
> /* cache is not enabled or single consumer */
> - if (unlikely(cache_size == 0 || is_mc == 0 ||
> - n >= cache_size || lcore_id >= RTE_MAX_LCORE))
> + if (unlikely(cache_size == 0 || n >= cache_size ||
> + lcore_id >= RTE_MAX_LCORE))
> goto ring_dequeue;
>
> cache = &mp->local_cache[lcore_id];
> @@ -967,7 +1036,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
> uint32_t req = n + (cache_size - cache->len);
>
> /* How many do we require i.e. number to fill the cache + the request */
> - ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
> + ret = rte_mempool_ext_get_bulk(mp,
> + &cache->objs[cache->len], req);
> if (unlikely(ret < 0)) {
> /*
> * In the offchance that we are buffer constrained,
> @@ -995,10 +1065,7 @@ ring_dequeue:
> #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
>
> /* get remaining objects from ring */
> - if (is_mc)
> - ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
> - else
> - ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
> + ret = rte_mempool_ext_get_bulk(mp, obj_table, n);
>
> if (ret < 0)
> __MEMPOOL_STAT_ADD(mp, get_fail, n);
> @@ -1401,6 +1468,79 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
> void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
> void *arg);
>
> +/**
> + * Function to get the name of a mempool handler
> + *
> + * @param mp
> + * A pointer to the mempool structure.
> + * @return
> + * The name of the mempool handler
> + */
> +char *rte_mempool_get_handler_name(struct rte_mempool *mp);
> +
> +/**
> + * Create a new mempool named *name* in memory.
> + *
> + * This function uses an externally defined alloc callback to allocate memory.
> + * Its size is set to n elements.
> + * All elements of the mempool are allocated separately to the mempool header.
> + *
> + * @param name
> + * The name of the mempool.
> + * @param n
> + * The number of elements in the mempool. The optimum size (in terms of
> + * memory usage) for a mempool is when n is a power of two minus one:
> + * n = (2^q - 1).
> + * @param cache_size
> + * If cache_size is non-zero, the rte_mempool library will try to
> + * limit the accesses to the common lockless pool, by maintaining a
> + * per-lcore object cache. This argument must be lower or equal to
> + * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
> + * cache_size to have "n modulo cache_size == 0": if this is
> + * not the case, some elements will always stay in the pool and will
> + * never be used. The access to the per-lcore table is of course
> + * faster than the multi-producer/consumer pool. The cache can be
> + * disabled if the cache_size argument is set to 0; it can be useful to
> + * avoid losing objects in cache. Note that even if not used, the
> + * memory space for cache is always reserved in a mempool structure,
> + * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
> + * @param private_data_size
> + * The size of the private data appended after the mempool
> + * structure. This is useful for storing some private data after the
> + * mempool structure, as is done for rte_mbuf_pool for example.
> + * @param mp_init
> + * A function pointer that is called for initialization of the pool,
> + * before object initialization. The user can initialize the private
> + * data in this function if needed. This parameter can be NULL if
> + * not needed.
> + * @param mp_init_arg
> + * An opaque pointer to data that can be used in the mempool
> + * constructor function.
> + * @param obj_init
> + * A function pointer that is called for each object at
> + * initialization of the pool. The user can set some meta data in
> + * objects if needed. This parameter can be NULL if not needed.
> + * The obj_init() function takes the mempool pointer, the init_arg,
> + * the object pointer and the object number as parameters.
> + * @param obj_init_arg
> + * An opaque pointer to data that can be used as an argument for
> + * each call to the object constructor function.
> + * @param socket_id
> + * The *socket_id* argument is the socket identifier in the case of
> + * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
> + * constraint for the reserved zone.
> + * @param flags
> + * @return
> + * The pointer to the new allocated mempool, on success. NULL on error
> + */
> +struct rte_mempool *
> +rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,
> + unsigned cache_size, unsigned private_data_size,
> + rte_mempool_ctor_t *mp_init, void *mp_init_arg,
> + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
> + int socket_id, unsigned flags,
> + const char *handler_name);
> +
> #ifdef __cplusplus
> }
> #endif
> diff --git a/lib/librte_mempool/rte_mempool_default.c b/lib/librte_mempool/rte_mempool_default.c
> new file mode 100644
> index 0000000..ca3255e
> --- /dev/null
> +++ b/lib/librte_mempool/rte_mempool_default.c
> @@ -0,0 +1,236 @@
> +/*-
> + * BSD LICENSE
> + *
> + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> + * All rights reserved.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + *
> + * * Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * * Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in
> + * the documentation and/or other materials provided with the
> + * distribution.
> + * * Neither the name of Intel Corporation nor the names of its
> + * contributors may be used to endorse or promote products derived
> + * from this software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <stdio.h>
> +#include <rte_mempool.h>
> +#include <rte_malloc.h>
> +#include <string.h>
> +
> +#include "rte_mempool.h"
> +#include "rte_mempool_internal.h"
> +
> +/*
> + * Indirect jump table to support external memory pools
> + */
> +struct rte_mempool_handler_list mempool_handler_list = {
> + .sl = RTE_SPINLOCK_INITIALIZER ,
> + .num_handlers = 0
> +};
> +
> +/*
> + * Returns the name of the mempool
> + */
> +char *
> +rte_mempool_get_handler_name(struct rte_mempool *mp) {
> + return mempool_handler_list.handler[mp->handler_idx].name;
> +}
> +
> +int16_t
> +rte_mempool_register_handler(struct rte_mempool_handler *h)
> +{
> + int16_t handler_idx;
> +
> + /* */
> + rte_spinlock_lock(&mempool_handler_list.sl);
> +
> + /* Check whether jump table has space */
> + if (mempool_handler_list.num_handlers >= RTE_MEMPOOL_MAX_HANDLER_IDX) {
> + rte_spinlock_unlock(&mempool_handler_list.sl);
> + RTE_LOG(ERR, MEMPOOL,
> + "Maximum number of mempool handlers exceeded\n");
> + return -1;
> + }
> +
> + if ((h->put == NULL) || (h->get == NULL) ||
> + (h->get_count == NULL)) {
> + rte_spinlock_unlock(&mempool_handler_list.sl);
> + RTE_LOG(ERR, MEMPOOL,
> + "Missing callback while registering mempool handler\n");
> + return -1;
> + }
> +
> + /* add new handler index */
> + handler_idx = mempool_handler_list.num_handlers++;
> +
> + snprintf(mempool_handler_list.handler[handler_idx].name,
> + RTE_MEMPOOL_NAMESIZE, "%s", h->name);
> + mempool_handler_list.handler[handler_idx].alloc = h->alloc;
> + mempool_handler_list.handler[handler_idx].put = h->put;
> + mempool_handler_list.handler[handler_idx].get = h->get;
> + mempool_handler_list.handler[handler_idx].get_count = h->get_count;
> +
> + rte_spinlock_unlock(&mempool_handler_list.sl);
> +
> + return handler_idx;
> +}
> +
> +int16_t
> +rte_get_mempool_handler_idx(const char *name)
> +{
> + int16_t i;
> +
> + for (i = 0; i < mempool_handler_list.num_handlers; i++) {
> + if (!strcmp(name, mempool_handler_list.handler[i].name))
> + return i;
> + }
> + return -1;
> +}
> +
> +static int
> +common_ring_mp_put(void *p, void * const *obj_table, unsigned n)
> +{
> + return rte_ring_mp_enqueue_bulk((struct rte_ring *)p, obj_table, n);
> +}
> +
> +static int
> +common_ring_sp_put(void *p, void * const *obj_table, unsigned n)
> +{
> + return rte_ring_sp_enqueue_bulk((struct rte_ring *)p, obj_table, n);
> +}
> +
> +static int
> +common_ring_mc_get(void *p, void **obj_table, unsigned n)
> +{
> + return rte_ring_mc_dequeue_bulk((struct rte_ring *)p, obj_table, n);
> +}
> +
> +static int
> +common_ring_sc_get(void *p, void **obj_table, unsigned n)
> +{
> + return rte_ring_sc_dequeue_bulk((struct rte_ring *)p, obj_table, n);
> +}
> +
> +static unsigned
> +common_ring_get_count(void *p)
> +{
> + return rte_ring_count((struct rte_ring *)p);
> +}
> +
> +
> +static void *
> +rte_mempool_common_ring_alloc(struct rte_mempool *mp,
> + const char *name, unsigned n, int socket_id, unsigned flags)
> +{
> + struct rte_ring *r;
> + char rg_name[RTE_RING_NAMESIZE];
> + int rg_flags = 0;
> +
> + if (flags & MEMPOOL_F_SP_PUT)
> + rg_flags |= RING_F_SP_ENQ;
> + if (flags & MEMPOOL_F_SC_GET)
> + rg_flags |= RING_F_SC_DEQ;
> +
> + /* allocate the ring that will be used to store objects */
> + /* Ring functions will return appropriate errors if we are
> + * running as a secondary process etc., so no checks made
> + * in this function for that condition */
> + snprintf(rg_name, sizeof(rg_name), "%s-ring", name);
> + r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
> + if (r == NULL)
> + return NULL;
> +
> + mp->rt_pool = (void *)r;
> +
> + return (void *) r;
> +}
> +
> +void *
> +rte_mempool_ext_alloc(struct rte_mempool *mp,
> + const char *name, unsigned n, int socket_id, unsigned flags)
> +{
> + if (mempool_handler_list.handler[mp->handler_idx].alloc) {
> + return (mempool_handler_list.handler[mp->handler_idx].alloc)
> + (mp, name, n, socket_id, flags);
> + }
> + return NULL;
> +}
> +
> +inline int __attribute__((always_inline))
> +rte_mempool_ext_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
> +{
> + return (mempool_handler_list.handler[mp->handler_idx].get)
> + (mp->rt_pool, obj_table, n);
> +}
> +
> +inline int __attribute__((always_inline))
> +rte_mempool_ext_put_bulk(struct rte_mempool *mp, void * const *obj_table,
> + unsigned n)
> +{
> + return (mempool_handler_list.handler[mp->handler_idx].put)
> + (mp->rt_pool, obj_table, n);
> +}
> +
> +int
> +rte_mempool_ext_get_count(const struct rte_mempool *mp)
> +{
> + return (mempool_handler_list.handler[mp->handler_idx].get_count)
> + (mp->rt_pool);
> +}
> +
> +static struct rte_mempool_handler handler_mp_mc = {
> + .name = "ring_mp_mc",
> + .alloc = rte_mempool_common_ring_alloc,
> + .put = common_ring_mp_put,
> + .get = common_ring_mc_get,
> + .get_count = common_ring_get_count,
> + .free = NULL
> +};
> +static struct rte_mempool_handler handler_sp_sc = {
> + .name = "ring_sp_sc",
> + .alloc = rte_mempool_common_ring_alloc,
> + .put = common_ring_sp_put,
> + .get = common_ring_sc_get,
> + .get_count = common_ring_get_count,
> + .free = NULL
> +};
> +static struct rte_mempool_handler handler_mp_sc = {
> + .name = "ring_mp_sc",
> + .alloc = rte_mempool_common_ring_alloc,
> + .put = common_ring_mp_put,
> + .get = common_ring_sc_get,
> + .get_count = common_ring_get_count,
> + .free = NULL
> +};
> +static struct rte_mempool_handler handler_sp_mc = {
> + .name = "ring_sp_mc",
> + .alloc = rte_mempool_common_ring_alloc,
> + .put = common_ring_sp_put,
> + .get = common_ring_mc_get,
> + .get_count = common_ring_get_count,
> + .free = NULL
> +};
> +
> +REGISTER_MEMPOOL_HANDLER(handler_mp_mc);
> +REGISTER_MEMPOOL_HANDLER(handler_sp_sc);
> +REGISTER_MEMPOOL_HANDLER(handler_mp_sc);
> +REGISTER_MEMPOOL_HANDLER(handler_sp_mc);
> diff --git a/lib/librte_mempool/rte_mempool_internal.h b/lib/librte_mempool/rte_mempool_internal.h
> new file mode 100644
> index 0000000..982396f
> --- /dev/null
> +++ b/lib/librte_mempool/rte_mempool_internal.h
> @@ -0,0 +1,75 @@
> +/*-
> + * BSD LICENSE
> + *
> + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
> + * All rights reserved.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + *
> + * * Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * * Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in
> + * the documentation and/or other materials provided with the
> + * distribution.
> + * * Neither the name of Intel Corporation nor the names of its
> + * contributors may be used to endorse or promote products derived
> + * from this software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef _RTE_MEMPOOL_INTERNAL_H_
> +#define _RTE_MEMPOOL_INTERNAL_H_
> +
> +#include <rte_spinlock.h>
> +#include <rte_mempool.h>
> +
> +#define RTE_MEMPOOL_MAX_HANDLER_IDX 16
> +
> +struct rte_mempool_handler {
> + char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool handler */
> +
> + rte_mempool_alloc_t alloc;
> +
> + rte_mempool_get_count get_count;
> +
> + rte_mempool_free_t free;
> +
> + rte_mempool_put_t put;
> +
> + rte_mempool_get_t get;
> +} __rte_cache_aligned;
> +
> +struct rte_mempool_handler_list {
> + rte_spinlock_t sl; /**< Spinlock for add/delete. */
> +
> + int32_t num_handlers; /**< Number of handlers that are valid. */
> +
> + /* storage for all possible handlers */
> + struct rte_mempool_handler handler[RTE_MEMPOOL_MAX_HANDLER_IDX];
> +};
> +
> +int16_t rte_mempool_register_handler(struct rte_mempool_handler *h);
> +int16_t rte_get_mempool_handler_idx(const char *name);
> +
> +#define REGISTER_MEMPOOL_HANDLER(h) \
> +static int16_t __attribute__((used)) testfn_##h(void);\
> +int16_t __attribute__((constructor, used)) testfn_##h(void)\
> +{\
> + return rte_mempool_register_handler(&h);\
> +}
> +
> +#endif
> diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
> index 17151e0..589db27 100644
> --- a/lib/librte_mempool/rte_mempool_version.map
> +++ b/lib/librte_mempool/rte_mempool_version.map
> @@ -6,6 +6,7 @@ DPDK_2.0 {
> rte_mempool_calc_obj_size;
> rte_mempool_count;
> rte_mempool_create;
> + rte_mempool_create_ext;
> rte_mempool_dump;
> rte_mempool_list_dump;
> rte_mempool_lookup;
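For illustration, a third-party handler plugged in through the mechanism above
would look roughly like the ring handlers in rte_mempool_default.c. The
following is a hypothetical sketch only: the my_* names and the simplistic
LIFO storage are placeholders, not part of the patch; the callback prototypes,
the rte_mempool_handler structure and REGISTER_MEMPOOL_HANDLER come from
rte_mempool.h and rte_mempool_internal.h as added above.

#include <errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include "rte_mempool_internal.h"

/* placeholder pool: a fixed-size LIFO, no overflow check or locking */
struct my_pool {
	void *objs[8192];
	unsigned top;
};

static void *
my_alloc(struct rte_mempool *mp, const char *name, unsigned n,
	 int socket_id, unsigned flags)
{
	(void)mp; (void)n; (void)flags;
	/* the returned pointer is stored by the library in mp->rt_pool */
	return rte_zmalloc_socket(name, sizeof(struct my_pool), 0, socket_id);
}

static int
my_put(void *p, void * const *obj_table, unsigned n)
{
	struct my_pool *pool = p;
	unsigned i;

	for (i = 0; i < n; i++)
		pool->objs[pool->top++] = obj_table[i];
	return 0;
}

static int
my_get(void *p, void **obj_table, unsigned n)
{
	struct my_pool *pool = p;
	unsigned i;

	if (pool->top < n)
		return -ENOENT;
	for (i = 0; i < n; i++)
		obj_table[i] = pool->objs[--pool->top];
	return 0;
}

static unsigned
my_get_count(void *p)
{
	return ((struct my_pool *)p)->top;
}

static struct rte_mempool_handler my_handler = {
	.name = "my_custom_pool",
	.alloc = my_alloc,
	.put = my_put,
	.get = my_get,
	.get_count = my_get_count,
	.free = NULL
};

REGISTER_MEMPOOL_HANDLER(my_handler);

Once registered (the constructor runs at program start-up), such a handler
would be selected by passing its name, e.g. "my_custom_pool", as the
handler_name argument to rte_mempool_create_ext.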
Hi David,
On 02/16/2016 03:48 PM, David Hunt wrote:
> Adds the new rte_mempool_create_ext api and callback mechanism for
> external mempool handlers
>
> Modifies the existing rte_mempool_create to set up the handler_idx to
> the relevant mempool handler based on the handler name:
> ring_sp_sc
> ring_mp_mc
> ring_sp_mc
> ring_mp_sc
>
> v2: merges the duplicated code in rte_mempool_xmem_create and
> rte_mempool_create_ext into one common function. The old functions
> now call the new common function with the relevant parameters.
>
> Signed-off-by: David Hunt <david.hunt@intel.com>
I think the refactoring of rte_mempool_create() (adding of
mempool_create()) should go in another commit. It will make the
patches much easier to read.
Also, I'm sorry but it seems that several comments or questions I've made
in http://dpdk.org/ml/archives/dev/2016-February/032706.html are
not addressed.
Examples:
- putting some part of the patch in separate commits
- meaning of "rt_pool"
- put_pool_bulk unclear comment
- should we also have get_pool_bulk stats?
- missing _MEMPOOL_STAT_ADD() in mempool_bulk()
- why internal in rte_mempool_internal.h?
- why default in rte_mempool_default.c?
- remaining references to stack handler (in a comment)
- ...?
As you know, doing a proper code review takes a lot of time. If I
have to re-check all of my previous comments, it will take even
more. I'm not saying all my comments require a code change, but in case
you don't agree, please at least explain your opinion so we can debate
on the list.
Regards,
Olivier
On 2/19/2016 1:30 PM, Olivier MATZ wrote:
> Hi David,
>
> On 02/16/2016 03:48 PM, David Hunt wrote:
>> Adds the new rte_mempool_create_ext api and callback mechanism for
>> external mempool handlers
>>
>> Modifies the existing rte_mempool_create to set up the handler_idx to
>> the relevant mempool handler based on the handler name:
>> ring_sp_sc
>> ring_mp_mc
>> ring_sp_mc
>> ring_mp_sc
>>
>> v2: merges the duplicated code in rte_mempool_xmem_create and
>> rte_mempool_create_ext into one common function. The old functions
>> now call the new common function with the relevant parameters.
>>
>> Signed-off-by: David Hunt <david.hunt@intel.com>
> I think the refactoring of rte_mempool_create() (adding of
> mempool_create()) should go in another commit. It will make the
> patches much easier to read.
>
> Also, I'm sorry but it seems that several comments or question I've made
> in http://dpdk.org/ml/archives/dev/2016-February/032706.html are
> not addressed.
>
> Examples:
> - putting some part of the patch in separate commits
> - meaning of "rt_pool"
> - put_pool_bulk unclear comment
> - should we also have get_pool_bulk stats?
> - missing _MEMPOOL_STAT_ADD() in mempool_bulk()
> - why internal in rte_mempool_internal.h?
> - why default in rte_mempool_default.c?
> - remaining references to stack handler (in a comment)
> - ...?
>
> As you know, doing a proper code review takes a lot of time. If I
> have to re-check all of my previous comments, it will take even
> more. I'm not saying all my comments require a code change, but in case
> you don't agree, please at least explain your opinion so we can debate
> on the list.
>
Hi Olivier,
Sincerest apologies. I had intended to come back around to your
original comments after refactoring the code. I will do that now. I did
take them into consideration, but I see now that I need to do further
work, such as a clearer name for rt_pool, etc. I will respond to your
original email.
Thanks
David.
Hi David,
On 02/29/2016 12:11 PM, Hunt, David wrote:
>> Also, I'm sorry but it seems that several comments or question I've made
>> in http://dpdk.org/ml/archives/dev/2016-February/032706.html are
>> not addressed.
>>
>> Examples:
>> - putting some part of the patch in separate commits
>> - meaning of "rt_pool"
>> - put_pool_bulk unclear comment
>> - should we also have get_pool_bulk stats?
>> - missing _MEMPOOL_STAT_ADD() in mempool_bulk()
>> - why internal in rte_mempool_internal.h?
>> - why default in rte_mempool_default.c?
>> - remaining references to stack handler (in a comment)
>> - ...?
>>
>> As you know, doing a proper code review takes a lot of time. If I
>> have to re-check all of my previous comments, it will take even
>> more. I'm not saying all my comments require a code change, but in case
>> you don't agree, please at least explain your opinion so we can debate
>> on the list.
>>
> Hi Olivier,
> Sincerest apologies. I had intended in coming back around to your
> original comments after refactoring the code. I will do that now. I did
> take them into consideration, but I see now that I need to do further
> work, such as a clearer name for rt_pool, etc. I will respond to your
> original email.
I thought some comments were ignored :)
So no problem in that case, thanks for clarifying.
Olivier
@@ -161,7 +161,6 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
n_get_bulk);
if (unlikely(ret < 0)) {
rte_mempool_dump(stdout, mp);
- rte_ring_dump(stdout, mp->ring);
/* in this case, objects are lost... */
return -1;
}
@@ -42,6 +42,8 @@ LIBABIVER := 1
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_default.c
+
ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_dom0_mempool.c
endif
@@ -59,10 +59,11 @@
#include <rte_spinlock.h>
#include "rte_mempool.h"
+#include "rte_mempool_internal.h"
TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
-static struct rte_tailq_elem rte_mempool_tailq = {
+struct rte_tailq_elem rte_mempool_tailq = {
.name = "RTE_MEMPOOL",
};
EAL_REGISTER_TAILQ(rte_mempool_tailq)
@@ -149,7 +150,7 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
obj_init(mp, obj_init_arg, obj, obj_idx);
/* enqueue in ring */
- rte_ring_sp_enqueue(mp->ring, obj);
+ rte_mempool_ext_put_bulk(mp, &obj, 1);
}
uint32_t
@@ -375,26 +376,6 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
return usz;
}
-#ifndef RTE_LIBRTE_XEN_DOM0
-/* stub if DOM0 support not configured */
-struct rte_mempool *
-rte_dom0_mempool_create(const char *name __rte_unused,
- unsigned n __rte_unused,
- unsigned elt_size __rte_unused,
- unsigned cache_size __rte_unused,
- unsigned private_data_size __rte_unused,
- rte_mempool_ctor_t *mp_init __rte_unused,
- void *mp_init_arg __rte_unused,
- rte_mempool_obj_ctor_t *obj_init __rte_unused,
- void *obj_init_arg __rte_unused,
- int socket_id __rte_unused,
- unsigned flags __rte_unused)
-{
- rte_errno = EINVAL;
- return NULL;
-}
-#endif
-
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
@@ -420,117 +401,76 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
}
/*
+ * Common mempool create function.
* Create the mempool over already allocated chunk of memory.
* That external memory buffer can consists of physically disjoint pages.
* Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
- * */
-struct rte_mempool *
-rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ * which will call rte_mempool_ext_alloc to allocate the object memory.
+ * If it is an intenal mempool handler, it will allocate space for mempool
+ * and it's elements as one big chunk of physically continuous memory.
+ * If it is an external mempool handler, it will allocate space for mempool
+ * and call the rte_mempool_ext_alloc for the object memory.
+ */
+static struct rte_mempool *
+mempool_create(const char *name,
+ unsigned num_elt, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+ int socket_id, unsigned flags,
+ void *vaddr, const phys_addr_t paddr[],
+ uint32_t pg_num, uint32_t pg_shift,
+ const char *handler_name)
{
- char mz_name[RTE_MEMZONE_NAMESIZE];
- char rg_name[RTE_RING_NAMESIZE];
+ const struct rte_memzone *mz;
struct rte_mempool_list *mempool_list;
struct rte_mempool *mp = NULL;
struct rte_tailq_entry *te;
- struct rte_ring *r;
- const struct rte_memzone *mz;
- size_t mempool_size;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
- int rg_flags = 0;
- void *obj;
struct rte_mempool_objsz objsz;
- void *startaddr;
+ void *startaddr = NULL;
int page_size = getpagesize();
-
- /* compilation-time checks */
- RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
- RTE_CACHE_LINE_MASK) != 0);
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif
+ void *obj = NULL;
+ size_t mempool_size;
mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
/* asked cache too big */
if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
- CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
+ CALC_CACHE_FLUSHTHRESH(cache_size) > num_elt) {
rte_errno = EINVAL;
return NULL;
}
- /* check that we have both VA and PA */
- if (vaddr != NULL && paddr == NULL) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- /* Check that pg_num and pg_shift parameters are valid. */
- if (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- /* "no cache align" imply "no spread" */
- if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
- flags |= MEMPOOL_F_NO_SPREAD;
+ if (flags && MEMPOOL_F_INT_HANDLER) {
+ /* Check that pg_num and pg_shift parameters are valid. */
+ if (pg_num < RTE_DIM(mp->elt_pa) ||
+ pg_shift > MEMPOOL_PG_SHIFT_MAX) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
- /* ring flags */
- if (flags & MEMPOOL_F_SP_PUT)
- rg_flags |= RING_F_SP_ENQ;
- if (flags & MEMPOOL_F_SC_GET)
- rg_flags |= RING_F_SC_DEQ;
+ /* "no cache align" imply "no spread" */
+ if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ flags |= MEMPOOL_F_NO_SPREAD;
- /* calculate mempool object sizes. */
- if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
- rte_errno = EINVAL;
- return NULL;
+ /* calculate mempool object sizes. */
+ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
}
rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);
- /* allocate the ring that will be used to store objects */
- /* Ring functions will return appropriate errors if we are
- * running as a secondary process etc., so no checks made
- * in this function for that condition */
- snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name);
- r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
- if (r == NULL)
- goto exit;
-
/*
* reserve a memory zone for this mempool: private data is
* cache-aligned
*/
- private_data_size = (private_data_size +
- RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
+ private_data_size = RTE_ALIGN_CEIL(private_data_size,
+ RTE_MEMPOOL_ALIGN);
- if (! rte_eal_has_hugepages()) {
- /*
- * expand private data size to a whole page, so that the
- * first pool element will start on a new standard page
- */
- int head = sizeof(struct rte_mempool);
- int new_size = (private_data_size + head) % page_size;
- if (new_size) {
- private_data_size += page_size - new_size;
- }
- }
/* try to allocate tailq entry */
te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
@@ -539,23 +479,51 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
goto exit;
}
- /*
- * If user provided an external memory buffer, then use it to
- * store mempool objects. Otherwise reserve a memzone that is large
- * enough to hold mempool header and metadata plus mempool objects.
- */
- mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
- mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
- if (vaddr == NULL)
- mempool_size += (size_t)objsz.total_size * n;
+ if (flags && MEMPOOL_F_INT_HANDLER) {
- if (! rte_eal_has_hugepages()) {
+ if (!rte_eal_has_hugepages()) {
+ /*
+ * expand private data size to a whole page, so that the
+ * first pool element will start on a new standard page
+ */
+ int head = sizeof(struct rte_mempool);
+ int new_size = (private_data_size + head) % page_size;
+
+ if (new_size)
+ private_data_size += page_size - new_size;
+ }
+
+
+ /*
+ * If user provided an external memory buffer, then use it to
+ * store mempool objects. Otherwise reserve a memzone that is
+ * large enough to hold mempool header and metadata plus
+ * mempool objects
+ */
+ mempool_size =
+ MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
+ mempool_size =
+ RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
+ if (vaddr == NULL)
+ mempool_size += (size_t)objsz.total_size * num_elt;
+
+ if (!rte_eal_has_hugepages()) {
+ /*
+ * we want the memory pool to start on a page boundary,
+ * because pool elements crossing page boundaries would
+ * result in discontiguous physical addresses
+ */
+ mempool_size += page_size;
+ }
+ } else {
/*
- * we want the memory pool to start on a page boundary,
- * because pool elements crossing page boundaries would
- * result in discontiguous physical addresses
+		 * For an external handler, the memzone only needs to hold
+		 * the mempool header and private data; the handler is
+		 * responsible for allocating the storage for the objects
*/
- mempool_size += page_size;
+ mempool_size = sizeof(*mp) + private_data_size;
+ mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
}
snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
@@ -563,24 +531,29 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
/*
- * no more memory: in this case we loose previously reserved
- * space for the ring as we cannot free it
+ * no more memory
*/
if (mz == NULL) {
rte_free(te);
goto exit;
}
- if (rte_eal_has_hugepages()) {
- startaddr = (void*)mz->addr;
- } else {
- /* align memory pool start address on a page boundary */
- unsigned long addr = (unsigned long)mz->addr;
- if (addr & (page_size - 1)) {
- addr += page_size;
- addr &= ~(page_size - 1);
+	if (flags & MEMPOOL_F_INT_HANDLER) {
+
+ if (rte_eal_has_hugepages()) {
+ startaddr = (void *)mz->addr;
+ } else {
+ /* align memory pool start address on a page boundary */
+ unsigned long addr = (unsigned long)mz->addr;
+
+ if (addr & (page_size - 1)) {
+ addr += page_size;
+ addr &= ~(page_size - 1);
+ }
+ startaddr = (void *)addr;
}
- startaddr = (void*)addr;
+ } else {
+ startaddr = (void *)mz->addr;
}
/* init the mempool structure */
@@ -588,8 +561,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
memset(mp, 0, sizeof(*mp));
snprintf(mp->name, sizeof(mp->name), "%s", name);
mp->phys_addr = mz->phys_addr;
- mp->ring = r;
- mp->size = n;
+ mp->size = num_elt;
mp->flags = flags;
mp->elt_size = objsz.elt_size;
mp->header_size = objsz.header_size;
@@ -598,35 +570,54 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
mp->private_data_size = private_data_size;
- /* calculate address of the first element for continuous mempool. */
- obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
- private_data_size;
- obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
-
- /* populate address translation fields. */
- mp->pg_num = pg_num;
- mp->pg_shift = pg_shift;
- mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
+ mp->handler_idx = rte_get_mempool_handler_idx(handler_name);
+ if (mp->handler_idx < 0) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot find mempool handler by name!\n");
+ rte_free(te);
+ goto exit;
+ }
- /* mempool elements allocated together with mempool */
- if (vaddr == NULL) {
- mp->elt_va_start = (uintptr_t)obj;
- mp->elt_pa[0] = mp->phys_addr +
- (mp->elt_va_start - (uintptr_t)mp);
+	if (flags & MEMPOOL_F_INT_HANDLER) {
+ /* calculate address of first element for continuous mempool. */
+ obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
+ private_data_size;
+ obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
+
+ /* populate address translation fields. */
+ mp->pg_num = pg_num;
+ mp->pg_shift = pg_shift;
+ mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
+
+ /* mempool elements allocated together with mempool */
+ if (vaddr == NULL) {
+ mp->elt_va_start = (uintptr_t)obj;
+ mp->elt_pa[0] = mp->phys_addr +
+ (mp->elt_va_start - (uintptr_t)mp);
+ /* mempool elements in a separate chunk of memory. */
+ } else {
+ mp->elt_va_start = (uintptr_t)vaddr;
+ memcpy(mp->elt_pa, paddr,
+ sizeof(mp->elt_pa[0]) * pg_num);
+ }
- /* mempool elements in a separate chunk of memory. */
- } else {
- mp->elt_va_start = (uintptr_t)vaddr;
- memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
+ mp->elt_va_end = mp->elt_va_start;
}
- mp->elt_va_end = mp->elt_va_start;
+	/* Parameters are set up. Call the mempool handler's alloc callback */
+ mp->rt_pool =
+ rte_mempool_ext_alloc(mp, name, num_elt, socket_id, flags);
+ if (mp->rt_pool == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Failed to alloc mempool!\n");
+ rte_free(te);
+ goto exit;
+ }
/* call the initializer */
if (mp_init)
mp_init(mp, mp_init_arg);
- mempool_populate(mp, n, 1, obj_init, obj_init_arg);
+ if (obj_init)
+ mempool_populate(mp, num_elt, 1, obj_init, obj_init_arg);
te->data = (void *) mp;
@@ -640,13 +631,79 @@ exit:
return mp;
}
+/* Create the mempool over already allocated chunk of memory */
+struct rte_mempool *
+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags, void *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+{
+ struct rte_mempool *mp = NULL;
+ char handler_name[RTE_MEMPOOL_NAMESIZE];
+
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
+ RTE_CACHE_LINE_MASK) != 0);
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+
+ /* check that we have both VA and PA */
+ if (vaddr != NULL && paddr == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /*
+	 * Since we have 4 combinations of SP/SC/MP/MC, examine the
+	 * flags to select the correct handler name.
+ */
+	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+ sprintf(handler_name, "%s", "ring_sp_sc");
+ else if (flags & MEMPOOL_F_SP_PUT)
+ sprintf(handler_name, "%s", "ring_sp_mc");
+ else if (flags & MEMPOOL_F_SC_GET)
+ sprintf(handler_name, "%s", "ring_mp_sc");
+ else
+ sprintf(handler_name, "%s", "ring_mp_mc");
+
+ flags |= MEMPOOL_F_INT_HANDLER;
+
+ mp = mempool_create(name,
+ n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id,
+ flags,
+ vaddr, paddr,
+ pg_num, pg_shift,
+ handler_name);
+
+ return mp;
+}
+
/* Return the number of entries in the mempool */
unsigned
rte_mempool_count(const struct rte_mempool *mp)
{
unsigned count;
- count = rte_ring_count(mp->ring);
+ count = rte_mempool_ext_get_count(mp);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
{
@@ -802,7 +859,6 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
fprintf(f, " flags=%x\n", mp->flags);
- fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring);
fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr);
fprintf(f, " size=%"PRIu32"\n", mp->size);
fprintf(f, " header_size=%"PRIu32"\n", mp->header_size);
@@ -825,7 +881,7 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
mp->size);
cache_count = rte_mempool_dump_cache(f, mp);
- common_count = rte_ring_count(mp->ring);
+ common_count = rte_mempool_ext_get_count(mp);
if ((cache_count + common_count) > mp->size)
common_count = mp->size - cache_count;
fprintf(f, " common_pool_count=%u\n", common_count);
@@ -919,3 +975,30 @@ void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
}
+
+
+/* create the mempool using an external mempool manager */
+struct rte_mempool *
+rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags,
+ const char *handler_name)
+{
+ struct rte_mempool *mp = NULL;
+
+ mp = mempool_create(name,
+ n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags,
+ NULL, NULL, /* vaddr, paddr */
+ 0, 0, /* pg_num, pg_shift, */
+ handler_name);
+
+ return mp;
+}
@@ -88,6 +88,8 @@ extern "C" {
struct rte_mempool_debug_stats {
uint64_t put_bulk; /**< Number of puts. */
uint64_t put_objs; /**< Number of objects successfully put. */
+ uint64_t put_pool_bulk; /**< Number of puts into pool. */
+	uint64_t put_pool_objs; /**< Number of objects put into pool. */
uint64_t get_success_bulk; /**< Successful allocation number. */
uint64_t get_success_objs; /**< Objects successfully allocated. */
uint64_t get_fail_bulk; /**< Failed allocation number. */
@@ -175,12 +177,85 @@ struct rte_mempool_objtlr {
#endif
};
+/* Handler functions for external mempool support */
+typedef void *(*rte_mempool_alloc_t)(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags);
+typedef int (*rte_mempool_put_t)(void *p,
+ void * const *obj_table, unsigned n);
+typedef int (*rte_mempool_get_t)(void *p, void **obj_table,
+ unsigned n);
+typedef unsigned (*rte_mempool_get_count)(void *p);
+typedef int (*rte_mempool_free_t)(struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for external mempool manager alloc callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param name
+ * Name of the memory pool.
+ * @param n
+ * Number of objects in the mempool.
+ * @param socket_id
+ * socket id on which to allocate.
+ * @param flags
+ * general flags to allocate function (MEMPOOL_F_* flags)
+ */
+void *
+rte_mempool_ext_alloc(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags);
+
+/**
+ * @internal wrapper for external mempool manager get callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to get
+ */
+int
+rte_mempool_ext_get_bulk(struct rte_mempool *mp, void **obj_table,
+ unsigned n);
+
+/**
+ * @internal wrapper for external mempool manager put callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to put
+ */
+int
+rte_mempool_ext_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n);
+
+/**
+ * @internal wrapper for external mempool manager get_count callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ */
+int
+rte_mempool_ext_get_count(const struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for external mempool manager free callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ */
+int
+rte_mempool_ext_free(struct rte_mempool *mp);
+
/**
* The RTE mempool structure.
*/
struct rte_mempool {
char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
- struct rte_ring *ring; /**< Ring to store objects. */
phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
int flags; /**< Flags of the mempool. */
uint32_t size; /**< Size of the mempool. */
@@ -194,6 +269,11 @@ struct rte_mempool {
unsigned private_data_size; /**< Size of private data. */
+ /* Common pool data structure pointer */
+ void *rt_pool;
+
+ int16_t handler_idx;
+
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/** Per-lcore local cache. */
struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
@@ -223,6 +303,8 @@ struct rte_mempool {
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
+#define MEMPOOL_F_INT_HANDLER 0x0020 /**< Using internal mempool handler */
+
/**
* @internal When debug is enabled, store some statistics.
@@ -753,7 +835,7 @@ void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
*/
static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, int is_mp)
+ unsigned n, __attribute__((unused)) int is_mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
struct rte_mempool_cache *cache;
@@ -769,8 +851,7 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/* cache is not enabled or single producer or non-EAL thread */
- if (unlikely(cache_size == 0 || is_mp == 0 ||
- lcore_id >= RTE_MAX_LCORE))
+ if (unlikely(cache_size == 0 || lcore_id >= RTE_MAX_LCORE))
goto ring_enqueue;
/* Go straight to ring if put would overflow mem allocated for cache */
@@ -793,8 +874,8 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
cache->len += n;
- if (cache->len >= flushthresh) {
- rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
+ if (unlikely(cache->len >= flushthresh)) {
+ rte_mempool_ext_put_bulk(mp, &cache->objs[cache_size],
cache->len - cache_size);
cache->len = cache_size;
}
@@ -804,22 +885,10 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
ring_enqueue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
- /* push remaining objects in ring */
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- if (is_mp) {
- if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
- rte_panic("cannot put objects in mempool\n");
- }
- else {
- if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
- rte_panic("cannot put objects in mempool\n");
- }
-#else
- if (is_mp)
- rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
- else
- rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
-#endif
+ /* Increment stats counter to tell us how many pool puts happened */
+ __MEMPOOL_STAT_ADD(mp, put_pool, n);
+
+ rte_mempool_ext_put_bulk(mp, obj_table, n);
}
@@ -943,7 +1012,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
*/
static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
- unsigned n, int is_mc)
+		   unsigned n, __attribute__((unused)) int is_mc)
{
int ret;
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
@@ -954,8 +1023,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
uint32_t cache_size = mp->cache_size;
/* cache is not enabled or single consumer */
- if (unlikely(cache_size == 0 || is_mc == 0 ||
- n >= cache_size || lcore_id >= RTE_MAX_LCORE))
+ if (unlikely(cache_size == 0 || n >= cache_size ||
+ lcore_id >= RTE_MAX_LCORE))
goto ring_dequeue;
cache = &mp->local_cache[lcore_id];
@@ -967,7 +1036,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
uint32_t req = n + (cache_size - cache->len);
/* How many do we require i.e. number to fill the cache + the request */
- ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+ ret = rte_mempool_ext_get_bulk(mp,
+ &cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
/*
* In the offchance that we are buffer constrained,
@@ -995,10 +1065,7 @@ ring_dequeue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* get remaining objects from ring */
- if (is_mc)
- ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
- else
- ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+ ret = rte_mempool_ext_get_bulk(mp, obj_table, n);
if (ret < 0)
__MEMPOOL_STAT_ADD(mp, get_fail, n);
@@ -1401,6 +1468,79 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
void *arg);
+/**
+ * Function to get the name of a mempool handler
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The name of the mempool handler
+ */
+char *rte_mempool_get_handler_name(struct rte_mempool *mp);
+
+/**
+ * Create a new mempool named *name* in memory.
+ *
+ * This function uses an externally defined alloc callback to allocate memory.
+ * The mempool holds n elements.
+ * All elements of the mempool are allocated separately from the mempool header.
+ *
+ * @param name
+ * The name of the mempool.
+ * @param n
+ * The number of elements in the mempool. The optimum size (in terms of
+ * memory usage) for a mempool is when n is a power of two minus one:
+ * n = (2^q - 1).
+ * @param cache_size
+ * If cache_size is non-zero, the rte_mempool library will try to
+ * limit the accesses to the common lockless pool, by maintaining a
+ * per-lcore object cache. This argument must be lower or equal to
+ * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
+ * cache_size to have "n modulo cache_size == 0": if this is
+ * not the case, some elements will always stay in the pool and will
+ * never be used. The access to the per-lcore table is of course
+ * faster than the multi-producer/consumer pool. The cache can be
+ * disabled if the cache_size argument is set to 0; it can be useful to
+ * avoid losing objects in cache. Note that even if not used, the
+ * memory space for cache is always reserved in a mempool structure,
+ * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ * The size of the private data appended after the mempool
+ * structure. This is useful for storing some private data after the
+ * mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ * A function pointer that is called for initialization of the pool,
+ * before object initialization. The user can initialize the private
+ * data in this function if needed. This parameter can be NULL if
+ * not needed.
+ * @param mp_init_arg
+ * An opaque pointer to data that can be used in the mempool
+ * constructor function.
+ * @param obj_init
+ * A function pointer that is called for each object at
+ * initialization of the pool. The user can set some meta data in
+ * objects if needed. This parameter can be NULL if not needed.
+ * The obj_init() function takes the mempool pointer, the init_arg,
+ * the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ * An opaque pointer to data that can be used as an argument for
+ * each call to the object constructor function.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ *   General flags to control the mempool behaviour (MEMPOOL_F_* flags).
+ * @return
+ * The pointer to the new allocated mempool, on success. NULL on error
+ */
+struct rte_mempool *
+rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags,
+ const char *handler_name);
+
#ifdef __cplusplus
}
#endif
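
For reference, a minimal usage sketch of the new API (illustrative only, not
part of the diff; the pool name, element count/size and the built-in
"ring_mp_mc" handler name are example values):

	#include <rte_mempool.h>

	static struct rte_mempool *example_pool;

	static int
	create_example_pool(void)
	{
		/*
		 * 8191 objects of 2048 bytes, no per-lcore cache, backed by
		 * the built-in multi-producer/multi-consumer ring handler.
		 * Any name registered with the handler table may be passed.
		 */
		example_pool = rte_mempool_create_ext("example_pool", 8191, 2048,
				0, 0,             /* cache_size, private_data_size */
				NULL, NULL,       /* mp_init, mp_init_arg */
				NULL, NULL,       /* obj_init, obj_init_arg */
				SOCKET_ID_ANY, 0, /* socket_id, flags */
				"ring_mp_mc");
		return (example_pool == NULL) ? -1 : 0;
	}
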
new file mode 100644
@@ -0,0 +1,236 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+
+#include "rte_mempool.h"
+#include "rte_mempool_internal.h"
+
+/*
+ * Indirect jump table to support external memory pools
+ */
+struct rte_mempool_handler_list mempool_handler_list = {
+	.sl =  RTE_SPINLOCK_INITIALIZER,
+ .num_handlers = 0
+};
+
+/*
+ * Returns the name of the handler used by the mempool
+ */
+char *
+rte_mempool_get_handler_name(struct rte_mempool *mp)
+{
+ return mempool_handler_list.handler[mp->handler_idx].name;
+}
+
+int16_t
+rte_mempool_register_handler(struct rte_mempool_handler *h)
+{
+ int16_t handler_idx;
+
+	/* take the lock on the handler list while registering */
+ rte_spinlock_lock(&mempool_handler_list.sl);
+
+ /* Check whether jump table has space */
+ if (mempool_handler_list.num_handlers >= RTE_MEMPOOL_MAX_HANDLER_IDX) {
+ rte_spinlock_unlock(&mempool_handler_list.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Maximum number of mempool handlers exceeded\n");
+ return -1;
+ }
+
+ if ((h->put == NULL) || (h->get == NULL) ||
+ (h->get_count == NULL)) {
+ rte_spinlock_unlock(&mempool_handler_list.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Missing callback while registering mempool handler\n");
+ return -1;
+ }
+
+ /* add new handler index */
+ handler_idx = mempool_handler_list.num_handlers++;
+
+ snprintf(mempool_handler_list.handler[handler_idx].name,
+ RTE_MEMPOOL_NAMESIZE, "%s", h->name);
+ mempool_handler_list.handler[handler_idx].alloc = h->alloc;
+ mempool_handler_list.handler[handler_idx].put = h->put;
+ mempool_handler_list.handler[handler_idx].get = h->get;
+	mempool_handler_list.handler[handler_idx].get_count = h->get_count;
+	mempool_handler_list.handler[handler_idx].free = h->free;
+
+ rte_spinlock_unlock(&mempool_handler_list.sl);
+
+ return handler_idx;
+}
+
+int16_t
+rte_get_mempool_handler_idx(const char *name)
+{
+ int16_t i;
+
+ for (i = 0; i < mempool_handler_list.num_handlers; i++) {
+ if (!strcmp(name, mempool_handler_list.handler[i].name))
+ return i;
+ }
+ return -1;
+}
+
+static int
+common_ring_mp_put(void *p, void * const *obj_table, unsigned n)
+{
+ return rte_ring_mp_enqueue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static int
+common_ring_sp_put(void *p, void * const *obj_table, unsigned n)
+{
+ return rte_ring_sp_enqueue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static int
+common_ring_mc_get(void *p, void **obj_table, unsigned n)
+{
+ return rte_ring_mc_dequeue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static int
+common_ring_sc_get(void *p, void **obj_table, unsigned n)
+{
+ return rte_ring_sc_dequeue_bulk((struct rte_ring *)p, obj_table, n);
+}
+
+static unsigned
+common_ring_get_count(void *p)
+{
+ return rte_ring_count((struct rte_ring *)p);
+}
+
+static void *
+rte_mempool_common_ring_alloc(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags)
+{
+ struct rte_ring *r;
+ char rg_name[RTE_RING_NAMESIZE];
+ int rg_flags = 0;
+
+ if (flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ /* allocate the ring that will be used to store objects */
+ /* Ring functions will return appropriate errors if we are
+ * running as a secondary process etc., so no checks made
+ * in this function for that condition */
+ snprintf(rg_name, sizeof(rg_name), "%s-ring", name);
+ r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
+ if (r == NULL)
+ return NULL;
+
+ mp->rt_pool = (void *)r;
+
+ return (void *) r;
+}
+
+void *
+rte_mempool_ext_alloc(struct rte_mempool *mp,
+ const char *name, unsigned n, int socket_id, unsigned flags)
+{
+ if (mempool_handler_list.handler[mp->handler_idx].alloc) {
+ return (mempool_handler_list.handler[mp->handler_idx].alloc)
+ (mp, name, n, socket_id, flags);
+ }
+ return NULL;
+}
+
+int
+rte_mempool_ext_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return (mempool_handler_list.handler[mp->handler_idx].get)
+ (mp->rt_pool, obj_table, n);
+}
+
+int
+rte_mempool_ext_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return (mempool_handler_list.handler[mp->handler_idx].put)
+ (mp->rt_pool, obj_table, n);
+}
+
+int
+rte_mempool_ext_get_count(const struct rte_mempool *mp)
+{
+ return (mempool_handler_list.handler[mp->handler_idx].get_count)
+ (mp->rt_pool);
+}
+
+static struct rte_mempool_handler handler_mp_mc = {
+ .name = "ring_mp_mc",
+ .alloc = rte_mempool_common_ring_alloc,
+ .put = common_ring_mp_put,
+ .get = common_ring_mc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+static struct rte_mempool_handler handler_sp_sc = {
+ .name = "ring_sp_sc",
+ .alloc = rte_mempool_common_ring_alloc,
+ .put = common_ring_sp_put,
+ .get = common_ring_sc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+static struct rte_mempool_handler handler_mp_sc = {
+ .name = "ring_mp_sc",
+ .alloc = rte_mempool_common_ring_alloc,
+ .put = common_ring_mp_put,
+ .get = common_ring_sc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+static struct rte_mempool_handler handler_sp_mc = {
+ .name = "ring_sp_mc",
+ .alloc = rte_mempool_common_ring_alloc,
+ .put = common_ring_sp_put,
+ .get = common_ring_mc_get,
+ .get_count = common_ring_get_count,
+ .free = NULL
+};
+
+REGISTER_MEMPOOL_HANDLER(handler_mp_mc);
+REGISTER_MEMPOOL_HANDLER(handler_sp_sc);
+REGISTER_MEMPOOL_HANDLER(handler_mp_sc);
+REGISTER_MEMPOOL_HANDLER(handler_sp_mc);
new file mode 100644
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMPOOL_INTERNAL_H_
+#define _RTE_MEMPOOL_INTERNAL_H_
+
+#include <rte_spinlock.h>
+#include <rte_mempool.h>
+
+#define RTE_MEMPOOL_MAX_HANDLER_IDX 16
+
+struct rte_mempool_handler {
+ char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool handler */
+
+ rte_mempool_alloc_t alloc;
+
+ rte_mempool_get_count get_count;
+
+ rte_mempool_free_t free;
+
+ rte_mempool_put_t put;
+
+ rte_mempool_get_t get;
+} __rte_cache_aligned;
+
+struct rte_mempool_handler_list {
+ rte_spinlock_t sl; /**< Spinlock for add/delete. */
+
+ int32_t num_handlers; /**< Number of handlers that are valid. */
+
+ /* storage for all possible handlers */
+ struct rte_mempool_handler handler[RTE_MEMPOOL_MAX_HANDLER_IDX];
+};
+
+int16_t rte_mempool_register_handler(struct rte_mempool_handler *h);
+int16_t rte_get_mempool_handler_idx(const char *name);
+
+#define REGISTER_MEMPOOL_HANDLER(h) \
+static int16_t __attribute__((used)) testfn_##h(void);\
+int16_t __attribute__((constructor, used)) testfn_##h(void)\
+{\
+ return rte_mempool_register_handler(&h);\
+}
+
+#endif /* _RTE_MEMPOOL_INTERNAL_H_ */
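
To illustrate the registration mechanism, a hedged sketch of an out-of-tree
handler built on the macro above (the "custom_handler" name, the
spinlock-protected stack and all helper names are hypothetical; it only
mirrors how rte_mempool_default.c registers the built-in ring handlers):

	#include <errno.h>
	#include <rte_malloc.h>
	#include <rte_spinlock.h>
	#include "rte_mempool_internal.h"

	/* hypothetical spinlock-protected stack used as the common pool */
	struct custom_stack {
		rte_spinlock_t lock;
		unsigned len;
		unsigned size;
		void *objs[];
	};

	static void *
	custom_alloc(struct rte_mempool *mp __attribute__((unused)),
		const char *name, unsigned n, int socket_id,
		unsigned flags __attribute__((unused)))
	{
		struct custom_stack *s;

		s = rte_zmalloc_socket(name, sizeof(*s) + n * sizeof(void *),
				RTE_CACHE_LINE_SIZE, socket_id);
		if (s == NULL)
			return NULL;
		rte_spinlock_init(&s->lock);
		s->size = n;
		return s;
	}

	static int
	custom_put(void *p, void * const *obj_table, unsigned n)
	{
		struct custom_stack *s = p;
		unsigned i;

		rte_spinlock_lock(&s->lock);
		if (s->len + n > s->size) {
			rte_spinlock_unlock(&s->lock);
			return -ENOBUFS;
		}
		for (i = 0; i < n; i++)
			s->objs[s->len++] = obj_table[i];
		rte_spinlock_unlock(&s->lock);
		return 0;
	}

	static int
	custom_get(void *p, void **obj_table, unsigned n)
	{
		struct custom_stack *s = p;
		unsigned i;

		rte_spinlock_lock(&s->lock);
		if (s->len < n) {
			rte_spinlock_unlock(&s->lock);
			return -ENOENT;
		}
		for (i = 0; i < n; i++)
			obj_table[i] = s->objs[--s->len];
		rte_spinlock_unlock(&s->lock);
		return 0;
	}

	static unsigned
	custom_get_count(void *p)
	{
		return ((struct custom_stack *)p)->len;
	}

	static struct rte_mempool_handler handler_custom = {
		.name = "custom_handler",
		.alloc = custom_alloc,
		.put = custom_put,
		.get = custom_get,
		.get_count = custom_get_count,
		.free = NULL
	};

	REGISTER_MEMPOOL_HANDLER(handler_custom);
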
@@ -6,6 +6,7 @@ DPDK_2.0 {
rte_mempool_calc_obj_size;
rte_mempool_count;
rte_mempool_create;
+ rte_mempool_create_ext;
rte_mempool_dump;
rte_mempool_list_dump;
rte_mempool_lookup;