get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
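
Reads are anonymous; updates (PUT/PATCH) require an authenticated maintainer account. As an illustration, a minimal Python sketch, assuming the third-party requests library is installed; the API token below is a placeholder, and the state value is taken from the example response that follows:

    import requests

    BASE = "http://patches.dpdk.org/api"

    # GET: fetch the patch shown below as JSON (no authentication needed).
    resp = requests.get(BASE + "/patches/13170/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PATCH: partially update the patch, e.g. its state. Patchwork uses
    # token authentication; "REPLACE_ME" is a placeholder, not a real token.
    resp = requests.patch(
        BASE + "/patches/13170/",
        headers={"Authorization": "Token REPLACE_ME"},
        json={"state": "superseded"},
    )

The example exchange below shows what the GET request returns.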

GET /api/patches/13170/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 13170,
    "url": "http://patches.dpdk.org/api/patches/13170/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1464874043-67467-2-git-send-email-david.hunt@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1464874043-67467-2-git-send-email-david.hunt@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1464874043-67467-2-git-send-email-david.hunt@intel.com",
    "date": "2016-06-02T13:27:19",
    "name": "[dpdk-dev,v7,1/5] mempool: support external mempool operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8a37dc99de08659508cca5c11875c43910b524f5",
    "submitter": {
        "id": 342,
        "url": "http://patches.dpdk.org/api/people/342/?format=api",
        "name": "Hunt, David",
        "email": "david.hunt@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1464874043-67467-2-git-send-email-david.hunt@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/13170/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/13170/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 2991E593A;\n\tThu,  2 Jun 2016 15:27:44 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n\tby dpdk.org (Postfix) with ESMTP id 80225379B\n\tfor <dev@dpdk.org>; Thu,  2 Jun 2016 15:27:42 +0200 (CEST)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby fmsmga102.fm.intel.com with ESMTP; 02 Jun 2016 06:27:41 -0700",
            "from sie-lab-214-251.ir.intel.com (HELO silpixa373510.ir.intel.com)\n\t([10.237.214.251])\n\tby fmsmga002.fm.intel.com with ESMTP; 02 Jun 2016 06:27:40 -0700"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.26,406,1459839600\"; d=\"scan'208\";a=\"993801706\"",
        "From": "David Hunt <david.hunt@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "olivier.matz@6wind.com, viktorin@rehivetech.com,\n\tjerin.jacob@caviumnetworks.com, David Hunt <david.hunt@intel.com>",
        "Date": "Thu,  2 Jun 2016 14:27:19 +0100",
        "Message-Id": "<1464874043-67467-2-git-send-email-david.hunt@intel.com>",
        "X-Mailer": "git-send-email 2.5.5",
        "In-Reply-To": "<1464874043-67467-1-git-send-email-david.hunt@intel.com>",
        "References": "<1464797998-76690-1-git-send-email-david.hunt@intel.com>\n\t<1464874043-67467-1-git-send-email-david.hunt@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v7 1/5] mempool: support external mempool\n\toperations",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Until now, the objects stored in a mempool were internally stored in a\nring. This patch introduces the possibility to register external handlers\nreplacing the ring.\n\nThe default behavior remains unchanged, but calling the new function\nrte_mempool_set_handler() right after rte_mempool_create_empty() allows\nthe user to change the handler that will be used when populating\nthe mempool.\n\nSigned-off-by: Olivier Matz <olivier.matz@6wind.com>\nSigned-off-by: David Hunt <david.hunt@intel.com>\n---\n lib/librte_mempool/Makefile          |   1 +\n lib/librte_mempool/rte_mempool.c     |  71 ++++-------\n lib/librte_mempool/rte_mempool.h     | 240 ++++++++++++++++++++++++++++++++---\n lib/librte_mempool/rte_mempool_ops.c | 141 ++++++++++++++++++++\n 4 files changed, 389 insertions(+), 64 deletions(-)\n create mode 100644 lib/librte_mempool/rte_mempool_ops.c",
    "diff": "diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile\nindex 43423e0..4cbf772 100644\n--- a/lib/librte_mempool/Makefile\n+++ b/lib/librte_mempool/Makefile\n@@ -42,6 +42,7 @@ LIBABIVER := 2\n \n # all source are stored in SRCS-y\n SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c\n+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ops.c\n # install includes\n SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h\n \ndiff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c\nindex b54de43..1c61c57 100644\n--- a/lib/librte_mempool/rte_mempool.c\n+++ b/lib/librte_mempool/rte_mempool.c\n@@ -148,7 +148,7 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, phys_addr_t physaddr)\n #endif\n \n \t/* enqueue in ring */\n-\trte_ring_sp_enqueue(mp->ring, obj);\n+\trte_mempool_ops_enqueue_bulk(mp, &obj, 1);\n }\n \n /* call obj_cb() for each mempool element */\n@@ -303,40 +303,6 @@ rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,\n \treturn (size_t)paddr_idx << pg_shift;\n }\n \n-/* create the internal ring */\n-static int\n-rte_mempool_ring_create(struct rte_mempool *mp)\n-{\n-\tint rg_flags = 0, ret;\n-\tchar rg_name[RTE_RING_NAMESIZE];\n-\tstruct rte_ring *r;\n-\n-\tret = snprintf(rg_name, sizeof(rg_name),\n-\t\tRTE_MEMPOOL_MZ_FORMAT, mp->name);\n-\tif (ret < 0 || ret >= (int)sizeof(rg_name))\n-\t\treturn -ENAMETOOLONG;\n-\n-\t/* ring flags */\n-\tif (mp->flags & MEMPOOL_F_SP_PUT)\n-\t\trg_flags |= RING_F_SP_ENQ;\n-\tif (mp->flags & MEMPOOL_F_SC_GET)\n-\t\trg_flags |= RING_F_SC_DEQ;\n-\n-\t/* Allocate the ring that will be used to store objects.\n-\t * Ring functions will return appropriate errors if we are\n-\t * running as a secondary process etc., so no checks made\n-\t * in this function for that condition.\n-\t */\n-\tr = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),\n-\t\tmp->socket_id, rg_flags);\n-\tif (r == NULL)\n-\t\treturn -rte_errno;\n-\n-\tmp->ring = r;\n-\tmp->flags |= MEMPOOL_F_RING_CREATED;\n-\treturn 0;\n-}\n-\n /* free a memchunk allocated with rte_memzone_reserve() */\n static void\n rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,\n@@ -354,7 +320,7 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)\n \tvoid *elt;\n \n \twhile (!STAILQ_EMPTY(&mp->elt_list)) {\n-\t\trte_ring_sc_dequeue(mp->ring, &elt);\n+\t\trte_mempool_ops_dequeue_bulk(mp, &elt, 1);\n \t\t(void)elt;\n \t\tSTAILQ_REMOVE_HEAD(&mp->elt_list, next);\n \t\tmp->populated_size--;\n@@ -383,13 +349,16 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,\n \tunsigned i = 0;\n \tsize_t off;\n \tstruct rte_mempool_memhdr *memhdr;\n-\tint ret;\n \n \t/* create the internal ring if not already done */\n \tif ((mp->flags & MEMPOOL_F_RING_CREATED) == 0) {\n-\t\tret = rte_mempool_ring_create(mp);\n-\t\tif (ret < 0)\n-\t\t\treturn ret;\n+\t\trte_errno = 0;\n+\t\tmp->pool_data = rte_mempool_ops_alloc(mp);\n+\t\tif (mp->pool_data == NULL) {\n+\t\t\tif (rte_errno == 0)\n+\t\t\t\treturn -EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n \t}\n \n \t/* mempool is already populated */\n@@ -703,7 +672,7 @@ rte_mempool_free(struct rte_mempool *mp)\n \trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n \n \trte_mempool_free_memchunks(mp);\n-\trte_ring_free(mp->ring);\n+\trte_mempool_ops_free(mp);\n \trte_memzone_free(mp->mz);\n }\n \n@@ -815,6 +784,20 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,\n \t\tRTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0));\n \n \tte->data = mp;\n+\n+\t/*\n+\t 
* Since we have 4 combinations of the SP/SC/MP/MC examine the flags to\n+\t * set the correct index into the table of ops structs.\n+\t */\n+\tif (flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET))\n+\t\trte_mempool_set_ops_byname(mp, \"ring_sp_sc\");\n+\telse if (flags & MEMPOOL_F_SP_PUT)\n+\t\trte_mempool_set_ops_byname(mp, \"ring_sp_mc\");\n+\telse if (flags & MEMPOOL_F_SC_GET)\n+\t\trte_mempool_set_ops_byname(mp, \"ring_mp_sc\");\n+\telse\n+\t\trte_mempool_set_ops_byname(mp, \"ring_mp_mc\");\n+\n \trte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);\n \tTAILQ_INSERT_TAIL(mempool_list, te, next);\n \trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n@@ -930,7 +913,7 @@ rte_mempool_count(const struct rte_mempool *mp)\n \tunsigned count;\n \tunsigned lcore_id;\n \n-\tcount = rte_ring_count(mp->ring);\n+\tcount = rte_mempool_ops_get_count(mp);\n \n \tif (mp->cache_size == 0)\n \t\treturn count;\n@@ -1123,7 +1106,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)\n \n \tfprintf(f, \"mempool <%s>@%p\\n\", mp->name, mp);\n \tfprintf(f, \"  flags=%x\\n\", mp->flags);\n-\tfprintf(f, \"  ring=<%s>@%p\\n\", mp->ring->name, mp->ring);\n+\tfprintf(f, \"  pool=%p\\n\", mp->pool_data);\n \tfprintf(f, \"  phys_addr=0x%\" PRIx64 \"\\n\", mp->mz->phys_addr);\n \tfprintf(f, \"  nb_mem_chunks=%u\\n\", mp->nb_mem_chunks);\n \tfprintf(f, \"  size=%\"PRIu32\"\\n\", mp->size);\n@@ -1144,7 +1127,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)\n \t}\n \n \tcache_count = rte_mempool_dump_cache(f, mp);\n-\tcommon_count = rte_ring_count(mp->ring);\n+\tcommon_count = rte_mempool_ops_get_count(mp);\n \tif ((cache_count + common_count) > mp->size)\n \t\tcommon_count = mp->size - cache_count;\n \tfprintf(f, \"  common_pool_count=%u\\n\", common_count);\ndiff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h\nindex 60339bd..a6b28b0 100644\n--- a/lib/librte_mempool/rte_mempool.h\n+++ b/lib/librte_mempool/rte_mempool.h\n@@ -67,6 +67,7 @@\n #include <inttypes.h>\n #include <sys/queue.h>\n \n+#include <rte_spinlock.h>\n #include <rte_log.h>\n #include <rte_debug.h>\n #include <rte_lcore.h>\n@@ -204,9 +205,13 @@ struct rte_mempool_memhdr {\n struct rte_mempool {\n \tchar name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */\n \tstruct rte_ring *ring;           /**< Ring to store objects. */\n+\tunion {\n+\t\tvoid *pool_data;         /**< Ring or pool to store objects */\n+\t\tuint64_t pool_id;        /**< External mempool identifier */\n+\t};\n \tconst struct rte_memzone *mz;    /**< Memzone where pool is allocated */\n \tint flags;                       /**< Flags of the mempool. */\n-\tint socket_id;                   /**< Socket id passed at mempool creation. */\n+\tint socket_id;                   /**< Socket id passed at create */\n \tuint32_t size;                   /**< Max size of the mempool. */\n \tuint32_t cache_size;             /**< Size of per-lcore local cache. */\n \tuint32_t cache_flushthresh;\n@@ -217,6 +222,14 @@ struct rte_mempool {\n \tuint32_t trailer_size;           /**< Size of trailer (after elt). */\n \n \tunsigned private_data_size;      /**< Size of private data. 
*/\n+\t/**\n+\t * Index into rte_mempool_ops_table array of mempool ops\n+\t * structs, which contain callback function pointers.\n+\t * We're using an index here rather than pointers to the callbacks\n+\t * to facilitate any secondary processes that may want to use\n+\t * this mempool.\n+\t */\n+\tint32_t ops_index;\n \n \tstruct rte_mempool_cache *local_cache; /**< Per-lcore local cache */\n \n@@ -325,6 +338,204 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,\n #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)\n #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */\n \n+#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */\n+\n+/**\n+ * prototype for implementation specific data provisioning function\n+ * The function should provide the implementation specific memory for\n+ * for use by the other mempool ops functions in a given mempool ops struct.\n+ * E.g. the default ops provides an instance of the rte_ring for this purpose.\n+ * it will mostlikely point to a different type of data structure, and\n+ * will be transparent to the application programmer.\n+ * The function should also not touch the given *mp instance.\n+ */\n+typedef void *(*rte_mempool_alloc_t)(const struct rte_mempool *mp);\n+\n+/** Free the opaque private data pointed to by mp->pool_data pointer */\n+typedef void (*rte_mempool_free_t)(void *p);\n+\n+/**\n+ * Put an object in the external pool.\n+ * The *p pointer is the opaque data for a given mempool manager (ring,\n+ * array, linked list, etc)\n+ */\n+typedef int (*rte_mempool_put_t)(void *p,\n+\t\tvoid * const *obj_table, unsigned int n);\n+\n+/** Get an object from the external pool. */\n+typedef int (*rte_mempool_get_t)(void *p,\n+\t\tvoid **obj_table, unsigned int n);\n+\n+/** Return the number of available objects in the external pool. */\n+typedef unsigned (*rte_mempool_get_count)(void *p);\n+\n+/** Structure defining mempool operations structure */\n+struct rte_mempool_ops {\n+\tchar name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct */\n+\trte_mempool_alloc_t alloc;       /**< Allocate private data */\n+\trte_mempool_free_t free;         /**< Free the external pool. */\n+\trte_mempool_put_t put;           /**< Put an object. */\n+\trte_mempool_get_t get;           /**< Get an object. */\n+\trte_mempool_get_count get_count; /**< Get the number of available objs. */\n+} __rte_cache_aligned;\n+\n+#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */\n+\n+/**\n+ * Structure storing the table of registered ops structs, each of which contain\n+ * the function pointers for the mempool ops functions.\n+ * Each process has it's own storage for this ops struct aray so that\n+ * the mempools can be shared across primary and secondary processes.\n+ * The indices used to access the array are valid across processes, whereas\n+ * any function pointers stored directly in the mempool struct would not be.\n+ * This results in us simply having \"ops_index\" in the mempool struct.\n+ */\n+struct rte_mempool_ops_table {\n+\trte_spinlock_t sl;     /**< Spinlock for add/delete. */\n+\tuint32_t num_ops;      /**< Number of used ops structs in the table. 
*/\n+\t/**\n+\t * Storage for all possible ops structs.\n+\t */\n+\tstruct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];\n+} __rte_cache_aligned;\n+\n+/** Array of registered ops structs */\n+extern struct rte_mempool_ops_table rte_mempool_ops_table;\n+\n+/**\n+ * @internal Get the mempool ops struct from its index.\n+ *\n+ * @param ops_index\n+ *   The index of the ops struct in the ops struct table. It must be a valid\n+ *   index: (0 <= idx < num_ops).\n+ * @return\n+ *   The pointer to the ops struct in the table.\n+ */\n+static inline struct rte_mempool_ops *\n+rte_mempool_ops_get(int ops_index)\n+{\n+\treturn &rte_mempool_ops_table.ops[ops_index];\n+}\n+\n+/**\n+ * @internal wrapper for external mempool manager alloc callback.\n+ *\n+ * @param mp\n+ *   Pointer to the memory pool.\n+ * @return\n+ *   The opaque pointer to the external pool.\n+ */\n+void *\n+rte_mempool_ops_alloc(const struct rte_mempool *mp);\n+\n+/**\n+ * @internal wrapper for external mempool manager get callback.\n+ *\n+ * @param mp\n+ *   Pointer to the memory pool.\n+ * @param obj_table\n+ *   Pointer to a table of void * pointers (objects).\n+ * @param n\n+ *   Number of objects to get.\n+ * @return\n+ *   - 0: Success; got n objects.\n+ *   - <0: Error; code of get function.\n+ */\n+static inline int\n+rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,\n+\t\tvoid **obj_table, unsigned n)\n+{\n+\tstruct rte_mempool_ops *ops;\n+\n+\tops = rte_mempool_ops_get(mp->ops_index);\n+\treturn ops->get(mp->pool_data, obj_table, n);\n+}\n+\n+/**\n+ * @internal wrapper for external mempool manager put callback.\n+ *\n+ * @param mp\n+ *   Pointer to the memory pool.\n+ * @param obj_table\n+ *   Pointer to a table of void * pointers (objects).\n+ * @param n\n+ *   Number of objects to put.\n+ * @return\n+ *   - 0: Success; n objects supplied.\n+ *   - <0: Error; code of put function.\n+ */\n+static inline int\n+rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,\n+\t\tunsigned n)\n+{\n+\tstruct rte_mempool_ops *ops;\n+\n+\tops = rte_mempool_ops_get(mp->ops_index);\n+\treturn ops->put(mp->pool_data, obj_table, n);\n+}\n+\n+/**\n+ * @internal wrapper for external mempool manager get_count callback.\n+ *\n+ * @param mp\n+ *   Pointer to the memory pool.\n+ * @return\n+ *   The number of available objects in the external pool.\n+ */\n+unsigned\n+rte_mempool_ops_get_count(const struct rte_mempool *mp);\n+\n+/**\n+ * @internal wrapper for external mempool manager free callback.\n+ *\n+ * @param mp\n+ *   Pointer to the memory pool.\n+ */\n+void\n+rte_mempool_ops_free(struct rte_mempool *mp);\n+\n+/**\n+ * Set the ops of a mempool\n+ *\n+ * This can only be done on a mempool that is not populated, i.e. 
just after\n+ * a call to rte_mempool_create_empty().\n+ *\n+ * @param mp\n+ *   Pointer to the memory pool.\n+ * @param name\n+ *   Name of the ops structure to use for this mempool.\n+ * @return\n+ *   - 0: Sucess; the mempool is now using the requested ops functions\n+ *   - -EINVAL - Invalid ops struct name provided\n+ *   - -EEXIST - mempool already has an ops struct assigned\n+ */\n+int\n+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name);\n+\n+/**\n+ * Register mempool operations\n+ *\n+ * @param h\n+ *   Pointer to and ops structure to register\n+ * @return\n+ *   - >=0: Sucess; return the index of the ops struct in the table.\n+ *   - -EINVAL - some missing callbacks while registering ops struct\n+ *   - -ENOSPC - the maximum number of ops structs has been reached\n+ */\n+int rte_mempool_ops_register(const struct rte_mempool_ops *h);\n+\n+/**\n+ * Macro to statically register the ops of an external mempool manager\n+ * Note that the rte_mempool_ops_register fails silently here when\n+ * more then RTE_MEMPOOL_MAX_OPS_IDX is registered.\n+ */\n+#define MEMPOOL_REGISTER_OPS(h)\t\t\t\t\t\\\n+\tvoid mp_hdlr_init_##h(void);\t\t\t\t\t\\\n+\tvoid __attribute__((constructor, used)) mp_hdlr_init_##h(void)\t\\\n+\t{\t\t\t\t\t\t\t\t\\\n+\t\trte_mempool_ops_register(&h);\t\t\t\\\n+\t}\n+\n /**\n  * An object callback function for mempool.\n  *\n@@ -774,7 +985,7 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,\n \tcache->len += n;\n \n \tif (cache->len >= flushthresh) {\n-\t\trte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],\n+\t\trte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache_size],\n \t\t\t\tcache->len - cache_size);\n \t\tcache->len = cache_size;\n \t}\n@@ -785,19 +996,10 @@ ring_enqueue:\n \n \t/* push remaining objects in ring */\n #ifdef RTE_LIBRTE_MEMPOOL_DEBUG\n-\tif (is_mp) {\n-\t\tif (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)\n-\t\t\trte_panic(\"cannot put objects in mempool\\n\");\n-\t}\n-\telse {\n-\t\tif (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)\n-\t\t\trte_panic(\"cannot put objects in mempool\\n\");\n-\t}\n+\tif (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)\n+\t\trte_panic(\"cannot put objects in mempool\\n\");\n #else\n-\tif (is_mp)\n-\t\trte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);\n-\telse\n-\t\trte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);\n+\trte_mempool_ops_enqueue_bulk(mp, obj_table, n);\n #endif\n }\n \n@@ -922,7 +1124,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)\n  */\n static inline int __attribute__((always_inline))\n __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,\n-\t\t   unsigned n, int is_mc)\n+\t\t   unsigned int n, int is_mc)\n {\n \tint ret;\n \tstruct rte_mempool_cache *cache;\n@@ -945,7 +1147,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,\n \t\tuint32_t req = n + (cache_size - cache->len);\n \n \t\t/* How many do we require i.e. 
number to fill the cache + the request */\n-\t\tret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);\n+\t\tret = rte_mempool_ops_dequeue_bulk(mp,\n+\t\t\t&cache->objs[cache->len], req);\n \t\tif (unlikely(ret < 0)) {\n \t\t\t/*\n \t\t\t * In the offchance that we are buffer constrained,\n@@ -972,10 +1175,7 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,\n ring_dequeue:\n \n \t/* get remaining objects from ring */\n-\tif (is_mc)\n-\t\tret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);\n-\telse\n-\t\tret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);\n+\tret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);\n \n \tif (ret < 0)\n \t\t__MEMPOOL_STAT_ADD(mp, get_fail, n);\ndiff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c\nnew file mode 100644\nindex 0000000..ec92a58\n--- /dev/null\n+++ b/lib/librte_mempool/rte_mempool_ops.c\n@@ -0,0 +1,141 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.\n+ *   Copyright(c) 2016 6WIND S.A.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdio.h>\n+#include <string.h>\n+\n+#include <rte_mempool.h>\n+\n+/* indirect jump table to support external memory pools */\n+struct rte_mempool_ops_table rte_mempool_ops_table = {\n+\t.sl =  RTE_SPINLOCK_INITIALIZER ,\n+\t.num_ops = 0\n+};\n+\n+/* add a new ops struct in rte_mempool_ops_table, return its index */\n+int\n+rte_mempool_ops_register(const struct rte_mempool_ops *h)\n+{\n+\tstruct rte_mempool_ops *ops;\n+\tint16_t ops_index;\n+\n+\trte_spinlock_lock(&rte_mempool_ops_table.sl);\n+\n+\tif (rte_mempool_ops_table.num_ops >=\n+\t\t\tRTE_MEMPOOL_MAX_OPS_IDX) {\n+\t\trte_spinlock_unlock(&rte_mempool_ops_table.sl);\n+\t\tRTE_LOG(ERR, MEMPOOL,\n+\t\t\t\"Maximum number of mempool ops structs exceeded\\n\");\n+\t\treturn -ENOSPC;\n+\t}\n+\n+\tif (h->put == NULL || h->get == NULL || h->get_count == NULL) {\n+\t\trte_spinlock_unlock(&rte_mempool_ops_table.sl);\n+\t\tRTE_LOG(ERR, MEMPOOL,\n+\t\t\t\"Missing callback while registering mempool ops\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tops_index = rte_mempool_ops_table.num_ops++;\n+\tops = &rte_mempool_ops_table.ops[ops_index];\n+\tsnprintf(ops->name, sizeof(ops->name), \"%s\", h->name);\n+\tops->alloc = h->alloc;\n+\tops->put = h->put;\n+\tops->get = h->get;\n+\tops->get_count = h->get_count;\n+\n+\trte_spinlock_unlock(&rte_mempool_ops_table.sl);\n+\n+\treturn ops_index;\n+}\n+\n+/* wrapper to allocate an external mempool's private (pool) data */\n+void *\n+rte_mempool_ops_alloc(const struct rte_mempool *mp)\n+{\n+\tstruct rte_mempool_ops *ops;\n+\n+\tops = rte_mempool_ops_get(mp->ops_index);\n+\tif (ops->alloc == NULL)\n+\t\treturn NULL;\n+\treturn ops->alloc(mp);\n+}\n+\n+/* wrapper to free an external pool ops */\n+void\n+rte_mempool_ops_free(struct rte_mempool *mp)\n+{\n+\tstruct rte_mempool_ops *ops;\n+\n+\tops = rte_mempool_ops_get(mp->ops_index);\n+\tif (ops->free == NULL)\n+\t\treturn;\n+\treturn ops->free(mp);\n+}\n+\n+/* wrapper to get available objects in an external mempool */\n+unsigned int\n+rte_mempool_ops_get_count(const struct rte_mempool *mp)\n+{\n+\tstruct rte_mempool_ops *ops;\n+\n+\tops = rte_mempool_ops_get(mp->ops_index);\n+\treturn ops->get_count(mp->pool_data);\n+}\n+\n+/* sets mempool ops previously registered by rte_mempool_ops_register */\n+int\n+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name)\n+{\n+\tstruct rte_mempool_ops *ops = NULL;\n+\tunsigned i;\n+\n+\t/* too late, the mempool is already populated */\n+\tif (mp->flags & MEMPOOL_F_RING_CREATED)\n+\t\treturn -EEXIST;\n+\n+\tfor (i = 0; i < rte_mempool_ops_table.num_ops; i++) {\n+\t\tif (!strcmp(name,\n+\t\t\t\trte_mempool_ops_table.ops[i].name)) {\n+\t\t\tops = &rte_mempool_ops_table.ops[i];\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tif (ops == NULL)\n+\t\treturn -EINVAL;\n+\n+\tmp->ops_index = i;\n+\treturn 0;\n+}\n",
    "prefixes": [
        "dpdk-dev",
        "v7",
        "1/5"
    ]
}
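
The mbox, comments, and checks URLs in the response above are themselves plain GET endpoints. A follow-up sketch under the same assumptions (Python with requests installed; comment objects are assumed to carry a submitter block shaped like the patch's own):

    import requests

    # The mbox endpoint serves the raw patch email, suitable for `git am`.
    mbox = requests.get(
        "http://patches.dpdk.org/project/dpdk/patch/"
        "1464874043-67467-2-git-send-email-david.hunt@intel.com/mbox/"
    )
    with open("mempool-ops.patch", "wb") as f:
        f.write(mbox.content)

    # Comments and CI checks are separate JSON collections.
    comments = requests.get(
        "http://patches.dpdk.org/api/patches/13170/comments/"
    ).json()
    for comment in comments:
        print(comment["submitter"]["name"])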