From patchwork Thu May 19 13:45:00 2016
X-Patchwork-Id: 12896
From: David Hunt <david.hunt@intel.com>
To: dev@dpdk.org
Cc: olivier.matz@6wind.com, yuanhan.liu@linux.intel.com,
 pmatilai@redhat.com, David Hunt <david.hunt@intel.com>
Date: Thu, 19 May 2016 14:45:00 +0100
Message-Id: <1463665501-18325-3-git-send-email-david.hunt@intel.com>
In-Reply-To: <1463665501-18325-1-git-send-email-david.hunt@intel.com>
References: <1460642270-8803-1-git-send-email-olivier.matz@6wind.com>
 <1463665501-18325-1-git-send-email-david.hunt@intel.com>
Subject: [dpdk-dev] [PATCH v5 2/3] app/test: test external mempool handler

Use a minimal custom external mempool handler and check that it also
passes the basic mempool autotests.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 113 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 113 insertions(+)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 9f02758..f55d126 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -85,6 +85,96 @@ static rte_atomic32_t synchro;
 
 /*
+ * Simple example of a custom mempool structure. Holds pointers to all the
+ * free elements in the pool; element memory is provided at populate time.
+ */
+struct custom_mempool {
+	rte_spinlock_t lock;
+	unsigned count;
+	unsigned size;
+	void *elts[];
+};
+
+/*
+ * Allocate the custom external pool structure, with room for 'size' element
+ * pointers; objects are added to it later, when the mempool is populated.
+ */
+static void *
+custom_mempool_alloc(struct rte_mempool *mp)
+{
+	struct custom_mempool *cm;
+
+	cm = rte_zmalloc("custom_mempool",
+		sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0);
+	if (cm == NULL)
+		return NULL;
+
+	rte_spinlock_init(&cm->lock);
+	cm->count = 0;
+	cm->size = mp->size;
+	return cm;
+}
+
+static void
+custom_mempool_free(void *p)
+{
+	rte_free(p);
+}
+
+static int
+custom_mempool_put(void *p, void * const *obj_table, unsigned n)
+{
+	struct custom_mempool *cm = (struct custom_mempool *)p;
+	int ret = 0;
+
+	rte_spinlock_lock(&cm->lock);
+	if (cm->count + n > cm->size) {
+		ret = -ENOBUFS;
+	} else {
+		memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n);
+		cm->count += n;
+	}
+	rte_spinlock_unlock(&cm->lock);
+	return ret;
+}
+
+static int
+custom_mempool_get(void *p, void **obj_table, unsigned n)
+{
+	struct custom_mempool *cm = (struct custom_mempool *)p;
+	int ret = 0;
+
+	rte_spinlock_lock(&cm->lock);
+	if (n > cm->count) {
+		ret = -ENOENT;
+	} else {
+		cm->count -= n;
+		memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n);
+	}
+	rte_spinlock_unlock(&cm->lock);
+	return ret;
+}
+
+static unsigned
+custom_mempool_get_count(void *p)
+{
+	struct custom_mempool *cm = (struct custom_mempool *)p;
+	return cm->count;
+}
+
+static struct rte_mempool_handler mempool_handler_custom = {
+	.name = "custom_handler",
+	.alloc = custom_mempool_alloc,
+	.free = custom_mempool_free,
+	.put = custom_mempool_put,
+	.get = custom_mempool_get,
+	.get_count = custom_mempool_get_count,
+};
+
+MEMPOOL_REGISTER_HANDLER(mempool_handler_custom);
+
+/*
  * save the object number in the first 4 bytes of object data. All
  * other bytes are set to 0.
  */
 static void
@@ -479,6 +569,7 @@ test_mempool(void)
 {
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
+	struct rte_mempool *mp_ext = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -507,6 +598,27 @@ test_mempool(void)
 		goto err;
 	}
 
+	/* create a mempool with an external handler */
+	mp_ext = rte_mempool_create_empty("test_ext",
+		MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_ext == NULL) {
+		printf("cannot allocate mp_ext mempool\n");
+		goto err;
+	}
+	if (rte_mempool_set_handler(mp_ext, "custom_handler") < 0) {
+		printf("cannot set custom handler\n");
+		goto err;
+	}
+	if (rte_mempool_populate_default(mp_ext) < 0) {
+		printf("cannot populate mp_ext mempool\n");
+		goto err;
+	}
+	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
+
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
 		printf("Cannot lookup mempool from its name\n");
 		goto err;
 	}
@@ -547,6 +659,7 @@ test_mempool(void)
 err:
 	rte_mempool_free(mp_nocache);
 	rte_mempool_free(mp_cache);
+	rte_mempool_free(mp_ext);
 	return -1;
 }
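
For reviewers, a minimal sketch (not part of the patch) of how an
application would drive the handler this test registers. It assumes the
API added earlier in this series (rte_mempool_set_handler() and
rte_mempool_populate_default()); the pool name, the sizes and the
use_custom_handler() wrapper below are illustrative only.

#include <rte_mempool.h>
#include <rte_memory.h>	/* SOCKET_ID_ANY */

static int
use_custom_handler(void)
{
	struct rte_mempool *mp;
	void *obj;

	/*
	 * Empty pool: 256 objects of 64 bytes, no per-lcore cache, so
	 * every get/put reaches the handler callbacks directly.
	 */
	mp = rte_mempool_create_empty("example_pool", 256, 64,
				      0, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;

	/* The handler must be selected before the pool is populated. */
	if (rte_mempool_set_handler(mp, "custom_handler") < 0)
		goto fail;

	/*
	 * Allocates the object memory and feeds each object to the
	 * handler's put(), filling the elts[] array.
	 */
	if (rte_mempool_populate_default(mp) < 0)
		goto fail;

	/* Objects now come from, and return to, the custom pool. */
	if (rte_mempool_get(mp, &obj) < 0)
		goto fail;
	rte_mempool_put(mp, obj);

	rte_mempool_free(mp);
	return 0;

fail:
	rte_mempool_free(mp);
	return -1;
}

Passing a cache size of 0 bypasses the per-lcore cache, which makes the
handler's get/put calls easy to observe; with a non-zero cache, most
get/put operations would be absorbed by the cache instead.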