From patchwork Tue Apr 20 00:07:59 2021
X-Patchwork-Submitter: Dharmik Thakkar
X-Patchwork-Id: 91800
From: Dharmik Thakkar
To: Olivier Matz, Andrew Rybchenko
Cc: dev@dpdk.org, nd@arm.com, joyce.kong@arm.com, Dharmik Thakkar
Date: Mon, 19 Apr 2021 19:07:59 -0500
Message-Id: <20210420000800.1504-2-dharmik.thakkar@arm.com>
In-Reply-To: <20210420000800.1504-1-dharmik.thakkar@arm.com>
References: <20210318112022.10510-1-joyce.kong@arm.com> <20210420000800.1504-1-dharmik.thakkar@arm.com>
Subject: [dpdk-dev] [PATCH v3 1/2] lib/mempool: make stats macro generic

Make the __MEMPOOL_STAT_ADD macro more generic and delete the
__MEMPOOL_CONTIG_BLOCKS_STAT_ADD macro.

Suggested-by: Olivier Matz
Signed-off-by: Dharmik Thakkar
Reviewed-by: Ruifeng Wang
Reviewed-by: Honnappa Nagarahalli
Acked-by: Olivier Matz
---
 lib/librte_mempool/rte_mempool.h | 34 +++++++++++++++-----------------
 1 file changed, 16 insertions(+), 18 deletions(-)
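For illustration, a minimal standalone sketch of the call-site shape after
this change (not part of the patch: the pool/stats types, MAX_LCORE, and the
fixed lcore id are simplified stand-ins for struct rte_mempool,
struct rte_mempool_debug_stats, RTE_MAX_LCORE, and rte_lcore_id()):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LCORE 4

struct stats { uint64_t put_bulk; uint64_t put_objs; };
struct pool { struct stats stats[MAX_LCORE]; };

/* Generic form: the caller names the exact counter, so no token pasting
 * of the _objs and _bulk suffixes, and no second, blocks-only macro. */
#define STAT_ADD(p, name, n) do {				\
		unsigned int lcore_id = 0; /* rte_lcore_id() stand-in */ \
		if (lcore_id < MAX_LCORE)			\
			(p)->stats[lcore_id].name += (n);	\
	} while (0)

int main(void)
{
	struct pool mp = { 0 };

	/* One bulk put of 32 objects updates both counters explicitly,
	 * where the old macro pasted the suffixes itself. */
	STAT_ADD(&mp, put_bulk, 1);
	STAT_ADD(&mp, put_objs, 32);

	printf("put_bulk=%" PRIu64 " put_objs=%" PRIu64 "\n",
	       mp.stats[0].put_bulk, mp.stats[0].put_objs);
	return 0;
}

With the counter name spelled out at each call site, one macro covers
object, bulk, and contiguous-block counters alike.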
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index c551cf733acf..848a19226149 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -273,20 +273,11 @@ struct rte_mempool {
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
 		unsigned __lcore_id = rte_lcore_id();		\
 		if (__lcore_id < RTE_MAX_LCORE) {		\
-			mp->stats[__lcore_id].name##_objs += n;	\
-			mp->stats[__lcore_id].name##_bulk += 1;	\
+			mp->stats[__lcore_id].name += n;	\
 		}						\
 	} while(0)
-#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {	\
-		unsigned int __lcore_id = rte_lcore_id();	\
-		if (__lcore_id < RTE_MAX_LCORE) {		\
-			mp->stats[__lcore_id].name##_blks += n;	\
-			mp->stats[__lcore_id].name##_bulk += 1;	\
-		}						\
-	} while (0)
 #else
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
-#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0)
 #endif
 
@@ -1288,7 +1279,8 @@ __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 	void **cache_objs;
 
 	/* increment stat now, adding in mempool always success */
-	__MEMPOOL_STAT_ADD(mp, put, n);
+	__MEMPOOL_STAT_ADD(mp, put_bulk, 1);
+	__MEMPOOL_STAT_ADD(mp, put_objs, n);
 
 	/* No cache provided or if put would overflow mem allocated for cache */
 	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
@@ -1446,7 +1438,8 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 
 	cache->len -= n;
 
-	__MEMPOOL_STAT_ADD(mp, get_success, n);
+	__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+	__MEMPOOL_STAT_ADD(mp, get_success_objs, n);
 
 	return 0;
 
@@ -1455,10 +1448,13 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	/* get remaining objects from ring */
 	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
 
-	if (ret < 0)
-		__MEMPOOL_STAT_ADD(mp, get_fail, n);
-	else
-		__MEMPOOL_STAT_ADD(mp, get_success, n);
+	if (ret < 0) {
+		__MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
+	} else {
+		__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+	}
 
 	return ret;
 }
@@ -1581,11 +1577,13 @@ rte_mempool_get_contig_blocks(struct rte_mempool *mp,
 
 	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
 	if (ret == 0) {
-		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
+		__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_success_blks, n);
 		__mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
 						      1);
 	} else {
-		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
+		__MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
 	}
 
 	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);

From patchwork Tue Apr 20 00:08:00 2021
X-Patchwork-Submitter: Dharmik Thakkar
X-Patchwork-Id: 91801
From: Dharmik Thakkar
To: Olivier Matz, Andrew Rybchenko
Cc: dev@dpdk.org, nd@arm.com, joyce.kong@arm.com, Dharmik Thakkar
Date: Mon, 19 Apr 2021 19:08:00 -0500
Message-Id: <20210420000800.1504-3-dharmik.thakkar@arm.com>
In-Reply-To: <20210420000800.1504-1-dharmik.thakkar@arm.com>
References: <20210318112022.10510-1-joyce.kong@arm.com> <20210420000800.1504-1-dharmik.thakkar@arm.com>
Subject: [dpdk-dev] [PATCH v3 2/2] lib/mempool: distinguish debug counters from cache and pool

From: Joyce Kong

If a cache is enabled, objects are first retrieved from (or put into)
the cache, and only then from (or into) the common pool. The debug
stats currently count cache and common-pool accesses together, so it is
better to distinguish them.
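A minimal standalone sketch of the split this introduces (not part of the
patch: struct cache, struct stats, get_bulk(), and common_pool_dequeue()
are simplified stand-ins for the per-lcore mempool cache, the debug stats,
__mempool_generic_get(), and rte_mempool_ops_dequeue_bulk()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats {
	uint64_t get_cache_bulk;	/* bulks served from per-lcore cache */
	uint64_t get_cache_objs;	/* objects served from per-lcore cache */
	uint64_t get_common_pool_bulk;	/* bulks dequeued from common pool */
	uint64_t get_common_pool_objs;	/* objects dequeued from common pool */
};

struct cache { unsigned int len; void *objs[512]; };

/* Stand-in for rte_mempool_ops_dequeue_bulk(): pretend the ring always
 * has enough objects, and count the common-pool access where it happens. */
static int common_pool_dequeue(struct stats *st, void **tbl, unsigned int n)
{
	memset(tbl, 0, n * sizeof(*tbl));
	st->get_common_pool_bulk += 1;
	st->get_common_pool_objs += n;
	return 0;
}

/* Dequeue n objects, cache first, common pool second: each path bumps its
 * own counters, so cache hits and pool accesses can be told apart. */
static int get_bulk(struct cache *c, struct stats *st, void **tbl,
		unsigned int n)
{
	if (c->len >= n) {
		memcpy(tbl, &c->objs[c->len - n], n * sizeof(void *));
		c->len -= n;
		st->get_cache_bulk += 1;
		st->get_cache_objs += n;
		return 0;
	}
	return common_pool_dequeue(st, tbl, n);
}

int main(void)
{
	struct stats st = { 0 };
	struct cache c = { 0 };
	void *tbl[32];

	(void)get_bulk(&c, &st, tbl, 32);	/* empty cache: pool path */
	c.len = 64;				/* warm cache: cache path */
	(void)get_bulk(&c, &st, tbl, 32);

	printf("cache objs=%llu, common pool objs=%llu\n",
	       (unsigned long long)st.get_cache_objs,
	       (unsigned long long)st.get_common_pool_objs);
	return 0;
}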
Signed-off-by: Joyce Kong
Signed-off-by: Dharmik Thakkar
Reviewed-by: Ruifeng Wang
Reviewed-by: Honnappa Nagarahalli
---
 lib/librte_mempool/rte_mempool.c | 24 ++++++++++++++++
 lib/librte_mempool/rte_mempool.h | 47 ++++++++++++++++++++++----------
 2 files changed, 57 insertions(+), 14 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index afb1239c8d48..339f14455624 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -1244,6 +1244,18 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		sum.put_bulk += mp->stats[lcore_id].put_bulk;
 		sum.put_objs += mp->stats[lcore_id].put_objs;
+		sum.put_common_pool_bulk +=
+			mp->stats[lcore_id].put_common_pool_bulk;
+		sum.put_common_pool_objs +=
+			mp->stats[lcore_id].put_common_pool_objs;
+		sum.put_cache_bulk += mp->stats[lcore_id].put_cache_bulk;
+		sum.put_cache_objs += mp->stats[lcore_id].put_cache_objs;
+		sum.get_common_pool_bulk +=
+			mp->stats[lcore_id].get_common_pool_bulk;
+		sum.get_common_pool_objs +=
+			mp->stats[lcore_id].get_common_pool_objs;
+		sum.get_cache_bulk += mp->stats[lcore_id].get_cache_bulk;
+		sum.get_cache_objs += mp->stats[lcore_id].get_cache_objs;
 		sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
 		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
 		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
@@ -1254,6 +1266,18 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	fprintf(f, "  stats:\n");
 	fprintf(f, "    put_bulk=%"PRIu64"\n", sum.put_bulk);
 	fprintf(f, "    put_objs=%"PRIu64"\n", sum.put_objs);
+	fprintf(f, "    put_common_pool_bulk=%"PRIu64"\n",
+		sum.put_common_pool_bulk);
+	fprintf(f, "    put_common_pool_objs=%"PRIu64"\n",
+		sum.put_common_pool_objs);
+	fprintf(f, "    put_cache_bulk=%"PRIu64"\n", sum.put_cache_bulk);
+	fprintf(f, "    put_cache_objs=%"PRIu64"\n", sum.put_cache_objs);
+	fprintf(f, "    get_common_pool_bulk=%"PRIu64"\n",
+		sum.get_common_pool_bulk);
+	fprintf(f, "    get_common_pool_objs=%"PRIu64"\n",
+		sum.get_common_pool_objs);
+	fprintf(f, "    get_cache_bulk=%"PRIu64"\n", sum.get_cache_bulk);
+	fprintf(f, "    get_cache_objs=%"PRIu64"\n", sum.get_cache_objs);
 	fprintf(f, "    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
 	fprintf(f, "    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 848a19226149..0959f8a3f367 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -66,12 +66,20 @@ extern "C" {
  * A structure that stores the mempool statistics (per-lcore).
  */
 struct rte_mempool_debug_stats {
-	uint64_t put_bulk;         /**< Number of puts. */
-	uint64_t put_objs;         /**< Number of objects successfully put. */
-	uint64_t get_success_bulk; /**< Successful allocation number. */
-	uint64_t get_success_objs; /**< Objects successfully allocated. */
-	uint64_t get_fail_bulk;    /**< Failed allocation number. */
-	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
+	uint64_t put_bulk;		/**< Number of puts. */
+	uint64_t put_objs;		/**< Number of objects successfully put. */
+	uint64_t put_common_pool_bulk;	/**< Number of bulks enqueued in common pool. */
+	uint64_t put_common_pool_objs;	/**< Number of objects enqueued in common pool. */
+	uint64_t put_cache_bulk;	/**< Number of bulks enqueued in cache. */
+	uint64_t put_cache_objs;	/**< Number of objects enqueued in cache. */
+	uint64_t get_common_pool_bulk;	/**< Number of bulks dequeued from common pool. */
+	uint64_t get_common_pool_objs;	/**< Number of objects dequeued from common pool. */
+	uint64_t get_cache_bulk;	/**< Number of bulks dequeued from cache. */
+	uint64_t get_cache_objs;	/**< Number of objects dequeued from cache. */
+	uint64_t get_success_bulk;	/**< Successful allocation number. */
+	uint64_t get_success_objs;	/**< Objects successfully allocated. */
+	uint64_t get_fail_bulk;		/**< Failed allocation number. */
+	uint64_t get_fail_objs;		/**< Objects that failed to be allocated. */
 	/** Successful allocation number of contiguous blocks. */
 	uint64_t get_success_blks;
 	/** Failed allocation number of contiguous blocks. */
@@ -699,10 +707,18 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
 		void **obj_table, unsigned n)
 {
 	struct rte_mempool_ops *ops;
+	int ret;
 
 	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
-	return ops->dequeue(mp, obj_table, n);
+	ret = ops->dequeue(mp, obj_table, n);
+	if (ret == 0) {
+		__MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
+		__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+	}
+	return ret;
 }
 
 /**
@@ -749,6 +765,8 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
 {
 	struct rte_mempool_ops *ops;
 
+	__MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
+	__MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
 	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->enqueue(mp, obj_table, n);
@@ -1297,14 +1315,18 @@ __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 
 	/* Add elements back into the cache */
 	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);
-
 	cache->len += n;
+	__MEMPOOL_STAT_ADD(mp, put_cache_bulk, 1);
+
 	if (cache->len >= cache->flushthresh) {
+		__MEMPOOL_STAT_ADD(mp, put_cache_objs,
+				n - (cache->len - cache->size));
 		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
 				cache->len - cache->size);
 		cache->len = cache->size;
-	}
+	} else
+		__MEMPOOL_STAT_ADD(mp, put_cache_objs, n);
 
 	return;
@@ -1438,8 +1460,8 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 
 	cache->len -= n;
 
-	__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
-	__MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+	__MEMPOOL_STAT_ADD(mp, get_cache_bulk, 1);
+	__MEMPOOL_STAT_ADD(mp, get_cache_objs, n);
 
 	return 0;
 
@@ -1451,9 +1473,6 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	if (ret < 0) {
 		__MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
 		__MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
-	} else {
-		__MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
-		__MEMPOOL_STAT_ADD(mp, get_success_objs, n);
 	}
 
 	return ret;
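
A usage sketch of how the new counters surface (not part of the series: the
pool name and sizing below are illustrative, and it assumes a build with
RTE_LIBRTE_MEMPOOL_DEBUG enabled so the stats fields exist):

#include <stdio.h>

#include <rte_eal.h>
#include <rte_mempool.h>

int main(int argc, char **argv)
{
	void *objs[32];

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* 4095 objects of 64 bytes, with a 256-object per-lcore cache. */
	struct rte_mempool *mp = rte_mempool_create("test_pool", 4095, 64,
			256, 0, NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;

	/* One bulk get and put; with the cache enabled, the dump shows
	 * how many accesses the cache served and how many went through
	 * to the common pool. */
	if (rte_mempool_get_bulk(mp, objs, 32) == 0)
		rte_mempool_put_bulk(mp, objs, 32);

	rte_mempool_dump(stdout, mp);
	rte_mempool_free(mp);
	return 0;
}

rte_mempool_dump() then prints the stats block extended in the first hunk
above, with separate put/get rows for the cache and the common pool.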