From patchwork Fri Nov 24 16:06:27 2017
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 31651
X-Patchwork-Delegate: thomas@monjalon.net
From: Andrew Rybchenko
To: dev@dpdk.org
CC: Olivier Matz, "Artem V. Andreev"
Date: Fri, 24 Nov 2017 16:06:27 +0000
Message-ID: <1511539591-20966-3-git-send-email-arybchenko@solarflare.com>
In-Reply-To: <1511539591-20966-1-git-send-email-arybchenko@solarflare.com>
References: <1511539591-20966-1-git-send-email-arybchenko@solarflare.com>
Subject: [dpdk-dev] [RFC PATCH 2/6] mempool: implement clustered object allocation

From: "Artem V. Andreev"

Clustered allocation is required to simplify packing objects into
buckets and to make it cheap to find the bucket control structure
from a given object.

Signed-off-by: Artem V. Andreev
Signed-off-by: Andrew Rybchenko
---
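A note on why power-of-two cluster alignment matters (an editor's
sketch, not part of this patch; bucket_header and obj_to_bucket_hdr
are hypothetical names): once every cluster starts at a power-of-two
boundary, a driver can recover the bucket control structure from any
object pointer with a single mask operation, without storing a
per-object back-pointer.

#include <stdint.h>

struct bucket_header;	/* hypothetical driver-private control structure */

static inline struct bucket_header *
obj_to_bucket_hdr(void *obj, uintptr_t cluster_align /* power of two */)
{
	/* Clear the low bits: the cluster, and thus the header placed
	 * at its start, begins at the enclosing cluster_align boundary. */
	return (struct bucket_header *)((uintptr_t)obj &
					~(cluster_align - 1));
}
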
 lib/librte_mempool/rte_mempool.c | 39 +++++++++++++++++++++++++++++++++++----
 lib/librte_mempool/rte_mempool.h | 23 +++++++++++++++++++++--
 test/test/test_mempool.c         |  2 +-
 3 files changed, 57 insertions(+), 7 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d50dba4..43455a3 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -239,7 +239,8 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
  */
 size_t
 rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
-		      unsigned int flags)
+		      unsigned int flags,
+		      const struct rte_mempool_info *info)
 {
 	size_t obj_per_page, pg_num, pg_sz;
 	unsigned int mask;
@@ -252,6 +253,17 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
 	if (total_elt_sz == 0)
 		return 0;
 
+	if (flags & MEMPOOL_F_CAPA_ALLOCATE_IN_CLUSTERS) {
+		unsigned int align_shift =
+			rte_bsf32(
+				rte_align32pow2(total_elt_sz *
+						info->cluster_size));
+		if (pg_shift < align_shift) {
+			return ((elt_num / info->cluster_size) + 2)
+				<< align_shift;
+		}
+	}
+
 	if (pg_shift == 0)
 		return total_elt_sz * elt_num;
 
@@ -362,6 +374,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	void *opaque)
 {
 	unsigned total_elt_sz;
+	unsigned int page_align_size = 0;
 	unsigned i = 0;
 	size_t off;
 	struct rte_mempool_memhdr *memhdr;
@@ -407,7 +420,11 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	memhdr->free_cb = free_cb;
 	memhdr->opaque = opaque;
 
-	if (mp->flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
+	if (mp->flags & MEMPOOL_F_CAPA_ALLOCATE_IN_CLUSTERS) {
+		page_align_size = rte_align32pow2(total_elt_sz *
+						  mp->info.cluster_size);
+		off = RTE_PTR_ALIGN_CEIL(vaddr, page_align_size) - vaddr;
+	} else if (mp->flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
 		/* align object start address to a multiple of total_elt_sz */
 		off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
 	else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
@@ -424,6 +441,10 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 		mempool_add_elem(mp, (char *)vaddr + off, iova + off);
 		off += mp->elt_size + mp->trailer_size;
 		i++;
+		if ((mp->flags & MEMPOOL_F_CAPA_ALLOCATE_IN_CLUSTERS) &&
+		    (i % mp->info.cluster_size) == 0)
+			off = RTE_PTR_ALIGN_CEIL((char *)vaddr + off,
+						 page_align_size) - vaddr;
 	}
 
 	/* not enough room to store one object */
@@ -579,6 +600,16 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	if ((ret < 0) && (ret != -ENOTSUP))
 		return ret;
 
+	ret = rte_mempool_ops_get_info(mp, &mp->info);
+	if ((ret < 0) && (ret != -ENOTSUP))
+		return ret;
+	if (ret == -ENOTSUP)
+		mp->info.cluster_size = 0;
+
+	if ((mp->info.cluster_size == 0) &&
+	    (mp_flags & MEMPOOL_F_CAPA_ALLOCATE_IN_CLUSTERS))
+		return -EINVAL;
+
 	/* update mempool capabilities */
 	mp->flags |= mp_flags;
 
@@ -595,7 +626,7 @@
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
 		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
-					mp->flags);
+					mp->flags, &mp->info);
 
 		ret = snprintf(mz_name, sizeof(mz_name),
 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -653,7 +684,7 @@ get_anon_size(const struct rte_mempool *mp)
 	pg_shift = rte_bsf32(pg_sz);
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 	size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
-					mp->flags);
+					mp->flags, &mp->info);
 
 	return size;
 }
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 3c59d36..9bcb8b7 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -220,7 +220,10 @@ struct rte_mempool_memhdr {
 /*
  * Additional information about the mempool
  */
-struct rte_mempool_info;
+struct rte_mempool_info {
+	/** Number of objects in a cluster */
+	unsigned int cluster_size;
+};
 
 /**
  * The RTE mempool structure.
@@ -265,6 +268,7 @@ struct rte_mempool {
 	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
 	uint32_t nb_mem_chunks;	 /**< Number of memory chunks */
 	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */
+	struct rte_mempool_info info; /**< Additional mempool info */
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	/** Per-lcore statistics. */
@@ -298,6 +302,17 @@ struct rte_mempool {
 #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
 
 /**
+ * This capability flag is advertised by a mempool handler. It is used
+ * when the mempool driver requires clusters of objects to start at a
+ * power-of-two boundary.
+ *
+ * Note:
+ * - This flag should not be passed by the application.
+ *   It is used by mempool drivers only.
+ */
+#define MEMPOOL_F_CAPA_ALLOCATE_IN_CLUSTERS 0x0100
+
+/**
  * @internal When debug is enabled, store some statistics.
  *
  * @param mp
@@ -1605,11 +1620,15 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
  *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
  * @param flags
  *   The mempool flags.
+ * @param info
+ *   A pointer to the mempool's additional info (may be NULL unless
+ *   MEMPOOL_F_CAPA_ALLOCATE_IN_CLUSTERS is set in @p flags)
  * @return
  *   Required memory size aligned at page boundary.
  */
 size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
-			     uint32_t pg_shift, unsigned int flags);
+			     uint32_t pg_shift, unsigned int flags,
+			     const struct rte_mempool_info *info);
 
 /**
  * Get the size of memory required to store mempool elements.
diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c
index 37ead50..f4bb9a9 100644
--- a/test/test/test_mempool.c
+++ b/test/test/test_mempool.c
@@ -485,7 +485,7 @@ test_mempool_xmem_misc(void)
 	elt_num = MAX_KEEP;
 	total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
 	sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX,
-					0);
+					0, NULL);
 
 	usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
 		MEMPOOL_PG_SHIFT_MAX, 0);
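
For reference, the clustered branch of rte_mempool_xmem_size() above
works out numerically as follows. This is an editor's standalone
sketch, not part of the patch: rte_align32pow2() and rte_bsf32() are
re-implemented locally, and all numeric values are invented for
illustration.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for rte_align32pow2(): round up to a power of two. */
static uint32_t align32pow2(uint32_t v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

/* Local stand-in for rte_bsf32(): index of the lowest set bit. */
static unsigned int bsf32(uint32_t v)
{
	return (unsigned int)__builtin_ctz(v);
}

int main(void)
{
	uint32_t total_elt_sz = 2176;	/* header + object + trailer */
	uint32_t cluster_size = 32;	/* objects per cluster */
	uint32_t elt_num = 8192;	/* objects in the pool */

	/* 2176 * 32 = 69632 bytes per cluster, rounded up to 131072 (2^17) */
	unsigned int align_shift =
		bsf32(align32pow2(total_elt_sz * cluster_size));

	/*
	 * 8192 / 32 = 256 clusters; the two extra clusters appear to leave
	 * slack for aligning the chunk start and for a partial tail:
	 * (256 + 2) << 17 = 33816576 bytes (~32 MiB).
	 */
	size_t size = (size_t)((elt_num / cluster_size) + 2) << align_shift;

	printf("align_shift=%u size=%zu\n", align_shift, size);
	return 0;
}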