From patchwork Tue Oct 6 07:07:49 2020
X-Patchwork-Submitter: Olivier Matz <olivier.matz@6wind.com>
X-Patchwork-Id: 79731
X-Patchwork-Delegate: david.marchand@redhat.com
From: Olivier Matz <olivier.matz@6wind.com>
To: dev@dpdk.org
Cc: Andrew Rybchenko, Ray Kinsella, Neil Horman
Date: Tue, 6 Oct 2020 09:07:49 +0200
Message-Id: <20201006070750.27104-1-olivier.matz@6wind.com>
Subject: [dpdk-dev] [PATCH 1/2] mempool: remove v20 ABI

Remove the deprecated v20 ABI of rte_mempool_populate_iova() and
rte_mempool_populate_virt().

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
---
 lib/librte_mempool/meson.build             |  2 -
 lib/librte_mempool/rte_mempool.c           | 79 ++--------------------
 lib/librte_mempool/rte_mempool_version.map |  7 --
 3 files changed, 5 insertions(+), 83 deletions(-)

diff --git a/lib/librte_mempool/meson.build b/lib/librte_mempool/meson.build
index 7dbe6b9bea..a6e861cbfc 100644
--- a/lib/librte_mempool/meson.build
+++ b/lib/librte_mempool/meson.build
@@ -9,8 +9,6 @@ foreach flag: extra_flags
 	endif
 endforeach
 
-use_function_versioning = true
-
 sources = files('rte_mempool.c', 'rte_mempool_ops.c',
 		'rte_mempool_ops_default.c', 'mempool_trace_points.c')
 headers = files('rte_mempool.h', 'rte_mempool_trace.h',
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 7774f0c8da..0e3a2a7635 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -30,7 +30,6 @@
 #include
 #include
 #include
-#include <rte_function_versioning.h>
 #include
@@ -305,17 +304,12 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
 	return 0;
 }
 
-__vsym int
-rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr,
-	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
-	void *opaque);
-
 /* Add objects in the pool, using a physically contiguous memory
  * zone. Return the number of objects added, or a negative value
  * on error.
  */
-__vsym int
-rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr,
+int
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
@@ -375,35 +369,6 @@ rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr,
 
 	return ret;
 }
-BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v21, 21);
-MAP_STATIC_SYMBOL(
-	int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
-		rte_iova_t iova, size_t len,
-		rte_mempool_memchunk_free_cb_t *free_cb,
-		void *opaque),
-	rte_mempool_populate_iova_v21);
-
-__vsym int
-rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
-	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
-	void *opaque);
-
-__vsym int
-rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
-	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
-	void *opaque)
-{
-	int ret;
-
-	ret = rte_mempool_populate_iova_v21(mp, vaddr, iova, len, free_cb,
-			opaque);
-	if (ret == 0)
-		ret = -EINVAL;
-
-	return ret;
-}
-VERSION_SYMBOL(rte_mempool_populate_iova, _v20, 20.0);
-
 static rte_iova_t
 get_iova(void *addr)
 {
@@ -417,16 +382,11 @@ get_iova(void *addr)
 	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
 }
 
-__vsym int
-rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
-	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
-	void *opaque);
-
 /* Populate the mempool with a virtual area. Return the number of
  * objects added, or a negative value on error.
  */
-__vsym int
-rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
+int
+rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
@@ -459,7 +419,7 @@ rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
 			break;
 		}
 
-		ret = rte_mempool_populate_iova_v21(mp, addr + off, iova,
+		ret = rte_mempool_populate_iova(mp, addr + off, iova,
 			phys_len, free_cb, opaque);
 		if (ret == 0)
 			continue;
@@ -477,35 +437,6 @@ rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
 		rte_mempool_free_memchunks(mp);
 	return ret;
 }
-BIND_DEFAULT_SYMBOL(rte_mempool_populate_virt, _v21, 21);
-MAP_STATIC_SYMBOL(
-	int rte_mempool_populate_virt(struct rte_mempool *mp,
-		char *addr, size_t len, size_t pg_sz,
-		rte_mempool_memchunk_free_cb_t *free_cb,
-		void *opaque),
-	rte_mempool_populate_virt_v21);
-
-__vsym int
-rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
-	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
-	void *opaque);
-
-__vsym int
-rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
-	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
-	void *opaque)
-{
-	int ret;
-
-	ret = rte_mempool_populate_virt_v21(mp, addr, len, pg_sz,
-			free_cb, opaque);
-
-	if (ret == 0)
-		ret = -EINVAL;
-
-	return ret;
-}
-VERSION_SYMBOL(rte_mempool_populate_virt, _v20, 20.0);
 /* Get the minimal page size used in a mempool before populating it.
  */
 int
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 50e22ee020..83760ecfc9 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -31,13 +31,6 @@ DPDK_21 {
 	local: *;
 };
 
-DPDK_20.0 {
-	global:
-
-	rte_mempool_populate_iova;
-	rte_mempool_populate_virt;
-};
-
 EXPERIMENTAL {
 	global:
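The user-visible effect of dropping the v20 compatibility symbols is that rte_mempool_populate_iova() and rte_mempool_populate_virt() now always expose the v21 semantics: they return the number of objects added, which may legitimately be 0, or a negative errno, whereas the removed v20 wrappers folded that 0 into -EINVAL. A minimal caller sketch, for illustration only (the helper name, the NULL free callback and the surrounding setup are assumptions, not part of this patch):

#include <errno.h>

#include <rte_mempool.h>

/*
 * Illustrative sketch: populate a mempool from one caller-managed,
 * physically contiguous chunk. free_cb/opaque are NULL because the
 * caller keeps ownership of the memory in this example.
 */
static int
populate_from_chunk(struct rte_mempool *mp, char *vaddr, rte_iova_t iova,
		size_t len)
{
	int n;

	n = rte_mempool_populate_iova(mp, vaddr, iova, len, NULL, NULL);
	if (n < 0)
		return n;		/* negative errno from the library */
	if (n == 0)
		return -EINVAL;		/* mimic the removed v20 behaviour */

	return n;			/* number of objects actually added */
}

Applications that depended on the old v20 return convention can keep treating 0 as an error on their side, as the sketch does.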
From patchwork Tue Oct 6 07:07:50 2020
X-Patchwork-Submitter: Olivier Matz <olivier.matz@6wind.com>
X-Patchwork-Id: 79732
X-Patchwork-Delegate: david.marchand@redhat.com
From: Olivier Matz <olivier.matz@6wind.com>
To: dev@dpdk.org
Cc: Andrew Rybchenko, Ray Kinsella, Neil Horman
Date: Tue, 6 Oct 2020 09:07:50 +0200
Message-Id: <20201006070750.27104-2-olivier.matz@6wind.com>
In-Reply-To: <20201006070750.27104-1-olivier.matz@6wind.com>
References: <20201006070750.27104-1-olivier.matz@6wind.com>
Subject: [dpdk-dev] [PATCH 2/2] mempool: remove experimental tags

Move symbols introduced in version <= 19.11 to the stable ABI.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mempool/rte_mempool.h           | 32 ----------------------
 lib/librte_mempool/rte_mempool_version.map | 12 +++-----
 2 files changed, 4 insertions(+), 40 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 9ea7ff934c..c551cf733a 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -191,9 +191,6 @@ struct rte_mempool_memhdr {
 };
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Additional information about the mempool
  *
  * The structure is cache-line aligned to avoid ABI breakages in
@@ -358,9 +355,6 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * @internal Check contiguous object blocks and update cookies or panic.
  *
  * @param mp
@@ -421,9 +415,6 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
 		void **obj_table, unsigned int n);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Dequeue a number of contiguous object blocks from the external pool.
  */
 typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
@@ -462,9 +453,6 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
 		size_t *min_chunk_size, size_t *align);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * @internal Helper to calculate memory size required to store given
  * number of objects.
  *
@@ -499,7 +487,6 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
  * @return
  *   Required memory size.
  */
-__rte_experimental
 ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
 		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
 		size_t *min_chunk_size, size_t *align);
@@ -569,9 +556,6 @@ typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
 #define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * @internal Helper to populate memory pool object using provided memory
  * chunk: just slice objects one by one, taking care of not
  * crossing page boundaries.
@@ -603,7 +587,6 @@ typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
  * @return
  *   The number of objects added in mempool.
  */
-__rte_experimental
 int rte_mempool_op_populate_helper(struct rte_mempool *mp,
 		unsigned int flags, unsigned int max_objs,
 		void *vaddr, rte_iova_t iova, size_t len,
@@ -621,9 +604,6 @@ int rte_mempool_op_populate_default(struct rte_mempool *mp,
 		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Get some additional information about a mempool.
  */
 typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
@@ -846,9 +826,6 @@ int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
 		void *obj_cb_arg);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Wrapper for mempool_ops get_info callback.
 *
 * @param[in] mp
@@ -860,7 +837,6 @@ int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
 *   mempool information
 * - -ENOTSUP - doesn't support get_info ops (valid case).
 */
-__rte_experimental
 int rte_mempool_ops_get_info(const struct rte_mempool *mp,
 		struct rte_mempool_info *info);
@@ -1577,9 +1553,6 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Get a contiguous blocks of objects from the mempool.
 *
 * If cache is enabled, consider to flush it first, to reuse objects
@@ -1601,7 +1574,6 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 * - -EOPNOTSUPP: The mempool driver does not support block dequeue
 */
 static __rte_always_inline int
-__rte_experimental
 rte_mempool_get_contig_blocks(struct rte_mempool *mp,
 		void **first_obj_table, unsigned int n)
 {
@@ -1786,13 +1758,9 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
 		      void *arg);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * @internal Get page size used for mempool object allocation.
 * This function is internal to mempool library and mempool drivers.
 */
-__rte_experimental
 int
 rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 83760ecfc9..50b0602952 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -12,13 +12,17 @@ DPDK_21 {
 	rte_mempool_create_empty;
 	rte_mempool_dump;
 	rte_mempool_free;
+	rte_mempool_get_page_size;
 	rte_mempool_in_use_count;
 	rte_mempool_list_dump;
 	rte_mempool_lookup;
 	rte_mempool_mem_iter;
 	rte_mempool_obj_iter;
 	rte_mempool_op_calc_mem_size_default;
+	rte_mempool_op_calc_mem_size_helper;
 	rte_mempool_op_populate_default;
+	rte_mempool_op_populate_helper;
+	rte_mempool_ops_get_info;
 	rte_mempool_ops_table;
 	rte_mempool_populate_anon;
 	rte_mempool_populate_default;
@@ -34,14 +38,6 @@ DPDK_21 {
 EXPERIMENTAL {
 	global:
 
-	# added in 18.05
-	rte_mempool_ops_get_info;
-
-	# added in 19.11
-	rte_mempool_get_page_size;
-	rte_mempool_op_calc_mem_size_helper;
-	rte_mempool_op_populate_helper;
-
 	# added in 20.05
 	__rte_mempool_trace_ops_dequeue_bulk;
 	__rte_mempool_trace_ops_dequeue_contig_blocks;
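With both patches applied, formerly experimental symbols such as rte_mempool_ops_get_info(), rte_mempool_get_page_size(), rte_mempool_op_calc_mem_size_helper() and rte_mempool_op_populate_helper() live in the DPDK_21 stable node, so applications no longer have to define ALLOW_EXPERIMENTAL_API to call them. A small sketch, for illustration only (the helper name and the printf reporting are assumptions; the rte_mempool_info contig_block_size field is taken from the headers of that era):

#include <errno.h>
#include <stdio.h>

#include <rte_mempool.h>

/*
 * Illustrative sketch: query the mempool driver's get_info callback.
 * -ENOTSUP just means the driver does not implement it, which is a
 * valid case according to the API comment above.
 */
static void
print_contig_block_size(const struct rte_mempool *mp)
{
	struct rte_mempool_info info;
	int ret;

	ret = rte_mempool_ops_get_info(mp, &info);
	if (ret == -ENOTSUP) {
		printf("%s: driver has no get_info callback\n", mp->name);
		return;
	}
	if (ret < 0) {
		printf("%s: get_info failed (%d)\n", mp->name, ret);
		return;
	}
	printf("%s: contiguous block size: %u objects\n",
	       mp->name, info.contig_block_size);
}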