From: Phil Yang
To: dev@dpdk.org
Cc: nd@arm.com
Date: Thu, 3 Jan 2019 17:42:09 +0800
Message-Id: <1546508529-12227-1-git-send-email-phil.yang@arm.com>
Subject: [dpdk-dev] [PATCH] eal/atomic: reimplement rte atomic APIs with atomic builtins

'__sync' builtins are deprecated; enable '__atomic' builtins for generic
atomic operations.

Signed-off-by: Phil Yang
Reviewed-by: Gavin Hu
Tested-by: Phil Yang
---
 lib/librte_eal/common/include/generic/rte_atomic.h | 80 ++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h
index b99ba46..260cdf3 100644
--- a/lib/librte_eal/common/include/generic/rte_atomic.h
+++ b/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -186,7 +186,12 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src);
 static inline int
 rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_bool_compare_and_swap(dst, exp, src);
+#else
+	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+		__ATOMIC_ACQUIRE) ? 1 : 0;
+#endif
 }
 #endif
@@ -283,7 +288,11 @@ rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
 static inline void
 rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	__sync_fetch_and_add(&v->cnt, inc);
+#else
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -297,7 +306,11 @@ rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
 static inline void
 rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	__sync_fetch_and_sub(&v->cnt, dec);
+#else
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -350,7 +363,11 @@ rte_atomic16_dec(rte_atomic16_t *v)
 static inline int16_t
 rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_add_and_fetch(&v->cnt, inc);
+#else
+	return __atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -370,7 +387,11 @@ rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
 static inline int16_t
 rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_sub_and_fetch(&v->cnt, dec);
+#else
+	return __atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -389,7 +410,11 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v);
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_add_and_fetch(&v->cnt, 1) == 0;
+#else
+	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+#endif
 }
 #endif
@@ -409,7 +434,11 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v);
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+#else
+	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+#endif
 }
 #endif
@@ -469,7 +498,13 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src);
 static inline int
 rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_bool_compare_and_swap(dst, exp, src);
+#else
+	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+		__ATOMIC_ACQUIRE) ? 1 : 0;
+#endif
+
 }
 #endif
@@ -566,7 +601,11 @@ rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
 static inline void
 rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	__sync_fetch_and_add(&v->cnt, inc);
+#else
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -580,7 +619,11 @@ rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
 static inline void
 rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	__sync_fetch_and_sub(&v->cnt, dec);
+#else
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -633,7 +676,11 @@ rte_atomic32_dec(rte_atomic32_t *v)
 static inline int32_t
 rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_add_and_fetch(&v->cnt, inc);
+#else
+	return __atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -653,7 +700,11 @@ rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
 static inline int32_t
 rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_sub_and_fetch(&v->cnt, dec);
+#else
+	return __atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
+#endif
 }
 
 /**
@@ -672,7 +723,11 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v);
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_add_and_fetch(&v->cnt, 1) == 0;
+#else
+	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+#endif
 }
 #endif
@@ -692,7 +747,11 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v);
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+#else
+	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+#endif
 }
 #endif
@@ -751,7 +810,12 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src);
 static inline int
 rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_bool_compare_and_swap(dst, exp, src);
+#else
+	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+		__ATOMIC_ACQUIRE) ? 1 : 0;
+#endif
 }
 #endif
@@ -902,7 +966,11 @@ rte_atomic64_add(rte_atomic64_t *v, int64_t inc);
 static inline void
 rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	__sync_fetch_and_add(&v->cnt, inc);
+#else
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE);
+#endif
 }
 #endif
@@ -921,7 +989,11 @@ rte_atomic64_sub(rte_atomic64_t *v, int64_t dec);
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	__sync_fetch_and_sub(&v->cnt, dec);
+#else
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE);
+#endif
 }
 #endif
@@ -979,7 +1051,11 @@ rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc);
 static inline int64_t
 rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_add_and_fetch(&v->cnt, inc);
+#else
+	return __atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
+#endif
 }
 #endif
@@ -1003,7 +1079,11 @@ rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec);
 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
 {
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
 	return __sync_sub_and_fetch(&v->cnt, dec);
+#else
+	return __atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
+#endif
 }
 #endif
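For readers less familiar with the two builtin families, the standalone sketch
below (not part of the patch) illustrates the mapping the diff applies for the
compare-and-set case: the legacy '__sync_bool_compare_and_swap' call and the
'__atomic_compare_exchange' replacement used above return the same result for
the same inputs. The acquire ordering mirrors the choice made in the patch;
the wrapper names, test values and main() harness are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* Legacy builtin: implies a full memory barrier. */
	static inline int
	cmpset_sync(volatile uint16_t *dst, uint16_t exp, uint16_t src)
	{
		return __sync_bool_compare_and_swap(dst, exp, src);
	}

	/* Replacement builtin: explicit memory order, acquire on both
	 * the success and failure paths, as in the patch. */
	static inline int
	cmpset_atomic(volatile uint16_t *dst, uint16_t exp, uint16_t src)
	{
		return __atomic_compare_exchange(dst, &exp, &src, 0,
			__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) ? 1 : 0;
	}

	int main(void)
	{
		volatile uint16_t a = 1, b = 1;

		/* Both succeed and store 2: the expected value matches. */
		printf("__sync:   ret=%d val=%u\n", cmpset_sync(&a, 1, 2), (unsigned)a);
		printf("__atomic: ret=%d val=%u\n", cmpset_atomic(&b, 1, 2), (unsigned)b);

		/* Both fail and leave the value unchanged: expected value differs. */
		printf("__sync:   ret=%d val=%u\n", cmpset_sync(&a, 1, 3), (unsigned)a);
		printf("__atomic: ret=%d val=%u\n", cmpset_atomic(&b, 1, 3), (unsigned)b);
		return 0;
	}

One semantic difference worth noting: the '__sync' builtins always imply a
full barrier, while the '__atomic' variants take an explicit memory-order
argument, which this patch sets to __ATOMIC_ACQUIRE.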