From patchwork Fri Jun 26 20:35:01 2020
X-Patchwork-Submitter: Honnappa Nagarahalli
X-Patchwork-Id: 72276
X-Patchwork-Delegate: david.marchand@redhat.com
From: Honnappa Nagarahalli
To: dev@dpdk.org, honnappa.nagarahalli@arm.com, jerinj@marvell.com,
 hemant.agrawal@nxp.com, akhil.goyal@nxp.com, ogerlitz@mellanox.com,
 ajit.khaparde@broadcom.com, pbhagavatula@marvell.com
Cc: nd@arm.com, stable@dpdk.org
Date: Fri, 26 Jun 2020 15:35:01 -0500
Message-Id: <20200626203502.20658-1-honnappa.nagarahalli@arm.com>
In-Reply-To: <20200608213417.9764-1-honnappa.nagarahalli@arm.com>
References: <20200608213417.9764-1-honnappa.nagarahalli@arm.com>
Subject: [dpdk-dev] [PATCH v2 1/2] eal/arm: generic counter based loop for CPU freq calculation

get_tsc_freq uses the 'nanosleep' system call to calculate the CPU
frequency. However, 'nanosleep' causes the process to be descheduled,
and the kernel saves and restores the PMU state across the context
switch so that PMU cycles are not counted towards a sleeping process.
When RTE_ARM_EAL_RDTSC_USE_PMU is defined, this results in an incorrect
CPU frequency calculation. Replace this logic with a generic counter
based loop.

Bugzilla ID: 450
Fixes: f91bcbb2d9a6 ("eal/arm: use high-resolution cycle counter")
Cc: stable@dpdk.org

Signed-off-by: Honnappa Nagarahalli
Reviewed-by: Ruifeng Wang
Reviewed-by: Dharmik Thakkar
Reviewed-by: Phil Yang
Acked-by: Jerin Jacob
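The approach can be illustrated outside the DPDK tree with a minimal
standalone sketch: instead of sleeping, busy-poll the generic counter
(cntvct_el0) for a fixed window, sample the PMU cycle counter
(pmccntr_el0) at both ends, and scale the delta up to one second. The
helper names below (read_cntfrq, read_cntvct, read_pmccntr) are
illustrative only and are not DPDK API; the sketch assumes an AArch64
target where the kernel has granted user-space access to the PMU cycle
counter. The actual EAL implementation is in the diff that follows.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative helpers only (not DPDK API names) */
static inline uint64_t
read_cntfrq(void)
{
	uint64_t freq;

	/* Architected frequency of the generic counter */
	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
	return freq;
}

static inline uint64_t
read_cntvct(void)
{
	uint64_t ticks;

	asm volatile("mrs %0, cntvct_el0" : "=r" (ticks));
	return ticks;
}

static inline uint64_t
read_pmccntr(void)
{
	uint64_t cycles;

	/* Traps unless the kernel enables EL0 access to the PMU counter */
	asm volatile("mrs %0, pmccntr_el0" : "=r" (cycles));
	return cycles;
}

int
main(void)
{
	/* Busy-wait for 1/10 second on the generic counter; the process
	 * never sleeps, so the PMU keeps counting its cycles.
	 */
	uint64_t window = read_cntfrq() / 10;
	uint64_t start_ticks = read_cntvct();
	uint64_t start_cycles = read_pmccntr();

	while ((read_cntvct() - start_ticks) < window)
		;

	uint64_t cycles = read_pmccntr() - start_cycles;

	printf("estimated CPU frequency: %" PRIu64 " Hz\n", cycles * 10);
	return 0;
}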
---
v2:
1) Renamed the functions (Jerin)
2) Aligned the frequency to a 1MHz ceiling (Pavan)
3) Made all the inlines always inline for consistency

 lib/librte_eal/arm/include/rte_cycles_64.h | 45 +++++++++++++++++++---
 lib/librte_eal/arm/rte_cycles.c            | 27 +++++++++++--
 2 files changed, 63 insertions(+), 9 deletions(-)

diff --git a/lib/librte_eal/arm/include/rte_cycles_64.h b/lib/librte_eal/arm/include/rte_cycles_64.h
index da557b6a1..e41f9dbd6 100644
--- a/lib/librte_eal/arm/include/rte_cycles_64.h
+++ b/lib/librte_eal/arm/include/rte_cycles_64.h
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2015 Cavium, Inc
+ * Copyright(c) 2020 Arm Limited
  */
 
 #ifndef _RTE_CYCLES_ARM64_H_
@@ -11,6 +12,33 @@ extern "C" {
 
 #include "generic/rte_cycles.h"
 
+/** Read generic counter frequency */
+static __rte_always_inline uint64_t
+__rte_arm64_cntfrq(void)
+{
+	uint64_t freq;
+
+	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
+	return freq;
+}
+
+/** Read generic counter */
+static __rte_always_inline uint64_t
+__rte_arm64_cntvct(void)
+{
+	uint64_t tsc;
+
+	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+	return tsc;
+}
+
+static __rte_always_inline uint64_t
+__rte_arm64_cntvct_precise(void)
+{
+	asm volatile("isb" : : : "memory");
+	return __rte_arm64_cntvct();
+}
+
 /**
  * Read the time base register.
  *
@@ -25,10 +53,7 @@ extern "C" {
 static inline uint64_t
 rte_rdtsc(void)
 {
-	uint64_t tsc;
-
-	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
-	return tsc;
+	return __rte_arm64_cntvct();
 }
 #else
 /**
@@ -49,14 +74,22 @@ rte_rdtsc(void)
  * asm volatile("msr pmcr_el0, %0" : : "r" (val));
  *
  */
-static inline uint64_t
-rte_rdtsc(void)
+
+/** Read PMU cycle counter */
+static __rte_always_inline uint64_t
+__rte_arm64_pmccntr(void)
 {
 	uint64_t tsc;
 
 	asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
 	return tsc;
 }
+
+static inline uint64_t
+rte_rdtsc(void)
+{
+	return __rte_arm64_pmccntr();
+}
 #endif
 
 static inline uint64_t
diff --git a/lib/librte_eal/arm/rte_cycles.c b/lib/librte_eal/arm/rte_cycles.c
index 3500d523e..5bd29b24b 100644
--- a/lib/librte_eal/arm/rte_cycles.c
+++ b/lib/librte_eal/arm/rte_cycles.c
@@ -3,14 +3,35 @@
  */
 
 #include "eal_private.h"
+#include "rte_cycles.h"
 
 uint64_t
 get_tsc_freq_arch(void)
 {
 #if defined RTE_ARCH_ARM64 && !defined RTE_ARM_EAL_RDTSC_USE_PMU
-	uint64_t freq;
-	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
-	return freq;
+	return __rte_arm64_cntfrq();
+#elif defined RTE_ARCH_ARM64 && defined RTE_ARM_EAL_RDTSC_USE_PMU
+#define CYC_PER_1MHZ 1E6
+	/* Use the generic counter ticks to calculate the PMU
+	 * cycle frequency.
+	 */
+	uint64_t ticks;
+	uint64_t start_ticks, cur_ticks;
+	uint64_t start_pmu_cycles, end_pmu_cycles;
+
+	/* Number of ticks for 1/10 second */
+	ticks = __rte_arm64_cntfrq() / 10;
+
+	start_ticks = __rte_arm64_cntvct_precise();
+	start_pmu_cycles = rte_rdtsc_precise();
+	do {
+		cur_ticks = __rte_arm64_cntvct();
+	} while ((cur_ticks - start_ticks) < ticks);
+	end_pmu_cycles = rte_rdtsc_precise();
+
+	/* Adjust the cycles to next 1Mhz */
+	return RTE_ALIGN_MUL_CEIL(end_pmu_cycles - start_pmu_cycles,
+			CYC_PER_1MHZ) * 10;
 #else
 	return 0;
 #endif
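To see what the 1MHz ceiling adjustment in get_tsc_freq_arch() does,
take hypothetical numbers for a nominal 2 GHz core: if the loop above
observes end_pmu_cycles - start_pmu_cycles = 199,987,654 PMU cycles
during the 1/10 second window, RTE_ALIGN_MUL_CEIL(199987654, 1E6)
rounds the count up to 200,000,000, and multiplying by 10 scales the
1/10 second measurement back to a full second, so the reported
frequency is 2,000,000,000 Hz.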
From patchwork Fri Jun 26 20:35:02 2020
X-Patchwork-Submitter: Honnappa Nagarahalli
X-Patchwork-Id: 72277
X-Patchwork-Delegate: david.marchand@redhat.com
From: Honnappa Nagarahalli
To: dev@dpdk.org, honnappa.nagarahalli@arm.com, jerinj@marvell.com,
 hemant.agrawal@nxp.com, akhil.goyal@nxp.com, ogerlitz@mellanox.com,
 ajit.khaparde@broadcom.com, pbhagavatula@marvell.com
Cc: nd@arm.com
Date: Fri, 26 Jun 2020 15:35:02 -0500
Message-Id: <20200626203502.20658-2-honnappa.nagarahalli@arm.com>
In-Reply-To: <20200626203502.20658-1-honnappa.nagarahalli@arm.com>
References: <20200608213417.9764-1-honnappa.nagarahalli@arm.com>
 <20200626203502.20658-1-honnappa.nagarahalli@arm.com>
Subject: [dpdk-dev] [PATCH v2 2/2] eal/arm: change inline functions to always inline

Change the inline functions to use __rte_always_inline, to be
consistent with the rest of the inline functions.

Signed-off-by: Honnappa Nagarahalli
Acked-by: Jerin Jacob
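For reference, __rte_always_inline is the DPDK wrapper around the
compiler's always_inline attribute, defined in rte_common.h and shown
here in simplified form; unlike plain 'inline', it forces the compiler
to inline these small counter reads even at low optimization levels:

/* Simplified form of the DPDK definition in rte_common.h */
#define __rte_always_inline inline __attribute__((always_inline))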
---
 lib/librte_eal/arm/include/rte_cycles_64.h | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/lib/librte_eal/arm/include/rte_cycles_64.h b/lib/librte_eal/arm/include/rte_cycles_64.h
index e41f9dbd6..029fdc435 100644
--- a/lib/librte_eal/arm/include/rte_cycles_64.h
+++ b/lib/librte_eal/arm/include/rte_cycles_64.h
@@ -50,7 +50,7 @@ __rte_arm64_cntvct_precise(void)
  * This call is portable to any ARMv8 architecture, however, typically
  * cntvct_el0 runs at <= 100MHz and it may be imprecise for some tasks.
  */
-static inline uint64_t
+static __rte_always_inline uint64_t
 rte_rdtsc(void)
 {
 	return __rte_arm64_cntvct();
@@ -85,22 +85,25 @@ __rte_arm64_pmccntr(void)
 	return tsc;
 }
 
-static inline uint64_t
+static __rte_always_inline uint64_t
 rte_rdtsc(void)
 {
 	return __rte_arm64_pmccntr();
 }
 #endif
 
-static inline uint64_t
+static __rte_always_inline uint64_t
 rte_rdtsc_precise(void)
 {
 	asm volatile("isb" : : : "memory");
 	return rte_rdtsc();
 }
 
-static inline uint64_t
-rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+static __rte_always_inline uint64_t
+rte_get_tsc_cycles(void)
+{
+	return rte_rdtsc();
+}
 
 #ifdef __cplusplus
 }
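For completeness, a minimal usage sketch of the cycle API touched by
this series, using only public DPDK calls (rte_eal_init,
rte_get_tsc_hz, rte_rdtsc_precise, rte_delay_us_block,
rte_eal_cleanup); the EAL arguments and the measured workload are
placeholders, and rte_get_tsc_hz() returns the frequency EAL estimated
at startup (via get_tsc_freq_arch() on ARM64 when available):

#include <inttypes.h>
#include <stdio.h>

#include <rte_cycles.h>
#include <rte_eal.h>

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return 1;

	/* TSC/counter frequency as estimated by EAL */
	uint64_t hz = rte_get_tsc_hz();

	uint64_t start = rte_rdtsc_precise();
	rte_delay_us_block(1000);	/* placeholder workload */
	uint64_t cycles = rte_rdtsc_precise() - start;

	printf("elapsed: %" PRIu64 " cycles (~%.1f us at %" PRIu64 " Hz)\n",
	       cycles, cycles * 1e6 / hz, hz);

	rte_eal_cleanup();
	return 0;
}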