From patchwork Fri Sep 11 03:29:26 2020
X-Patchwork-Submitter: Phil Yang
X-Patchwork-Id: 77315
X-Patchwork-Delegate: david.marchand@redhat.com
From: Phil Yang <phil.yang@arm.com>
To: dev@dpdk.org
Cc: Honnappa.Nagarahalli@arm.com, Ruifeng.Wang@arm.com, nd@arm.com, David Hunt
Date: Fri, 11 Sep 2020 11:29:26 +0800
Message-Id: <1599794967-17500-4-git-send-email-phil.yang@arm.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1599794967-17500-1-git-send-email-phil.yang@arm.com>
References: <1599794967-17500-1-git-send-email-phil.yang@arm.com>
Subject: [dpdk-dev] [PATCH 3/4] power: use C11 atomic builtins for power in use state update

Since rte_atomicXX APIs are not allowed to be used, use C11 atomic builtins
for power in use state update.

Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Ruifeng Wang <Ruifeng.Wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
---
 lib/librte_power/power_acpi_cpufreq.c   | 45 +++++++++++++++++++++++++--------
 lib/librte_power/power_pstate_cpufreq.c | 45 +++++++++++++++++++++++++--------
 2 files changed, 70 insertions(+), 20 deletions(-)

diff --git a/lib/librte_power/power_acpi_cpufreq.c b/lib/librte_power/power_acpi_cpufreq.c
index 583815a..84a9d75 100644
--- a/lib/librte_power/power_acpi_cpufreq.c
+++ b/lib/librte_power/power_acpi_cpufreq.c
@@ -12,7 +12,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
@@ -86,7 +85,7 @@ struct rte_power_info {
 	FILE *f; /**< FD of scaling_setspeed */
 	char governor_ori[32]; /**< Original governor name */
 	uint32_t curr_idx; /**< Freq index in freqs array */
-	volatile uint32_t state; /**< Power in use state */
+	uint32_t state; /**< Power in use state */
 	uint16_t turbo_available; /**< Turbo Boost available */
 	uint16_t turbo_enable; /**< Turbo Boost enable/disable */
 } __rte_cache_aligned;
@@ -300,6 +299,7 @@ int
 power_acpi_cpufreq_init(unsigned int lcore_id)
 {
 	struct rte_power_info *pi;
+	uint32_t exp_state;
 
 	if (lcore_id >= RTE_MAX_LCORE) {
 		RTE_LOG(ERR, POWER, "Lcore id %u can not exceeds %u\n",
@@ -308,8 +308,16 @@ power_acpi_cpufreq_init(unsigned int lcore_id)
 	}
 
 	pi = &lcore_power_info[lcore_id];
-	if (rte_atomic32_cmpset(&(pi->state), POWER_IDLE, POWER_ONGOING)
-			== 0) {
+	exp_state = POWER_IDLE;
+	/* The power in use state works as a guard variable between
+	 * the CPU frequency control initialization and exit process.
+	 * The ACQUIRE memory ordering here pairs with the RELEASE
+	 * ordering below as lock to make sure the frequency operations
+	 * in the critical section are done under the correct state.
+	 */
+	if (!__atomic_compare_exchange_n(&(pi->state), &exp_state,
+			POWER_ONGOING, 0,
+			__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
 		RTE_LOG(INFO, POWER, "Power management of lcore %u is "
 				"in use\n", lcore_id);
 		return -1;
@@ -346,12 +354,16 @@ power_acpi_cpufreq_init(unsigned int lcore_id)
 	RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u "
 			"power management\n", lcore_id);
-	rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED);
+	exp_state = POWER_ONGOING;
+	__atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_USED,
+		0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
 
 	return 0;
 
 fail:
-	rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN);
+	exp_state = POWER_ONGOING;
+	__atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_UNKNOWN,
+		0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
 
 	return -1;
 }
@@ -408,6 +420,7 @@ int
 power_acpi_cpufreq_exit(unsigned int lcore_id)
 {
 	struct rte_power_info *pi;
+	uint32_t exp_state;
 
 	if (lcore_id >= RTE_MAX_LCORE) {
 		RTE_LOG(ERR, POWER, "Lcore id %u can not exceeds %u\n",
@@ -415,8 +428,16 @@ power_acpi_cpufreq_exit(unsigned int lcore_id)
 		return -1;
 	}
 	pi = &lcore_power_info[lcore_id];
-	if (rte_atomic32_cmpset(&(pi->state), POWER_USED, POWER_ONGOING)
-			== 0) {
+	exp_state = POWER_USED;
+	/* The power in use state works as a guard variable between
+	 * the CPU frequency control initialization and exit process.
+	 * The ACQUIRE memory ordering here pairs with the RELEASE
+	 * ordering below as lock to make sure the frequency operations
+	 * in the critical section are done under the correct state.
+	 */
+	if (!__atomic_compare_exchange_n(&(pi->state), &exp_state,
+			POWER_ONGOING, 0,
+			__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
 		RTE_LOG(INFO, POWER, "Power management of lcore %u is "
 				"not used\n", lcore_id);
 		return -1;
@@ -436,12 +457,16 @@ power_acpi_cpufreq_exit(unsigned int lcore_id)
 	RTE_LOG(INFO, POWER, "Power management of lcore %u has exited from "
 			"'userspace' mode and been set back to the "
 			"original\n", lcore_id);
-	rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_IDLE);
+	exp_state = POWER_ONGOING;
+	__atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_IDLE,
+		0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
 
 	return 0;
 
 fail:
-	rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN);
+	exp_state = POWER_ONGOING;
+	__atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_UNKNOWN,
+		0, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
 
 	return -1;
 }
diff --git a/lib/librte_power/power_pstate_cpufreq.c b/lib/librte_power/power_pstate_cpufreq.c
index 2526441..e3126d3 100644
--- a/lib/librte_power/power_pstate_cpufreq.c
+++ b/lib/librte_power/power_pstate_cpufreq.c
@@ -14,7 +14,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
@@ -100,7 +99,7 @@ struct pstate_power_info {
 	uint32_t non_turbo_max_ratio; /**< Non Turbo Max ratio */
 	uint32_t sys_max_freq; /**< system wide max freq */
 	uint32_t core_base_freq; /**< core base freq */
-	volatile uint32_t state; /**< Power in use state */
+	uint32_t state; /**< Power in use state */
 	uint16_t turbo_available; /**< Turbo Boost available */
 	uint16_t turbo_enable; /**< Turbo Boost enable/disable */
 	uint16_t priority_core; /**< High Performance core */
@@ -542,6 +541,7 @@ int
 power_pstate_cpufreq_init(unsigned int lcore_id)
 {
 	struct pstate_power_info *pi;
+	uint32_t exp_state;
 
 	if (lcore_id >= RTE_MAX_LCORE) {
POWER, "Lcore id %u can not exceed %u\n", @@ -550,8 +550,16 @@ power_pstate_cpufreq_init(unsigned int lcore_id) } pi = &lcore_power_info[lcore_id]; - if (rte_atomic32_cmpset(&(pi->state), POWER_IDLE, POWER_ONGOING) - == 0) { + exp_state = POWER_IDLE; + /* The power in use state works as a guard variable between + * the CPU frequency control initialization and exit process. + * The ACQUIRE memory ordering here pairs with the RELEASE + * ordering below as lock to make sure the frequency operations + * in the critical section are done under the correct state. + */ + if (!__atomic_compare_exchange_n(&(pi->state), &exp_state, + POWER_ONGOING, 0, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { RTE_LOG(INFO, POWER, "Power management of lcore %u is " "in use\n", lcore_id); return -1; @@ -588,12 +596,16 @@ power_pstate_cpufreq_init(unsigned int lcore_id) RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u " "power management\n", lcore_id); - rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED); + exp_state = POWER_ONGOING; + __atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_USED, + 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return 0; fail: - rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN); + exp_state = POWER_ONGOING; + __atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_UNKNOWN, + 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return -1; } @@ -602,6 +614,7 @@ int power_pstate_cpufreq_exit(unsigned int lcore_id) { struct pstate_power_info *pi; + uint32_t exp_state; if (lcore_id >= RTE_MAX_LCORE) { RTE_LOG(ERR, POWER, "Lcore id %u can not exceeds %u\n", @@ -610,8 +623,16 @@ power_pstate_cpufreq_exit(unsigned int lcore_id) } pi = &lcore_power_info[lcore_id]; - if (rte_atomic32_cmpset(&(pi->state), POWER_USED, POWER_ONGOING) - == 0) { + exp_state = POWER_USED; + /* The power in use state works as a guard variable between + * the CPU frequency control initialization and exit process. + * The ACQUIRE memory ordering here pairs with the RELEASE + * ordering below as lock to make sure the frequency operations + * in the critical section are under done the correct state. + */ + if (!__atomic_compare_exchange_n(&(pi->state), &exp_state, + POWER_ONGOING, 0, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { RTE_LOG(INFO, POWER, "Power management of lcore %u is " "not used\n", lcore_id); return -1; @@ -633,12 +654,16 @@ power_pstate_cpufreq_exit(unsigned int lcore_id) RTE_LOG(INFO, POWER, "Power management of lcore %u has exited from " "'performance' mode and been set back to the " "original\n", lcore_id); - rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_IDLE); + exp_state = POWER_ONGOING; + __atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_IDLE, + 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return 0; fail: - rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN); + exp_state = POWER_ONGOING; + __atomic_compare_exchange_n(&(pi->state), &exp_state, POWER_UNKNOWN, + 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return -1; }