From patchwork Fri Dec 14 11:49:45 2018
X-Patchwork-Submitter: "Hunt, David" <david.hunt@intel.com>
X-Patchwork-Id: 48847
X-Patchwork-Delegate: thomas@monjalon.net
From: David Hunt <david.hunt@intel.com>
To: dev@dpdk.org
Cc: lei.a.yao@intel.com, anatoly.burakov@intel.com,
 David Hunt <david.hunt@intel.com>
Date: Fri, 14 Dec 2018 11:49:45 +0000
Message-Id: <20181214114946.21570-4-david.hunt@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20181214114946.21570-1-david.hunt@intel.com>
References: <20181122170220.55482-2-david.hunt@intel.com>
 <20181214114946.21570-1-david.hunt@intel.com>
Subject: [dpdk-dev] [PATCH v2 3/4] examples/power: allow vms to use lcores over 63

Extend the functionality to allow VMs to power-manage cores beyond 63.
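The previous implementation stored each vCPU's pinning as a one-bit-per-core
uint64_t mask, so physical cores above 63 could not be represented at all.
This patch stores the pinned core index itself in a uint16_t per vCPU
instead. In miniature (illustrative sketch only, not code from the tree):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int pcpu = 72;  /* a core beyond 63 */

            /* Old scheme: one bit per pCPU in a 64-bit mask.  Shifting
             * a uint64_t by 64 or more is undefined behaviour, so
             * cores above 63 simply have no bit to set. */
            if (pcpu < 64)
                    printf("mask 0x%" PRIx64 "\n", (uint64_t)1 << pcpu);

            /* New scheme: store the core index itself; one pCPU per
             * vCPU, valid for any core id up to UINT16_MAX. */
            uint16_t pcpu_map = (uint16_t)pcpu;
            printf("pinned to pCPU %u\n", pcpu_map);
            return 0;
    }

Note this also narrows the semantics from a set of allowed cores per vCPU
to a single pinned core per vCPU.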
Signed-off-by: David Hunt <david.hunt@intel.com>
---
 examples/vm_power_manager/channel_manager.c | 74 +++++++++------------
 examples/vm_power_manager/channel_manager.h | 30 ++-------
 examples/vm_power_manager/channel_monitor.c | 56 ++++++----------
 examples/vm_power_manager/vm_power_cli.c    |  4 +-
 4 files changed, 57 insertions(+), 107 deletions(-)

diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 71f4a0ccf..8756b53b8 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -49,7 +49,7 @@ static bool global_hypervisor_available;
  */
 struct virtual_machine_info {
 	char name[CHANNEL_MGR_MAX_NAME_LEN];
-	rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
+	uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
 	struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
 	char channel_mask[POWER_MGR_MAX_CPUS];
 	uint8_t num_channels;
@@ -79,7 +79,6 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
 	virVcpuInfoPtr cpuinfo;
 	unsigned i, j;
 	int n_vcpus;
-	uint64_t mask;
 
 	memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
 
@@ -120,26 +119,23 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
 		vm_info->info.nrVirtCpu = n_vcpus;
 	}
 	for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
-		mask = 0;
 		for (j = 0; j < global_n_host_cpus; j++) {
-			if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
-				mask |= 1ULL << j;
-			}
+			if (VIR_CPU_USABLE(global_cpumaps,
+					global_maplen, i, j) <= 0)
+				continue;
+			rte_spinlock_lock(&(vm_info->config_spinlock));
+			vm_info->pcpu_map[i] = j;
+			rte_spinlock_unlock(&(vm_info->config_spinlock));
 		}
-		rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
 	}
 	return 0;
 }
 
 int
-set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
+set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
 {
-	unsigned i = 0;
 	int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
 	struct virtual_machine_info *vm_info;
-	char mask[POWER_MGR_MAX_CPUS];
-
-	memcpy(mask, core_mask, POWER_MGR_MAX_CPUS);
 
 	if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
 		RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
@@ -166,17 +162,16 @@ set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
 		return -1;
 	}
 	memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
-	for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
-		if (mask[i] != 1)
-			continue;
-		VIR_USE_CPU(global_cpumaps, i);
-		if (i >= global_n_host_cpus) {
-			RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
-					"number of CPUs(%u)\n",
-					i, global_n_host_cpus);
-			return -1;
-		}
+
+	VIR_USE_CPU(global_cpumaps, pcpu);
+
+	if (pcpu >= global_n_host_cpus) {
+		RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+				"number of CPUs(%u)\n",
+				pcpu, global_n_host_cpus);
+		return -1;
 	}
+
 	if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
 			global_maplen, flags) < 0) {
 		RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
@@ -185,33 +180,24 @@ set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
 		return -1;
 	}
 	rte_spinlock_lock(&(vm_info->config_spinlock));
-	memcpy(&vm_info->pcpu_mask[vcpu], mask, POWER_MGR_MAX_CPUS);
+	vm_info->pcpu_map[vcpu] = pcpu;
 	rte_spinlock_unlock(&(vm_info->config_spinlock));
 	return 0;
-
-}
-
-int
-set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
-{
-	char mask[POWER_MGR_MAX_CPUS];
-
-	memset(mask, 0, POWER_MGR_MAX_CPUS);
-
-	mask[core_num] = 1;
-
-	return set_pcpus_mask(vm_name, vcpu, mask);
 }
 
-uint64_t
-get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
+uint16_t
+get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
 {
 	struct virtual_machine_info *vm_info =
 		(struct virtual_machine_info *)chan_info->priv_info;
 
-	if (global_hypervisor_available && (vm_info != NULL))
-		return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
-	else
+	if (global_hypervisor_available && (vm_info != NULL)) {
+		uint16_t pcpu;
+
+		rte_spinlock_lock(&(vm_info->config_spinlock));
+		pcpu = vm_info->pcpu_map[vcpu];
+		rte_spinlock_unlock(&(vm_info->config_spinlock));
+		return pcpu;
+	} else
 		return 0;
 }
 
@@ -771,9 +757,11 @@ get_info_vm(const char *vm_name, struct vm_info *info)
 	rte_spinlock_unlock(&(vm_info->config_spinlock));
 	memcpy(info->name, vm_info->name, sizeof(vm_info->name));
 
+	rte_spinlock_lock(&(vm_info->config_spinlock));
 	for (i = 0; i < info->num_vcpus; i++) {
-		info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
+		info->pcpu_map[i] = vm_info->pcpu_map[i];
 	}
+	rte_spinlock_unlock(&(vm_info->config_spinlock));
 	return 0;
 }
 
@@ -823,7 +811,7 @@ add_vm(const char *vm_name)
 	}
 
 	for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
-		rte_atomic64_init(&new_domain->pcpu_mask[i]);
+		new_domain->pcpu_map[i] = 0;
 	}
 	if (update_pcpus_mask(new_domain) < 0) {
 		RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index c2520ab6f..119b0d5cc 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -40,7 +40,6 @@ struct sockaddr_un _sockaddr_un;
 #define MAX_CLIENTS 64
 #define MAX_VCPUS 20
 
-
 struct libvirt_vm_info {
 	const char *vm_name;
 	unsigned int pcpus[MAX_VCPUS];
@@ -82,7 +81,7 @@ struct channel_info {
 struct vm_info {
 	char name[CHANNEL_MGR_MAX_NAME_LEN];          /**< VM name */
 	enum vm_status status;                        /**< libvirt status */
-	uint64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];    /**< pCPU mask for each vCPU */
+	uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];     /**< pCPU map to vCPU */
 	unsigned num_vcpus;                           /**< number of vCPUS */
 	struct channel_info channels[CHANNEL_MGR_MAX_CHANNELS];  /**< Array of channel_info */
 	unsigned num_channels;                        /**< Number of channels */
@@ -115,8 +114,7 @@ int channel_manager_init(const char *path);
 void channel_manager_exit(void);
 
 /**
- * Get the Physical CPU mask for VM lcore channel(vcpu), result is assigned to
- * core_mask.
+ * Get the Physical CPU for VM lcore channel(vcpu).
  * It is not thread-safe.
  *
  * @param chan_info
@@ -130,26 +128,7 @@ void channel_manager_exit(void);
 * - 0 on error.
 * - >0 on success.
 */
-uint64_t get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu);
-
-/**
- * Set the Physical CPU mask for the specified vCPU.
- * It is not thread-safe.
- *
- * @param name
- *  Virtual Machine name to lookup
- *
- * @param vcpu
- *  The virtual CPU to set.
- *
- * @param core_mask
- *  The core mask of the physical CPU(s) to bind the vCPU
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
+uint16_t get_pcpu(struct channel_info *chan_info, unsigned int vcpu);
 
 /**
  * Set the Physical CPU for the specified vCPU.
@@ -168,7 +147,8 @@ int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
 * - 0 on success.
 * - Negative on error.
 */
-int set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num);
+int set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu);
+
 
 /**
  * Add a VM as specified by name to the Channel Manager. The name must
 * correspond to a valid libvirt domain name.
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 4189bbf1b..85622e7cb 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -356,7 +356,6 @@ get_pcpu_to_control(struct policy *pol)
 	/* Convert vcpu to pcpu. */
 	struct vm_info info;
 	int pcpu, count;
-	uint64_t mask_u64b;
 	struct core_info *ci;
 
 	ci = get_core_info();
@@ -377,13 +376,8 @@ get_pcpu_to_control(struct policy *pol)
 		 */
 		get_info_vm(pol->pkt.vm_name, &info);
 		for (count = 0; count < pol->pkt.num_vcpu; count++) {
-			mask_u64b =
-				info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
-			for (pcpu = 0; mask_u64b;
-					mask_u64b &= ~(1ULL << pcpu++)) {
-				if ((mask_u64b >> pcpu) & 1)
-					pcpu_monitor(pol, ci, pcpu, count);
-			}
+			pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
+			pcpu_monitor(pol, ci, pcpu, count);
 		}
 	} else {
 		/*
@@ -636,8 +630,6 @@ apply_policy(struct policy *pol)
 static int
 process_request(struct channel_packet *pkt, struct channel_info *chan_info)
 {
-	uint64_t core_mask;
-
 	if (chan_info == NULL)
 		return -1;
 
@@ -646,41 +638,31 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
 		return -1;
 
 	if (pkt->command == CPU_POWER) {
-		core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
-		if (core_mask == 0) {
-			/*
-			 * Core mask will be 0 in the case where
-			 * hypervisor is not available so we're working in
-			 * the host, so use the core as the mask.
-			 */
-			core_mask = 1ULL << pkt->resource_id;
-		}
-		if (__builtin_popcountll(core_mask) == 1) {
+		unsigned int core_num;
 
-			unsigned core_num = __builtin_ffsll(core_mask) - 1;
+		core_num = get_pcpu(chan_info, pkt->resource_id);
 
-			switch (pkt->unit) {
-			case(CPU_POWER_SCALE_MIN):
-				power_manager_scale_core_min(core_num);
+		switch (pkt->unit) {
+		case(CPU_POWER_SCALE_MIN):
+			power_manager_scale_core_min(core_num);
 			break;
-			case(CPU_POWER_SCALE_MAX):
-				power_manager_scale_core_max(core_num);
+		case(CPU_POWER_SCALE_MAX):
+			power_manager_scale_core_max(core_num);
 			break;
-			case(CPU_POWER_SCALE_DOWN):
-				power_manager_scale_core_down(core_num);
+		case(CPU_POWER_SCALE_DOWN):
+			power_manager_scale_core_down(core_num);
 			break;
-			case(CPU_POWER_SCALE_UP):
-				power_manager_scale_core_up(core_num);
+		case(CPU_POWER_SCALE_UP):
+			power_manager_scale_core_up(core_num);
 			break;
-			case(CPU_POWER_ENABLE_TURBO):
-				power_manager_enable_turbo_core(core_num);
+		case(CPU_POWER_ENABLE_TURBO):
+			power_manager_enable_turbo_core(core_num);
 			break;
-			case(CPU_POWER_DISABLE_TURBO):
-				power_manager_disable_turbo_core(core_num);
+		case(CPU_POWER_DISABLE_TURBO):
+			power_manager_disable_turbo_core(core_num);
+			break;
+		default:
 			break;
-			default:
-				break;
-			}
 		}
 	}
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index 34546809a..41e89ff20 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -95,8 +95,8 @@ cmd_show_vm_parsed(void *parsed_result, struct cmdline *cl,
 	}
 	cmdline_printf(cl, "Virtual CPU(s): %u\n", info.num_vcpus);
 	for (i = 0; i < info.num_vcpus; i++) {
-		cmdline_printf(cl, "  [%u]: Physical CPU Mask 0x%"PRIx64"\n", i,
-				info.pcpu_mask[i]);
+		cmdline_printf(cl, "  [%u]: Physical CPU %d\n", i,
+				info.pcpu_map[i]);
 	}
 }
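As a usage illustration (editor's sketch, not part of the patch): after this
change a consumer resolves each vCPU to a single physical core through the
new per-vCPU map, much as cmd_show_vm_parsed() does above. Only
get_info_vm(), struct vm_info, num_vcpus and pcpu_map come from the example
code; the helper name and standalone framing are assumptions:

    /* Editor's sketch: walk the per-vCPU pinning map.  Assumes
     * channel_manager.h from the example is on the include path and
     * the channel manager has already been initialised. */
    #include <stdio.h>
    #include "channel_manager.h"

    static void
    show_vcpu_pinning(const char *vm_name)
    {
            struct vm_info info;
            unsigned int i;

            if (get_info_vm(vm_name, &info) != 0)
                    return;

            for (i = 0; i < info.num_vcpus; i++)
                    printf("vCPU[%u] -> pCPU %u\n", i, info.pcpu_map[i]);
    }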