[v2,3/3] examples/power_guest: send request for specified core capabilities

Message ID 20190930130043.2885-4-david.hunt@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Thomas Monjalon
Series: Core capabilities query

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail apply issues

Commit Message

Hunt, David Sept. 30, 2019, 1 p.m. UTC
  From: Marcin Hajkowski <marcinx.hajkowski@intel.com>

Send request to power manager for core id provided
by user to get related capabilities.

Signed-off-by: Marcin Hajkowski <marcinx.hajkowski@intel.com>
Tested-by: David Hunt <david.hunt@intel.com>
Acked-by: Lee Daly <lee.daly@intel.com>
---
 .../guest_cli/vm_power_cli_guest.c            | 119 +++++++++++++++++-
 1 file changed, 117 insertions(+), 2 deletions(-)
  

Comments

Hunt, David Sept. 30, 2019, 1:04 p.m. UTC | #1
On 30/09/2019 14:00, David Hunt wrote:
> From: Marcin Hajkowski <marcinx.hajkowski@intel.com>
>
> Send request to power manager for core id provided
> by user to get related capabilities.
>
> Signed-off-by: Marcin Hajkowski <marcinx.hajkowski@intel.com>
> Tested-by: David Hunt <david.hunt@intel.com>
> Acked-by: Lee Daly <lee.daly@intel.com>
> ---
>   .../guest_cli/vm_power_cli_guest.c            | 119 +++++++++++++++++-
>   1 file changed, 117 insertions(+), 2 deletions(-)
>
--snip--

Some of the steps used to test this are included below:

In the host, enable queries from the guest to the host via the
virtio-serial interface:

vmpower> set_query ubuntu3 enable

On an SST-BF enabled host with 6 high priority cores per
socket, query the list of high priority cores with the
sst_bf.py Python script
(https://github.com/intel/commspowermanagement). The second
line of output is the hexadecimal mask of these cores:

root@silpixa00399952:~# sst_bf.py -l
1,6,7,8,9,16,21,26,27,28,29,30
0x7c2103c2

In the host, query the cores used by the VM:

vmpower> show_vm ubuntu3
...snip...
Virtual CPU(s): 8
   [0]: Physical CPU 28
   [1]: Physical CPU 29
   [2]: Physical CPU 30
   [3]: Physical CPU 31
   [4]: Physical CPU 32
   [5]: Physical CPU 33
   [6]: Physical CPU 34
   [7]: Physical CPU 35

So we now know that cores 28, 29 and 30 should show up as high priority
in the VM. From the VM side, we use the new 'query_cpu_caps' command
to see the capabilities of each core:

vmpower(guest)> query_cpu_caps all
Capabilities of [0] vcore are: turbo possibility: 1, is priority core: 1.
Capabilities of [1] vcore are: turbo possibility: 1, is priority core: 1.
Capabilities of [2] vcore are: turbo possibility: 1, is priority core: 1.
Capabilities of [3] vcore are: turbo possibility: 1, is priority core: 0.
Capabilities of [4] vcore are: turbo possibility: 1, is priority core: 0.
Capabilities of [5] vcore are: turbo possibility: 1, is priority core: 0.
Capabilities of [6] vcore are: turbo possibility: 1, is priority core: 0.
Capabilities of [7] vcore are: turbo possibility: 1, is priority core: 0.

As expected, vcores 0, 1 and 2 (pcores 28, 29 and 30) are high priority cores.
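
A single vcore can also be queried on its own. Based on the output
format added in the patch below, querying vcore 0 in the same setup
should give:

vmpower(guest)> query_cpu_caps 0
Capabilities of [0] vcore are: turbo possibility: 1, is priority core: 1.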

Tested-by: David Hunt <david.hunt@intel.com>
  

Patch

diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 0bdb2d0d0..096ecdc04 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -132,7 +132,7 @@  struct cmd_freq_list_result {
 };
 
 static int
-query_freq_list(struct channel_packet *pkt, unsigned int lcore_id)
+query_data(struct channel_packet *pkt, unsigned int lcore_id)
 {
 	int ret;
 	ret = rte_power_guest_channel_send_msg(pkt, lcore_id);
@@ -206,7 +206,7 @@  cmd_query_freq_list_parsed(void *parsed_result,
 		pkt.resource_id = lcore_id;
 	}
 
-	ret = query_freq_list(&pkt, lcore_id);
+	ret = query_data(&pkt, lcore_id);
 	if (ret < 0) {
 		cmdline_printf(cl, "Error during sending frequency list query.\n");
 		return;
@@ -248,6 +248,120 @@  cmdline_parse_inst_t cmd_query_freq_list = {
 	},
 };
 
+struct cmd_query_caps_result {
+	cmdline_fixed_string_t query_caps;
+	cmdline_fixed_string_t cpu_num;
+};
+
+static int
+receive_capabilities(struct channel_packet_caps_list *pkt_caps_list,
+		unsigned int lcore_id)
+{
+	int ret;
+
+	ret = rte_power_guest_channel_receive_msg(pkt_caps_list,
+		sizeof(struct channel_packet_caps_list),
+		lcore_id);
+	if (ret < 0) {
+		RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n");
+		return -1;
+	}
+	if (pkt_caps_list->command != CPU_POWER_CAPS_LIST) {
+		RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n");
+		return -1;
+	}
+	return 0;
+}
+
+static void
+cmd_query_caps_list_parsed(void *parsed_result,
+		__rte_unused struct cmdline *cl,
+		__rte_unused void *data)
+{
+	struct cmd_query_caps_result *res = parsed_result;
+	unsigned int lcore_id;
+	struct channel_packet_caps_list pkt_caps_list;
+	struct channel_packet pkt;
+	bool query_list = false;
+	int ret;
+	char *ep;
+
+	memset(&pkt, 0, sizeof(struct channel_packet));
+	memset(&pkt_caps_list, 0, sizeof(struct channel_packet_caps_list));
+
+	if (!strcmp(res->cpu_num, "all")) {
+
+		/* Get first enabled lcore. */
+		lcore_id = rte_get_next_lcore(-1,
+				0,
+				0);
+		if (lcore_id == RTE_MAX_LCORE) {
+			cmdline_printf(cl, "Enabled core not found.\n");
+			return;
+		}
+
+		pkt.command = CPU_POWER_QUERY_CAPS_LIST;
+		strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
+		query_list = true;
+	} else {
+		errno = 0;
+		lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10);
+		if (errno != 0 || lcore_id >= MAX_VCPU_PER_VM ||
+			ep == res->cpu_num) {
+			cmdline_printf(cl, "Invalid parameter provided.\n");
+			return;
+		}
+		pkt.command = CPU_POWER_QUERY_CAPS;
+		strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
+		pkt.resource_id = lcore_id;
+	}
+
+	ret = query_data(&pkt, lcore_id);
+	if (ret < 0) {
+		cmdline_printf(cl, "Error during sending capabilities query.\n");
+		return;
+	}
+
+	ret = receive_capabilities(&pkt_caps_list, lcore_id);
+	if (ret < 0) {
+		cmdline_printf(cl, "Error during capabilities reception.\n");
+		return;
+	}
+	if (query_list) {
+		unsigned int i;
+		for (i = 0; i < pkt_caps_list.num_vcpu; ++i)
+			cmdline_printf(cl, "Capabilities of [%d] vcore are:"
+					" turbo possibility: %ld, is priority core: %ld.\n",
+					i,
+					pkt_caps_list.turbo[i],
+					pkt_caps_list.priority[i]);
+	} else {
+		cmdline_printf(cl, "Capabilities of [%d] vcore are:"
+				" turbo possibility: %ld, is priority core: %ld.\n",
+				lcore_id,
+				pkt_caps_list.turbo[lcore_id],
+				pkt_caps_list.priority[lcore_id]);
+	}
+}
+
+cmdline_parse_token_string_t cmd_query_caps_token =
+	TOKEN_STRING_INITIALIZER(struct cmd_query_caps_result, query_caps, "query_cpu_caps");
+cmdline_parse_token_string_t cmd_query_caps_cpu_num_token =
+	TOKEN_STRING_INITIALIZER(struct cmd_query_caps_result, cpu_num, NULL);
+
+cmdline_parse_inst_t cmd_query_caps_list = {
+	.f = cmd_query_caps_list_parsed,  /* function to call */
+	.data = NULL,      /* 2nd arg of func */
+	.help_str = "query_cpu_caps <core_num>|all, request"
+				" information regarding virtual core capabilities."
+				" The keyword 'all' will query list of all vcores for the VM",
+	.tokens = {        /* token list, NULL terminated */
+		(void *)&cmd_query_caps_token,
+		(void *)&cmd_query_caps_cpu_num_token,
+		NULL,
+	},
+};
+
 static int
 check_response_cmd(unsigned int lcore_id, int *result)
 {
@@ -408,6 +522,7 @@  cmdline_parse_ctx_t main_ctx[] = {
 		(cmdline_parse_inst_t *)&cmd_send_policy_set,
 		(cmdline_parse_inst_t *)&cmd_set_cpu_freq_set,
 		(cmdline_parse_inst_t *)&cmd_query_freq_list,
+		(cmdline_parse_inst_t *)&cmd_query_caps_list,
 		NULL,
 };
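
Note: struct channel_packet_caps_list and the CPU_POWER_QUERY_CAPS /
CPU_POWER_CAPS_LIST commands are introduced earlier in this series (in
the shared channel_commands.h header). A minimal sketch of the layout
this patch assumes is shown below, with field names and types inferred
from how the diff above uses them; the exact definition in the series
may differ:

/* Sketch only: inferred from the usage in the diff above. */
struct channel_packet_caps_list {
	uint64_t resource_id;               /* core number being queried */
	char vm_name[VM_MAX_NAME_SZ];       /* name of the requesting VM */
	uint64_t command;                   /* CPU_POWER_CAPS_LIST in the reply */
	uint64_t num_vcpu;                  /* number of valid entries below */
	uint64_t turbo[MAX_VCPU_PER_VM];    /* 1 if the vcore supports turbo */
	uint64_t priority[MAX_VCPU_PER_VM]; /* 1 if SST-BF high priority core */
};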