From patchwork Mon Dec 12 02:49:43 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ling, WeiX" X-Patchwork-Id: 120734 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 76AC9A04FD; Mon, 12 Dec 2022 03:57:01 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 70AB940684; Mon, 12 Dec 2022 03:57:01 +0100 (CET) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id 2B50040151 for ; Mon, 12 Dec 2022 03:56:59 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1670813819; x=1702349819; h=from:to:cc:subject:date:message-id:mime-version: content-transfer-encoding; bh=pKm01Ws7aLHBFSgyZu1caN7QY9Ru9QfySACiOb+yuzQ=; b=ILZCqaEemmKhEqAudzDfUf09vIWsAbX8cWL7r0jiFk0viYPMcCdK9wMn Wft/o1EjDHfbGoaYhzfWMXzEUXGLqT2p9OSYTEpma0qflJ/WfV/3/wL/Q A9e8QIpq3RVZRQqMUA8NyiHApVsCcs/teOVwgWj6cmZ93tuNGFqtALBsI XmS+VbGrhTdcWQQpgM35rdzhfpOrsyi4/a7xUR5quF8Zh1/e953RIunCi Ighn7LwRqxg4mICGJDfDC/l9qzxbQxWMctm/tNvXlMzY24+pX/p1qU6XE Vn3iMwjU1bFup5RS4QhlaDFxgXioucr7UBDMjG3vN+ygkaN7IFG9qIY3/ g==; X-IronPort-AV: E=McAfee;i="6500,9779,10558"; a="316449039" X-IronPort-AV: E=Sophos;i="5.96,237,1665471600"; d="scan'208";a="316449039" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 11 Dec 2022 18:56:58 -0800 X-IronPort-AV: E=McAfee;i="6500,9779,10558"; a="641597463" X-IronPort-AV: E=Sophos;i="5.96,237,1665471600"; d="scan'208";a="641597463" Received: from unknown (HELO localhost.localdomain) ([10.239.252.222]) by orsmga007-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 11 Dec 2022 18:56:56 -0800 From: Wei Ling To: dts@dpdk.org Cc: Wei Ling 
Subject: [dts][PATCH V3 1/2] framework/qemu_kvm: support pin VM's thread to vhost CPU lcore Date: Mon, 12 Dec 2022 10:49:43 +0800 Message-Id: <20221212024943.2776599-1-weix.ling@intel.com> X-Mailer: git-send-email 2.25.1 MIME-Version: 1.0 X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org 1) Support pinning the VM's vCPU threads to the vhost CPU lcores assigned to it (new optional pin_threads flag on _start_vm, default True, so existing callers are unaffected). 2) Fix the key check in the add_vm_daemon method: it tested for a "daemon" key in options but then read options["enable"]; it now checks for the "enable" key it actually reads. Signed-off-by: Wei Ling --- framework/qemu_kvm.py | 50 ++++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/framework/qemu_kvm.py b/framework/qemu_kvm.py index 20aa8008..0efe286c 100644 --- a/framework/qemu_kvm.py +++ b/framework/qemu_kvm.py @@ -1241,7 +1241,7 @@ class QEMUKvm(VirtBase): By default VM will start with the daemonize status. Not support starting it on the stdin now. """ - if "daemon" in list(options.keys()) and options["enable"] == "no": + if "enable" in list(options.keys()) and options["enable"] == "no": pass else: daemon_boot_line = "-daemonize" @@ -1365,7 +1365,7 @@ class QEMUKvm(VirtBase): self.migrate_port = int(m.group(1)) - def _start_vm(self): + def _start_vm(self, pin_threads=True): """ Start VM. 
""" @@ -1377,6 +1377,11 @@ class QEMUKvm(VirtBase): self.__get_pci_mapping() + # pin VM threads with host CPU cores + if pin_threads: + lcores = self.vcpus_pinned_to_vm.split(" ") + self.pin_threads(lcores=lcores) + # query status self.update_status() @@ -1385,7 +1390,7 @@ class QEMUKvm(VirtBase): # when vm is waiting for migration, can't ping if self.vm_status is not ST_PAUSE: - self.__wait_vm_ready() + self.__wait_vm_ready(pin_threads=pin_threads) self.__wait_vmnet_ready() @@ -1445,7 +1450,7 @@ class QEMUKvm(VirtBase): return logged_in - def __wait_vm_ready(self): + def __wait_vm_ready(self, pin_threads=True): logged_in = self.__ping_vm() if not logged_in: if not self.restarted: @@ -1454,7 +1459,7 @@ class QEMUKvm(VirtBase): self.vm_status = ST_NOTSTART self._stop_vm() self.restarted = True - self._start_vm() + self._start_vm(pin_threads=pin_threads) else: raise StartVMFailedException( "Not response in %d seconds!!!" % self.START_TIMEOUT @@ -2003,14 +2008,33 @@ class QEMUKvm(VirtBase): def pin_threads(self, lcores): """ - Pin thread to assigned cores + Pin thread to assigned cores. 
+ If threads <= lcores, like: threads=[427756, 427757], lcores=[48, 49, 50]: + taskset -pc 48 427756 + taskset -pc 49 427757 + + If threads > lcores, like threads=[427756, 427757, 427758, 427759, 427760], lcores=[48,49,50] + taskset -pc 48 427756 + taskset -pc 49 427757 + taskset -pc 50 427758 + taskset -pc 48 427759 + taskset -pc 49 427760 """ - thread_reg = r"CPU #(\d+): .* thread_id=(\d+)" + thread_reg = r"CPU #\d+: thread_id=(\d+)" output = self.__monitor_session("info", "cpus") - thread_cores = re.findall(thread_reg, output) - cores_map = list(zip(thread_cores, lcores)) - for thread_info, core_id in cores_map: - cpu_id, thread_id = thread_info - self.host_session.send_expect( - "taskset -pc %d %s" % (core_id, thread_id), "#" + threads = re.findall(thread_reg, output) + if len(threads) <= len(lcores): + map = list(zip(threads, lcores)) + else: + self.host_logger.warning( + "lcores is less than threads, please check the 'number' and 'cpupin' config." ) + lcore_len = len(lcores) + for item in threads: + thread_idx = threads.index(item) + if thread_idx >= lcore_len: + lcore_idx = thread_idx % lcore_len + lcores.append(lcores[lcore_idx]) + map = list(zip(threads, lcores)) + for thread, lcore in map: + self.host_session.send_expect("taskset -pc %s %s" % (lcore, thread), "#")