From patchwork Mon Nov 14 16:54:29 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119840 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 71094A00C4; Mon, 14 Nov 2022 17:54:51 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 8BA694114B; Mon, 14 Nov 2022 17:54:45 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 77A694014F for ; Mon, 14 Nov 2022 17:54:44 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 5C582243CEB; Mon, 14 Nov 2022 17:54:43 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id g_1JwBfZjsR9; Mon, 14 Nov 2022 17:54:40 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id A05B81703B8; Mon, 14 Nov 2022 17:54:39 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 01/10] dts: add node and os abstractions Date: Mon, 14 Nov 2022 16:54:29 +0000 Message-Id: <20221114165438.1133783-2-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 
Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The abstraction model in DTS is as follows: Node, defining and implementing methods common to and the base of SUT (system under test) Node and TG (traffic generator) Node. Remote Session, defining and implementing methods common to any remote session implementation, such as SSH Session. OSSession, defining and implementing methods common to any operating system/distribution, such as Linux. OSSession uses a derived Remote Session and Node in turn uses a derived OSSession. This split delegates OS-specific and connection-specific code to specialized classes designed to handle the differences. The base classes implement the methods or parts of methods that are common to all implementations and defines abstract methods that must be implemented by derived classes. Part of the abstractions is the DTS test execution skeleton: node init, execution setup, build setup and then test execution. 
Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 8 +- dts/framework/config/__init__.py | 70 +++++++++- dts/framework/config/conf_yaml_schema.json | 66 +++++++++- dts/framework/dts.py | 116 +++++++++++++---- dts/framework/exception.py | 87 ++++++++++++- dts/framework/remote_session/__init__.py | 18 +-- dts/framework/remote_session/factory.py | 14 ++ dts/framework/remote_session/os/__init__.py | 17 +++ .../remote_session/os/linux_session.py | 11 ++ dts/framework/remote_session/os/os_session.py | 46 +++++++ .../remote_session/os/posix_session.py | 12 ++ .../remote_session/remote_session.py | 23 +++- dts/framework/remote_session/ssh_session.py | 2 +- dts/framework/testbed_model/__init__.py | 6 +- dts/framework/testbed_model/node.py | 62 --------- dts/framework/testbed_model/node/__init__.py | 7 + dts/framework/testbed_model/node/node.py | 120 ++++++++++++++++++ dts/framework/testbed_model/node/sut_node.py | 13 ++ 18 files changed, 591 insertions(+), 107 deletions(-) create mode 100644 dts/framework/remote_session/factory.py create mode 100644 dts/framework/remote_session/os/__init__.py create mode 100644 dts/framework/remote_session/os/linux_session.py create mode 100644 dts/framework/remote_session/os/os_session.py create mode 100644 dts/framework/remote_session/os/posix_session.py delete mode 100644 dts/framework/testbed_model/node.py create mode 100644 dts/framework/testbed_model/node/__init__.py create mode 100644 dts/framework/testbed_model/node/node.py create mode 100644 dts/framework/testbed_model/node/sut_node.py diff --git a/dts/conf.yaml b/dts/conf.yaml index 1aaa593612..6b0bc5c2bf 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -2,8 +2,14 @@ # Copyright 2022 The DPDK contributors executions: - - system_under_test: "SUT 1" + - build_targets: + - arch: x86_64 + os: linux + cpu: native + compiler: gcc + system_under_test: "SUT 1" nodes: - name: "SUT 1" hostname: sut1.change.me.localhost user: root + os: linux diff --git a/dts/framework/config/__init__.py 
b/dts/framework/config/__init__.py index 214be8e7f4..1b97dc3ab9 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -3,13 +3,14 @@ # Copyright(c) 2022 University of New Hampshire """ -Generic port and topology nodes configuration file load function +Yaml config parsing methods """ import json import os.path import pathlib from dataclasses import dataclass +from enum import Enum, auto, unique from typing import Any import warlock # type: ignore @@ -18,6 +19,47 @@ from framework.settings import SETTINGS +class StrEnum(Enum): + @staticmethod + def _generate_next_value_( + name: str, start: int, count: int, last_values: object + ) -> str: + return name + + +@unique +class Architecture(StrEnum): + i686 = auto() + x86_64 = auto() + x86_32 = auto() + arm64 = auto() + ppc64le = auto() + + +@unique +class OS(StrEnum): + linux = auto() + freebsd = auto() + windows = auto() + + +@unique +class CPUType(StrEnum): + native = auto() + armv8a = auto() + dpaa2 = auto() + thunderx = auto() + xgene1 = auto() + + +@unique +class Compiler(StrEnum): + gcc = auto() + clang = auto() + icc = auto() + msvc = auto() + + # Slots enables some optimizations, by pre-allocating space for the defined # attributes in the underlying data structure. 
# @@ -29,6 +71,7 @@ class NodeConfiguration: hostname: str user: str password: str | None + os: OS @staticmethod def from_dict(d: dict) -> "NodeConfiguration": @@ -37,19 +80,44 @@ def from_dict(d: dict) -> "NodeConfiguration": hostname=d["hostname"], user=d["user"], password=d.get("password"), + os=OS(d["os"]), + ) + + +@dataclass(slots=True, frozen=True) +class BuildTargetConfiguration: + arch: Architecture + os: OS + cpu: CPUType + compiler: Compiler + name: str + + @staticmethod + def from_dict(d: dict) -> "BuildTargetConfiguration": + return BuildTargetConfiguration( + arch=Architecture(d["arch"]), + os=OS(d["os"]), + cpu=CPUType(d["cpu"]), + compiler=Compiler(d["compiler"]), + name=f"{d['arch']}-{d['os']}-{d['cpu']}-{d['compiler']}", ) @dataclass(slots=True, frozen=True) class ExecutionConfiguration: + build_targets: list[BuildTargetConfiguration] system_under_test: NodeConfiguration @staticmethod def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": + build_targets: list[BuildTargetConfiguration] = list( + map(BuildTargetConfiguration.from_dict, d["build_targets"]) + ) sut_name = d["system_under_test"] assert sut_name in node_map, f"Unknown SUT {sut_name} in execution {d}" return ExecutionConfiguration( + build_targets=build_targets, system_under_test=node_map[sut_name], ) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index 6b8d6ccd05..409ce7ac74 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -5,6 +5,58 @@ "node_name": { "type": "string", "description": "A unique identifier for a node" + }, + "OS": { + "type": "string", + "enum": [ + "linux" + ] + }, + "cpu": { + "type": "string", + "description": "Native should be the default on x86", + "enum": [ + "native", + "armv8a", + "dpaa2", + "thunderx", + "xgene1" + ] + }, + "compiler": { + "type": "string", + "enum": [ + "gcc", + "clang", + "icc", + "mscv" + ] + }, + "build_target": { + 
"type": "object", + "description": "Targets supported by DTS", + "properties": { + "arch": { + "type": "string", + "enum": [ + "ALL", + "x86_64", + "arm64", + "ppc64le", + "other" + ] + }, + "os": { + "$ref": "#/definitions/OS" + }, + "cpu": { + "$ref": "#/definitions/cpu" + }, + "compiler": { + "$ref": "#/definitions/compiler" + } + }, + "additionalProperties": false } }, "type": "object", @@ -29,13 +81,17 @@ "password": { "type": "string", "description": "The password to use on this node. Use only as a last resort. SSH keys are STRONGLY preferred." + }, + "os": { + "$ref": "#/definitions/OS" } }, "additionalProperties": false, "required": [ "name", "hostname", - "user" + "user", + "os" ] }, "minimum": 1 @@ -45,12 +101,20 @@ "items": { "type": "object", "properties": { + "build_targets": { + "type": "array", + "items": { + "$ref": "#/definitions/build_target" + }, + "minimum": 1 + }, "system_under_test": { "$ref": "#/definitions/node_name" } }, "additionalProperties": false, "required": [ + "build_targets", "system_under_test" ] }, diff --git a/dts/framework/dts.py b/dts/framework/dts.py index d23cfc4526..262c392d8e 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -3,32 +3,38 @@ # Copyright(c) 2022 PANTHEON.tech s.r.o. # Copyright(c) 2022 University of New Hampshire +import os import sys import traceback from collections.abc import Iterable -from framework.testbed_model.node import Node +from framework.testbed_model import Node, SutNode -from .config import CONFIGURATION +from .config import CONFIGURATION, BuildTargetConfiguration, ExecutionConfiguration +from .exception import DTSError, ReturnCode from .logger import DTSLOG, getLogger +from .settings import SETTINGS from .utils import check_dts_python_version -dts_logger: DTSLOG | None = None +dts_logger: DTSLOG = getLogger("dts") def run_all() -> None: """ - Main process of DTS, it will run all test suites in the config file. + The main process of DTS. 
Runs all build targets in all executions from the main + config file. """ - + return_code = ReturnCode.NO_ERR global dts_logger # check the python version of the server that run dts check_dts_python_version() - dts_logger = getLogger("dts") + # prepare the output folder + if not os.path.exists(SETTINGS.output_dir): + os.mkdir(SETTINGS.output_dir) - nodes = {} + nodes: dict[str, Node] = {} # This try/finally block means "Run the try block, if there is an exception, # run the finally block before passing it upward. If there is not an exception, # run the finally block after the try block is finished." This helps avoid the @@ -38,30 +44,92 @@ def run_all() -> None: try: # for all Execution sections for execution in CONFIGURATION.executions: - sut_config = execution.system_under_test - if sut_config.name not in nodes: - node = Node(sut_config) - nodes[sut_config.name] = node - node.send_command("echo Hello World") - - except Exception as e: - # sys.exit() doesn't produce a stack trace, need to print it explicitly - traceback.print_exc() + sut_node = init_nodes(execution, nodes) + run_execution(sut_node, execution) + + except DTSError as e: + dts_logger.error(traceback.format_exc()) + return_code = e.return_code raise e + except Exception: + # sys.exit() doesn't produce a stack trace, need to produce it explicitly + dts_logger.error(traceback.format_exc()) + return_code = ReturnCode.GENERIC_ERR + raise + + finally: + quit_execution(nodes.values(), return_code) + + +def init_nodes( + execution: ExecutionConfiguration, existing_nodes: dict[str, Node] +) -> SutNode: + """ + Create DTS SUT instance used in the given execution and initialize it. If already + initialized (in a previous execution), return the existing SUT. 
+ """ + if execution.system_under_test.name in existing_nodes: + # a Node with the same name already exists + sut_node = existing_nodes[execution.system_under_test.name] + else: + # the SUT has not been initialized yet + sut_node = SutNode(execution.system_under_test) + existing_nodes[sut_node.name] = sut_node + + return sut_node + + +def run_execution(sut_node: SutNode, execution: ExecutionConfiguration) -> None: + """ + Run the given execution. This involves running the execution setup as well as + running all build targets in the given execution. + """ + dts_logger.info(f"Running execution with SUT '{execution.system_under_test.name}'.") + try: + sut_node.setup_execution(execution) + for build_target in execution.build_targets: + run_build_target(sut_node, build_target, execution) + finally: - quit_execution(nodes.values()) + sut_node.cleanup_execution() + + +def run_build_target( + sut_node: SutNode, + build_target: BuildTargetConfiguration, + execution: ExecutionConfiguration, +) -> None: + """ + Run the given build target. + """ + dts_logger.info(f"Running target '{build_target.name}'.") + try: + sut_node.setup_build_target(build_target) + run_suite(sut_node, build_target, execution) + + finally: + sut_node.teardown_build_target() + + +def run_suite( + sut_node: SutNode, + build_target: BuildTargetConfiguration, + execution: ExecutionConfiguration, +) -> None: + """ + Use the given build_target to run the test suite with possibly only a subset + of tests. If no subset is specified, run all tests. + """ -def quit_execution(sut_nodes: Iterable[Node]) -> None: +def quit_execution(nodes: Iterable[Node], return_code: ReturnCode) -> None: """ - Close session to SUT and TG before quit. - Return exit status when failure occurred. + Close all node resources before quitting. 
""" - for sut_node in sut_nodes: - # close all session - sut_node.node_exit() + for node in nodes: + node.close() if dts_logger is not None: dts_logger.info("DTS execution has ended.") - sys.exit(0) + sys.exit(return_code) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index 8b2f08a8f0..cac8d84416 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -7,14 +7,45 @@ User-defined exceptions used across the framework. """ +from enum import IntEnum, unique +from typing import Callable, ClassVar -class SSHTimeoutError(Exception): + +@unique +class ReturnCode(IntEnum): + """ + The various return codes that DTS exists with. + There are four categories of return codes: + 0-9 DTS Framework errors + 10-19 DPDK/Traffic Generator errors + 20-29 Node errors + 30-39 Test errors + """ + + NO_ERR = 0 + GENERIC_ERR = 1 + SSH_ERR = 2 + NODE_SETUP_ERR = 20 + NODE_CLEANUP_ERR = 21 + + +class DTSError(Exception): + """ + The base exception from which all DTS exceptions are derived. Servers to hold + the return code with which DTS should exit. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.GENERIC_ERR + + +class SSHTimeoutError(DTSError): """ Command execution timeout. """ command: str output: str + return_code: ClassVar[ReturnCode] = ReturnCode.SSH_ERR def __init__(self, command: str, output: str): self.command = command @@ -27,12 +58,13 @@ def get_output(self) -> str: return self.output -class SSHConnectionError(Exception): +class SSHConnectionError(DTSError): """ SSH connection error. """ host: str + return_code: ClassVar[ReturnCode] = ReturnCode.SSH_ERR def __init__(self, host: str): self.host = host @@ -41,16 +73,65 @@ def __str__(self) -> str: return f"Error trying to connect with {self.host}" -class SSHSessionDeadError(Exception): +class SSHSessionDeadError(DTSError): """ SSH session is not alive. It can no longer be used. 
""" host: str + return_code: ClassVar[ReturnCode] = ReturnCode.SSH_ERR def __init__(self, host: str): self.host = host def __str__(self) -> str: return f"SSH session with {self.host} has died" + + +class NodeSetupError(DTSError): + """ + Raised when setting up a node. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.NODE_SETUP_ERR + + def __init__(self): + super(NodeSetupError, self).__init__( + "An error occurred during node execution setup." + ) + + +class NodeCleanupError(DTSError): + """ + Raised when cleaning up node. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.NODE_CLEANUP_ERR + + def __init__(self): + super(NodeCleanupError, self).__init__( + "An error occurred during node execution cleanup." + ) + + +def convert_exception(exception: type[DTSError]) -> Callable[..., Callable[..., None]]: + """ + When a non-DTS exception is raised while executing the decorated function, + convert it to the supplied exception. + """ + + def convert_exception_wrapper(func) -> Callable[..., None]: + def convert(*args, **kwargs) -> None: + try: + func(*args, **kwargs) + + except DTSError: + raise + + except Exception as e: + raise exception() from e + + return convert + + return convert_exception_wrapper diff --git a/dts/framework/remote_session/__init__.py b/dts/framework/remote_session/__init__.py index a227d8db22..f2339b20bd 100644 --- a/dts/framework/remote_session/__init__.py +++ b/dts/framework/remote_session/__init__.py @@ -1,14 +1,14 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2022 PANTHEON.tech s.r.o. -from framework.config import NodeConfiguration -from framework.logger import DTSLOG +""" +The package provides modules for managing remote connections to a remote host (node), +differentiated by OS. +The package provides a factory function, create_session, that returns the appropriate +remote connection based on the passed configuration. The differences are in the +underlying transport protocol (e.g. SSH) and remote OS (e.g. Linux). 
+""" -from .remote_session import RemoteSession -from .ssh_session import SSHSession +# pylama:ignore=W0611 - -def create_remote_session( - node_config: NodeConfiguration, name: str, logger: DTSLOG -) -> RemoteSession: - return SSHSession(node_config, name, logger) +from .os import OSSession, create_session diff --git a/dts/framework/remote_session/factory.py b/dts/framework/remote_session/factory.py new file mode 100644 index 0000000000..a227d8db22 --- /dev/null +++ b/dts/framework/remote_session/factory.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. + +from framework.config import NodeConfiguration +from framework.logger import DTSLOG + +from .remote_session import RemoteSession +from .ssh_session import SSHSession + + +def create_remote_session( + node_config: NodeConfiguration, name: str, logger: DTSLOG +) -> RemoteSession: + return SSHSession(node_config, name, logger) diff --git a/dts/framework/remote_session/os/__init__.py b/dts/framework/remote_session/os/__init__.py new file mode 100644 index 0000000000..9d2ec7fca2 --- /dev/null +++ b/dts/framework/remote_session/os/__init__.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. 
+# Copyright(c) 2022 University of New Hampshire + +from framework.config import OS, NodeConfiguration +from framework.logger import DTSLOG + +from .linux_session import LinuxSession +from .os_session import OSSession + + +def create_session( + node_config: NodeConfiguration, name: str, logger: DTSLOG +) -> OSSession: + match node_config.os: + case OS.linux: + return LinuxSession(node_config, name, logger) diff --git a/dts/framework/remote_session/os/linux_session.py b/dts/framework/remote_session/os/linux_session.py new file mode 100644 index 0000000000..39e80631dd --- /dev/null +++ b/dts/framework/remote_session/os/linux_session.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# Copyright(c) 2022 University of New Hampshire + +from .posix_session import PosixSession + + +class LinuxSession(PosixSession): + """ + The implementation of non-Posix compliant parts of Linux remote sessions. + """ diff --git a/dts/framework/remote_session/os/os_session.py b/dts/framework/remote_session/os/os_session.py new file mode 100644 index 0000000000..2a72082628 --- /dev/null +++ b/dts/framework/remote_session/os/os_session.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# Copyright(c) 2022 University of New Hampshire + +from abc import ABC + +from framework.config import NodeConfiguration +from framework.logger import DTSLOG +from framework.remote_session.factory import create_remote_session +from framework.remote_session.remote_session import RemoteSession + + +class OSSession(ABC): + """ + The OS classes create a DTS node remote session and implement OS specific + behavior. There a few control methods implemented by the base class, the rest need + to be implemented by derived classes. 
+ """ + + _config: NodeConfiguration + name: str + logger: DTSLOG + remote_session: RemoteSession + + def __init__( + self, + node_config: NodeConfiguration, + name: str, + logger: DTSLOG, + ) -> None: + self._config = node_config + self.name = name + self.logger = logger + self.remote_session = create_remote_session(node_config, name, logger) + + def close(self, force: bool = False) -> None: + """ + Close the remote session. + """ + self.remote_session.close(force) + + def is_alive(self) -> bool: + """ + Check whether the remote session is still responding. + """ + return self.remote_session.is_alive() diff --git a/dts/framework/remote_session/os/posix_session.py b/dts/framework/remote_session/os/posix_session.py new file mode 100644 index 0000000000..9622a4ea30 --- /dev/null +++ b/dts/framework/remote_session/os/posix_session.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# Copyright(c) 2022 University of New Hampshire + +from .os_session import OSSession + + +class PosixSession(OSSession): + """ + An intermediary class implementing the Posix compliant parts of + Linux and other OS remote sessions. + """ diff --git a/dts/framework/remote_session/remote_session.py b/dts/framework/remote_session/remote_session.py index 33047d9d0a..4095e02c1b 100644 --- a/dts/framework/remote_session/remote_session.py +++ b/dts/framework/remote_session/remote_session.py @@ -19,6 +19,15 @@ class HistoryRecord: class RemoteSession(ABC): + """ + The base class for defining which methods must be implemented in order to connect + to a remote host (node) and maintain a remote session. The derived classes are + supposed to implement/use some underlying transport protocol (e.g. SSH) to + implement the methods. On top of that, it provides some basic services common to + all derived classes, such as keeping history and logging what's being executed + on the remote node. 
+ """ + name: str hostname: str ip: str @@ -58,9 +67,11 @@ def _connect(self) -> None: """ Create connection to assigned node. """ - pass def send_command(self, command: str, timeout: float = SETTINGS.timeout) -> str: + """ + Send a command and return the output. + """ self.logger.info(f"Sending: {command}") out = self._send_command(command, timeout) self.logger.debug(f"Received from {command}: {out}") @@ -70,7 +81,8 @@ def send_command(self, command: str, timeout: float = SETTINGS.timeout) -> str: @abstractmethod def _send_command(self, command: str, timeout: float) -> str: """ - Send a command and return the output. + Use the underlying protocol to execute the command and return the output + of the command. """ def _history_add(self, command: str, output: str) -> None: @@ -79,17 +91,20 @@ def _history_add(self, command: str, output: str) -> None: ) def close(self, force: bool = False) -> None: + """ + Close the remote session and free all used resources. + """ self.logger.logger_exit() self._close(force) @abstractmethod def _close(self, force: bool = False) -> None: """ - Close the remote session, freeing all used resources. + Execute protocol specific steps needed to close the session properly. """ @abstractmethod def is_alive(self) -> bool: """ - Check whether the session is still responding. + Check whether the remote session is still responding. """ diff --git a/dts/framework/remote_session/ssh_session.py b/dts/framework/remote_session/ssh_session.py index 7ec327054d..5816b1ce6b 100644 --- a/dts/framework/remote_session/ssh_session.py +++ b/dts/framework/remote_session/ssh_session.py @@ -17,7 +17,7 @@ class SSHSession(RemoteSession): """ - Module for creating Pexpect SSH sessions to a node. + Module for creating Pexpect SSH remote sessions. 
""" session: pxssh.pxssh diff --git a/dts/framework/testbed_model/__init__.py b/dts/framework/testbed_model/__init__.py index c5512e5812..13c29c59c8 100644 --- a/dts/framework/testbed_model/__init__.py +++ b/dts/framework/testbed_model/__init__.py @@ -2,6 +2,10 @@ # Copyright(c) 2022 University of New Hampshire """ -This module contains the classes used to model the physical traffic generator, +This package contains the classes used to model the physical traffic generator, system under test and any other components that need to be interacted with. """ + +# pylama:ignore=W0611 + +from .node import Node, SutNode diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py deleted file mode 100644 index 8437975416..0000000000 --- a/dts/framework/testbed_model/node.py +++ /dev/null @@ -1,62 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2010-2014 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire - -""" -A node is a generic host that DTS connects to and manages. -""" - -from framework.config import NodeConfiguration -from framework.logger import DTSLOG, getLogger -from framework.remote_session import RemoteSession, create_remote_session -from framework.settings import SETTINGS - - -class Node(object): - """ - Basic module for node management. This module implements methods that - manage a node, such as information gathering (of CPU/PCI/NIC) and - environment setup. 
- """ - - name: str - main_session: RemoteSession - logger: DTSLOG - _config: NodeConfiguration - _other_sessions: list[RemoteSession] - - def __init__(self, node_config: NodeConfiguration): - self._config = node_config - self._other_sessions = [] - - self.name = node_config.name - self.logger = getLogger(self.name) - self.logger.info(f"Created node: {self.name}") - self.main_session = create_remote_session(self._config, self.name, self.logger) - - def send_command(self, cmds: str, timeout: float = SETTINGS.timeout) -> str: - """ - Send commands to node and return string before timeout. - """ - - return self.main_session.send_command(cmds, timeout) - - def create_session(self, name: str) -> RemoteSession: - connection = create_remote_session( - self._config, - name, - getLogger(name, node=self.name), - ) - self._other_sessions.append(connection) - return connection - - def node_exit(self) -> None: - """ - Recover all resource before node exit - """ - if self.main_session: - self.main_session.close() - for session in self._other_sessions: - session.close() - self.logger.logger_exit() diff --git a/dts/framework/testbed_model/node/__init__.py b/dts/framework/testbed_model/node/__init__.py new file mode 100644 index 0000000000..a179056f1f --- /dev/null +++ b/dts/framework/testbed_model/node/__init__.py @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. + +# pylama:ignore=W0611 + +from .node import Node +from .sut_node import SutNode diff --git a/dts/framework/testbed_model/node/node.py b/dts/framework/testbed_model/node/node.py new file mode 100644 index 0000000000..86654e55ae --- /dev/null +++ b/dts/framework/testbed_model/node/node.py @@ -0,0 +1,120 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# Copyright(c) 2022 University of New Hampshire + +""" +A node is a generic host that DTS connects to and manages. 
+""" + +from framework.config import ( + BuildTargetConfiguration, + ExecutionConfiguration, + NodeConfiguration, +) +from framework.exception import NodeCleanupError, NodeSetupError, convert_exception +from framework.logger import DTSLOG, getLogger +from framework.remote_session import OSSession, create_session + + +class Node(object): + """ + Basic class for node management. This class implements methods that + manage a node, such as information gathering (of CPU/PCI/NIC) and + environment setup. + """ + + name: str + main_session: OSSession + logger: DTSLOG + config: NodeConfiguration + _other_sessions: list[OSSession] + + def __init__(self, node_config: NodeConfiguration): + self.config = node_config + self._other_sessions = [] + + self.name = node_config.name + self.logger = getLogger(self.name) + self.logger.info(f"Created node: {self.name}") + self.main_session = create_session(self.config, self.name, self.logger) + + @convert_exception(NodeSetupError) + def setup_execution(self, execution_config: ExecutionConfiguration) -> None: + """ + Perform the execution setup that will be done for each execution + this node is part of. + """ + self._setup_execution(execution_config) + + def _setup_execution(self, execution_config: ExecutionConfiguration) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. + """ + + @convert_exception(NodeSetupError) + def setup_build_target(self, build_target_config: BuildTargetConfiguration) -> None: + """ + Perform the build target setup that will be done for each build target + tested on this node. + """ + self._setup_build_target(build_target_config) + + def _setup_build_target( + self, build_target_config: BuildTargetConfiguration + ) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. 
+ """ + + @convert_exception(NodeCleanupError) + def teardown_build_target(self) -> None: + """ + Perform the build target cleanup that will be done after each build target + tested on this node. + """ + self._cleanup_build_target() + + def _cleanup_build_target(self) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. + """ + + @convert_exception(NodeCleanupError) + def cleanup_execution(self) -> None: + """ + Perform the execution cleanup that will be done after each execution + this node is part of concludes. + """ + self._cleanup_execution() + + def _cleanup_execution(self) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. + """ + + def create_session(self, name: str) -> OSSession: + """ + Create and return a new OSSession tailored to the remote OS. + """ + connection = create_session( + self.config, + name, + getLogger(name, node=self.name), + ) + self._other_sessions.append(connection) + return connection + + def close(self) -> None: + """ + Close all connections and free other resources. + """ + if self.main_session: + self.main_session.close() + for session in self._other_sessions: + session.close() + self.logger.logger_exit() diff --git a/dts/framework/testbed_model/node/sut_node.py b/dts/framework/testbed_model/node/sut_node.py new file mode 100644 index 0000000000..79d54585c9 --- /dev/null +++ b/dts/framework/testbed_model/node/sut_node.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. 
+ +from .node import Node + + +class SutNode(Node): + """ + A class for managing connections to the System under Test, providing + methods that retrieve the necessary information about the node (such as + cpu, memory and NIC details) and configuration capabilities. + """ From patchwork Mon Nov 14 16:54:30 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119841 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id AAD5FA00C4; Mon, 14 Nov 2022 17:55:01 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2418942D21; Mon, 14 Nov 2022 17:54:47 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 7714F410F2 for ; Mon, 14 Nov 2022 17:54:45 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 27CA3247934; Mon, 14 Nov 2022 17:54:43 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id QNUJ6HNesYpi; Mon, 14 Nov 2022 17:54:42 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 3081D21C5D3; Mon, 14 Nov 2022 17:54:40 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 02/10] dts: add ssh command verification Date: Mon, 14 Nov 2022 16:54:30 +0000 Message-Id: <20221114165438.1133783-3-juraj.linkes@pantheon.tech> X-Mailer: 
git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org This is a basic capability needed to check whether the command execution was successful or not. If not, raise a RemoteCommandExecutionError. When a failure is expected, the caller is supposed to catch the exception. Signed-off-by: Juraj Linkeš --- dts/framework/exception.py | 21 +++++++ .../remote_session/remote_session.py | 55 +++++++++++++------ dts/framework/remote_session/ssh_session.py | 11 +++- 3 files changed, 67 insertions(+), 20 deletions(-) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index cac8d84416..b282e48198 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -25,6 +25,7 @@ class ReturnCode(IntEnum): NO_ERR = 0 GENERIC_ERR = 1 SSH_ERR = 2 + REMOTE_CMD_EXEC_ERR = 3 NODE_SETUP_ERR = 20 NODE_CLEANUP_ERR = 21 @@ -89,6 +90,26 @@ def __str__(self) -> str: return f"SSH session with {self.host} has died" +class RemoteCommandExecutionError(DTSError): + """ + Raised when a command executed on a Node returns a non-zero exit status. + """ + + command: str + command_return_code: int + return_code: ClassVar[ReturnCode] = ReturnCode.REMOTE_CMD_EXEC_ERR + + def __init__(self, command: str, command_return_code: int) -> None: + self.command = command + self.command_return_code = command_return_code + + def __str__(self) -> str: + return ( + f"Command {self.command} returned a non-zero exit code: " + f"{self.command_return_code}" + ) + + class NodeSetupError(DTSError): """ Raised when setting up a node. 
diff --git a/dts/framework/remote_session/remote_session.py b/dts/framework/remote_session/remote_session.py index 4095e02c1b..fccd80a529 100644 --- a/dts/framework/remote_session/remote_session.py +++ b/dts/framework/remote_session/remote_session.py @@ -7,15 +7,29 @@ from abc import ABC, abstractmethod from framework.config import NodeConfiguration +from framework.exception import RemoteCommandExecutionError from framework.logger import DTSLOG from framework.settings import SETTINGS @dataclasses.dataclass(slots=True, frozen=True) -class HistoryRecord: +class CommandResult: + """ + The result of remote execution of a command. + """ + name: str command: str - output: str | int + stdout: str + stderr: str + return_code: int + + def __str__(self) -> str: + return ( + f"stdout: '{self.stdout}'\n" + f"stderr: '{self.stderr}'\n" + f"return_code: '{self.return_code}'" + ) class RemoteSession(ABC): @@ -35,7 +49,7 @@ class RemoteSession(ABC): username: str password: str logger: DTSLOG - history: list[HistoryRecord] + history: list[CommandResult] _node_config: NodeConfiguration def __init__( @@ -68,28 +82,33 @@ def _connect(self) -> None: Create connection to assigned node. """ - def send_command(self, command: str, timeout: float = SETTINGS.timeout) -> str: + def send_command( + self, command: str, timeout: float = SETTINGS.timeout, verify: bool = False + ) -> CommandResult: """ - Send a command and return the output. + Send a command to the connected node and return CommandResult. + If verify is True, check the return code of the executed command + and raise a RemoteCommandExecutionError if the command failed. 
""" - self.logger.info(f"Sending: {command}") - out = self._send_command(command, timeout) - self.logger.debug(f"Received from {command}: {out}") - self._history_add(command=command, output=out) - return out + self.logger.info(f"Sending: '{command}'") + result = self._send_command(command, timeout) + if verify and result.return_code: + self.logger.debug( + f"Command '{command}' failed with return code '{result.return_code}'" + ) + self.logger.debug(f"stdout: '{result.stdout}'") + self.logger.debug(f"stderr: '{result.stderr}'") + raise RemoteCommandExecutionError(command, result.return_code) + self.logger.debug(f"Received from '{command}':\n{result}") + self.history.append(result) + return result @abstractmethod - def _send_command(self, command: str, timeout: float) -> str: + def _send_command(self, command: str, timeout: float) -> CommandResult: """ - Use the underlying protocol to execute the command and return the output - of the command. + Use the underlying protocol to execute the command and return CommandResult. """ - def _history_add(self, command: str, output: str) -> None: - self.history.append( - HistoryRecord(name=self.name, command=command, output=output) - ) - def close(self, force: bool = False) -> None: """ Close the remote session and free all used resources. 
diff --git a/dts/framework/remote_session/ssh_session.py b/dts/framework/remote_session/ssh_session.py index 5816b1ce6b..fb2f01dbc1 100644 --- a/dts/framework/remote_session/ssh_session.py +++ b/dts/framework/remote_session/ssh_session.py @@ -12,7 +12,7 @@ from framework.logger import DTSLOG from framework.utils import GREEN, RED -from .remote_session import RemoteSession +from .remote_session import CommandResult, RemoteSession class SSHSession(RemoteSession): @@ -163,7 +163,14 @@ def _flush(self) -> None: def is_alive(self) -> bool: return self.session.isalive() - def _send_command(self, command: str, timeout: float) -> str: + def _send_command(self, command: str, timeout: float) -> CommandResult: + output = self._send_command_get_output(command, timeout) + return_code = int(self._send_command_get_output("echo $?", timeout)) + + # we're capturing only stdout + return CommandResult(self.name, command, output, "", return_code) + + def _send_command_get_output(self, command: str, timeout: float) -> str: try: self._clean_session() self._send_line(command) From patchwork Mon Nov 14 16:54:31 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119842 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 96855A00C4; Mon, 14 Nov 2022 17:55:08 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 10FC242D0E; Mon, 14 Nov 2022 17:54:48 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id EAE2542D1D for ; Mon, 14 Nov 2022 17:54:46 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 4136D21C5D3; Mon, 14 Nov 2022 
17:54:46 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id Z6jwhHhyXVIw; Mon, 14 Nov 2022 17:54:44 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 998AF21C5D7; Mon, 14 Nov 2022 17:54:40 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 03/10] dts: add dpdk build on sut Date: Mon, 14 Nov 2022 16:54:31 +0000 Message-Id: <20221114165438.1133783-4-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add the ability to build DPDK and apps, using a configured target. 
Signed-off-by: Juraj Linkeš --- dts/framework/exception.py | 17 +++ dts/framework/remote_session/os/os_session.py | 90 +++++++++++- .../remote_session/os/posix_session.py | 128 +++++++++++++++++ .../remote_session/remote_session.py | 34 ++++- dts/framework/remote_session/ssh_session.py | 64 ++++++++- dts/framework/settings.py | 40 +++++- dts/framework/testbed_model/node/sut_node.py | 131 ++++++++++++++++++ dts/framework/utils.py | 15 ++ 8 files changed, 505 insertions(+), 14 deletions(-) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index b282e48198..93d99432ae 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -26,6 +26,7 @@ class ReturnCode(IntEnum): GENERIC_ERR = 1 SSH_ERR = 2 REMOTE_CMD_EXEC_ERR = 3 + DPDK_BUILD_ERR = 10 NODE_SETUP_ERR = 20 NODE_CLEANUP_ERR = 21 @@ -110,6 +111,22 @@ def __str__(self) -> str: ) +class RemoteDirectoryExistsError(DTSError): + """ + Raised when a remote directory to be created already exists. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.REMOTE_CMD_EXEC_ERR + + +class DPDKBuildError(DTSError): + """ + Raised when DPDK build fails for any reason. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.DPDK_BUILD_ERR + + class NodeSetupError(DTSError): """ Raised when setting up a node. diff --git a/dts/framework/remote_session/os/os_session.py b/dts/framework/remote_session/os/os_session.py index 2a72082628..57e2865282 100644 --- a/dts/framework/remote_session/os/os_session.py +++ b/dts/framework/remote_session/os/os_session.py @@ -2,12 +2,15 @@ # Copyright(c) 2022 PANTHEON.tech s.r.o. 
# Copyright(c) 2022 University of New Hampshire -from abc import ABC +from abc import ABC, abstractmethod +from pathlib import PurePath -from framework.config import NodeConfiguration +from framework.config import Architecture, NodeConfiguration from framework.logger import DTSLOG from framework.remote_session.factory import create_remote_session from framework.remote_session.remote_session import RemoteSession +from framework.settings import SETTINGS +from framework.utils import EnvVarsDict class OSSession(ABC): @@ -44,3 +47,86 @@ def is_alive(self) -> bool: Check whether the remote session is still responding. """ return self.remote_session.is_alive() + + @abstractmethod + def guess_dpdk_remote_dir(self, remote_dir) -> PurePath: + """ + Try to find DPDK remote dir in remote_dir. + """ + + @abstractmethod + def get_remote_tmp_dir(self) -> PurePath: + """ + Get the path of the temporary directory of the remote OS. + """ + + @abstractmethod + def get_dpdk_build_env_vars(self, arch: Architecture) -> dict: + """ + Create extra environment variables needed for the target architecture. Get + information from the node if needed. + """ + + @abstractmethod + def join_remote_path(self, *args: str | PurePath) -> PurePath: + """ + Join path parts using the path separator that fits the remote OS. + """ + + @abstractmethod + def copy_file( + self, + source_file: str | PurePath, + destination_file: str | PurePath, + source_remote: bool = False, + ) -> None: + """ + Copy source_file from local storage to destination_file on the remote Node + associated with the remote session. + If source_remote is True, reverse the direction - copy source_file from the + associated remote Node to destination_file on local storage. + """ + + @abstractmethod + def remove_remote_dir( + self, + remote_dir_path: str | PurePath, + recursive: bool = True, + force: bool = True, + ) -> None: + """ + Remove remote directory, by default remove recursively and forcefully. 
+ """ + + @abstractmethod + def extract_remote_tarball( + self, + remote_tarball_path: str | PurePath, + expected_dir: str | PurePath | None = None, + ) -> None: + """ + Extract remote tarball in place. If expected_dir is a non-empty string, check + whether the dir exists after extracting the archive. + """ + + @abstractmethod + def build_dpdk( + self, + env_vars: EnvVarsDict, + meson_args: str, + remote_dpdk_dir: str | PurePath, + target_name: str, + rebuild: bool = False, + timeout: float = SETTINGS.compile_timeout, + ) -> PurePath: + """ + Build DPDK in the input dir with specified environment variables and meson + arguments. + Return the directory path where DPDK was built. + """ + + @abstractmethod + def get_dpdk_version(self, version_path: str | PurePath) -> str: + """ + Inspect DPDK version on the remote node from version_path. + """ diff --git a/dts/framework/remote_session/os/posix_session.py b/dts/framework/remote_session/os/posix_session.py index 9622a4ea30..a36b8e8c1a 100644 --- a/dts/framework/remote_session/os/posix_session.py +++ b/dts/framework/remote_session/os/posix_session.py @@ -2,6 +2,13 @@ # Copyright(c) 2022 PANTHEON.tech s.r.o. # Copyright(c) 2022 University of New Hampshire +from pathlib import PurePath, PurePosixPath + +from framework.config import Architecture +from framework.exception import DPDKBuildError, RemoteCommandExecutionError +from framework.settings import SETTINGS +from framework.utils import EnvVarsDict + from .os_session import OSSession @@ -10,3 +17,124 @@ class PosixSession(OSSession): An intermediary class implementing the Posix compliant parts of Linux and other OS remote sessions. 
""" + + @staticmethod + def combine_short_options(**opts: [str, bool]) -> str: + ret_opts = "" + for opt, include in opts.items(): + if include: + ret_opts = f"{ret_opts}{opt}" + + if ret_opts: + ret_opts = f" -{ret_opts}" + + return ret_opts + + def guess_dpdk_remote_dir(self, remote_dir) -> PurePosixPath: + remote_guess = self.join_remote_path(remote_dir, "dpdk-*") + result = self.remote_session.send_command(f"ls -d {remote_guess} | tail -1") + return PurePosixPath(result.stdout) + + def get_remote_tmp_dir(self) -> PurePosixPath: + return PurePosixPath("/tmp") + + def get_dpdk_build_env_vars(self, arch: Architecture) -> dict: + """ + Create extra environment variables needed for i686 arch build. Get information + from the node if needed. + """ + env_vars = {} + if arch == Architecture.i686: + # find the pkg-config path and store it in PKG_CONFIG_LIBDIR + out = self.remote_session.send_command("find /usr -type d -name pkgconfig") + pkg_path = "" + res_path = out.stdout.split("\r\n") + for cur_path in res_path: + if "i386" in cur_path: + pkg_path = cur_path + break + assert pkg_path != "", "i386 pkg-config path not found" + + env_vars["CFLAGS"] = "-m32" + env_vars["PKG_CONFIG_LIBDIR"] = pkg_path + + return env_vars + + def join_remote_path(self, *args: str | PurePath) -> PurePosixPath: + return PurePosixPath(*args) + + def copy_file( + self, + source_file: str | PurePath, + destination_file: str | PurePath, + source_remote: bool = False, + ) -> None: + self.remote_session.copy_file(source_file, destination_file, source_remote) + + def remove_remote_dir( + self, + remote_dir_path: str | PurePath, + recursive: bool = True, + force: bool = True, + ) -> None: + opts = PosixSession.combine_short_options(r=recursive, f=force) + self.remote_session.send_command(f"rm{opts} {remote_dir_path}") + + def extract_remote_tarball( + self, + remote_tarball_path: str | PurePath, + expected_dir: str | PurePath | None = None, + ) -> None: + self.remote_session.send_command( + f"tar 
xfm {remote_tarball_path} " + f"-C {PurePosixPath(remote_tarball_path).parent}", + 60, + ) + if expected_dir: + self.remote_session.send_command(f"ls {expected_dir}", verify=True) + + def build_dpdk( + self, + env_vars: EnvVarsDict, + meson_args: str, + remote_dpdk_dir: str | PurePath, + target_name: str, + rebuild: bool = False, + timeout: float = SETTINGS.compile_timeout, + ) -> PurePosixPath: + build_dir = self.join_remote_path(remote_dpdk_dir, target_name) + try: + if rebuild: + # reconfigure, then build + self.logger.info("Reconfiguring DPDK build.") + self.remote_session.send_command( + f"meson configure {meson_args} {build_dir}", + timeout, + verify=True, + env=env_vars, + ) + else: + # fresh build - remove target dir first, then build from scratch + self.logger.info("Configuring DPDK build from scratch.") + self.remove_remote_dir(build_dir) + self.remote_session.send_command( + f"meson {meson_args} {remote_dpdk_dir} {build_dir}", + timeout, + verify=True, + env=env_vars, + ) + + self.logger.info("Building DPDK.") + self.remote_session.send_command( + f"ninja -C {build_dir}", timeout, verify=True, env=env_vars + ) + except RemoteCommandExecutionError as e: + raise DPDKBuildError(f"DPDK build failed when doing '{e.command}'.") + + return build_dir + + def get_dpdk_version(self, build_dir: str | PurePath) -> str: + out = self.remote_session.send_command( + f"cat {self.join_remote_path(build_dir, 'VERSION')}", verify=True + ) + return out.stdout diff --git a/dts/framework/remote_session/remote_session.py b/dts/framework/remote_session/remote_session.py index fccd80a529..f10b1023f8 100644 --- a/dts/framework/remote_session/remote_session.py +++ b/dts/framework/remote_session/remote_session.py @@ -10,6 +10,7 @@ from framework.exception import RemoteCommandExecutionError from framework.logger import DTSLOG from framework.settings import SETTINGS +from framework.utils import EnvVarsDict @dataclasses.dataclass(slots=True, frozen=True) @@ -83,15 +84,22 @@ def 
_connect(self) -> None: """ def send_command( - self, command: str, timeout: float = SETTINGS.timeout, verify: bool = False + self, + command: str, + timeout: float = SETTINGS.timeout, + verify: bool = False, + env: EnvVarsDict | None = None, ) -> CommandResult: """ - Send a command to the connected node and return CommandResult. + Send a command to the connected node using optional env vars + and return CommandResult. If verify is True, check the return code of the executed command and raise a RemoteCommandExecutionError if the command failed. """ - self.logger.info(f"Sending: '{command}'") - result = self._send_command(command, timeout) + self.logger.info( + f"Sending: '{command}'" + (f" with env vars: '{env}'" if env else "") + ) + result = self._send_command(command, timeout, env) if verify and result.return_code: self.logger.debug( f"Command '{command}' failed with return code '{result.return_code}'" @@ -104,9 +112,12 @@ def send_command( return result @abstractmethod - def _send_command(self, command: str, timeout: float) -> CommandResult: + def _send_command( + self, command: str, timeout: float, env: EnvVarsDict | None + ) -> CommandResult: """ - Use the underlying protocol to execute the command and return CommandResult. + Use the underlying protocol to execute the command using optional env vars + and return CommandResult. """ def close(self, force: bool = False) -> None: @@ -127,3 +138,14 @@ def is_alive(self) -> bool: """ Check whether the remote session is still responding. """ + + @abstractmethod + def copy_file( + self, source_file: str, destination_file: str, source_remote: bool = False + ) -> None: + """ + Copy source_file from local storage to destination_file on the remote Node + associated with the remote session. + If source_remote is True, reverse the direction - copy source_file from the + associated Node to destination_file on local storage. 
+ """ diff --git a/dts/framework/remote_session/ssh_session.py b/dts/framework/remote_session/ssh_session.py index fb2f01dbc1..d4a6714e6b 100644 --- a/dts/framework/remote_session/ssh_session.py +++ b/dts/framework/remote_session/ssh_session.py @@ -5,12 +5,13 @@ import time +import pexpect # type: ignore from pexpect import pxssh # type: ignore from framework.config import NodeConfiguration from framework.exception import SSHConnectionError, SSHSessionDeadError, SSHTimeoutError from framework.logger import DTSLOG -from framework.utils import GREEN, RED +from framework.utils import GREEN, RED, EnvVarsDict from .remote_session import CommandResult, RemoteSession @@ -163,16 +164,22 @@ def _flush(self) -> None: def is_alive(self) -> bool: return self.session.isalive() - def _send_command(self, command: str, timeout: float) -> CommandResult: - output = self._send_command_get_output(command, timeout) - return_code = int(self._send_command_get_output("echo $?", timeout)) + def _send_command( + self, command: str, timeout: float, env: EnvVarsDict | None + ) -> CommandResult: + output = self._send_command_get_output(command, timeout, env) + return_code = int(self._send_command_get_output("echo $?", timeout, None)) # we're capturing only stdout return CommandResult(self.name, command, output, "", return_code) - def _send_command_get_output(self, command: str, timeout: float) -> str: + def _send_command_get_output( + self, command: str, timeout: float, env: EnvVarsDict | None + ) -> str: try: self._clean_session() + if env: + command = f"{env} {command}" self._send_line(command) except Exception as e: raise e @@ -189,3 +196,50 @@ def _close(self, force: bool = False) -> None: else: if self.is_alive(): self.session.logout() + + def copy_file( + self, source_file: str, destination_file: str, source_remote: bool = False + ) -> None: + """ + Send a local file to a remote host. 
+ """ + if source_remote: + source_file = f"{self.username}@{self.ip}:{source_file}" + else: + destination_file = f"{self.username}@{self.ip}:{destination_file}" + + port = "" + if self.port: + port = f" -P {self.port}" + + # this is not OS agnostic, find a Pythonic (and thus OS agnostic) way + # TODO Fabric should handle this + command = ( + f"scp -v{port} -o NoHostAuthenticationForLocalhost=yes" + f" {source_file} {destination_file}" + ) + + self._spawn_scp(command) + + def _spawn_scp(self, scp_cmd: str) -> None: + """ + Transfer a file with SCP + """ + self.logger.info(scp_cmd) + p: pexpect.spawn = pexpect.spawn(scp_cmd) + time.sleep(0.5) + ssh_newkey: str = "Are you sure you want to continue connecting" + i: int = p.expect( + [ssh_newkey, "[pP]assword", "# ", pexpect.EOF, pexpect.TIMEOUT], 120 + ) + if i == 0: # add once in trust list + p.sendline("yes") + i = p.expect([ssh_newkey, "[pP]assword", pexpect.EOF], 2) + + if i == 1: + time.sleep(0.5) + p.sendline(self.password) + p.expect("Exit status 0", 60) + if i == 4: + self.logger.error("SCP TIMEOUT error %d" % i) + p.close() diff --git a/dts/framework/settings.py b/dts/framework/settings.py index 800f2c7b7f..e2bf3d2ce4 100644 --- a/dts/framework/settings.py +++ b/dts/framework/settings.py @@ -7,6 +7,7 @@ import os from collections.abc import Callable, Iterable, Sequence from dataclasses import dataclass +from pathlib import Path from typing import Any, TypeVar _T = TypeVar("_T") @@ -60,6 +61,9 @@ class _Settings: output_dir: str timeout: float verbose: bool + skip_setup: bool + dpdk_ref: Path + compile_timeout: float def _get_parser() -> argparse.ArgumentParser: @@ -88,6 +92,7 @@ def _get_parser() -> argparse.ArgumentParser: "--timeout", action=_env_arg("DTS_TIMEOUT"), default=15, + type=float, required=False, help="[DTS_TIMEOUT] The default timeout for all DTS operations except for " "compiling DPDK.", @@ -103,6 +108,36 @@ def _get_parser() -> argparse.ArgumentParser: "to the console.", ) + 
parser.add_argument( + "-s", + "--skip-setup", + action=_env_arg("DTS_SKIP_SETUP"), + required=False, + help="[DTS_SKIP_SETUP] Set to 'Y' to skip all setup steps on SUT and TG nodes.", + ) + + parser.add_argument( + "--dpdk-ref", + "--git", + "--snapshot", + action=_env_arg("DTS_DPDK_REF"), + default="dpdk.tar.xz", + type=Path, + required=False, + help="[DTS_DPDK_REF] Reference to DPDK source code, " + "can be either a path to a tarball or a git refspec. " + "In case of a tarball, it will be extracted in the same directory.", + ) + + parser.add_argument( + "--compile-timeout", + action=_env_arg("DTS_COMPILE_TIMEOUT"), + default=1200, + type=float, + required=False, + help="[DTS_COMPILE_TIMEOUT] The timeout for compiling DPDK.", + ) + return parser @@ -111,8 +146,11 @@ def _get_settings() -> _Settings: return _Settings( config_file_path=parsed_args.config_file, output_dir=parsed_args.output_dir, - timeout=float(parsed_args.timeout), + timeout=parsed_args.timeout, verbose=(parsed_args.verbose == "Y"), + skip_setup=(parsed_args.skip_setup == "Y"), + dpdk_ref=parsed_args.dpdk_ref, + compile_timeout=parsed_args.compile_timeout, ) diff --git a/dts/framework/testbed_model/node/sut_node.py b/dts/framework/testbed_model/node/sut_node.py index 79d54585c9..53268a7565 100644 --- a/dts/framework/testbed_model/node/sut_node.py +++ b/dts/framework/testbed_model/node/sut_node.py @@ -2,6 +2,14 @@ # Copyright(c) 2010-2014 Intel Corporation # Copyright(c) 2022 PANTHEON.tech s.r.o. +import os +import tarfile +from pathlib import PurePath + +from framework.config import BuildTargetConfiguration, NodeConfiguration +from framework.settings import SETTINGS +from framework.utils import EnvVarsDict, skip_setup + from .node import Node @@ -10,4 +18,127 @@ class SutNode(Node): A class for managing connections to the System under Test, providing methods that retrieve the necessary information about the node (such as cpu, memory and NIC details) and configuration capabilities. 
+ Another key capability is building DPDK according to given build target. """ + + _build_target_config: BuildTargetConfiguration | None + _env_vars: EnvVarsDict + _remote_tmp_dir: PurePath + __remote_dpdk_dir: PurePath | None + _app_compile_timeout: float + + def __init__(self, node_config: NodeConfiguration): + super(SutNode, self).__init__(node_config) + self._build_target_config = None + self._env_vars = EnvVarsDict() + self._remote_tmp_dir = self.main_session.get_remote_tmp_dir() + self.__remote_dpdk_dir = None + self._app_compile_timeout = 90 + + @property + def _remote_dpdk_dir(self) -> PurePath: + if self.__remote_dpdk_dir is None: + self.__remote_dpdk_dir = self._guess_dpdk_remote_dir() + return self.__remote_dpdk_dir + + @_remote_dpdk_dir.setter + def _remote_dpdk_dir(self, value: PurePath) -> None: + self.__remote_dpdk_dir = value + + def _guess_dpdk_remote_dir(self) -> PurePath: + return self.main_session.guess_dpdk_remote_dir(self._remote_tmp_dir) + + def _setup_build_target( + self, build_target_config: BuildTargetConfiguration + ) -> None: + """ + Setup DPDK on the SUT node. + """ + self._configure_build_target(build_target_config) + self._copy_dpdk_tarball() + self._build_dpdk() + + def _configure_build_target( + self, build_target_config: BuildTargetConfiguration + ) -> None: + """ + Populate common environment variables and set build target config. + """ + self._build_target_config = build_target_config + self._env_vars.update( + self.main_session.get_dpdk_build_env_vars(build_target_config.arch) + ) + self._env_vars["CC"] = build_target_config.compiler.name + + @skip_setup + def _copy_dpdk_tarball(self) -> None: + """ + Copy to and extract DPDK tarball on the SUT node. + """ + # check local path + assert SETTINGS.dpdk_ref.exists(), f"Package {SETTINGS.dpdk_ref} doesn't exist." 
+ + self.logger.info("Copying DPDK tarball to SUT.") + self.main_session.copy_file(SETTINGS.dpdk_ref, self._remote_tmp_dir) + + # construct remote tarball path + # the basename is the same on local host and on remote Node + remote_tarball_path = self.main_session.join_remote_path( + self._remote_tmp_dir, os.path.basename(SETTINGS.dpdk_ref) + ) + + # construct remote path after extracting + with tarfile.open(SETTINGS.dpdk_ref) as dpdk_tar: + dpdk_top_dir = dpdk_tar.getnames()[0] + self._remote_dpdk_dir = self.main_session.join_remote_path( + self._remote_tmp_dir, dpdk_top_dir + ) + + self.logger.info("Extracting DPDK tarball on SUT.") + # clean remote path where we're extracting + self.main_session.remove_remote_dir(self._remote_dpdk_dir) + + # then extract to remote path + self.main_session.extract_remote_tarball( + remote_tarball_path, self._remote_dpdk_dir + ) + + @skip_setup + def _build_dpdk(self) -> None: + """ + Build DPDK. Uses the already configured target. Assumes that the tarball has + already been copied to and extracted on the SUT node. + """ + meson_args = "-Denable_kmods=True -Dlibdir=lib --default-library=static" + self.main_session.build_dpdk( + self._env_vars, + meson_args, + self._remote_dpdk_dir, + self._build_target_config.name if self._build_target_config else "build", + ) + self.logger.info( + f"DPDK version: {self.main_session.get_dpdk_version(self._remote_dpdk_dir)}" + ) + + def build_dpdk_app(self, app_name: str) -> PurePath: + """ + Build one or all DPDK apps. Requires DPDK to be already built on the SUT node. + When app_name is 'all', build all example apps. + When app_name is any other string, tries to build that example app. + Return the directory path of the built app. If building all apps, return + the path to the examples directory (where all apps reside). 
+ """ + meson_args = f"-Dexamples={app_name}" + build_dir = self.main_session.build_dpdk( + self._env_vars, + meson_args, + self._remote_dpdk_dir, + self._build_target_config.name if self._build_target_config else "build", + rebuild=True, + timeout=self._app_compile_timeout, + ) + if app_name == "all": + return self.main_session.join_remote_path(build_dir, "examples") + return self.main_session.join_remote_path( + build_dir, "examples", f"dpdk-{app_name}" + ) diff --git a/dts/framework/utils.py b/dts/framework/utils.py index c28c8f1082..91e58f3218 100644 --- a/dts/framework/utils.py +++ b/dts/framework/utils.py @@ -4,6 +4,9 @@ # Copyright(c) 2022 University of New Hampshire import sys +from typing import Callable + +from framework.settings import SETTINGS def check_dts_python_version() -> None: @@ -22,9 +25,21 @@ def check_dts_python_version() -> None: print(RED("Please use Python >= 3.10 instead"), file=sys.stderr) +def skip_setup(func) -> Callable[..., None]: + if SETTINGS.skip_setup: + return lambda *args: None + else: + return func + + def GREEN(text: str) -> str: return f"\u001B[32;1m{str(text)}\u001B[0m" def RED(text: str) -> str: return f"\u001B[31;1m{str(text)}\u001B[0m" + + +class EnvVarsDict(dict): + def __str__(self) -> str: + return " ".join(["=".join(item) for item in self.items()]) From patchwork Mon Nov 14 16:54:32 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119843 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 621CFA00C4; Mon, 14 Nov 2022 17:55:18 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A592142D14; Mon, 14 Nov 2022 17:54:51 +0100 (CET) Received: from lb.pantheon.sk 
(lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 37D3C42D28 for ; Mon, 14 Nov 2022 17:54:48 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 884B021C5D7; Mon, 14 Nov 2022 17:54:47 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id bhi-u_cs7KDH; Mon, 14 Nov 2022 17:54:45 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 1C71221C5DC; Mon, 14 Nov 2022 17:54:41 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 04/10] dts: add dpdk execution handling Date: Mon, 14 Nov 2022 16:54:32 +0000 Message-Id: <20221114165438.1133783-5-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add methods for setting up and shutting down DPDK apps and for constructing EAL parameters. 
Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 4 + dts/framework/config/__init__.py | 85 ++++++++- dts/framework/config/conf_yaml_schema.json | 22 +++ .../remote_session/os/linux_session.py | 15 ++ dts/framework/remote_session/os/os_session.py | 16 +- .../remote_session/os/posix_session.py | 80 ++++++++ dts/framework/testbed_model/hw/__init__.py | 17 ++ dts/framework/testbed_model/hw/cpu.py | 164 ++++++++++++++++ dts/framework/testbed_model/node/node.py | 36 ++++ dts/framework/testbed_model/node/sut_node.py | 178 +++++++++++++++++- dts/framework/utils.py | 20 ++ 11 files changed, 634 insertions(+), 3 deletions(-) create mode 100644 dts/framework/testbed_model/hw/__init__.py create mode 100644 dts/framework/testbed_model/hw/cpu.py diff --git a/dts/conf.yaml b/dts/conf.yaml index 6b0bc5c2bf..976888a88e 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -12,4 +12,8 @@ nodes: - name: "SUT 1" hostname: sut1.change.me.localhost user: root + arch: x86_64 os: linux + bypass_core0: true + cpus: "" + memory_channels: 4 diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index 1b97dc3ab9..344d697a69 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -11,12 +11,13 @@ import pathlib from dataclasses import dataclass from enum import Enum, auto, unique -from typing import Any +from typing import Any, Iterable import warlock # type: ignore import yaml from framework.settings import SETTINGS +from framework.utils import expand_range class StrEnum(Enum): @@ -60,6 +61,80 @@ class Compiler(StrEnum): msvc = auto() +@dataclass(slots=True, frozen=True) +class CPU: + cpu: int + core: int + socket: int + node: int + + def __str__(self) -> str: + return str(self.cpu) + + +class CPUList(object): + """ + Convert these options into a list of int cpus + cpu_list=[CPU1, CPU2] - a list of CPUs + cpu_list=[0,1,2,3] - a list of int indices + cpu_list=['0','1','2-3'] - a list of str indices; ranges are supported + cpu_list='0,1,2-3' 
- a comma delimited str of indices; ranges are supported + + The class creates a unified format used across the framework and allows + the user to use either a str representation (using str(instance) or directly + in f-strings) or a list representation (by accessing instance.cpu_list). + Empty cpu_list is allowed. + """ + + _cpu_list: list[int] + + def __init__(self, cpu_list: list[int | str | CPU] | str): + self._cpu_list = [] + if isinstance(cpu_list, str): + self._from_str(cpu_list.split(",")) + else: + self._from_str((str(cpu) for cpu in cpu_list)) + + # the input cpus may not be sorted + self._cpu_list.sort() + + @property + def cpu_list(self) -> list[int]: + return self._cpu_list + + def _from_str(self, cpu_list: Iterable[str]) -> None: + for cpu in cpu_list: + self._cpu_list.extend(expand_range(cpu)) + + def _get_consecutive_cpus_range(self, cpu_list: list[int]) -> list[str]: + formatted_core_list = [] + tmp_cpus_list = list(sorted(cpu_list)) + segment = tmp_cpus_list[:1] + for core_id in tmp_cpus_list[1:]: + if core_id - segment[-1] == 1: + segment.append(core_id) + else: + formatted_core_list.append( + f"{segment[0]}-{segment[-1]}" + if len(segment) > 1 + else f"{segment[0]}" + ) + current_core_index = tmp_cpus_list.index(core_id) + formatted_core_list.extend( + self._get_consecutive_cpus_range(tmp_cpus_list[current_core_index:]) + ) + segment.clear() + break + if len(segment) > 0: + formatted_core_list.append( + f"{segment[0]}-{segment[-1]}" if len(segment) > 1 else f"{segment[0]}" + ) + return formatted_core_list + + def __str__(self) -> str: + return f'{",".join(self._get_consecutive_cpus_range(self._cpu_list))}' + + # Slots enables some optimizations, by pre-allocating space for the defined # attributes in the underlying data structure. 
# @@ -71,7 +146,11 @@ class NodeConfiguration: hostname: str user: str password: str | None + arch: Architecture os: OS + bypass_core0: bool + cpus: CPUList + memory_channels: int @staticmethod def from_dict(d: dict) -> "NodeConfiguration": @@ -80,7 +159,11 @@ def from_dict(d: dict) -> "NodeConfiguration": hostname=d["hostname"], user=d["user"], password=d.get("password"), + arch=Architecture(d["arch"]), os=OS(d["os"]), + bypass_core0=d.get("bypass_core0", False), + cpus=CPUList(d.get("cpus", "1")), + memory_channels=d.get("memory_channels", 1), ) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index 409ce7ac74..c59d3e30e6 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -6,6 +6,12 @@ "type": "string", "description": "A unique identifier for a node" }, + "ARCH": { + "type": "string", + "enum": [ + "x86_64" + ] + }, "OS": { "type": "string", "enum": [ @@ -82,8 +88,23 @@ "type": "string", "description": "The password to use on this node. Use only as a last resort. SSH keys are STRONGLY preferred." }, + "arch": { + "$ref": "#/definitions/ARCH" + }, "os": { "$ref": "#/definitions/OS" + }, + "bypass_core0": { + "type": "boolean", + "description": "Indicate that DPDK should omit using the first core." + }, + "cpus": { + "type": "string", + "description": "Optional comma-separated list of cpus to use, e.g.: 1,2,3,4,5,18-22. Defaults to 1. An empty string means use all cpus." + }, + "memory_channels": { + "type": "integer", + "description": "How many memory channels to use. Optional, defaults to 1." 
} }, "additionalProperties": false, @@ -91,6 +112,7 @@ "name", "hostname", "user", + "arch", "os" ] }, diff --git a/dts/framework/remote_session/os/linux_session.py b/dts/framework/remote_session/os/linux_session.py index 39e80631dd..21f117b714 100644 --- a/dts/framework/remote_session/os/linux_session.py +++ b/dts/framework/remote_session/os/linux_session.py @@ -2,6 +2,8 @@ # Copyright(c) 2022 PANTHEON.tech s.r.o. # Copyright(c) 2022 University of New Hampshire +from framework.config import CPU + from .posix_session import PosixSession @@ -9,3 +11,16 @@ class LinuxSession(PosixSession): """ The implementation of non-Posix compliant parts of Linux remote sessions. """ + + def get_remote_cpus(self, bypass_core0: bool) -> list[CPU]: + cpu_info = self.remote_session.send_command( + "lscpu -p=CPU,CORE,SOCKET,NODE|grep -v \\#" + ).stdout + cpus = [] + for cpu_line in cpu_info.splitlines(): + cpu, core, socket, node = cpu_line.split(",") + if bypass_core0 and core == 0 and socket == 0: + self.logger.info("Core0 bypassed.") + continue + cpus.append(CPU(int(cpu), int(core), int(socket), int(node))) + return cpus diff --git a/dts/framework/remote_session/os/os_session.py b/dts/framework/remote_session/os/os_session.py index 57e2865282..6f6b6a979e 100644 --- a/dts/framework/remote_session/os/os_session.py +++ b/dts/framework/remote_session/os/os_session.py @@ -3,9 +3,10 @@ # Copyright(c) 2022 University of New Hampshire from abc import ABC, abstractmethod +from collections.abc import Iterable from pathlib import PurePath -from framework.config import Architecture, NodeConfiguration +from framework.config import CPU, Architecture, NodeConfiguration from framework.logger import DTSLOG from framework.remote_session.factory import create_remote_session from framework.remote_session.remote_session import RemoteSession @@ -130,3 +131,16 @@ def get_dpdk_version(self, version_path: str | PurePath) -> str: """ Inspect DPDK version on the remote node from version_path. 
""" + + @abstractmethod + def get_remote_cpus(self, bypass_core0: bool) -> list[CPU]: + """ + Compose a list of CPUs present on the remote node. + """ + + @abstractmethod + def kill_cleanup_dpdk_apps(self, dpdk_prefix_list: Iterable[str]) -> None: + """ + Kill and cleanup all DPDK apps identified by dpdk_prefix_list. If + dpdk_prefix_list is empty, attempt to find running DPDK apps to kill and clean. + """ diff --git a/dts/framework/remote_session/os/posix_session.py b/dts/framework/remote_session/os/posix_session.py index a36b8e8c1a..7151263c7a 100644 --- a/dts/framework/remote_session/os/posix_session.py +++ b/dts/framework/remote_session/os/posix_session.py @@ -2,6 +2,8 @@ # Copyright(c) 2022 PANTHEON.tech s.r.o. # Copyright(c) 2022 University of New Hampshire +import re +from collections.abc import Iterable from pathlib import PurePath, PurePosixPath from framework.config import Architecture @@ -138,3 +140,81 @@ def get_dpdk_version(self, build_dir: str | PurePath) -> str: f"cat {self.join_remote_path(build_dir, 'VERSION')}", verify=True ) return out.stdout + + def kill_cleanup_dpdk_apps(self, dpdk_prefix_list: Iterable[str]) -> None: + self.logger.info("Cleaning up DPDK apps.") + dpdk_runtime_dirs = self._get_dpdk_runtime_dirs(dpdk_prefix_list) + if dpdk_runtime_dirs: + # kill and cleanup only if DPDK is running + dpdk_pids = self._get_dpdk_pids(dpdk_runtime_dirs) + for dpdk_pid in dpdk_pids: + self.remote_session.send_command(f"kill -9 {dpdk_pid}", 20) + self._check_dpdk_hugepages(dpdk_runtime_dirs) + self._remove_dpdk_runtime_dirs(dpdk_runtime_dirs) + + def _get_dpdk_runtime_dirs( + self, dpdk_prefix_list: Iterable[str] + ) -> list[PurePosixPath]: + prefix = PurePosixPath("/var", "run", "dpdk") + if not dpdk_prefix_list: + remote_prefixes = self._list_remote_dirs(prefix) + if not remote_prefixes: + dpdk_prefix_list = [] + else: + dpdk_prefix_list = remote_prefixes + + return [PurePosixPath(prefix, dpdk_prefix) for dpdk_prefix in dpdk_prefix_list] + + def 
_list_remote_dirs(self, remote_path: str | PurePath) -> list[str] | None: + """ + Return a list of directories of the remote_dir. + If remote_path doesn't exist, return None. + """ + out = self.remote_session.send_command( + f"ls -l {remote_path} | awk '/^d/ {{print $NF}}'" + ).stdout + if "No such file or directory" in out: + return None + else: + return out.splitlines() + + def _get_dpdk_pids(self, dpdk_runtime_dirs: Iterable[str | PurePath]) -> list[int]: + pids = [] + pid_regex = r"p(\d+)" + for dpdk_runtime_dir in dpdk_runtime_dirs: + dpdk_config_file = PurePosixPath(dpdk_runtime_dir, "config") + if self._remote_files_exists(dpdk_config_file): + out = self.remote_session.send_command( + f"lsof -Fp {dpdk_config_file}" + ).stdout + if out and "No such file or directory" not in out: + for out_line in out.splitlines(): + match = re.match(pid_regex, out_line) + if match: + pids.append(int(match.group(1))) + return pids + + def _remote_files_exists(self, remote_path: PurePath) -> bool: + result = self.remote_session.send_command(f"test -e {remote_path}") + return not result.return_code + + def _check_dpdk_hugepages( + self, dpdk_runtime_dirs: Iterable[str | PurePath] + ) -> None: + for dpdk_runtime_dir in dpdk_runtime_dirs: + hugepage_info = PurePosixPath(dpdk_runtime_dir, "hugepage_info") + if self._remote_files_exists(hugepage_info): + out = self.remote_session.send_command( + f"lsof -Fp {hugepage_info}" + ).stdout + if out and "No such file or directory" not in out: + self.logger.warning("Some DPDK processes did not free hugepages.") + self.logger.warning("*******************************************") + self.logger.warning(out) + self.logger.warning("*******************************************") + + def _remove_dpdk_runtime_dirs( + self, dpdk_runtime_dirs: Iterable[str | PurePath] + ) -> None: + for dpdk_runtime_dir in dpdk_runtime_dirs: + self.remove_remote_dir(dpdk_runtime_dir) diff --git a/dts/framework/testbed_model/hw/__init__.py 
b/dts/framework/testbed_model/hw/__init__.py new file mode 100644 index 0000000000..7d79a7efd0 --- /dev/null +++ b/dts/framework/testbed_model/hw/__init__.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. + +from framework.config import CPU, CPUList + +from .cpu import CPUAmount, CPUAmountFilter, CPUFilter, CPUListFilter + + +def cpu_filter( + core_list: list[CPU], filter_specifier: CPUAmount | CPUList, ascending: bool +) -> CPUFilter: + if isinstance(filter_specifier, CPUList): + return CPUListFilter(core_list, filter_specifier, ascending) + elif isinstance(filter_specifier, CPUAmount): + return CPUAmountFilter(core_list, filter_specifier, ascending) + else: + raise ValueError(f"Unsupported filter r{filter_specifier}") diff --git a/dts/framework/testbed_model/hw/cpu.py b/dts/framework/testbed_model/hw/cpu.py new file mode 100644 index 0000000000..87e87bcb4e --- /dev/null +++ b/dts/framework/testbed_model/hw/cpu.py @@ -0,0 +1,164 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2022 PANTHEON.tech s.r.o. + +import dataclasses +from abc import ABC, abstractmethod +from collections.abc import Iterable + +from framework.config import CPU, CPUList + + +@dataclasses.dataclass(slots=True, frozen=True) +class CPUAmount: + """ + Define the amounts of cpus to use. If sockets is not None, socket_amount + is ignored. + """ + + cpus_per_core: int = 1 + cores_per_socket: int = 2 + socket_amount: int = 1 + sockets: list[int] | None = None + + +class CPUFilter(ABC): + """ + Filter according to the input filter specifier. Each filter needs to be + implemented in a derived class. + This class only implements operations common to all filters, such as sorting + the list to be filtered beforehand. 
+ """ + + _filter_specifier: CPUAmount | CPUList + _cpus_to_filter: list[CPU] + + def __init__( + self, + core_list: list[CPU], + filter_specifier: CPUAmount | CPUList, + ascending: bool = True, + ) -> None: + self._filter_specifier = filter_specifier + + # sorting by core is needed in case hyperthreading is enabled + self._cpus_to_filter = sorted( + core_list, key=lambda x: x.core, reverse=not ascending + ) + self.filter() + + @abstractmethod + def filter(self) -> list[CPU]: + """ + Use the input self._filter_specifier to filter self._cpus_to_filter + and return the list of filtered CPUs. self._cpus_to_filter is a + sorter copy of the original list, so it may be modified. + """ + + +class CPUAmountFilter(CPUFilter): + """ + Filter the input list of CPUs according to specified rules: + Use cores from the specified amount of sockets or from the specified socket ids. + If sockets is specified, it takes precedence over socket_amount. + From each of those sockets, use only cores_per_socket of cores. + And for each core, use cpus_per_core of cpus. Hypertheading + must be enabled for this to take effect. + If ascending is True, use cores with the lowest numerical id first + and continue in ascending order. If False, start with the highest + id and continue in descending order. This ordering affects which + sockets to consider first as well. 
+ """ + + _filter_specifier: CPUAmount + + def filter(self) -> list[CPU]: + return self._filter_cpus(self._filter_sockets(self._cpus_to_filter)) + + def _filter_sockets(self, cpus_to_filter: Iterable[CPU]) -> list[CPU]: + allowed_sockets: set[int] = set() + socket_amount = self._filter_specifier.socket_amount + if self._filter_specifier.sockets: + socket_amount = len(self._filter_specifier.sockets) + allowed_sockets = set(self._filter_specifier.sockets) + + filtered_cpus = [] + for cpu in cpus_to_filter: + if not self._filter_specifier.sockets: + if len(allowed_sockets) < socket_amount: + allowed_sockets.add(cpu.socket) + if cpu.socket in allowed_sockets: + filtered_cpus.append(cpu) + + if len(allowed_sockets) < socket_amount: + raise ValueError( + f"The amount of sockets from which to use cores " + f"({socket_amount}) exceeds the actual amount present " + f"on the node ({len(allowed_sockets)})" + ) + + return filtered_cpus + + def _filter_cpus(self, cpus_to_filter: Iterable[CPU]) -> list[CPU]: + # no need to use ordered dict, from Python3.7 the dict + # insertion order is preserved (LIFO). + allowed_cpu_per_core_count_map: dict[int, int] = {} + filtered_cpus = [] + for cpu in cpus_to_filter: + if cpu.core in allowed_cpu_per_core_count_map: + cpu_count = allowed_cpu_per_core_count_map[cpu.core] + if self._filter_specifier.cpus_per_core > cpu_count: + # only add cpus of the given core + allowed_cpu_per_core_count_map[cpu.core] += 1 + filtered_cpus.append(cpu) + else: + raise ValueError( + f"The amount of CPUs per core to use " + f"({self._filter_specifier.cpus_per_core}) " + f"exceeds the actual amount present. Is hyperthreading enabled?" 
+ ) + elif self._filter_specifier.cores_per_socket > len( + allowed_cpu_per_core_count_map + ): + # only add cpus if we need more + allowed_cpu_per_core_count_map[cpu.core] = 1 + filtered_cpus.append(cpu) + else: + # cpus are sorted by core, at this point we won't encounter new cores + break + + cores_per_socket = len(allowed_cpu_per_core_count_map) + if cores_per_socket < self._filter_specifier.cores_per_socket: + raise ValueError( + f"The amount of cores per socket to use " + f"({self._filter_specifier.cores_per_socket}) " + f"exceeds the actual amount present ({cores_per_socket})" + ) + + return filtered_cpus + + +class CPUListFilter(CPUFilter): + """ + Filter the input list of CPUs according to the input list of + core indices. + An empty CPUList won't filter anything. + """ + + _filter_specifier: CPUList + + def filter(self) -> list[CPU]: + if not len(self._filter_specifier.cpu_list): + return self._cpus_to_filter + + filtered_cpus = [] + for core in self._cpus_to_filter: + if core.cpu in self._filter_specifier.cpu_list: + filtered_cpus.append(core) + + if len(filtered_cpus) != len(self._filter_specifier.cpu_list): + raise ValueError( + f"Not all cpus from {self._filter_specifier.cpu_list} were found" + f"among {self._cpus_to_filter}" + ) + + return filtered_cpus diff --git a/dts/framework/testbed_model/node/node.py b/dts/framework/testbed_model/node/node.py index 86654e55ae..5ee7023335 100644 --- a/dts/framework/testbed_model/node/node.py +++ b/dts/framework/testbed_model/node/node.py @@ -8,13 +8,16 @@ """ from framework.config import ( + CPU, BuildTargetConfiguration, + CPUList, ExecutionConfiguration, NodeConfiguration, ) from framework.exception import NodeCleanupError, NodeSetupError, convert_exception from framework.logger import DTSLOG, getLogger from framework.remote_session import OSSession, create_session +from framework.testbed_model.hw import CPUAmount, cpu_filter class Node(object): @@ -28,6 +31,7 @@ class Node(object): main_session: OSSession 
logger: DTSLOG config: NodeConfiguration + cpus: list[CPU] _other_sessions: list[OSSession] def __init__(self, node_config: NodeConfiguration): @@ -38,6 +42,7 @@ def __init__(self, node_config: NodeConfiguration): self.logger = getLogger(self.name) self.logger.info(f"Created node: {self.name}") self.main_session = create_session(self.config, self.name, self.logger) + self._get_remote_cpus() @convert_exception(NodeSetupError) def setup_execution(self, execution_config: ExecutionConfiguration) -> None: @@ -109,6 +114,37 @@ def create_session(self, name: str) -> OSSession: self._other_sessions.append(connection) return connection + def filter_cpus( + self, + filter_specifier: CPUAmount | CPUList, + ascending: bool = True, + ) -> list[CPU]: + """ + Filter the logical cpus found on the Node according to specified rules: + Use cores from the specified amount of sockets or from the specified + socket ids. If sockets is specified, it takes precedence over socket_amount. + From each of those sockets, use only cpus_per_socket of cores. + And for each core, use cpus_per_core of cpus. Hypertheading + must be enabled for this to take effect. + If ascending is True, use cores with the lowest numerical id first + and continue in ascending order. If False, start with the highest + id and continue in descending order. This ordering affects which + sockets to consider first as well. + """ + self.logger.info("Filtering ") + return cpu_filter( + self.cpus, + filter_specifier, + ascending, + ).filter() + + def _get_remote_cpus(self) -> None: + """ + Scan cpus in the remote OS and store a list of CPUs. + """ + self.logger.info("Getting CPU information.") + self.cpus = self.main_session.get_remote_cpus(self.config.bypass_core0) + def close(self) -> None: """ Close all connections and free other resources. 
diff --git a/dts/framework/testbed_model/node/sut_node.py b/dts/framework/testbed_model/node/sut_node.py index 53268a7565..ff3be845b4 100644 --- a/dts/framework/testbed_model/node/sut_node.py +++ b/dts/framework/testbed_model/node/sut_node.py @@ -4,10 +4,13 @@ import os import tarfile +import time from pathlib import PurePath -from framework.config import BuildTargetConfiguration, NodeConfiguration +from framework.config import CPU, BuildTargetConfiguration, CPUList, NodeConfiguration +from framework.remote_session import OSSession from framework.settings import SETTINGS +from framework.testbed_model.hw import CPUAmount, CPUListFilter from framework.utils import EnvVarsDict, skip_setup from .node import Node @@ -21,19 +24,31 @@ class SutNode(Node): Another key capability is building DPDK according to given build target. """ + cpus: list[CPU] + dpdk_prefix_list: list[str] + dpdk_prefix_subfix: str _build_target_config: BuildTargetConfiguration | None _env_vars: EnvVarsDict _remote_tmp_dir: PurePath __remote_dpdk_dir: PurePath | None _app_compile_timeout: float + _dpdk_kill_session: OSSession | None def __init__(self, node_config: NodeConfiguration): super(SutNode, self).__init__(node_config) + self.dpdk_prefix_list = [] self._build_target_config = None self._env_vars = EnvVarsDict() self._remote_tmp_dir = self.main_session.get_remote_tmp_dir() self.__remote_dpdk_dir = None self._app_compile_timeout = 90 + self._dpdk_kill_session = None + + # filter the node cpus according to user config + self.cpus = CPUListFilter(self.cpus, self.config.cpus).filter() + self.dpdk_prefix_subfix = ( + f"{str(os.getpid())}_{time.strftime('%Y%m%d%H%M%S', time.localtime())}" + ) @property def _remote_dpdk_dir(self) -> PurePath: @@ -142,3 +157,164 @@ def build_dpdk_app(self, app_name: str) -> PurePath: return self.main_session.join_remote_path( build_dir, "examples", f"dpdk-{app_name}" ) + + def kill_cleanup_dpdk_apps(self) -> None: + """ + Kill all dpdk applications on the SUT. 
Cleanup hugepages. + """ + if self._dpdk_kill_session and self._dpdk_kill_session.is_alive(): + # we can use the session if it exists and responds + self._dpdk_kill_session.kill_cleanup_dpdk_apps(self.dpdk_prefix_list) + else: + # otherwise, we need to (re)create it + self._dpdk_kill_session = self.create_session("dpdk_kill") + self.dpdk_prefix_list = [] + + def create_eal_parameters( + self, + fixed_prefix: bool = False, + core_filter_specifier: CPUAmount | CPUList = CPUAmount(), + ascending_cores: bool = True, + prefix: str = "", + no_pci: bool = False, + vdevs: list[str] = None, + other_eal_param: str = "", + ) -> str: + """ + Generate eal parameters character string; + :param fixed_prefix: use fixed file-prefix or not, when it is true, + the file-prefix will not be added a timestamp + :param core_filter_specifier: an amount of cpus/cores/sockets to use + or a list of cpu ids to use. + The default will select one cpu for each of two cores + on one socket, in ascending order of core ids. + :param ascending_cores: True, use cores with the lowest numerical id first + and continue in ascending order. If False, start with the + highest id and continue in descending order. This ordering + affects which sockets to consider first as well. 
+ :param prefix: set file prefix string, eg: + prefix='vf'; + :param no_pci: switch of disable PCI bus eg: + no_pci=True; + :param vdevs: virtual device list, eg: + vdevs=['net_ring0', 'net_ring1']; + :param other_eal_param: user defined DPDK eal parameters, eg: + other_eal_param='--single-file-segments'; + :return: eal param string, eg: + '-c 0xf -a 0000:88:00.0 --file-prefix=dpdk_1112_20190809143420'; + if DPDK version < 20.11-rc4, eal_str eg: + '-c 0xf -w 0000:88:00.0 --file-prefix=dpdk_1112_20190809143420'; + """ + if vdevs is None: + vdevs = [] + + config = { + "core_filter_specifier": core_filter_specifier, + "ascending_cores": ascending_cores, + "prefix": prefix, + "no_pci": no_pci, + "vdevs": vdevs, + "other_eal_param": other_eal_param, + } + + eal_parameter_creator = _EalParameter( + sut_node=self, fixed_prefix=fixed_prefix, **config + ) + eal_str = eal_parameter_creator.make_eal_param() + + return eal_str + + +class _EalParameter(object): + def __init__( + self, + sut_node: SutNode, + fixed_prefix: bool, + core_filter_specifier: CPUAmount | CPUList, + ascending_cores: bool, + prefix: str, + no_pci: bool, + vdevs: list[str], + other_eal_param: str, + ): + """ + Generate eal parameters character string; + :param sut_node: SUT Node; + :param fixed_prefix: use fixed file-prefix or not, when it is true, + he file-prefix will not be added a timestamp + :param core_filter_specifier: an amount of cpus/cores/sockets to use + or a list of cpu ids to use. + :param ascending_cores: True, use cores with the lowest numerical id first + and continue in ascending order. If False, start with the + highest id and continue in descending order. This ordering + affects which sockets to consider first as well. 
+ :param prefix: set file prefix string, eg: + prefix='vf'; + :param no_pci: switch of disable PCI bus eg: + no_pci=True; + :param vdevs: virtual device list, eg: + vdevs=['net_ring0', 'net_ring1']; + :param other_eal_param: user defined DPDK eal parameters, eg: + other_eal_param='--single-file-segments'; + """ + self.os = sut_node.config.os + self.fixed_prefix = fixed_prefix + self.sut_node = sut_node + self.core_filter_specifier = core_filter_specifier + self.ascending_cores = ascending_cores + self.prefix = prefix + self.no_pci = no_pci + self.vdevs = vdevs + self.other_eal_param = other_eal_param + + def _make_lcores_param(self) -> str: + filtered_cpus = self.sut_node.filter_cpus( + self.core_filter_specifier, self.ascending_cores + ) + return f"-l {CPUList(filtered_cpus)}" + + def _make_memory_channels(self) -> str: + param_template = "-n {}" + return param_template.format(self.sut_node.config.memory_channels) + + def _make_no_pci_param(self) -> str: + if self.no_pci is True: + return "--no-pci" + else: + return "" + + def _make_prefix_param(self) -> str: + if self.prefix == "": + fixed_file_prefix = f"dpdk_{self.sut_node.dpdk_prefix_subfix}" + else: + fixed_file_prefix = self.prefix + if not self.fixed_prefix: + fixed_file_prefix = ( + f"{fixed_file_prefix}_{self.sut_node.dpdk_prefix_subfix}" + ) + fixed_file_prefix = self._do_os_handle_with_prefix_param(fixed_file_prefix) + return fixed_file_prefix + + def _make_vdevs_param(self) -> str: + if len(self.vdevs) == 0: + return "" + else: + return " ".join(f"--vdev {vdev}" for vdev in self.vdevs) + + def _do_os_handle_with_prefix_param(self, file_prefix: str) -> str: + self.sut_node.dpdk_prefix_list.append(file_prefix) + return f"--file-prefix={file_prefix}" + + def make_eal_param(self) -> str: + _eal_str = " ".join( + [ + self._make_lcores_param(), + self._make_memory_channels(), + self._make_prefix_param(), + self._make_no_pci_param(), + self._make_vdevs_param(), + # append user defined eal parameters + 
self.other_eal_param, + ] + ) + return _eal_str diff --git a/dts/framework/utils.py b/dts/framework/utils.py index 91e58f3218..3c2f0adff9 100644 --- a/dts/framework/utils.py +++ b/dts/framework/utils.py @@ -32,6 +32,26 @@ def skip_setup(func) -> Callable[..., None]: return func +def expand_range(range_str: str) -> list[int]: + """ + Process range string into a list of integers. There are two possible formats: + n - a single integer + n-m - a range of integers + + The returned range includes both n and m. Empty string returns an empty list. + """ + expanded_range: list[int] = [] + if range_str: + range_boundaries = range_str.split("-") + # will throw an exception when items in range_boundaries can't be converted, + # serving as type check + expanded_range.extend( + range(int(range_boundaries[0]), int(range_boundaries[-1]) + 1) + ) + + return expanded_range + + def GREEN(text: str) -> str: return f"\u001B[32;1m{str(text)}\u001B[0m" From patchwork Mon Nov 14 16:54:33 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119844 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 45E66A00C4; Mon, 14 Nov 2022 17:55:26 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 715DB42D37; Mon, 14 Nov 2022 17:54:52 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id CBED342D31 for ; Mon, 14 Nov 2022 17:54:49 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id EE24221C5DC; Mon, 14 Nov 2022 17:54:48 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost 
(lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id 4V5JmXqdFEbj; Mon, 14 Nov 2022 17:54:46 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id DD6BF243083; Mon, 14 Nov 2022 17:54:41 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 05/10] dts: add node memory setup Date: Mon, 14 Nov 2022 16:54:33 +0000 Message-Id: <20221114165438.1133783-6-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Setup hugepages on nodes. This is useful not only on SUT nodes, but also on TG nodes which use TGs that utilize hugepages. 
Signed-off-by: Juraj Linkeš ---
+ + +class Arch(object): + """ + Stores architecture-specific information. + """ + + @property + def default_hugepage_memory(self) -> int: + """ + Return the default amount of memory allocated for hugepages DPDK will use. + The default is an amount equal to 256 2MB hugepages (512MB memory). + """ + return 256 * 2048 + + @property + def hugepage_force_first_numa(self) -> bool: + """ + An architecture may need to force configuration of hugepages to first socket. + """ + return False + + +class x86_64(Arch): + @property + def default_hugepage_memory(self) -> int: + return 4096 * 2048 + + +class x86_32(Arch): + @property + def hugepage_force_first_numa(self) -> bool: + return True + + +class i686(Arch): + @property + def default_hugepage_memory(self) -> int: + return 512 * 2048 + + @property + def hugepage_force_first_numa(self) -> bool: + return True + + +class PPC64(Arch): + @property + def default_hugepage_memory(self) -> int: + return 512 * 2048 + + +class Arm64(Arch): + @property + def default_hugepage_memory(self) -> int: + return 2048 * 2048 diff --git a/dts/framework/remote_session/os/linux_session.py b/dts/framework/remote_session/os/linux_session.py index 21f117b714..fad33d7613 100644 --- a/dts/framework/remote_session/os/linux_session.py +++ b/dts/framework/remote_session/os/linux_session.py @@ -3,6 +3,8 @@ # Copyright(c) 2022 University of New Hampshire from framework.config import CPU +from framework.exception import RemoteCommandExecutionError +from framework.utils import expand_range from .posix_session import PosixSession @@ -24,3 +26,86 @@ def get_remote_cpus(self, bypass_core0: bool) -> list[CPU]: continue cpus.append(CPU(int(cpu), int(core), int(socket), int(node))) return cpus + + def setup_hugepages( + self, hugepage_amount: int = -1, force_first_numa: bool = False + ) -> None: + self.logger.info("Getting Hugepage information.") + hugepage_size = self._get_hugepage_size() + hugepages_total = self._get_hugepages_total() + self._numa_nodes = 
self._get_numa_nodes() + + target_hugepages_total = int(hugepage_amount / hugepage_size) + if hugepage_amount % hugepage_size: + target_hugepages_total += 1 + if force_first_numa or hugepages_total != target_hugepages_total: + # when forcing numa, we need to clear existing hugepages regardless + # of size, so they can be moved to the first numa node + self._configure_huge_pages( + target_hugepages_total, hugepage_size, force_first_numa + ) + else: + self.logger.info("Hugepages already configured.") + self._mount_huge_pages() + + def _get_hugepage_size(self) -> int: + hugepage_size = self.remote_session.send_command( + "awk '/Hugepagesize/ {print $2}' /proc/meminfo" + ).stdout + return int(hugepage_size) + + def _get_hugepages_total(self) -> int: + hugepages_total = self.remote_session.send_command( + "awk '/HugePages_Total/ { print $2 }' /proc/meminfo" + ).stdout + return int(hugepages_total) + + def _get_numa_nodes(self) -> list[int]: + try: + numa_range = self.remote_session.send_command( + "cat /sys/devices/system/node/online", verify=True + ).stdout + numa_range = expand_range(numa_range) + except RemoteCommandExecutionError: + # the file doesn't exist, meaning the node doesn't support numa + numa_range = [] + return numa_range + + def _mount_huge_pages(self) -> None: + self.logger.info("Re-mounting Hugepages.") + hugapge_fs_cmd = "awk '/hugetlbfs/ { print $2 }' /proc/mounts" + self.remote_session.send_command(f"umount $({hugapge_fs_cmd})") + result = self.remote_session.send_command(hugapge_fs_cmd) + if result.stdout == "": + remote_mount_path = "/mnt/huge" + self.remote_session.send_command(f"mkdir -p {remote_mount_path}") + self.remote_session.send_command( + f"mount -t hugetlbfs nodev {remote_mount_path}" + ) + + def _supports_numa(self) -> bool: + # the system supports numa if self._numa_nodes is non-empty and there are more + # than one numa node (in the latter case it may actually support numa, but + # there's no reason to do any numa specific 
configuration) + return len(self._numa_nodes) > 1 + + def _configure_huge_pages( + self, amount: int, size: int, force_first_numa: bool + ) -> None: + self.logger.info("Configuring Hugepages.") + hugepage_config_path = ( + f"/sys/kernel/mm/hugepages/hugepages-{size}kB/nr_hugepages" + ) + if force_first_numa and self._supports_numa(): + # clear non-numa hugepages + self.remote_session.send_command( + f"echo 0 | sudo tee {hugepage_config_path}" + ) + hugepage_config_path = ( + f"/sys/devices/system/node/node{self._numa_nodes[0]}/hugepages" + f"/hugepages-{size}kB/nr_hugepages" + ) + + self.remote_session.send_command( + f"echo {amount} | sudo tee {hugepage_config_path}" + ) diff --git a/dts/framework/remote_session/os/os_session.py b/dts/framework/remote_session/os/os_session.py index 6f6b6a979e..f84f3ce63c 100644 --- a/dts/framework/remote_session/os/os_session.py +++ b/dts/framework/remote_session/os/os_session.py @@ -144,3 +144,13 @@ def kill_cleanup_dpdk_apps(self, dpdk_prefix_list: Iterable[str]) -> None: Kill and cleanup all DPDK apps identified by dpdk_prefix_list. If dpdk_prefix_list is empty, attempt to find running DPDK apps to kill and clean. """ + + @abstractmethod + def setup_hugepages( + self, hugepage_amount: int = -1, force_first_numa: bool = False + ) -> None: + """ + Get the node's Hugepage Size, configure the specified amount of hugepages + if needed and mount the hugepages if needed. + If force_first_numa is True, configure hugepages just on the first socket. 
+ """ diff --git a/dts/framework/testbed_model/node/node.py b/dts/framework/testbed_model/node/node.py index 5ee7023335..96a1724f4c 100644 --- a/dts/framework/testbed_model/node/node.py +++ b/dts/framework/testbed_model/node/node.py @@ -16,7 +16,7 @@ ) from framework.exception import NodeCleanupError, NodeSetupError, convert_exception from framework.logger import DTSLOG, getLogger -from framework.remote_session import OSSession, create_session +from framework.remote_session import Arch, OSSession, create_arch, create_session from framework.testbed_model.hw import CPUAmount, cpu_filter @@ -33,6 +33,7 @@ class Node(object): config: NodeConfiguration cpus: list[CPU] _other_sessions: list[OSSession] + _arch: Arch def __init__(self, node_config: NodeConfiguration): self.config = node_config @@ -42,6 +43,7 @@ def __init__(self, node_config: NodeConfiguration): self.logger = getLogger(self.name) self.logger.info(f"Created node: {self.name}") self.main_session = create_session(self.config, self.name, self.logger) + self._arch = create_arch(self.config) self._get_remote_cpus() @convert_exception(NodeSetupError) @@ -50,6 +52,7 @@ def setup_execution(self, execution_config: ExecutionConfiguration) -> None: Perform the execution setup that will be done for each execution this node is part of. """ + self._setup_hugepages() self._setup_execution(execution_config) def _setup_execution(self, execution_config: ExecutionConfiguration) -> None: @@ -145,6 +148,16 @@ def _get_remote_cpus(self) -> None: self.logger.info("Getting CPU information.") self.cpus = self.main_session.get_remote_cpus(self.config.bypass_core0) + def _setup_hugepages(self): + """ + Setup hugepages on the Node. Different architectures can supply different + amounts of memory for hugepages and numa-based hugepage allocation may need + to be considered. 
+ """ + self.main_session.setup_hugepages( + self._arch.default_hugepage_memory, self._arch.hugepage_force_first_numa + ) + def close(self) -> None: """ Close all connections and free other resources. From patchwork Mon Nov 14 16:54:34 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119845 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id DD329A00C4; Mon, 14 Nov 2022 17:55:33 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4A29642D34; Mon, 14 Nov 2022 17:54:53 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 1B06F42D31 for ; Mon, 14 Nov 2022 17:54:50 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 42F4026AEFE; Mon, 14 Nov 2022 17:54:49 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id A1D5npxuGhBJ; Mon, 14 Nov 2022 17:54:48 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 964ED243CCC; Mon, 14 Nov 2022 17:54:42 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 06/10] dts: add test results module Date: Mon, 14 Nov 2022 16:54:34 +0000 Message-Id: <20221114165438.1133783-7-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: 
<20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The module keeps track of test case results along with miscellaneous information, such as on which SUT's did a failure occur and during the testing of which build target. Signed-off-by: Juraj Linkeš --- dts/framework/dts.py | 5 + dts/framework/test_result.py | 217 +++++++++++++++++++++++++++++++++++ 2 files changed, 222 insertions(+) create mode 100644 dts/framework/test_result.py diff --git a/dts/framework/dts.py b/dts/framework/dts.py index 262c392d8e..d606f8de2e 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -14,9 +14,11 @@ from .exception import DTSError, ReturnCode from .logger import DTSLOG, getLogger from .settings import SETTINGS +from .test_result import Result from .utils import check_dts_python_version dts_logger: DTSLOG = getLogger("dts") +result: Result = Result() def run_all() -> None: @@ -26,6 +28,7 @@ def run_all() -> None: """ return_code = ReturnCode.NO_ERR global dts_logger + global result # check the python version of the server that run dts check_dts_python_version() @@ -45,6 +48,7 @@ def run_all() -> None: # for all Execution sections for execution in CONFIGURATION.executions: sut_node = init_nodes(execution, nodes) + result.sut = sut_node run_execution(sut_node, execution) except DTSError as e: @@ -104,6 +108,7 @@ def run_build_target( Run the given build target. 
""" dts_logger.info(f"Running target '{build_target.name}'.") + result.target = build_target try: sut_node.setup_build_target(build_target) run_suite(sut_node, build_target, execution) diff --git a/dts/framework/test_result.py b/dts/framework/test_result.py new file mode 100644 index 0000000000..a12517b9bc --- /dev/null +++ b/dts/framework/test_result.py @@ -0,0 +1,217 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. + +""" +Generic result container and reporters +""" + + +class Result(object): + """ + Generic result container. Useful to store/retrieve results during + a DTF execution. + + It manages and hide an internal complex structure like the one shown below. + This is presented to the user with a property based interface. + + internals = [ + 'sut1', [ + 'kdriver', + 'firmware', + 'pkg', + 'driver', + 'dpdk_version', + 'target1', 'nic1', [ + 'suite1', [ + 'case1', ['PASSED', ''], + 'case2', ['PASSED', ''], + ], + ], + 'target2', 'nic1', [ + 'suite2', [ + 'case3', ['PASSED', ''], + 'case4', ['FAILED', 'message'], + ], + 'suite3', [ + 'case5', ['BLOCKED', 'message'], + ], + ] + ] + ] + + """ + + def __init__(self): + self.__sut = 0 + self.__target = 0 + self.__test_suite = 0 + self.__test_case = 0 + self.__test_result = None + self.__message = None + self.__internals = [] + self.__failed_suts = {} + self.__failed_targets = {} + + def __set_sut(self, sut): + if sut not in self.__internals: + self.__internals.append(sut) + self.__internals.append([]) + self.__sut = self.__internals.index(sut) + + def __get_sut(self): + return self.__internals[self.__sut] + + def current_dpdk_version(self, sut): + """ + Returns the dpdk version for a given SUT + """ + try: + sut_idx = self.__internals.index(sut) + return self.__internals[sut_idx + 1][4] + except: + return "" + + def __set_dpdk_version(self, dpdk_version): + if dpdk_version not in self.internals[self.__sut + 1]: + dpdk_current = 
self.__get_dpdk_version() + if dpdk_current: + if dpdk_version not in dpdk_current: + self.internals[self.__sut + 1][4] = ( + dpdk_current + "/" + dpdk_version + ) + else: + self.internals[self.__sut + 1].append(dpdk_version) + + def __get_dpdk_version(self): + try: + return self.internals[self.__sut + 1][4] + except: + return "" + + def __current_targets(self): + return self.internals[self.__sut + 1] + + def __set_target(self, target): + targets = self.__current_targets() + if target not in targets: + targets.append(target) + targets.append("_nic_") + targets.append([]) + self.__target = targets.index(target) + + def __get_target(self): + return self.__current_targets()[self.__target] + + def __current_suites(self): + return self.__current_targets()[self.__target + 2] + + def __set_test_suite(self, test_suite): + suites = self.__current_suites() + if test_suite not in suites: + suites.append(test_suite) + suites.append([]) + self.__test_suite = suites.index(test_suite) + + def __get_test_suite(self): + return self.__current_suites()[self.__test_suite] + + def __current_cases(self): + return self.__current_suites()[self.__test_suite + 1] + + def __set_test_case(self, test_case): + cases = self.__current_cases() + cases.append(test_case) + cases.append([]) + self.__test_case = cases.index(test_case) + + def __get_test_case(self): + return self.__current_cases()[self.__test_case] + + def __get_internals(self): + return self.__internals + + def __current_result(self): + return self.__current_cases()[self.__test_case + 1] + + def __set_test_case_result(self, result, message): + test_case = self.__current_result() + test_case.append(result) + test_case.append(message) + self.__test_result = result + self.__message = message + + def copy_suite(self, suite_result): + self.__current_suites()[self.__test_suite + 1] = suite_result.__current_cases() + + def test_case_passed(self): + """ + Set last test case added as PASSED + """ + self.__set_test_case_result(result="PASSED", 
message="") + + def test_case_failed(self, message): + """ + Set last test case added as FAILED + """ + self.__set_test_case_result(result="FAILED", message=message) + + def test_case_blocked(self, message): + """ + Set last test case added as BLOCKED + """ + self.__set_test_case_result(result="BLOCKED", message=message) + + def all_suts(self): + """ + Returns all the SUTs it's aware of. + """ + return self.__internals[::2] + + def all_targets(self, sut): + """ + Returns the targets for a given SUT + """ + try: + sut_idx = self.__internals.index(sut) + except: + return None + return self.__internals[sut_idx + 1][5::3] + + def add_failed_sut(self, sut, msg): + """ + Sets the given SUT as failing due to msg + """ + self.__failed_suts[sut] = msg + + def remove_failed_sut(self, sut): + """ + Remove the given SUT from failed SUTs collection + """ + if sut in self.__failed_suts: + self.__failed_suts.pop(sut) + + def add_failed_target(self, sut, target, msg): + """ + Sets the given SUT, target as failing due to msg + """ + self.__failed_targets[sut + target] = msg + + def remove_failed_target(self, sut, target): + """ + Remove the given SUT, target from failed targets collection + """ + key_word = sut + target + if key_word in self.__failed_targets: + self.__failed_targets.pop(key_word) + + """ + Attributes defined as properties to hide the implementation from the + presented interface. 
+ """ + sut = property(__get_sut, __set_sut) + dpdk_version = property(__get_dpdk_version, __set_dpdk_version) + target = property(__get_target, __set_target) + test_suite = property(__get_test_suite, __set_test_suite) + test_case = property(__get_test_case, __set_test_case) + internals = property(__get_internals) From patchwork Mon Nov 14 16:54:35 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119846 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D7E87A00C4; Mon, 14 Nov 2022 17:55:42 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 50A5C42D47; Mon, 14 Nov 2022 17:54:54 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 74E3F42D29 for ; Mon, 14 Nov 2022 17:54:51 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id B8AED243083; Mon, 14 Nov 2022 17:54:50 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id 9y5M1B_PaRHt; Mon, 14 Nov 2022 17:54:49 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 14B5F165617; Mon, 14 Nov 2022 17:54:43 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 07/10] dts: add simple stats report Date: Mon, 14 Nov 2022 16:54:35 +0000 Message-Id: 
<20221114165438.1133783-8-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Provide a summary of testcase passed/failed/blocked counts. Signed-off-by: Juraj Linkeš --- dts/framework/dts.py | 3 ++ dts/framework/stats_reporter.py | 65 +++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 dts/framework/stats_reporter.py diff --git a/dts/framework/dts.py b/dts/framework/dts.py index d606f8de2e..a7c243a5c3 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -14,11 +14,13 @@ from .exception import DTSError, ReturnCode from .logger import DTSLOG, getLogger from .settings import SETTINGS +from .stats_reporter import TestStats from .test_result import Result from .utils import check_dts_python_version dts_logger: DTSLOG = getLogger("dts") result: Result = Result() +test_stats: TestStats = TestStats(SETTINGS.output_dir + "/statistics.txt") def run_all() -> None: @@ -29,6 +31,7 @@ def run_all() -> None: return_code = ReturnCode.NO_ERR global dts_logger global result + global test_stats # check the python version of the server that run dts check_dts_python_version() diff --git a/dts/framework/stats_reporter.py b/dts/framework/stats_reporter.py new file mode 100644 index 0000000000..a2735d0a1d --- /dev/null +++ b/dts/framework/stats_reporter.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. 
+ +""" +Simple text file statistics generator +""" + + +class TestStats(object): + """ + Generates a small statistics file containing the number of passing, + failing and blocked tests. It makes use of a Result instance as input. + """ + + def __init__(self, filename): + self.filename = filename + + def __add_stat(self, test_result): + if test_result is not None: + if test_result[0] == "PASSED": + self.passed += 1 + if test_result[0] == "FAILED": + self.failed += 1 + if test_result[0] == "BLOCKED": + self.blocked += 1 + self.total += 1 + + def __count_stats(self): + for sut in self.result.all_suts(): + for target in self.result.all_targets(sut): + for suite in self.result.all_test_suites(sut, target): + for case in self.result.all_test_cases(sut, target, suite): + test_result = self.result.result_for(sut, target, suite, case) + if len(test_result): + self.__add_stat(test_result) + + def __write_stats(self): + sut_nodes = self.result.all_suts() + if len(sut_nodes) == 1: + self.stats_file.write( + f"dpdk_version = {self.result.current_dpdk_version(sut_nodes[0])}\n" + ) + else: + for sut in sut_nodes: + dpdk_version = self.result.current_dpdk_version(sut) + self.stats_file.write(f"{sut}.dpdk_version = {dpdk_version}\n") + self.__count_stats() + self.stats_file.write(f"Passed = {self.passed}\n") + self.stats_file.write(f"Failed = {self.failed}\n") + self.stats_file.write(f"Blocked = {self.blocked}\n") + rate = 0 + if self.total > 0: + rate = self.passed * 100.0 / self.total + self.stats_file.write(f"Pass rate = {rate:.1f}\n") + + def save(self, result): + self.passed = 0 + self.failed = 0 + self.blocked = 0 + self.total = 0 + self.stats_file = open(self.filename, "w+") + self.result = result + self.__write_stats() + self.stats_file.close() From patchwork Mon Nov 14 16:54:36 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119847 X-Patchwork-Delegate: 
thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 98180A00C4; Mon, 14 Nov 2022 17:55:48 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2EDB042D4A; Mon, 14 Nov 2022 17:54:55 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 4AF5F42D34 for ; Mon, 14 Nov 2022 17:54:52 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id A279D165617; Mon, 14 Nov 2022 17:54:51 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id 9WLyfAV_t_aU; Mon, 14 Nov 2022 17:54:49 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 87F03243CF8; Mon, 14 Nov 2022 17:54:43 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 08/10] dts: add testsuite class Date: Mon, 14 Nov 2022 16:54:36 +0000 Message-Id: <20221114165438.1133783-9-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org This is the base class that all test suites inherit from. 
The base class implements methods common to all test suites. The derived test suites implement tests and any particular setup needed for the suite or tests. Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 4 + dts/framework/config/__init__.py | 33 ++- dts/framework/config/conf_yaml_schema.json | 49 ++++ dts/framework/dts.py | 29 +++ dts/framework/exception.py | 65 ++++++ dts/framework/settings.py | 25 +++ dts/framework/test_case.py | 246 +++++++++++++++++++++ 7 files changed, 450 insertions(+), 1 deletion(-) create mode 100644 dts/framework/test_case.py diff --git a/dts/conf.yaml b/dts/conf.yaml index 976888a88e..0b0f2c59b0 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -7,6 +7,10 @@ executions: os: linux cpu: native compiler: gcc + perf: false + func: true + test_suites: + - hello_world system_under_test: "SUT 1" nodes: - name: "SUT 1" diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index 344d697a69..8874b10030 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -11,7 +11,7 @@ import pathlib from dataclasses import dataclass from enum import Enum, auto, unique -from typing import Any, Iterable +from typing import Any, Iterable, TypedDict import warlock # type: ignore import yaml @@ -186,9 +186,34 @@ def from_dict(d: dict) -> "BuildTargetConfiguration": ) +class TestSuiteConfigDict(TypedDict): + suite: str + cases: list[str] + + +@dataclass(slots=True, frozen=True) +class TestSuiteConfig: + test_suite: str + test_cases: list[str] + + @staticmethod + def from_dict( + entry: str | TestSuiteConfigDict, + ) -> "TestSuiteConfig": + if isinstance(entry, str): + return TestSuiteConfig(test_suite=entry, test_cases=[]) + elif isinstance(entry, dict): + return TestSuiteConfig(test_suite=entry["suite"], test_cases=entry["cases"]) + else: + raise TypeError(f"{type(entry)} is not valid for a test suite config.") + + @dataclass(slots=True, frozen=True) class ExecutionConfiguration: build_targets: 
list[BuildTargetConfiguration] + perf: bool + func: bool + test_suites: list[TestSuiteConfig] system_under_test: NodeConfiguration @staticmethod @@ -196,11 +221,17 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": build_targets: list[BuildTargetConfiguration] = list( map(BuildTargetConfiguration.from_dict, d["build_targets"]) ) + test_suites: list[TestSuiteConfig] = list( + map(TestSuiteConfig.from_dict, d["test_suites"]) + ) sut_name = d["system_under_test"] assert sut_name in node_map, f"Unknown SUT {sut_name} in execution {d}" return ExecutionConfiguration( build_targets=build_targets, + perf=d["perf"], + func=d["func"], + test_suites=test_suites, system_under_test=node_map[sut_name], ) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index c59d3e30e6..e37ced65fe 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -63,6 +63,31 @@ } }, "additionalProperties": false + }, + "test_suite": { + "type": "string", + "enum": [ + "hello_world" + ] + }, + "test_target": { + "type": "object", + "properties": { + "suite": { + "$ref": "#/definitions/test_suite" + }, + "cases": { + "type": "array", + "items": { + "type": "string" + }, + "minimum": 1 + } + }, + "required": [ + "suite" + ], + "additionalProperties": false } }, "type": "object", @@ -130,6 +155,27 @@ }, "minimum": 1 }, + "perf": { + "type": "boolean", + "description": "Enable performance testing" + }, + "func": { + "type": "boolean", + "description": "Enable functional testing" + }, + "test_suites": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/test_suite" + }, + { + "$ref": "#/definitions/test_target" + } + ] + } + }, "system_under_test": { "$ref": "#/definitions/node_name" } @@ -137,6 +183,9 @@ "additionalProperties": false, "required": [ "build_targets", + "perf", + "func", + "test_suites", "system_under_test" ] }, diff --git a/dts/framework/dts.py 
b/dts/framework/dts.py index a7c243a5c3..ba3f4b4168 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -15,6 +15,7 @@ from .logger import DTSLOG, getLogger from .settings import SETTINGS from .stats_reporter import TestStats +from .test_case import TestCase from .test_result import Result from .utils import check_dts_python_version @@ -129,6 +130,34 @@ def run_suite( Use the given build_target to run the test suite with possibly only a subset of tests. If no subset is specified, run all tests. """ + for test_suite_config in execution.test_suites: + result.test_suite = test_suite_config.test_suite + full_suite_path = f"tests.TestSuite_{test_suite_config.test_suite}" + testcase_classes = TestCase.get_testcases(full_suite_path) + dts_logger.debug( + f"Found testcase classes '{testcase_classes}' in '{full_suite_path}'" + ) + for testcase_class in testcase_classes: + testcase = testcase_class( + sut_node, test_suite_config.test_suite, build_target, execution + ) + + testcase.init_log() + testcase.set_requested_cases(SETTINGS.test_cases) + testcase.set_requested_cases(test_suite_config.test_cases) + + dts_logger.info(f"Running test suite '{testcase_class.__name__}'") + try: + testcase.execute_setup_all() + testcase.execute_test_cases() + dts_logger.info( + f"Finished running test suite '{testcase_class.__name__}'" + ) + result.copy_suite(testcase.get_result()) + test_stats.save(result) # this was originally after teardown + + finally: + testcase.execute_tear_downall() def quit_execution(nodes: Iterable[Node], return_code: ReturnCode) -> None: diff --git a/dts/framework/exception.py b/dts/framework/exception.py index 93d99432ae..a35eeff640 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -29,6 +29,10 @@ class ReturnCode(IntEnum): DPDK_BUILD_ERR = 10 NODE_SETUP_ERR = 20 NODE_CLEANUP_ERR = 21 + SUITE_SETUP_ERR = 30 + SUITE_EXECUTION_ERR = 31 + TESTCASE_VERIFY_ERR = 32 + SUITE_CLEANUP_ERR = 33 class DTSError(Exception): @@ -153,6 
+157,67 @@ def __init__(self): ) +class TestSuiteNotFound(DTSError): + """ + Raised when a configured test suite cannot be imported. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.SUITE_SETUP_ERR + + +class SuiteSetupError(DTSError): + """ + Raised when an error occurs during suite setup. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.SUITE_SETUP_ERR + + def __init__(self): + super(SuiteSetupError, self).__init__("An error occurred during suite setup.") + + +class SuiteExecutionError(DTSError): + """ + Raised when an error occurs during suite execution. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.SUITE_EXECUTION_ERR + + def __init__(self): + super(SuiteExecutionError, self).__init__( + "An error occurred during suite execution." + ) + + +class VerifyError(DTSError): + """ + To be used within the test cases to verify if a command output + is as it was expected. + """ + + value: str + return_code: ClassVar[ReturnCode] = ReturnCode.TESTCASE_VERIFY_ERR + + def __init__(self, value: str): + self.value = value + + def __str__(self) -> str: + return repr(self.value) + + +class SuiteCleanupError(DTSError): + """ + Raised when an error occurs during suite cleanup. + """ + + return_code: ClassVar[ReturnCode] = ReturnCode.SUITE_CLEANUP_ERR + + def __init__(self): + super(SuiteCleanupError, self).__init__( + "An error occurred during suite cleanup." 
+ ) + + def convert_exception(exception: type[DTSError]) -> Callable[..., Callable[..., None]]: """ When a non-DTS exception is raised while executing the decorated function, diff --git a/dts/framework/settings.py b/dts/framework/settings.py index e2bf3d2ce4..069f28ce81 100644 --- a/dts/framework/settings.py +++ b/dts/framework/settings.py @@ -64,6 +64,8 @@ class _Settings: skip_setup: bool dpdk_ref: Path compile_timeout: float + test_cases: list + re_run: int def _get_parser() -> argparse.ArgumentParser: @@ -138,6 +140,25 @@ def _get_parser() -> argparse.ArgumentParser: help="[DTS_COMPILE_TIMEOUT] The timeout for compiling DPDK.", ) + parser.add_argument( + "--test-cases", + action=_env_arg("DTS_TESTCASES"), + default="", + required=False, + help="[DTS_TESTCASES] Comma-separated list of testcases to execute", + ) + + parser.add_argument( + "--re-run", + "--re_run", + action=_env_arg("DTS_RERUN"), + default=0, + type=int, + required=False, + help="[DTS_RERUN] Re-run tests the specified amount of times if a test failure " + "occurs", + ) + return parser @@ -151,6 +172,10 @@ def _get_settings() -> _Settings: skip_setup=(parsed_args.skip_setup == "Y"), dpdk_ref=parsed_args.dpdk_ref, compile_timeout=parsed_args.compile_timeout, + test_cases=parsed_args.test_cases.split(",") + if parsed_args.test_cases != "" + else [], + re_run=parsed_args.re_run, ) diff --git a/dts/framework/test_case.py b/dts/framework/test_case.py new file mode 100644 index 0000000000..0479f795bb --- /dev/null +++ b/dts/framework/test_case.py @@ -0,0 +1,246 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. + +""" +A base class for creating DTS test cases. 
+""" + +import importlib +import inspect +import re +import time +import traceback + +from .exception import ( + SSHTimeoutError, + SuiteCleanupError, + SuiteExecutionError, + SuiteSetupError, + TestSuiteNotFound, + VerifyError, + convert_exception, +) +from .logger import getLogger +from .settings import SETTINGS +from .test_result import Result + + +class TestCase(object): + def __init__(self, sut_node, suitename, target, execution): + self.sut_node = sut_node + self.suite_name = suitename + self.target = target + + # local variable + self._requested_tests = [] + self._subtitle = None + + # result object for save suite result + self._suite_result = Result() + self._suite_result.sut = self.sut_node.config.hostname + self._suite_result.target = target + self._suite_result.test_suite = self.suite_name + if self._suite_result is None: + raise ValueError("Result object should not None") + + self._enable_func = execution.func + + # command history + self.setup_history = list() + self.test_history = list() + + def init_log(self): + # get log handler + class_name = self.__class__.__name__ + self.logger = getLogger(class_name) + + def set_up_all(self): + pass + + def set_up(self): + pass + + def tear_down(self): + pass + + def tear_down_all(self): + pass + + def verify(self, passed, description): + if not passed: + raise VerifyError(description) + + def _get_functional_cases(self): + """ + Get all functional test cases. + """ + return self._get_test_cases(r"test_(?!perf_)") + + def _has_it_been_requested(self, test_case, test_name_regex): + """ + Check whether test case has been requested for validation. 
+ """ + name_matches = re.match(test_name_regex, test_case.__name__) + if self._requested_tests: + return name_matches and test_case.__name__ in self._requested_tests + + return name_matches + + def set_requested_cases(self, case_list): + """ + Pass down input cases list for check + """ + self._requested_tests += case_list + + def _get_test_cases(self, test_name_regex): + """ + Return case list which name matched regex. + """ + self.logger.debug(f"Searching for testcases in {self.__class__}") + for test_case_name in dir(self): + test_case = getattr(self, test_case_name) + if callable(test_case) and self._has_it_been_requested( + test_case, test_name_regex + ): + yield test_case + + @convert_exception(SuiteSetupError) + def execute_setup_all(self): + """ + Execute suite setup_all function before cases. + """ + try: + self.set_up_all() + return True + except Exception as v: + self.logger.error("set_up_all failed:\n" + traceback.format_exc()) + # record all cases blocked + if self._enable_func: + for case_obj in self._get_functional_cases(): + self._suite_result.test_case = case_obj.__name__ + self._suite_result.test_case_blocked( + "set_up_all failed: {}".format(str(v)) + ) + return False + + def _execute_test_case(self, case_obj): + """ + Execute specified test case in specified suite. If any exception occurred in + validation process, save the result and tear down this case. 
+ """ + case_name = case_obj.__name__ + self._suite_result.test_case = case_obj.__name__ + + case_result = True + try: + self.logger.info("Test Case %s Begin" % case_name) + + self.running_case = case_name + # run set_up function for each case + self.set_up() + # run test case + case_obj() + + self._suite_result.test_case_passed() + + self.logger.info("Test Case %s Result PASSED:" % case_name) + + except VerifyError as v: + case_result = False + self._suite_result.test_case_failed(str(v)) + self.logger.error("Test Case %s Result FAILED: " % (case_name) + str(v)) + except KeyboardInterrupt: + self._suite_result.test_case_blocked("Skipped") + self.logger.error("Test Case %s SKIPPED: " % (case_name)) + self.tear_down() + raise KeyboardInterrupt("Stop DTS") + except SSHTimeoutError as e: + case_result = False + self._suite_result.test_case_failed(str(e)) + self.logger.error("Test Case %s Result FAILED: " % (case_name) + str(e)) + self.logger.error("%s" % (e.get_output())) + except Exception: + case_result = False + trace = traceback.format_exc() + self._suite_result.test_case_failed(trace) + self.logger.error("Test Case %s Result ERROR: " % (case_name) + trace) + finally: + self.execute_tear_down() + return case_result + + @convert_exception(SuiteExecutionError) + def execute_test_cases(self): + """ + Execute all test cases in one suite. + """ + # prepare debugger rerun case environment + if self._enable_func: + for case_obj in self._get_functional_cases(): + for i in range(SETTINGS.re_run + 1): + ret = self.execute_test_case(case_obj) + + if ret is False and SETTINGS.re_run: + self.sut_node.get_session_output(timeout=0.5 * (i + 1)) + time.sleep(i + 1) + self.logger.info( + " Test case %s failed and re-run %d time" + % (case_obj.__name__, i + 1) + ) + else: + break + + def execute_test_case(self, case_obj): + """ + Execute test case or enter into debug mode. 
+ """ + return self._execute_test_case(case_obj) + + def get_result(self): + """ + Return suite test result + """ + return self._suite_result + + @convert_exception(SuiteCleanupError) + def execute_tear_downall(self): + """ + execute suite tear_down_all function + """ + self.tear_down_all() + + self.sut_node.kill_cleanup_dpdk_apps() + + def execute_tear_down(self): + """ + execute suite tear_down function + """ + try: + self.tear_down() + except Exception: + self.logger.error("tear_down failed:\n" + traceback.format_exc()) + self.logger.warning( + "tear down %s failed, might iterfere next case's result!" + % self.running_case + ) + + @staticmethod + def get_testcases(testsuite_module_path: str) -> list[type["TestCase"]]: + def is_testcase(object) -> bool: + try: + if issubclass(object, TestCase) and object != TestCase: + return True + except TypeError: + return False + return False + + try: + testcase_module = importlib.import_module(testsuite_module_path) + except ModuleNotFoundError as e: + raise TestSuiteNotFound( + f"Testsuite '{testsuite_module_path}' not found." 
+ ) from e + return [ + testcase_class + for _, testcase_class in inspect.getmembers(testcase_module, is_testcase) + ] From patchwork Mon Nov 14 16:54:37 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119848 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8D457A00C4; Mon, 14 Nov 2022 17:55:57 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 62D8A42D55; Mon, 14 Nov 2022 17:54:56 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id EEA9742D2A for ; Mon, 14 Nov 2022 17:54:52 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 50B6E21C5E8; Mon, 14 Nov 2022 17:54:52 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id mSRZIzFlgfec; Mon, 14 Nov 2022 17:54:51 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 3E62124794E; Mon, 14 Nov 2022 17:54:44 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 09/10] dts: add hello world testplan Date: Mon, 14 Nov 2022 16:54:37 +0000 Message-Id: <20221114165438.1133783-10-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> 
<20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The testplan describes the capabilities of the tested application along with the description of testcases to test it. Signed-off-by: Juraj Linkeš --- dts/test_plans/hello_world_test_plan.rst | 68 ++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 dts/test_plans/hello_world_test_plan.rst diff --git a/dts/test_plans/hello_world_test_plan.rst b/dts/test_plans/hello_world_test_plan.rst new file mode 100644 index 0000000000..566a9bb10c --- /dev/null +++ b/dts/test_plans/hello_world_test_plan.rst @@ -0,0 +1,68 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2010-2017 Intel Corporation + +============================================= +Sample Application Tests: Hello World Example +============================================= + +This example is one of the most simple RTE application that can be +done. The program will just print a "helloworld" message on every +enabled lcore. + +Command Usage:: + + ./dpdk-helloworld -c COREMASK [-m NB] [-r NUM] [-n NUM] + + EAL option list: + -c COREMASK: hexadecimal bitmask of cores we are running on + -m MB : memory to allocate (default = size of hugemem) + -n NUM : force number of memory channels (don't detect) + -r NUM : force number of memory ranks (don't detect) + --huge-file: base filename for hugetlbfs entries + debug options: + --no-huge : use malloc instead of hugetlbfs + --no-pci : disable pci + --no-hpet : disable hpet + --no-shconf: no shared config (mmap'd files) + + +Prerequisites +============= + +Support igb_uio and vfio driver, if used vfio, kernel need 3.6+ and enable vt-d in bios. 
+When used vfio , used "modprobe vfio" and "modprobe vfio-pci" insmod vfio driver, then used +"./tools/dpdk_nic_bind.py --bind=vfio-pci device_bus_id" to bind vfio driver to test driver. + +To find out the mapping of lcores (processor) to core id and socket (physical +id), the command below can be used:: + + $ grep "processor\|physical id\|core id\|^$" /proc/cpuinfo + +The total logical core number will be used as ``helloworld`` input parameters. + + +Test Case: run hello world on single lcores +=========================================== + +To run example in single lcore :: + + $ ./dpdk-helloworld -c 1 + hello from core 0 + +Check the output is exact the lcore 0 + + +Test Case: run hello world on every lcores +========================================== + +To run the example in all the enabled lcore :: + + $ ./dpdk-helloworld -cffffff + hello from core 1 + hello from core 2 + hello from core 3 + ... + ... + hello from core 0 + +Verify the output of according to all the core masks. From patchwork Mon Nov 14 16:54:38 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 119849 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id AF3CEA00C4; Mon, 14 Nov 2022 17:56:03 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 49F8B42D59; Mon, 14 Nov 2022 17:54:57 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 1B60442D44 for ; Mon, 14 Nov 2022 17:54:54 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 6DD67243CCC; Mon, 14 Nov 2022 17:54:53 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from 
lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id sZGmlLkky3bw; Mon, 14 Nov 2022 17:54:52 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id E600C1703B8; Mon, 14 Nov 2022 17:54:44 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v2 10/10] dts: add hello world testsuite Date: Mon, 14 Nov 2022 16:54:38 +0000 Message-Id: <20221114165438.1133783-11-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> <20221114165438.1133783-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The testsuite implements the testcases defined in the corresponding test plan. 
Signed-off-by: Juraj Linkeš --- dts/framework/remote_session/os/os_session.py | 16 +++++- dts/framework/testbed_model/__init__.py | 1 + dts/framework/testbed_model/node/sut_node.py | 11 ++++ dts/tests/TestSuite_hello_world.py | 53 +++++++++++++++++++ 4 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 dts/tests/TestSuite_hello_world.py diff --git a/dts/framework/remote_session/os/os_session.py b/dts/framework/remote_session/os/os_session.py index f84f3ce63c..1548e3c6c8 100644 --- a/dts/framework/remote_session/os/os_session.py +++ b/dts/framework/remote_session/os/os_session.py @@ -9,7 +9,7 @@ from framework.config import CPU, Architecture, NodeConfiguration from framework.logger import DTSLOG from framework.remote_session.factory import create_remote_session -from framework.remote_session.remote_session import RemoteSession +from framework.remote_session.remote_session import CommandResult, RemoteSession from framework.settings import SETTINGS from framework.utils import EnvVarsDict @@ -49,6 +49,20 @@ def is_alive(self) -> bool: """ return self.remote_session.is_alive() + def send_command( + self, + command: str, + timeout: float, + verify: bool = False, + env: EnvVarsDict | None = None, + ) -> CommandResult: + """ + An all-purpose API in case the command to be executed is already + OS-agnostic, such as when the path to the executed command has been + constructed beforehand. 
+ """ + return self.remote_session.send_command(command, timeout, verify, env) + @abstractmethod def guess_dpdk_remote_dir(self, remote_dir) -> PurePath: """ diff --git a/dts/framework/testbed_model/__init__.py b/dts/framework/testbed_model/__init__.py index 13c29c59c8..0a4862d7d6 100644 --- a/dts/framework/testbed_model/__init__.py +++ b/dts/framework/testbed_model/__init__.py @@ -8,4 +8,5 @@ # pylama:ignore=W0611 +from .hw import CPUAmount, CPUAmountFilter, CPUListFilter, CPUList from .node import Node, SutNode diff --git a/dts/framework/testbed_model/node/sut_node.py b/dts/framework/testbed_model/node/sut_node.py index ff3be845b4..d56f7467ba 100644 --- a/dts/framework/testbed_model/node/sut_node.py +++ b/dts/framework/testbed_model/node/sut_node.py @@ -9,6 +9,7 @@ from framework.config import CPU, BuildTargetConfiguration, CPUList, NodeConfiguration from framework.remote_session import OSSession +from framework.remote_session.remote_session import CommandResult from framework.settings import SETTINGS from framework.testbed_model.hw import CPUAmount, CPUListFilter from framework.utils import EnvVarsDict, skip_setup @@ -224,6 +225,16 @@ def create_eal_parameters( return eal_str + def run_dpdk_app( + self, app_path: PurePath, eal_args: str, timeout: float = 30 + ) -> CommandResult: + """ + Run DPDK application on the remote node. + """ + return self.main_session.send_command( + f"{app_path} {eal_args}", timeout, verify=True + ) + class _EalParameter(object): def __init__( diff --git a/dts/tests/TestSuite_hello_world.py b/dts/tests/TestSuite_hello_world.py new file mode 100644 index 0000000000..d3661bb243 --- /dev/null +++ b/dts/tests/TestSuite_hello_world.py @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation + +""" +DPDK Test suite. +Test HelloWorld example. 
+""" + +from framework.test_case import TestCase +from framework.testbed_model import CPUAmount, CPUAmountFilter, CPUList + + +class TestHelloWorld(TestCase): + def set_up_all(self): + """ + Run at the start of each test suite. + hello_world Prerequisites: + helloworld build pass + """ + self.app_helloworld_path = self.sut_node.build_dpdk_app("helloworld") + + def test_hello_world_single_core(self): + """ + Run hello world on single lcores + Only received hello message from core0 + """ + + # get the mask for the first core + cpu_amount = CPUAmount(1, 1, 1) + cores = CPUAmountFilter(self.sut_node.cpus, cpu_amount).filter() + eal_para = self.sut_node.create_eal_parameters(core_filter_specifier=cpu_amount) + result = self.sut_node.run_dpdk_app(self.app_helloworld_path, eal_para) + self.verify( + f"hello from core {str(cores[0]) in result.stdout}", + f"EAL not started on core{cores[0]}", + ) + + def test_hello_world_all_cores(self): + """ + Run hello world on all lcores + Received hello message from all lcores + """ + + # get the maximum logical core number + eal_para = self.sut_node.create_eal_parameters( + core_filter_specifier=CPUList(self.sut_node.cpus) + ) + result = self.sut_node.run_dpdk_app(self.app_helloworld_path, eal_para, 50) + for core in self.sut_node.cpus: + self.verify( + f"hello from core {str(core) in result.stdout}", + f"EAL not started on core{core}", + )