From patchwork Wed Aug 24 16:24:45 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115385 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7F8BFA0543; Wed, 24 Aug 2022 18:25:04 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id F3139427F2; Wed, 24 Aug 2022 18:25:00 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 389964067B for ; Wed, 24 Aug 2022 18:24:59 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 1B611CD261; Wed, 24 Aug 2022 18:24:58 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id B2Ov44iDuPcn; Wed, 24 Aug 2022 18:24:55 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 2AB34CD26A; Wed, 24 Aug 2022 18:24:55 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 01/10] dts: hello world config options Date: Wed, 24 Aug 2022 16:24:45 +0000 Message-Id: <20220824162454.394285-2-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org There are two categories of new config options: DPDK build/config options and testing options. 
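For illustration, a minimal standalone sketch of the string-backed enum pattern the new config module relies on: overriding _generate_next_value_() makes auto() return the member name instead of an integer, so the plain strings in conf.yaml ("linux", "gcc", "x86_64", ...) map directly onto enum members when the YAML is parsed.

    from enum import Enum, auto, unique

    class StrEnum(Enum):
        @staticmethod
        def _generate_next_value_(name, start, count, last_values):
            # auto() will use the member name as its value
            return name

    @unique
    class OS(StrEnum):
        linux = auto()
        freebsd = auto()
        windows = auto()

    # NodeConfiguration.from_dict() does OS(d["os"]); with d["os"] == "linux":
    assert OS("linux") is OS.linux
    assert OS.linux.value == "linux"
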
Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 20 ++- dts/framework/config/__init__.py | 141 ++++++++++++++++++++- dts/framework/config/conf_yaml_schema.json | 139 +++++++++++++++++++- 3 files changed, 289 insertions(+), 11 deletions(-) diff --git a/dts/conf.yaml b/dts/conf.yaml index cb12ea3d0f..36399c6e74 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -1,7 +1,21 @@ executions: - - system_under_test: "SUT 1" + - target_descriptions: + - cpu: native + compiler: gcc + arch: x86_64 + os: linux + perf: false + func: true + test_suites: + - hello_world + system_under_test: "SUT 1" nodes: - name: "SUT 1" - hostname: "SUT IP address or hostname" + hostname: sut1.change.me.localhost + os: linux user: root - password: "Leave blank to use SSH keys" + password: a + arch: x86_64 + bypass_core0: true + cores: 1 + memory_channels: 4 diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index a0fdffcd77..158baac143 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -6,11 +6,14 @@ """ Generic port and topology nodes configuration file load function """ +import abc import json import os.path import pathlib +from abc import abstractmethod from dataclasses import dataclass -from typing import Any, Optional +from enum import Enum, auto, unique +from typing import Any, Optional, TypedDict, Union import warlock import yaml @@ -18,6 +21,53 @@ from framework.settings import SETTINGS +class StrEnum(Enum): + @staticmethod + def _generate_next_value_( + name: str, start: int, count: int, last_values: object + ) -> str: + return name + + +@unique +class OS(StrEnum): + linux = auto() + freebsd = auto() + windows = auto() + + +@unique +class Architecture(StrEnum): + i686 = auto() + x86_64 = auto() + x86_32 = auto() + arm64 = auto() + ppc64le = auto() + + +@unique +class Compiler(StrEnum): + gcc = auto() + clang = auto() + icc = auto() + msvc = auto() + + +@unique +class CPU(StrEnum): + native = auto() + armv8a = auto() + dpaa2 = auto() + thunderx = auto() + xgene1 = auto() + + +@unique +class NodeType(StrEnum): + physical = auto() + virtual = auto() + + # Slots enables some optimizations, by pre-allocating space for the defined # attributes in the underlying data structure. 
# @@ -28,7 +78,12 @@ class NodeConfiguration: name: str hostname: str user: str + os: OS + arch: Architecture password: Optional[str] + bypass_core0: bool + cores: str + memory_channels: int @staticmethod def from_dict(d: dict) -> "NodeConfiguration": @@ -36,20 +91,101 @@ def from_dict(d: dict) -> "NodeConfiguration": name=d["name"], hostname=d["hostname"], user=d["user"], + os=OS(d["os"]), + arch=Architecture(d["arch"]), password=d.get("password"), + bypass_core0=d.get("bypass_core0", False), + cores=d["cores"], + memory_channels=d["memory_channels"], ) +@dataclass(slots=True, frozen=True) +class TargetDescription: + cpu: CPU + compiler: Compiler + arch: Architecture + os: OS + + @staticmethod + def from_dict(d: dict) -> "TargetDescription": + return TargetDescription( + cpu=CPU(d["cpu"]), + compiler=Compiler(d["compiler"]), + arch=Architecture(d["arch"]), + os=OS(d["os"]), + ) + + def __str__(self): + return f"{self.arch}-{self.os}-{self.cpu}-{self.compiler}" + + +class TestSuiteConfigDict(TypedDict): + suite: str + cases: list[str] + + +# https://github.com/python/mypy/issues/5374 +@dataclass(slots=True, frozen=True) # type: ignore +class TestSuiteConfig(abc.ABC): + test_suite: str + + @staticmethod + def from_dict( + entry: str | TestSuiteConfigDict, + ) -> Union["AllTestCasesTestSuiteConfig", "SelectedTestCasesTestSuiteConfig"]: + if isinstance(entry, str): + return AllTestCasesTestSuiteConfig(test_suite=entry) + elif isinstance(entry, dict): + return SelectedTestCasesTestSuiteConfig( + test_suite=entry["suite"], test_cases=entry["cases"] + ) + else: + raise TypeError(f"{type(entry)} is not valid for a test suite config.") + + @abstractmethod + def get_requested_test_cases(self) -> Optional[list[str]]: + raise NotImplementedError() + + +@dataclass(slots=True, frozen=True) +class AllTestCasesTestSuiteConfig(TestSuiteConfig): + def get_requested_test_cases(self) -> Optional[list[str]]: + return None + + +@dataclass(slots=True, frozen=True) +class SelectedTestCasesTestSuiteConfig(TestSuiteConfig): + test_cases: list[str] + + def get_requested_test_cases(self) -> Optional[list[str]]: + return self.test_cases + + @dataclass(slots=True, frozen=True) class ExecutionConfiguration: + target_descriptions: list[TargetDescription] + perf: bool + func: bool + test_suites: list[TestSuiteConfig] system_under_test: NodeConfiguration @staticmethod def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": + target_descriptions: list[TargetDescription] = list( + map(TargetDescription.from_dict, d["target_descriptions"]) + ) + test_suites: list[TestSuiteConfig] = list( + map(TestSuiteConfig.from_dict, d["test_suites"]) + ) sut_name = d["system_under_test"] assert sut_name in node_map, f"Unknown SUT {sut_name} in execution {d}" return ExecutionConfiguration( + target_descriptions=target_descriptions, + perf=d["perf"], + func=d["func"], + test_suites=test_suites, system_under_test=node_map[sut_name], ) @@ -57,6 +193,7 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": @dataclass(slots=True, frozen=True) class Configuration: executions: list[ExecutionConfiguration] + nodes: list[NodeConfiguration] @staticmethod def from_dict(d: dict) -> "Configuration": @@ -74,7 +211,7 @@ def from_dict(d: dict) -> "Configuration": ) ) - return Configuration(executions=executions) + return Configuration(executions=executions, nodes=nodes) def load_config() -> Configuration: diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index 
04b2bec3a5..d1cc990fd5 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -6,13 +6,88 @@ "type": "string", "description": "A unique identifier for a node" }, - "node_role": { + "OS": { "type": "string", - "description": "The role a node plays in DTS", "enum": [ - "system_under_test", - "traffic_generator" + "linux" ] + }, + "ARCH": { + "type": "string", + "enum": [ + "x86_64" + ] + }, + "compiler": { + "type": "string", + "enum": [ + "gcc", + "clang", + "icc", + "mscv" + ] + }, + "cpu": { + "type": "string", + "description": "Native should be the default on x86", + "enum": [ + "native", + "armv8a", + "dpaa2", + "thunderx", + "xgene1" + ] + }, + "target": { + "type": "object", + "description": "Targets supported by DTS", + "properties": { + "arch": { + "type": "string", + "enum": [ + "ALL", + "x86_64", + "arm64", + "ppc64le", + "other" + ] + }, + "cpu": { + "$ref": "#/definitions/cpu" + }, + "os": { + "$ref": "#/definitions/OS" + }, + "compiler": { + "$ref": "#/definitions/compiler" + } + }, + "additionalProperties": false + }, + "test_suite": { + "type": "string", + "enum": [ + "hello_world" + ] + }, + "test_target": { + "type": "object", + "properties": { + "suite": { + "$ref": "#/definitions/test_suite" + }, + "cases": { + "type": "array", + "items": { + "type": "string" + }, + "minimum": 1 + } + }, + "required": [ + "suite" + ], + "additionalProperties": false } }, "type": "object", @@ -34,16 +109,36 @@ "type": "string", "description": "The user to access this node with." }, + "os": { + "$ref": "#/definitions/OS" + }, + "arch": { + "$ref": "#/definitions/ARCH" + }, "password": { "type": "string", "description": "The password to use on this node. SSH keys are preferred." + }, + "bypass_core0": { + "type": "boolean", + "description": "Indicate whether DPDK should omit using the first core or not." + }, + "cores": { + "type": "string", + "description": "Comma-separated list of cores to use, e.g.: 1,2,3,4,5,18-22" + }, + "memory_channels": { + "type": "integer", + "description": "How many memory channels to use." 
} }, "additionalProperties": false, "required": [ "name", + "os", + "user", "hostname", - "user" + "arch" ] }, "minimum": 1 @@ -55,11 +150,43 @@ "properties": { "system_under_test": { "$ref": "#/definitions/node_name" + }, + "target_descriptions": { + "type": "array", + "items": { + "$ref": "#/definitions/target" + }, + "minimum": 1 + }, + "test_suites": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/test_suite" + }, + { + "$ref": "#/definitions/test_target" + } + ] + } + }, + "perf": { + "type": "boolean", + "description": "Enable performance testing" + }, + "func": { + "type": "boolean", + "description": "Enable functional testing" } }, "additionalProperties": false, "required": [ - "system_under_test" + "system_under_test", + "target_descriptions", + "perf", + "func", + "test_suites" ] }, "minimum": 1 From patchwork Wed Aug 24 16:24:46 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115386 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A6C08A0543; Wed, 24 Aug 2022 18:25:10 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id C0ED84280C; Wed, 24 Aug 2022 18:25:03 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 4695E41144 for ; Wed, 24 Aug 2022 18:25:00 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 1DFCDCD26A; Wed, 24 Aug 2022 18:24:59 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id 2qi_Xs7zdonV; Wed, 24 Aug 2022 18:24:58 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id AD333CD26D; Wed, 24 Aug 2022 18:24:55 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 02/10] dts: hello world cli parameters and env vars Date: Wed, 24 Aug 2022 16:24:46 +0000 Message-Id: <20220824162454.394285-3-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add command line arguments (and the corresponding env variables) that specify DPDK build and test execution workflow. Also split the configuration into two parts, one that can be changed during runtime and one that can't. We will need to change the git refspec to a DPDK tarball path when support is added for building DPDK from the repo. 
Signed-off-by: Juraj Linkeš --- dts/framework/logger.py | 9 ++-- dts/framework/settings.py | 96 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 96 insertions(+), 9 deletions(-) diff --git a/dts/framework/logger.py b/dts/framework/logger.py index 920ce0fb15..15cae3e4f9 100644 --- a/dts/framework/logger.py +++ b/dts/framework/logger.py @@ -8,6 +8,8 @@ import os.path from typing import TypedDict +from .settings import SETTINGS + """ DTS logger module with several log level. DTS framework and TestSuite log will saved into different log files. @@ -66,10 +68,9 @@ def __init__(self, logger: logging.Logger, node: str = "suite"): self.logger.addHandler(sh) self.sh = sh - if not os.path.exists("output"): - os.mkdir("output") + logging_file_prefix = os.path.join(SETTINGS.output_dir, node) - fh = logging.FileHandler(f"output/{node}.log") + fh = logging.FileHandler(f"{logging_file_prefix}.log") fh.setFormatter( logging.Formatter( fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s", @@ -83,7 +84,7 @@ def __init__(self, logger: logging.Logger, node: str = "suite"): # This outputs EVERYTHING, intended for post-mortem debugging # Also optimized for processing via AWK (awk -F '|' ...) - verbose_handler = logging.FileHandler(f"output/{node}.verbose.log") + verbose_handler = logging.FileHandler(f"{logging_file_prefix}.verbose.log") verbose_handler.setFormatter( logging.Formatter( fmt="%(asctime)s|%(name)s|%(levelname)s|%(pathname)s|%(lineno)d|%(funcName)s|" diff --git a/dts/framework/settings.py b/dts/framework/settings.py index c9621d4e3d..1ff3af4438 100644 --- a/dts/framework/settings.py +++ b/dts/framework/settings.py @@ -7,6 +7,7 @@ import argparse import os from dataclasses import dataclass +from enum import Enum, unique from typing import Any @@ -38,10 +39,40 @@ def wrapper(**kwargs) -> _EnvironmentArgument: @dataclass(slots=True, frozen=True) -class _Settings: +class _EnvSettings: config_file_path: str + compile_timeout: int timeout: float verbose: bool + output_dir: str + skip_setup: bool + test_cases: list + re_run: int + remote_dpdk_dir: str + + +@dataclass(slots=True) +class _RuntimeSettings: + dpdk_ref: str + + +class _Settings(_EnvSettings, _RuntimeSettings): + pass + + +@unique +class DTSRuntimeErrors(Enum): + NO_ERR = 0 + GENERIC_ERR = 1 + DPDK_BUILD_ERR = (2,) + SUT_SETUP_ERR = (3,) + TG_SETUP_ERR = (4,) + SUITE_SETUP_ERR = (5,) + SUITE_EXECUTE_ERR = 6 + + +# TODO singleton +DTSRuntimeError: DTSRuntimeErrors = DTSRuntimeErrors.NO_ERR def _get_parser() -> argparse.ArgumentParser: @@ -63,6 +94,14 @@ def _get_parser() -> argparse.ArgumentParser: help="[DTS_TIMEOUT] The default timeout for all DTS operations except for compiling DPDK.", ) + parser.add_argument( + "--compile-timeout", + action=_env_arg("DTS_COMPILE_TIMEOUT"), + default=1200, + required=False, + help="[DTS_COMPILE_TIMEOUT] The timeout for compiling DPDK.", + ) + parser.add_argument( "-v", "--verbose", @@ -72,15 +111,62 @@ def _get_parser() -> argparse.ArgumentParser: help="[DTS_VERBOSE] Set to 'Y' to enable verbose output, logging all messages to the console.", ) + parser.add_argument( + "--dpdk-ref", + "--git", + "--snapshot", + action=_env_arg("DTS_DPDK_REF"), + default="dep/dpdk.tar.gz", + help="[DTS_DPDK_REF] Reference to DPDK source code, " + "can be either a path to a tarball or a git refspec. 
" + "In case of a tarball, it will be extracted in the same directory.", + ) + + parser.add_argument( + "--output-dir", + "--output", + action=_env_arg("DTS_OUTPUT_DIR"), + default="output", + help="[DTS_OUTPUT_DIR] Output directory where dts logs and results are saved.", + ) + + parser.add_argument( + "-s", + "--skip-setup", + action=_env_arg("DTS_SKIP_SETUP"), + help="[DTS_SKIP_SETUP] Set to 'Y' to skip all setup steps on SUT and TG nodes.", + ) + + parser.add_argument( + "--test-cases", + action=_env_arg("DTS_TESTCASES"), + help="[DTS_TESTCASES] Comma-separated list of testcases to execute", + ) + + parser.add_argument( + "--re-run", + "--re_run", + action=_env_arg("DTS_RERUN"), + default=0, + help="[DTS_RERUN] Re-run tests the specified amount of times if a test failure occurs", + ) + return parser def _get_settings() -> _Settings: - args = _get_parser().parse_args() + parsed_args = _get_parser().parse_args() return _Settings( - config_file_path=args.config_file, - timeout=float(args.timeout), - verbose=(args.verbose == "Y"), + config_file_path=parsed_args.config_file, + compile_timeout=parsed_args.compile_timeout, + timeout=parsed_args.timeout, + verbose=(parsed_args.verbose == "Y"), + output_dir=parsed_args.output_dir, + skip_setup=(parsed_args.skip_setup == "Y"), + test_cases=parsed_args.test_cases.split(","), + re_run=int(parsed_args.re_run), + remote_dpdk_dir="/tmp/", + dpdk_ref=parsed_args.dpdk_ref, ) From patchwork Wed Aug 24 16:24:47 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115387 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 63DE6A0543; Wed, 24 Aug 2022 18:25:17 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A9C184281C; Wed, 24 Aug 2022 18:25:04 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id A98474280C for ; Wed, 24 Aug 2022 18:25:01 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 30500CD273; Wed, 24 Aug 2022 18:25:00 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id rM5oXeHUylfV; Wed, 24 Aug 2022 18:24:59 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 28774CD26F; Wed, 24 Aug 2022 18:24:56 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 03/10] dts: ssh connection additions for hello world Date: Wed, 24 Aug 2022 16:24:47 +0000 Message-Id: <20220824162454.394285-4-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: 
dev-bounces@dpdk.org SCP is needed to transfer DPDK tarballs between nodes. Also add keepalive method that testcases use. Signed-off-by: Juraj Linkeš --- dts/framework/ssh_connection.py | 19 ++++++++++ dts/framework/ssh_pexpect.py | 61 +++++++++++++++++++++++++++++++-- 2 files changed, 78 insertions(+), 2 deletions(-) diff --git a/dts/framework/ssh_connection.py b/dts/framework/ssh_connection.py index bbf7c8ef01..ec7333e565 100644 --- a/dts/framework/ssh_connection.py +++ b/dts/framework/ssh_connection.py @@ -8,6 +8,7 @@ from typing import Any, Optional from .logger import DTSLOG +from .settings import SETTINGS from .ssh_pexpect import SSHPexpect @@ -68,3 +69,21 @@ def close(self, force: bool = False) -> None: self.logger.logger_exit() self.session.close(force) + + def check_available(self) -> bool: + MAGIC_STR = "DTS_CHECK_SESSION" + out = self.session.send_command("echo %s" % MAGIC_STR, timeout=0.1) + # if not available, try to send ^C and check again + if MAGIC_STR not in out: + self.logger.info("Try to recover session...") + self.session.send_command("^C", timeout=SETTINGS.timeout) + out = self.session.send_command("echo %s" % MAGIC_STR, timeout=0.1) + if MAGIC_STR not in out: + return False + + return True + + def copy_file_to( + self, src: str, dst: str = "~/", password: str = "", node_session: Any = None + ) -> None: + self.session.copy_file_to(src, dst, password, node_session) diff --git a/dts/framework/ssh_pexpect.py b/dts/framework/ssh_pexpect.py index e8f64515c0..b8eb10025e 100644 --- a/dts/framework/ssh_pexpect.py +++ b/dts/framework/ssh_pexpect.py @@ -5,11 +5,16 @@ # import time -from typing import Optional +from typing import Any, Optional +import pexpect from pexpect import pxssh -from .exception import SSHConnectionException, SSHSessionDeadException, TimeoutException +from .exception import ( + SSHConnectionException, + SSHSessionDeadException, + TimeoutException, +) from .logger import DTSLOG from .utils import GREEN, RED @@ -203,3 +208,55 @@ def close(self, force: bool = False) -> None: def isalive(self) -> bool: return self.session.isalive() + + def copy_file_to( + self, src: str, dst: str = "~/", password: str = "", node_session: Any = None + ) -> None: + """ + Sends a local file to a remote place. 
+ """ + command: str + if ":" in self.node: + command = "scp -v -P {0} -o NoHostAuthenticationForLocalhost=yes {1} {2}@{3}:{4}".format( + str(self.port), src, self.username, self.ip, dst + ) + else: + command = "scp -v {0} {1}@{2}:{3}".format( + src, self.username, self.node, dst + ) + if password == "": + self._spawn_scp(command, self.password, node_session) + else: + self._spawn_scp(command, password, node_session) + + def _spawn_scp(self, scp_cmd: str, password: str, node_session: Any) -> None: + """ + Transfer a file with SCP + """ + self.logger.info(scp_cmd) + # if node_session is not None, copy file from/to node env + # if node_session is None, copy file from/to current dts env + p: pexpect.spawn + if node_session is not None: + node_session.session.clean_session() + node_session.session.__sendline(scp_cmd) + p = node_session.session.session + else: + p = pexpect.spawn(scp_cmd) + time.sleep(0.5) + ssh_newkey: str = "Are you sure you want to continue connecting" + i: int = p.expect( + [ssh_newkey, "[pP]assword", "# ", pexpect.EOF, pexpect.TIMEOUT], 120 + ) + if i == 0: # add once in trust list + p.sendline("yes") + i = p.expect([ssh_newkey, "[pP]assword", pexpect.EOF], 2) + + if i == 1: + time.sleep(0.5) + p.sendline(password) + p.expect("Exit status 0", 60) + if i == 4: + self.logger.error("SCP TIMEOUT error %d" % i) + if node_session is None: + p.close() From patchwork Wed Aug 24 16:24:48 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115388 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E198EA0543; Wed, 24 Aug 2022 18:25:23 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 134BB4282B; Wed, 24 Aug 2022 18:25:06 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 5A3844280C for ; Wed, 24 Aug 2022 18:25:02 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 7553ACD278; Wed, 24 Aug 2022 18:25:01 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id EY9xfvrB5A99; Wed, 24 Aug 2022 18:25:00 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 99D91CD270; Wed, 24 Aug 2022 18:24:56 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 04/10] dts: add basic node management methods Date: Wed, 24 Aug 2022 16:24:48 +0000 Message-Id: <20220824162454.394285-5-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The nodes DTS is working with are 
either a system under test node (where DPDK runs) and a traffic generator node. The added methods are common to both system under test nodes and traffic generator nodes. Signed-off-by: Juraj Linkeš --- dts/framework/node.py | 395 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 387 insertions(+), 8 deletions(-) diff --git a/dts/framework/node.py b/dts/framework/node.py index e5c5454ebe..c08c79cca3 100644 --- a/dts/framework/node.py +++ b/dts/framework/node.py @@ -4,9 +4,13 @@ # Copyright(c) 2022 University of New Hampshire # +import dataclasses +import re +from abc import ABC from typing import Optional -from .config import NodeConfiguration +from framework.config import OS, NodeConfiguration + from .logger import DTSLOG, getLogger from .settings import SETTINGS from .ssh_connection import SSHConnection @@ -16,22 +20,41 @@ """ -class Node(object): +@dataclasses.dataclass(slots=True, frozen=True) +class CPUCore: + thread: str + socket: str + core: int + + +class Node(ABC): """ Basic module for node management. This module implements methods that manage a node, such as information gathering (of CPU/PCI/NIC) and environment setup. """ - _config: NodeConfiguration + name: str + skip_setup: bool + sessions: list[SSHConnection] + default_hugepages_cleared: bool + prefix_list: list[str] + cores: list[CPUCore] + number_of_cores: int logger: DTSLOG main_session: SSHConnection - name: str + _config: NodeConfiguration _other_sessions: list[SSHConnection] def __init__(self, node_config: NodeConfiguration): self._config = node_config self.name = node_config.name + self.skip_setup = SETTINGS.skip_setup + self.default_hugepages_cleared = False + self.prefix_list = [] + self.cores = [] + self.number_of_cores = 0 + self._dpdk_dir = None self.logger = getLogger(self.name) self.logger.info(f"Created node: {self.name}") @@ -42,22 +65,23 @@ def __init__(self, node_config: NodeConfiguration): self.get_username(), self.get_password(), ) + self._other_sessions = [] def get_ip_address(self) -> str: """ - Get SUT's ip address. + Get Node's ip address. """ return self._config.hostname def get_password(self) -> Optional[str]: """ - Get SUT's login password. + Get Node's login password. """ return self._config.password def get_username(self) -> str: """ - Get SUT's login username. + Get Node's login username. """ return self._config.user @@ -66,6 +90,7 @@ def send_expect( command: str, expected: str, timeout: float = SETTINGS.timeout, + alt_session: bool = False, verify: bool = False, trim_whitespace: bool = True, ) -> str | int: @@ -81,19 +106,373 @@ def send_expect( if trim_whitespace: expected = expected.strip() + if alt_session and len(self._other_sessions): + return self._other_sessions[0].send_expect( + command, expected, timeout, verify + ) + return self.main_session.send_expect(command, expected, timeout, verify) - def send_command(self, cmds: str, timeout: float = SETTINGS.timeout) -> str: + def send_command( + self, cmds: str, timeout: float = SETTINGS.timeout, alt_session: bool = False + ) -> str: """ Send commands to node and return string before timeout. """ + if alt_session and len(self._other_sessions): + return self._other_sessions[0].send_command(cmds, timeout) + return self.main_session.send_command(cmds, timeout) + def get_session_output(self, timeout: float = SETTINGS.timeout): + """ + Get session output message before timeout + """ + return self.main_session.get_session_before(timeout) + + def get_total_huge_pages(self): + """ + Get the huge page number of Node. 
+ """ + huge_pages = self.send_expect( + "awk '/HugePages_Total/ { print $2 }' /proc/meminfo", "# ", alt_session=True + ) + if huge_pages != "": + return int(huge_pages.split()[0]) + return 0 + + def mount_huge_pages(self): + """ + Mount hugepage file system on Node. + """ + self.send_expect("umount `awk '/hugetlbfs/ { print $2 }' /proc/mounts`", "# ") + out = self.send_expect("awk '/hugetlbfs/ { print $2 }' /proc/mounts", "# ") + # only mount hugepage when no hugetlbfs mounted + if not len(out): + self.send_expect("mkdir -p /mnt/huge", "# ") + self.send_expect("mount -t hugetlbfs nodev /mnt/huge", "# ") + + def strip_hugepage_path(self): + mounts = self.send_expect("cat /proc/mounts |grep hugetlbfs", "# ") + infos = mounts.split() + if len(infos) >= 2: + return infos[1] + else: + return "" + + def set_huge_pages(self, huge_pages, numa=""): + """ + Set numbers of huge pages + """ + page_size = self.send_expect( + "awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# " + ) + + if not numa: + self.send_expect( + "echo %d > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages" + % (huge_pages, page_size), + "# ", + 5, + ) + else: + # sometimes we set hugepage on kernel cmdline, so we clear it + if not self.default_hugepages_cleared: + self.send_expect( + "echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages" + % (page_size), + "# ", + 5, + ) + self.default_hugepages_cleared = True + + # some platform not support numa, example VM SUT + try: + self.send_expect( + "echo %d > /sys/devices/system/node/%s/hugepages/hugepages-%skB/nr_hugepages" + % (huge_pages, numa, page_size), + "# ", + 5, + ) + except: + self.logger.warning("set %d hugepage on %s error" % (huge_pages, numa)) + self.send_expect( + "echo %d > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages" + % (huge_pages.page_size), + "# ", + 5, + ) + + def get_dpdk_pids(self, prefix_list, alt_session): + """ + get all dpdk applications on Node. 
+ """ + file_directories = [ + "/var/run/dpdk/%s/config" % file_prefix for file_prefix in prefix_list + ] + pids = [] + pid_reg = r"p(\d+)" + for config_file in file_directories: + # Covers case where the process is run as a unprivileged user and does not generate the file + isfile = self.send_expect( + "ls -l {}".format(config_file), "# ", 20, alt_session + ) + if isfile: + cmd = "lsof -Fp %s" % config_file + out = self.send_expect(cmd, "# ", 20, alt_session) + if len(out): + lines = out.split("\r\n") + for line in lines: + m = re.match(pid_reg, line) + if m: + pids.append(m.group(1)) + for pid in pids: + self.send_expect("kill -9 %s" % pid, "# ", 20, alt_session) + self.get_session_output(timeout=2) + + hugepage_info = [ + "/var/run/dpdk/%s/hugepage_info" % file_prefix + for file_prefix in prefix_list + ] + for hugepage in hugepage_info: + # Covers case where the process is run as a unprivileged user and does not generate the file + isfile = self.send_expect( + "ls -l {}".format(hugepage), "# ", 20, alt_session + ) + if isfile: + cmd = "lsof -Fp %s" % hugepage + out = self.send_expect(cmd, "# ", 20, alt_session) + if len(out) and "No such file or directory" not in out: + self.logger.warning("There are some dpdk process not free hugepage") + self.logger.warning("**************************************") + self.logger.warning(out) + self.logger.warning("**************************************") + + # remove directory + directorys = ["/var/run/dpdk/%s" % file_prefix for file_prefix in prefix_list] + for directory in directorys: + cmd = "rm -rf %s" % directory + self.send_expect(cmd, "# ", 20, alt_session) + + # delete hugepage on mnt path + if getattr(self, "hugepage_path", None): + for file_prefix in prefix_list: + cmd = "rm -rf %s/%s*" % (self.hugepage_path, file_prefix) + self.send_expect(cmd, "# ", 20, alt_session) + + def kill_all(self, alt_session=True): + """ + Kill all dpdk applications on Node. + """ + if "Traffic" in str(self): + self.logger.info("kill_all: called by tg") + pass + else: + if self.prefix_list: + self.logger.info("kill_all: called by SUT and prefix list has value.") + self.get_dpdk_pids(self.prefix_list, alt_session) + # init prefix_list + self.prefix_list = [] + else: + self.logger.info("kill_all: called by SUT and has no prefix list.") + out = self.send_command( + "ls -l /var/run/dpdk |awk '/^d/ {print $NF}'", + timeout=0.5, + alt_session=True, + ) + # the last directory is expect string, eg: [PEXPECT]# + if out != "": + dir_list = out.split("\r\n") + self.get_dpdk_pids(dir_list[:-1], alt_session) + + def get_os(self) -> OS: + return self._config.os + + def init_core_list(self): + """ + Load or create core information of Node. + """ + if not self.cores or not self.number_of_cores: + self.init_core_list_uncached() + + def init_core_list_uncached(self): + """ + Scan cores on Node and create core information list. + """ + init_core_list_uncached = getattr( + self, "init_core_list_uncached_%s" % self.get_os() + ) + init_core_list_uncached() + + def init_core_list_uncached_linux(self): + """ + Scan cores in linux and create core information list. 
+ """ + self.cores = [] + + cpuinfo = self.send_expect( + "lscpu -p=CPU,CORE,SOCKET,NODE|grep -v \#", "#", alt_session=True + ) + + cpuinfo = [i for i in cpuinfo.split() if re.match("^\d.+", i)] + # haswell cpu on cottonwood core id not correct + # need additional coremap for haswell cpu + core_id = 0 + coremap = {} + for line in cpuinfo: + (thread, core, socket, node) = line.split(",")[0:4] + + if core not in list(coremap.keys()): + coremap[core] = core_id + core_id += 1 + + if self._config.bypass_core0 and core == "0" and socket == "0": + self.logger.info("Core0 bypassed") + continue + if self._config.arch == "arm64" or self._config.arch == "ppc64": + self.cores.append( + CPUCore(thread=thread, socket=node, core=coremap[core]) + ) + else: + self.cores.append( + CPUCore(thread=thread, socket=socket, core=coremap[core]) + ) + + self.number_of_cores = len(self.cores) + + def get_core_list(self, config, socket=-1, from_last=False): + """ + Get lcore array according to the core config like "all", "1S/1C/1T". + We can specify the physical CPU socket by the "socket" parameter. + """ + if config == "all": + cores = [] + if socket != -1: + for core in self.cores: + if int(core.socket) == socket: + cores.append(core.thread) + else: + cores = [core.thread for core in self.cores] + return cores + + m = re.match("([1234])S/([0-9]+)C/([12])T", config) + + if m: + nr_sockets = int(m.group(1)) + nr_cores = int(m.group(2)) + nr_threads = int(m.group(3)) + + partial_cores = self.cores + + # If not specify socket sockList will be [0,1] in numa system + # If specify socket will just use the socket + if socket < 0: + sockList = set([int(core.socket) for core in partial_cores]) + else: + for n in partial_cores: + if int(n.socket) == socket: + sockList = [int(n.socket)] + + if from_last: + sockList = list(sockList)[-nr_sockets:] + else: + sockList = list(sockList)[:nr_sockets] + partial_cores = [n for n in partial_cores if int(n.socket) in sockList] + thread_list = set([int(n.thread) for n in partial_cores]) + thread_list = list(thread_list) + + # filter usable core to core_list + temp = [] + for sock in sockList: + core_list = set( + [int(n.core) for n in partial_cores if int(n.socket) == sock] + ) + if from_last: + core_list = list(core_list)[-nr_cores:] + else: + core_list = list(core_list)[:nr_cores] + temp.extend(core_list) + + core_list = temp + + # if system core less than request just use all cores in in socket + if len(core_list) < (nr_cores * nr_sockets): + partial_cores = self.cores + sockList = set([int(n.socket) for n in partial_cores]) + + if from_last: + sockList = list(sockList)[-nr_sockets:] + else: + sockList = list(sockList)[:nr_sockets] + partial_cores = [n for n in partial_cores if int(n.socket) in sockList] + + temp = [] + for sock in sockList: + core_list = list( + [int(n.thread) for n in partial_cores if int(n.socket) == sock] + ) + if from_last: + core_list = core_list[-nr_cores:] + else: + core_list = core_list[:nr_cores] + temp.extend(core_list) + + core_list = temp + + partial_cores = [n for n in partial_cores if int(n.core) in core_list] + temp = [] + if len(core_list) < nr_cores: + raise ValueError( + "Cannot get requested core configuration " + "requested {} have {}".format(config, self.cores) + ) + if len(sockList) < nr_sockets: + raise ValueError( + "Cannot get requested core configuration " + "requested {} have {}".format(config, self.cores) + ) + # recheck the core_list and create the thread_list + i = 0 + for sock in sockList: + coreList_aux = [ + int(core_list[n]) + for n 
in range((nr_cores * i), (nr_cores * i + nr_cores)) + ] + for core in coreList_aux: + thread_list = list( + [ + int(n.thread) + for n in partial_cores + if ((int(n.core) == core) and (int(n.socket) == sock)) + ] + ) + if from_last: + thread_list = thread_list[-nr_threads:] + else: + thread_list = thread_list[:nr_threads] + temp.extend(thread_list) + thread_list = temp + i += 1 + return list(map(str, thread_list)) + + def create_session(self, name: str) -> SSHConnection: + connection = SSHConnection( + self.get_ip_address(), + name, + getLogger(name, node=self.name), + self.get_username(), + self.get_password(), + ) + self._other_sessions.append(connection) + return connection + def node_exit(self) -> None: """ Recover all resource before node exit """ if self.main_session: self.main_session.close() + for session in self._other_sessions: + session.close() self.logger.logger_exit() From patchwork Wed Aug 24 16:24:49 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115390 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BC2CEA0543; Wed, 24 Aug 2022 18:25:38 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 3C70E42B76; Wed, 24 Aug 2022 18:25:08 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 47C4B4281C for ; Wed, 24 Aug 2022 18:25:04 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 27C05CD270; Wed, 24 Aug 2022 18:25:03 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id x3jAPhKq4faP; Wed, 24 Aug 2022 18:25:01 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 33F32CD272; Wed, 24 Aug 2022 18:24:57 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 05/10] dts: add system under test node Date: Wed, 24 Aug 2022 16:24:49 +0000 Message-Id: <20220824162454.394285-6-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The SUT node contains methods to configure the node and build and configure DPDK. 
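For illustration, a standalone sketch of the core-list formatting performed by _EalParameter._make_cores_param() further down: consecutive core ids are collapsed into ranges for the EAL -l option. The helper name below is made up for this sketch; the patch implements the same idea with a nested _get_consecutive_cores_range() function.

    from typing import List

    def consecutive_core_ranges(cores: List[int]) -> str:
        # Collapse e.g. [0, 1, 2, 3, 8, 9, 11] into "0-3,8-9,11".
        runs: List[List[int]] = []
        for core in sorted(set(cores)):
            if runs and core == runs[-1][1] + 1:
                runs[-1][1] = core  # extend the current consecutive run
            else:
                runs.append([core, core])  # start a new run
        return ",".join(f"{lo}-{hi}" if lo != hi else f"{lo}" for lo, hi in runs)

    # e.g. an EAL core argument for cores 0-3, 8, 9 and 11:
    print(f"-l {consecutive_core_ranges([0, 1, 2, 3, 8, 9, 11])}")  # -l 0-3,8-9,11
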
Signed-off-by: Juraj Linkeš --- dts/framework/sut_node.py | 603 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 603 insertions(+) create mode 100644 dts/framework/sut_node.py diff --git a/dts/framework/sut_node.py b/dts/framework/sut_node.py new file mode 100644 index 0000000000..c9f5e69d73 --- /dev/null +++ b/dts/framework/sut_node.py @@ -0,0 +1,603 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# + +import os +import re +import tarfile +import time +from typing import List, Optional, Union + +from framework.config import NodeConfiguration + +from .exception import ParameterInvalidException +from .node import Node +from .settings import SETTINGS + + +class SutNode(Node): + """ + A class for managing connections to the System under test, providing + methods that retrieve the necessary information about the node (such as + cpu, memory and NIC details) and configuration capabilities. + """ + + def __init__(self, node_config: NodeConfiguration): + super(SutNode, self).__init__(node_config) + self.tg_node = None + self.architecture = node_config.arch + self.prefix_subfix = ( + str(os.getpid()) + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + ) + self.hugepage_path = None + self.dpdk_version = "" + self.testpmd = None + + def prerequisites(self): + """ + Copy DPDK package to SUT and apply patch files. + """ + self.prepare_package() + self.sut_prerequisites() + + def prepare_package(self): + if not self.skip_setup: + assert os.path.isfile(SETTINGS.dpdk_ref) is True, "Invalid package" + + out = self.send_expect( + "ls -d %s" % SETTINGS.remote_dpdk_dir, "# ", verify=True + ) + if out == 2: + self.send_expect("mkdir -p %s" % SETTINGS.remote_dpdk_dir, "# ") + + out = self.send_expect( + "ls %s && cd %s" % (SETTINGS.remote_dpdk_dir, SETTINGS.remote_dpdk_dir), + "#", + verify=True, + ) + if out == -1: + raise IOError( + f"A failure occurred when creating {SETTINGS.remote_dpdk_dir} on " + f"{self}." + ) + self.main_session.copy_file_to(SETTINGS.dpdk_ref, SETTINGS.remote_dpdk_dir) + self.kill_all() + + # enable core dump + self.send_expect("ulimit -c unlimited", "#") + + with tarfile.open(SETTINGS.dpdk_ref) as dpdk_tar: + dpdk_top_dir = dpdk_tar.getnames()[0] + + remote_dpdk_top_dir = os.path.join(SETTINGS.remote_dpdk_dir, dpdk_top_dir) + + # unpack the code and change to the working folder + self.send_expect("rm -rf %s" % remote_dpdk_top_dir, "#") + + remote_dpdk_path = os.path.join( + SETTINGS.remote_dpdk_dir, os.path.basename(SETTINGS.dpdk_ref) + ) + + # unpack dpdk + out = self.send_expect( + f"tar xfm {remote_dpdk_path} -C {SETTINGS.remote_dpdk_dir}", + "# ", + 60, + verify=True, + ) + if out == -1: + raise IOError( + f"Extracting remote DPDK package {remote_dpdk_path} to " + f"{SETTINGS.remote_dpdk_dir} failed." + ) + + # check dpdk dir name is expect + out = self.send_expect("ls %s" % remote_dpdk_top_dir, "# ", 20, verify=True) + if out == -1: + raise FileNotFoundError( + f"Remote DPDK dir {remote_dpdk_top_dir} not found." + ) + + def set_target(self, target): + """ + Set env variable, these have to be setup all the time. Some tests + need to compile example apps by themselves and will fail otherwise. + Set hugepage on SUT and install modules required by DPDK. + Configure default ixgbe PMD function. 
+ """ + self.target = target + + self.set_toolchain(target) + + # set env variable + self.set_env_variable() + + if not self.skip_setup: + self.build_install_dpdk(target) + + self.setup_memory() + + def set_env_variable(self): + # These have to be setup all the time. Some tests need to compile + # example apps by themselves and will fail otherwise. + self.send_expect("export RTE_TARGET=" + self.target, "#") + self.send_expect("export RTE_SDK=`pwd`", "#") + + def build_install_dpdk(self, target): + """ + Build DPDK source code with specified target. + """ + if self.get_os() == "linux": + self.build_install_dpdk_linux_meson(target) + + def build_install_dpdk_linux_meson(self, target): + """ + Build DPDK source code on linux use meson + """ + build_time = 1800 + target_info = target.split("-") + arch = target_info[0] + toolchain = target_info[3] + + default_library = "static" + if arch == "i686": + # find the pkg-config path and set the PKG_CONFIG_LIBDIR environmental variable to point it + out = self.send_expect("find /usr -type d -name pkgconfig", "# ") + pkg_path = "" + res_path = out.split("\r\n") + for cur_path in res_path: + if "i386" in cur_path: + pkg_path = cur_path + break + assert ( + pkg_path != "" + ), "please make sure you env have the i386 pkg-config path" + + self.send_expect("export CFLAGS=-m32", "# ") + self.send_expect("export PKG_CONFIG_LIBDIR=%s" % pkg_path, "# ") + + self.send_expect("rm -rf " + target, "#") + out = self.send_expect( + "CC=%s meson -Denable_kmods=True -Dlibdir=lib --default-library=%s %s" + % (toolchain, default_library, target), + "[~|~\]]# ", + build_time, + ) + assert "FAILED" not in out, "meson setup failed ..." + + out = self.send_expect("ninja -C %s" % target, "[~|~\]]# ", build_time) + assert "FAILED" not in out, "ninja complie failed ..." + + # copy kmod file to the folder same as make + out = self.send_expect( + "find ./%s/kernel/ -name *.ko" % target, "# ", verify=True + ) + self.send_expect("mkdir -p %s/kmod" % target, "# ") + if not isinstance(out, int) and len(out) > 0: + kmod = out.split("\r\n") + for mod in kmod: + self.send_expect("cp %s %s/kmod/" % (mod, target), "# ") + + def build_dpdk_apps(self, folder): + """ + Build dpdk sample applications. + """ + if self.get_os() == "linux": + return self.build_dpdk_apps_linux_meson(folder) + + def build_dpdk_apps_linux_meson(self, folder): + """ + Build dpdk sample applications on linux use meson + """ + # icc compile need more time + if "icc" in self.target: + timeout = 300 + else: + timeout = 90 + + target_info = self.target.split("-") + arch = target_info[0] + if arch == "i686": + # find the pkg-config path and set the PKG_CONFIG_LIBDIR environmental variable to point it + out = self.send_expect("find /usr -type d -name pkgconfig", "# ") + pkg_path = "" + res_path = out.split("\r\n") + for cur_path in res_path: + if "i386" in cur_path: + pkg_path = cur_path + break + assert ( + pkg_path != "" + ), "please make sure you env have the i386 pkg-config path" + + self.send_expect("export CFLAGS=-m32", "# ", alt_session=True) + self.send_expect( + "export PKG_CONFIG_LIBDIR=%s" % pkg_path, "# ", alt_session=True + ) + + folder_info = folder.split("/") + name = folder_info[-1] + + if name == "examples": + example = "all" + else: + example = "/".join(folder_info[folder_info.index("examples") + 1 :]) + out = self.send_expect( + "meson configure -Dexamples=%s %s" % (example, self.target), "# " + ) + assert "FAILED" not in out, "Compilation error..." 
+ out = self.send_expect("ninja -C %s" % self.target, "[~|~\]]# ", timeout) + assert "FAILED" not in out, "Compilation error..." + + # verify the app build in the config path + if example != "all": + out = self.send_expect("ls %s" % self.apps_name[name], "# ", verify=True) + assert isinstance(out, str), ( + "please confirm %s app path and name in app_name.cfg" % name + ) + + return out + + def filter_cores_from_node_cfg(self) -> None: + # get core list from conf.yaml + core_list = [] + all_core_list = [str(core.core) for core in self.cores] + core_list_str = self._config.cores + if core_list_str == "": + core_list = all_core_list + split_by_comma = core_list_str.split(",") + range_cores = [] + for item in split_by_comma: + if "-" in item: + tmp = item.split("-") + range_cores.extend( + [str(i) for i in range(int(tmp[0]), int(tmp[1]) + 1)] + ) + else: + core_list.append(item) + core_list.extend(range_cores) + + abnormal_core_list = [] + for core in core_list: + if core not in all_core_list: + abnormal_core_list.append(core) + + if abnormal_core_list: + self.logger.info( + "those %s cores are out of range system, all core list of system are %s" + % (abnormal_core_list, all_core_list) + ) + raise Exception("configured cores out of range system") + + core_list = [core for core in self.cores if str(core.core) in core_list] + self.cores = core_list + self.number_of_cores = len(self.cores) + + def create_eal_parameters( + self, + fixed_prefix: bool = False, + socket: Optional[int] = None, + cores: Union[str, List[int], List[str]] = "default", + prefix: str = "", + no_pci: bool = False, + vdevs: List[str] = None, + other_eal_param: str = "", + ) -> str: + """ + generate eal parameters character string; + :param fixed_prefix: use fixed file-prefix or not, when it is true, + the file-prefix will not be added a timestamp + :param socket: the physical CPU socket index, -1 means no care cpu socket; + :param cores: set the core info, eg: + cores=[0,1,2,3], + cores=['0', '1', '2', '3'], + cores='default', + cores='1S/4C/1T', + cores='all'; + :param prefix: set file prefix string, eg: + prefix='vf'; + :param no_pci: switch of disable PCI bus eg: + no_pci=True; + :param vdevs: virtual device list, eg: + vdevs=['net_ring0', 'net_ring1']; + :param other_eal_param: user defined DPDK eal parameters, eg: + other_eal_param='--single-file-segments'; + :return: eal param string, eg: + '-c 0xf -a 0000:88:00.0 --file-prefix=dpdk_1112_20190809143420'; + if DPDK version < 20.11-rc4, eal_str eg: + '-c 0xf -w 0000:88:00.0 --file-prefix=dpdk_1112_20190809143420'; + """ + if vdevs is None: + vdevs = [] + + if socket is None: + socket = -1 + + config = { + "cores": cores, + "prefix": prefix, + "no_pci": no_pci, + "vdevs": vdevs, + "other_eal_param": other_eal_param, + } + + eal_parameter_creator = _EalParameter( + sut_node=self, fixed_prefix=fixed_prefix, socket=socket, **config + ) + eal_str = eal_parameter_creator.make_eal_param() + + return eal_str + + def set_toolchain(self, target): + """ + This looks at the current target and instantiates an attribute to + be either a NodeLinuxApp or NodeBareMetal object. These latter two + classes are private and should not be used directly by client code. 
+ """ + self.kill_all() + self.target = target + [arch, _, _, toolchain] = target.split("-") + + if toolchain == "icc": + icc_vars = os.getenv("ICC_VARS", "/opt/intel/composer_xe_2013/bin/") + icc_vars += "compilervars.sh" + + if arch == "x86_64": + icc_arch = "intel64" + elif arch == "i686": + icc_arch = "ia32" + self.send_expect("source " + icc_vars + " " + icc_arch, "# ") + + self.architecture = arch + + def sut_prerequisites(self): + """ + Prerequest function should be called before execute any test case. + Will call function to scan all lcore's information which on SUT. + Then call pci scan function to collect nic device information. + At last setup SUT' environment for validation. + """ + out = self.send_expect(f"cd {SETTINGS.remote_dpdk_dir}", "# ") + assert "No such file or directory" not in out, "Can't switch to dpdk folder!!!" + out = self.send_expect("cat VERSION", "# ") + if "No such file or directory" in out: + self.logger.error("Can't get DPDK version due to VERSION not exist!!!") + else: + self.dpdk_version = out + self.send_expect("alias ls='ls --color=none'", "#") + + self.init_core_list() + self.filter_cores_from_node_cfg() + + def setup_memory(self, hugepages=-1): + """ + Setup hugepage on SUT. + """ + function_name = "setup_memory_%s" % self.get_os() + try: + setup_memory = getattr(self, function_name) + setup_memory(hugepages) + except AttributeError: + self.logger.error("%s is not implemented" % function_name) + + def setup_memory_linux(self, hugepages=-1): + """ + Setup Linux hugepages. + """ + hugepages_size = self.send_expect( + "awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# " + ) + total_huge_pages = self.get_total_huge_pages() + numa_nodes = self.send_expect("ls /sys/devices/system/node | grep node*", "# ") + if not numa_nodes: + total_numa_nodes = -1 + else: + numa_nodes = numa_nodes.splitlines() + total_numa_nodes = len(numa_nodes) + self.logger.info(numa_nodes) + + force_socket = False + + if int(hugepages_size) < (1024 * 1024): + if hugepages <= 0: + if self.architecture == "x86_64": + arch_huge_pages = 4096 + elif self.architecture == "i686": + arch_huge_pages = 512 + force_socket = True + # set huge pagesize for x86_x32 abi target + elif self.architecture == "x86_x32": + arch_huge_pages = 256 + force_socket = True + elif self.architecture == "ppc_64": + arch_huge_pages = 512 + elif self.architecture == "arm64": + if int(hugepages_size) >= (512 * 1024): + arch_huge_pages = 8 + else: + arch_huge_pages = 2048 + else: + arch_huge_pages = 256 + else: + arch_huge_pages = hugepages + + if total_huge_pages != arch_huge_pages: + if total_numa_nodes == -1: + self.set_huge_pages(arch_huge_pages) + else: + # before all hugepage average distribution by all socket, + # but sometimes create mbuf pool on socket 0 failed when + # setup testpmd, so set all huge page on first socket + if force_socket: + self.set_huge_pages(arch_huge_pages, numa_nodes[0]) + self.logger.info("force_socket on %s" % numa_nodes[0]) + else: + # set huge pages to all numa_nodes + for numa_node in numa_nodes: + self.set_huge_pages(arch_huge_pages, numa_node) + + self.mount_huge_pages() + self.hugepage_path = self.strip_hugepage_path() + + def get_memory_channels(self): + n = self._config.memory_channels + if n is not None and n > 0: + return n + else: + return 1 + + +class _EalParameter(object): + def __init__( + self, + sut_node: SutNode, + fixed_prefix: bool, + socket: int, + cores: Union[str, List[int], List[str]], + prefix: str, + no_pci: bool, + vdevs: List[str], + other_eal_param: str, + 
): + """ + generate eal parameters character string; + :param sut_node: SUT Node; + :param fixed_prefix: use fixed file-prefix or not, when it is true, + the file-prefix will not be added a timestamp + :param socket: the physical CPU socket index, -1 means no care cpu socket; + :param cores: set the core info, eg: + cores=[0,1,2,3], + cores=['0','1','2','3'], + cores='default', + cores='1S/4C/1T', + cores='all'; + param prefix: set file prefix string, eg: + prefix='vf'; + param no_pci: switch of disable PCI bus eg: + no_pci=True; + param vdevs: virtual device list, eg: + vdevs=['net_ring0', 'net_ring1']; + param other_eal_param: user defined DPDK eal parameters, eg: + other_eal_param='--single-file-segments'; + """ + self.os = sut_node.get_os() + self.fixed_prefix = fixed_prefix + self.socket = socket + self.sut_node = sut_node + self.cores = self._validate_cores(cores) + self.prefix = prefix + self.no_pci = no_pci + self.vdevs = vdevs + self.other_eal_param = other_eal_param + + _param_validate_exception_info_template = ( + "Invalid parameter of %s about value of %s, Please reference API doc." + ) + + @staticmethod + def _validate_cores(cores: Union[str, List[int], List[str]]): + core_string_match = r"default|all|\d+S/\d+C/\d+T|$" + if isinstance(cores, list) and ( + all(map(lambda _core: type(_core) == int, cores)) + or all(map(lambda _core: type(_core) == str, cores)) + ): + return cores + elif type(cores) == str and re.match(core_string_match, cores, re.I): + return cores + else: + raise ParameterInvalidException("cores", cores) + + def _make_cores_param(self) -> str: + is_use_default_cores = ( + self.cores == "" + or isinstance(self.cores, str) + and self.cores.lower() == "default" + ) + if is_use_default_cores: + default_cores = "1S/2C/1T" + core_list = self.sut_node.get_core_list(default_cores) + else: + core_list = self._get_cores() + + def _get_consecutive_cores_range(_cores: List[int]): + _formated_core_list = [] + _tmp_cores_list = list(sorted(map(int, _cores))) + _segment = _tmp_cores_list[:1] + for _core_num in _tmp_cores_list[1:]: + if _core_num - _segment[-1] == 1: + _segment.append(_core_num) + else: + _formated_core_list.append( + f"{_segment[0]}-{_segment[-1]}" + if len(_segment) > 1 + else f"{_segment[0]}" + ) + _index = _tmp_cores_list.index(_core_num) + _formated_core_list.extend( + _get_consecutive_cores_range(_tmp_cores_list[_index:]) + ) + _segment.clear() + break + if len(_segment) > 0: + _formated_core_list.append( + f"{_segment[0]}-{_segment[-1]}" + if len(_segment) > 1 + else f"{_segment[0]}" + ) + return _formated_core_list + + return f'-l {",".join(_get_consecutive_cores_range(core_list))}' + + def _make_memory_channels(self) -> str: + param_template = "-n {}" + return param_template.format(self.sut_node.get_memory_channels()) + + def _make_no_pci_param(self) -> str: + if self.no_pci is True: + return "--no-pci" + else: + return "" + + def _make_prefix_param(self) -> str: + if self.prefix == "": + fixed_file_prefix = "dpdk" + "_" + self.sut_node.prefix_subfix + else: + fixed_file_prefix = self.prefix + if not self.fixed_prefix: + fixed_file_prefix = ( + fixed_file_prefix + "_" + self.sut_node.prefix_subfix + ) + fixed_file_prefix = self._do_os_handle_with_prefix_param(fixed_file_prefix) + return fixed_file_prefix + + def _make_vdevs_param(self) -> str: + if len(self.vdevs) == 0: + return "" + else: + _vdevs = ["--vdev " + vdev for vdev in self.vdevs] + return " ".join(_vdevs) + + def _get_cores(self) -> List[int]: + if type(self.cores) == list: + return 
self.cores + elif isinstance(self.cores, str): + return self.sut_node.get_core_list(self.cores, socket=self.socket) + + def _do_os_handle_with_prefix_param(self, file_prefix: str) -> str: + self.sut_node.prefix_list.append(file_prefix) + return "--file-prefix=" + file_prefix + + def make_eal_param(self) -> str: + _eal_str = " ".join( + [ + self._make_cores_param(), + self._make_memory_channels(), + self._make_prefix_param(), + self._make_no_pci_param(), + self._make_vdevs_param(), + # append user defined eal parameters + self.other_eal_param, + ] + ) + return _eal_str From patchwork Wed Aug 24 16:24:50 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115389 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id DD648A0543; Wed, 24 Aug 2022 18:25:32 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5891542B71; Wed, 24 Aug 2022 18:25:07 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 171534281A for ; Wed, 24 Aug 2022 18:25:04 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 10604CD26F; Wed, 24 Aug 2022 18:25:03 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id x8WnTq2axTQu; Wed, 24 Aug 2022 18:25:02 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id F0974CD267; Wed, 24 Aug 2022 18:24:57 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 06/10] dts: add traffic generator node Date: Wed, 24 Aug 2022 16:24:50 +0000 Message-Id: <20220824162454.394285-7-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The Traffic Generator node is responsible for configuring and running traffic generators. For HelloWorld, we don't need any traffic, so this is just a barebones implementation demonstrating the two nodes in use in DTS. Signed-off-by: Juraj Linkeš --- dts/framework/tg_node.py | 78 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 dts/framework/tg_node.py diff --git a/dts/framework/tg_node.py b/dts/framework/tg_node.py new file mode 100644 index 0000000000..109019e740 --- /dev/null +++ b/dts/framework/tg_node.py @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# + +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2019 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. 
+# + +""" +Interface for bulk traffic generators. +""" + +from framework.config import NodeConfiguration + +from .node import Node + + +class TrafficGeneratorNode(Node): + """ + A class for managing connections to the node running the Traffic generator, + providing methods that retrieve the necessary information about the node + (such as cpu, memory and NIC details), configure it and configure and + manage the Traffic generator. + """ + + def __init__(self, node_config: NodeConfiguration): + super(TrafficGeneratorNode, self).__init__(node_config) + # check the python version of TG + self.sut_nodes = [] + self.re_run_time = 0 + + def prerequisites(self): + """ + Setup hugepages and kernel modules on TG. + """ + self.kill_all() + + if not self.skip_setup: + total_huge_pages = self.get_total_huge_pages() + hugepages_size = self.send_expect( + "awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# " + ) + if total_huge_pages == 0: + self.mount_huge_pages() + if hugepages_size == "524288": + self.set_huge_pages(8) + else: + self.set_huge_pages(1024) + + self.send_expect("modprobe uio", "# ") + + self.tg_prerequisites() + + def tg_prerequisites(self): + """ + Prerequest function should be called before execute any test case. + Will call function to scan all lcore's information which on TG. + """ + + self.init_core_list() + + self.disable_lldp() + + def set_re_run(self, re_run_time): + """ + set failed case re-run time + """ + self.re_run_time = int(re_run_time) + + def disable_lldp(self): + """ + Disable TG ports LLDP. + """ + result = self.send_expect("lldpad -d", "# ") + if result: + self.logger.error(result.strip()) From patchwork Wed Aug 24 16:24:51 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115392 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D6F0EA0543; Wed, 24 Aug 2022 18:25:53 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6505042B88; Wed, 24 Aug 2022 18:25:10 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 865C14284D for ; Wed, 24 Aug 2022 18:25:06 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 5BC38CD272; Wed, 24 Aug 2022 18:25:05 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id bd4szD3sz0J7; Wed, 24 Aug 2022 18:25:03 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id BFD17CD274; Wed, 24 Aug 2022 18:24:58 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 07/10] dts: add testcase and basic test results Date: Wed, 24 Aug 2022 16:24:51 +0000 Message-Id: <20220824162454.394285-8-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> 
MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org TestCase implements methods for setting up and tearing down testcases and basic workflow methods. Result stores information about the testbed and the results of testcases that ran on the testbed. Signed-off-by: Juraj Linkeš --- dts/framework/exception.py | 15 ++ dts/framework/test_case.py | 274 +++++++++++++++++++++++++++++++++++ dts/framework/test_result.py | 218 ++++++++++++++++++++++++++++ dts/framework/utils.py | 14 ++ 4 files changed, 521 insertions(+) create mode 100644 dts/framework/test_case.py create mode 100644 dts/framework/test_result.py diff --git a/dts/framework/exception.py b/dts/framework/exception.py index 8466990aa5..6a0d133c65 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -28,6 +28,21 @@ def get_output(self) -> str: return self.output +class VerifyFailure(Exception): + """ + To be used within the test cases to verify if a command output + is as it was expected. + """ + + value: str + + def __init__(self, value: str): + self.value = value + + def __str__(self): + return repr(self.value) + + class SSHConnectionException(Exception): """ SSH connection error. diff --git a/dts/framework/test_case.py b/dts/framework/test_case.py new file mode 100644 index 0000000000..301711f656 --- /dev/null +++ b/dts/framework/test_case.py @@ -0,0 +1,274 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# + +""" +A base class for creating DTS test cases. +""" + +import re +import time +import traceback + +from .exception import TimeoutException, VerifyFailure +from .logger import getLogger +from .test_result import Result + + +class TestCase(object): + def __init__(self, sut_nodes, tg_node, suitename, target, func): + self.sut_node = sut_nodes[0] + self.sut_nodes = sut_nodes + self.tg_node = tg_node + self.suite_name = suitename + self.target = target + + # local variable + self._requested_tests = None + self._subtitle = None + + # check session and reconnect if possible + for sut_node in self.sut_nodes: + self._check_and_reconnect(node=sut_node) + self._check_and_reconnect(node=self.tg_node) + + # result object for save suite result + self._suite_result = Result() + self._suite_result.sut = self.sut_node.node["IP"] + self._suite_result.target = target + self._suite_result.test_suite = self.suite_name + if self._suite_result is None: + raise ValueError("Result object should not None") + + self._enable_func = func + + # command history + self.setup_history = list() + self.test_history = list() + + def init_log(self): + # get log handler + class_name = self.__class__.__name__ + self.logger = getLogger(class_name) + + def _check_and_reconnect(self, node=None): + try: + result = node.session.check_available() + except: + result = False + + if result is False: + node.reconnect_session() + if "sut" in str(type(node)): + node.send_expect("cd %s" % node.base_dir, "#") + node.set_env_variable() + + try: + result = node.alt_session.check_available() + except: + result = False + + if result is False: + node.reconnect_session(alt_session=True) + + def set_up_all(self): + pass + + def set_up(self): + pass + + def tear_down(self): + pass + + def tear_down_all(self): + pass + + def verify(self, passed, description): + if not passed: + raise VerifyFailure(description) + + 
def _get_functional_cases(self): + """ + Get all functional test cases. + """ + return self._get_test_cases(r"test_(?!perf_)") + + def _has_it_been_requested(self, test_case, test_name_regex): + """ + Check whether test case has been requested for validation. + """ + name_matches = re.match(test_name_regex, test_case.__name__) + + if self._requested_tests is not None: + return name_matches and test_case.__name__ in self._requested_tests + + return name_matches + + def set_requested_cases(self, case_list): + """ + Pass down input cases list for check + """ + if self._requested_tests is None: + self._requested_tests = case_list + elif case_list is not None: + self._requested_tests += case_list + + def _get_test_cases(self, test_name_regex): + """ + Return case list which name matched regex. + """ + for test_case_name in dir(self): + test_case = getattr(self, test_case_name) + if callable(test_case) and self._has_it_been_requested( + test_case, test_name_regex + ): + yield test_case + + def execute_setup_all(self): + """ + Execute suite setup_all function before cases. + """ + # clear all previous output + for sut_node in self.sut_nodes: + sut_node.get_session_output(timeout=0.1) + self.tg_node.get_session_output(timeout=0.1) + + # save into setup history list + self.enable_history(self.setup_history) + + try: + self.set_up_all() + return True + except Exception as v: + self.logger.error("set_up_all failed:\n" + traceback.format_exc()) + # record all cases blocked + if self._enable_func: + for case_obj in self._get_functional_cases(): + self._suite_result.test_case = case_obj.__name__ + self._suite_result.test_case_blocked( + "set_up_all failed: {}".format(str(v)) + ) + return False + + def _execute_test_case(self, case_obj): + """ + Execute specified test case in specified suite. If any exception occurred in + validation process, save the result and tear down this case. + """ + case_name = case_obj.__name__ + self._suite_result.test_case = case_obj.__name__ + + # save into test command history + self.test_history = list() + self.enable_history(self.test_history) + + case_result = True + try: + self.logger.info("Test Case %s Begin" % case_name) + + self.running_case = case_name + # clean session + for sut_node in self.sut_nodes: + sut_node.get_session_output(timeout=0.1) + self.tg_node.get_session_output(timeout=0.1) + # run set_up function for each case + self.set_up() + # run test case + case_obj() + + self._suite_result.test_case_passed() + + self.logger.info("Test Case %s Result PASSED:" % case_name) + + except VerifyFailure as v: + case_result = False + self._suite_result.test_case_failed(str(v)) + self.logger.error("Test Case %s Result FAILED: " % (case_name) + str(v)) + except KeyboardInterrupt: + self._suite_result.test_case_blocked("Skipped") + self.logger.error("Test Case %s SKIPPED: " % (case_name)) + self.tear_down() + raise KeyboardInterrupt("Stop DTS") + except TimeoutException as e: + case_result = False + self._suite_result.test_case_failed(str(e)) + self.logger.error("Test Case %s Result FAILED: " % (case_name) + str(e)) + self.logger.error("%s" % (e.get_output())) + except Exception: + case_result = False + trace = traceback.format_exc() + self._suite_result.test_case_failed(trace) + self.logger.error("Test Case %s Result ERROR: " % (case_name) + trace) + finally: + self.execute_tear_down() + return case_result + + def execute_test_cases(self): + """ + Execute all test cases in one suite. 
+ """ + # prepare debugger rerun case environment + if self._enable_func: + for case_obj in self._get_functional_cases(): + for i in range(self.tg_node.re_run_time + 1): + ret = self.execute_test_case(case_obj) + + if ret is False and self.tg_node.re_run_time: + for sut_node in self.sut_nodes: + sut_node.get_session_output(timeout=0.5 * (i + 1)) + self.tg_node.get_session_output(timeout=0.5 * (i + 1)) + time.sleep(i + 1) + self.logger.info( + " Test case %s failed and re-run %d time" + % (case_obj.__name__, i + 1) + ) + else: + break + + def execute_test_case(self, case_obj): + """ + Execute test case or enter into debug mode. + """ + return self._execute_test_case(case_obj) + + def get_result(self): + """ + Return suite test result + """ + return self._suite_result + + def execute_tear_downall(self): + """ + execute suite tear_down_all function + """ + try: + self.tear_down_all() + except Exception: + self.logger.error("tear_down_all failed:\n" + traceback.format_exc()) + + for sut_node in self.sut_nodes: + sut_node.kill_all() + self.tg_node.kill_all() + + def execute_tear_down(self): + """ + execute suite tear_down function + """ + try: + self.tear_down() + except Exception: + self.logger.error("tear_down failed:\n" + traceback.format_exc()) + self.logger.warning( + "tear down %s failed, might iterfere next case's result!" + % self.running_case + ) + + def enable_history(self, history): + """ + Enable history for all Node's default session + """ + for sut_node in self.sut_nodes: + sut_node.session.set_history(history) + + self.tg_node.session.set_history(history) diff --git a/dts/framework/test_result.py b/dts/framework/test_result.py new file mode 100644 index 0000000000..7be79df7f2 --- /dev/null +++ b/dts/framework/test_result.py @@ -0,0 +1,218 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# + +""" +Generic result container and reporters +""" + + +class Result(object): + """ + Generic result container. Useful to store/retrieve results during + a DTF execution. + + It manages and hide an internal complex structure like the one shown below. + This is presented to the user with a property based interface. 
+ + internals = [ + 'sut1', [ + 'kdriver', + 'firmware', + 'pkg', + 'driver', + 'dpdk_version', + 'target1', 'nic1', [ + 'suite1', [ + 'case1', ['PASSED', ''], + 'case2', ['PASSED', ''], + ], + ], + 'target2', 'nic1', [ + 'suite2', [ + 'case3', ['PASSED', ''], + 'case4', ['FAILED', 'message'], + ], + 'suite3', [ + 'case5', ['BLOCKED', 'message'], + ], + ] + ] + ] + + """ + + def __init__(self): + self.__sut = 0 + self.__target = 0 + self.__test_suite = 0 + self.__test_case = 0 + self.__test_result = None + self.__message = None + self.__internals = [] + self.__failed_suts = {} + self.__failed_targets = {} + + def __set_sut(self, sut): + if sut not in self.__internals: + self.__internals.append(sut) + self.__internals.append([]) + self.__sut = self.__internals.index(sut) + + def __get_sut(self): + return self.__internals[self.__sut] + + def current_dpdk_version(self, sut): + """ + Returns the dpdk version for a given SUT + """ + try: + sut_idx = self.__internals.index(sut) + return self.__internals[sut_idx + 1][4] + except: + return "" + + def __set_dpdk_version(self, dpdk_version): + if dpdk_version not in self.internals[self.__sut + 1]: + dpdk_current = self.__get_dpdk_version() + if dpdk_current: + if dpdk_version not in dpdk_current: + self.internals[self.__sut + 1][4] = ( + dpdk_current + "/" + dpdk_version + ) + else: + self.internals[self.__sut + 1].append(dpdk_version) + + def __get_dpdk_version(self): + try: + return self.internals[self.__sut + 1][4] + except: + return "" + + def __current_targets(self): + return self.internals[self.__sut + 1] + + def __set_target(self, target): + targets = self.__current_targets() + if target not in targets: + targets.append(target) + targets.append("_nic_") + targets.append([]) + self.__target = targets.index(target) + + def __get_target(self): + return self.__current_targets()[self.__target] + + def __current_suites(self): + return self.__current_targets()[self.__target + 2] + + def __set_test_suite(self, test_suite): + suites = self.__current_suites() + if test_suite not in suites: + suites.append(test_suite) + suites.append([]) + self.__test_suite = suites.index(test_suite) + + def __get_test_suite(self): + return self.__current_suites()[self.__test_suite] + + def __current_cases(self): + return self.__current_suites()[self.__test_suite + 1] + + def __set_test_case(self, test_case): + cases = self.__current_cases() + cases.append(test_case) + cases.append([]) + self.__test_case = cases.index(test_case) + + def __get_test_case(self): + return self.__current_cases()[self.__test_case] + + def __get_internals(self): + return self.__internals + + def __current_result(self): + return self.__current_cases()[self.__test_case + 1] + + def __set_test_case_result(self, result, message): + test_case = self.__current_result() + test_case.append(result) + test_case.append(message) + self.__test_result = result + self.__message = message + + def copy_suite(self, suite_result): + self.__current_suites()[self.__test_suite + 1] = suite_result.__current_cases() + + def test_case_passed(self): + """ + Set last test case added as PASSED + """ + self.__set_test_case_result(result="PASSED", message="") + + def test_case_failed(self, message): + """ + Set last test case added as FAILED + """ + self.__set_test_case_result(result="FAILED", message=message) + + def test_case_blocked(self, message): + """ + Set last test case added as BLOCKED + """ + self.__set_test_case_result(result="BLOCKED", message=message) + + def all_suts(self): + """ + Returns all the SUTs it's 
aware of. + """ + return self.__internals[::2] + + def all_targets(self, sut): + """ + Returns the targets for a given SUT + """ + try: + sut_idx = self.__internals.index(sut) + except: + return None + return self.__internals[sut_idx + 1][5::3] + + def add_failed_sut(self, sut, msg): + """ + Sets the given SUT as failing due to msg + """ + self.__failed_suts[sut] = msg + + def remove_failed_sut(self, sut): + """ + Remove the given SUT from failed SUTs collection + """ + if sut in self.__failed_suts: + self.__failed_suts.pop(sut) + + def add_failed_target(self, sut, target, msg): + """ + Sets the given SUT, target as failing due to msg + """ + self.__failed_targets[sut + target] = msg + + def remove_failed_target(self, sut, target): + """ + Remove the given SUT, target from failed targets collection + """ + key_word = sut + target + if key_word in self.__failed_targets: + self.__failed_targets.pop(key_word) + + """ + Attributes defined as properties to hide the implementation from the + presented interface. + """ + sut = property(__get_sut, __set_sut) + dpdk_version = property(__get_dpdk_version, __set_dpdk_version) + target = property(__get_target, __set_target) + test_suite = property(__get_test_suite, __set_test_suite) + test_case = property(__get_test_case, __set_test_case) + internals = property(__get_internals) diff --git a/dts/framework/utils.py b/dts/framework/utils.py index 2a174831d0..aac4d3505b 100644 --- a/dts/framework/utils.py +++ b/dts/framework/utils.py @@ -4,6 +4,7 @@ # Copyright(c) 2022 University of New Hampshire # +import inspect import sys @@ -15,6 +16,19 @@ def GREEN(text: str) -> str: return f"\u001B[32;1m{str(text)}\u001B[0m" +def get_subclasses(module, clazz): + """ + Get module attribute name and attribute. + """ + for subclazz_name, subclazz in inspect.getmembers(module): + if ( + hasattr(subclazz, "__bases__") + and subclazz.__bases__ + and clazz in subclazz.__bases__ + ): + yield (subclazz_name, subclazz) + + def check_dts_python_version() -> None: if sys.version_info.major < 3 or ( sys.version_info.major == 3 and sys.version_info.minor < 10 From patchwork Wed Aug 24 16:24:52 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115391 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id B0904A0543; Wed, 24 Aug 2022 18:25:47 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 758BF42B84; Wed, 24 Aug 2022 18:25:09 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 54B954282F for ; Wed, 24 Aug 2022 18:25:06 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 4F8CECD26D; Wed, 24 Aug 2022 18:25:05 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id oPzGsw12E5YL; Wed, 24 Aug 2022 18:25:04 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 813B5CD275; Wed, 24 Aug 2022 18:24:59 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, 
Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 08/10] dts: add test runner and statistics collector Date: Wed, 24 Aug 2022 16:24:52 +0000 Message-Id: <20220824162454.394285-9-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add functions responsible for initializing testbed setup, testcase discovery and execution. The stats collector gathers the pass/fail results and provides a short report. Signed-off-by: Juraj Linkeš --- dts/framework/dts.py | 174 +++++++++++++++++++++++++++++--- dts/framework/stats_reporter.py | 70 +++++++++++++ 2 files changed, 232 insertions(+), 12 deletions(-) create mode 100644 dts/framework/stats_reporter.py diff --git a/dts/framework/dts.py b/dts/framework/dts.py index 1938ea6af8..39e07d9eec 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -4,38 +4,179 @@ # Copyright(c) 2022 University of New Hampshire # +import os import sys +import traceback from typing import Iterable, Optional import framework.logger as logger from .config import CONFIGURATION +from .exception import VerifyFailure from .logger import getLogger from .node import Node -from .settings import SETTINGS -from .utils import check_dts_python_version +from .settings import SETTINGS, DTSRuntimeError, DTSRuntimeErrors +from .stats_reporter import StatsReporter +from .sut_node import SutNode +from .test_case import TestCase +from .test_result import Result +from .tg_node import TrafficGeneratorNode +from .utils import check_dts_python_version, get_subclasses +requested_tests: Optional[list[str]] = None +result: Optional[Result] = None +stats_report: Optional[StatsReporter] = None log_handler: Optional[logger.DTSLOG] = None +def dts_nodes_init(): + """ + Create dts SUT/TG instance and initialize them. + """ + sut_nodes = [] + tg_node = None + for node_config in CONFIGURATION.nodes: + if hasattr(node_config, 'memory_channels'): + sut_nodes.append(SutNode(node_config)) + else: + tg_node = TrafficGeneratorNode(node_config) + tg_node.set_re_run(SETTINGS.re_run if SETTINGS.re_run > 0 else 0) + + return sut_nodes, tg_node + + +def dts_run_prerequisites(nodes): + """ + Run dts prerequisites function. + """ + # TODO nodes config contains both sut and tg nodes + try: + for node in nodes: + node.prerequisites() + except Exception as ex: + log_handler.error("NODE PREREQ EXCEPTION " + traceback.format_exc()) + result.add_failed_sut(node, str(ex)) + if isinstance(node, TrafficGeneratorNode): + DTSRuntimeError = DTSRuntimeErrors.TG_SETUP_ERR + else: + DTSRuntimeError = DTSRuntimeErrors.SUT_SETUP_ERR + return False + return True + + +def dts_run_target(sut_nodes, tg_node, targets, test_suites): + """ + Run each target in execution targets. 
+ """ + for target in targets: + target = str(target) + log_handler.info("\nTARGET " + target) + result.target = target + + try: + for sut_node in sut_nodes: + sut_node.set_target(target) + except AssertionError as ex: + DTSRuntimeError = DTSRuntimeErrors.DPDK_BUILD_ERR + log_handler.error(" TARGET ERROR: " + str(ex)) + result.add_failed_target(result.sut, target, str(ex)) + continue + except Exception as ex: + DTSRuntimeError = DTSRuntimeErrors.GENERIC_ERR + log_handler.error(" !!! DEBUG IT: " + traceback.format_exc()) + result.add_failed_target(result.sut, target, str(ex)) + continue + + dts_run_suite(sut_nodes, tg_node, test_suites, target) + + +def dts_run_suite(sut_nodes, tg_node, test_suites, target): + """ + Run each suite in test suite list. + """ + for suite_name in test_suites: + try: + # check whether config the test cases + append_requested_case_list = None + if ":" in suite_name: + case_list = suite_name[suite_name.find(":") + 1 :] + append_requested_case_list = case_list.split("\\") + suite_name = suite_name[: suite_name.find(":")] + result.test_suite = suite_name + _suite_full_name = "TestSuite_" + suite_name + suite_module = __import__( + "tests." + _suite_full_name, fromlist=[_suite_full_name] + ) + for test_classname, test_class in get_subclasses(suite_module, TestCase): + + suite_obj = test_class(sut_nodes, tg_node, target, suite_name) + suite_obj.init_log() + suite_obj.set_requested_cases(requested_tests) + suite_obj.set_requested_cases(append_requested_case_list) + + log_handler.info("\nTEST SUITE : " + test_classname) + + if suite_obj.execute_setup_all(): + suite_obj.execute_test_cases() + + # save suite cases result + result.copy_suite(suite_obj.get_result()) + + log_handler.info("\nTEST SUITE ENDED: " + test_classname) + except VerifyFailure: + DTSRuntimeError = DTSRuntimeErrors.SUITE_EXECUTE_ERR + log_handler.error(" !!! DEBUG IT: " + traceback.format_exc()) + except KeyboardInterrupt: + # stop/save result/skip execution + log_handler.error(" !!! STOPPING DTS") + break + except Exception as e: + DTSRuntimeError = DTSRuntimeErrors.GENERIC_ERR + log_handler.error(str(e)) + finally: + try: + suite_obj.execute_tear_downall() + except Exception as e: + DTSRuntimeError = DTSRuntimeErrors.GENERIC_ERR + log_handler.error(str(e)) + try: + stats_report.save(result) + except Exception as e: + DTSRuntimeError = DTSRuntimeErrors.GENERIC_ERR + log_handler.error(str(e)) + + def run_all() -> None: """ Main process of DTS, it will run all test suites in the config file. """ global log_handler + global result + global stats_report + global requested_tests # check the python version of the server that run dts check_dts_python_version() + # prepare the output folder + if not os.path.exists(SETTINGS.output_dir): + os.mkdir(SETTINGS.output_dir) + # init log_handler handler if SETTINGS.verbose is True: logger.set_verbose() log_handler = getLogger("dts") - nodes = {} - # This try/finally block means "Run the try block, if there is an exception, + # run designated test cases + requested_tests = SETTINGS.test_cases + + # report objects + stats_report = StatsReporter(SETTINGS.output_dir + "/statistics.txt") + result = Result() + + # This try/finally block means "Run the try block and if there is an exception, # run the finally block before passing it upward. If there is not an exception, # run the finally block after the try block is finished." 
This helps avoid the # problem of python's interpreter exit context, which essentially prevents you @@ -45,26 +186,35 @@ def run_all() -> None: # An except block SHOULD NOT be added to this. A failure at this level should # deliver a full stack trace for debugging, since the only place that exceptions # should be caught and handled is in the testing code. + nodes = [] try: # for all Execution sections for execution in CONFIGURATION.executions: - sut_config = execution.system_under_test - if sut_config.name not in nodes: - nodes[sut_config.name] = Node(sut_config) + sut_nodes, tg_node = dts_nodes_init() + nodes.extend(sut_nodes) + nodes.append(tg_node) + + # Run SUT prerequisites + if dts_run_prerequisites(nodes) is False: + continue + result.dpdk_version = sut_nodes[0].dpdk_version + dts_run_target( + sut_nodes, tg_node, execution.target_descriptions, execution.test_suites + ) finally: - quit_execution(nodes.values()) + quit_execution(nodes) -def quit_execution(sut_nodes: Iterable[Node]) -> None: +def quit_execution(nodes: Iterable[Node]) -> None: """ Close session to SUT and TG before quit. Return exit status when failure occurred. """ - for sut_node in sut_nodes: + for node in nodes: # close all session - sut_node.node_exit() + node.node_exit() if log_handler is not None: log_handler.info("DTS ended") - sys.exit(0) + sys.exit(DTSRuntimeError) diff --git a/dts/framework/stats_reporter.py b/dts/framework/stats_reporter.py new file mode 100644 index 0000000000..a8d589bc7b --- /dev/null +++ b/dts/framework/stats_reporter.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2022 PANTHEON.tech s.r.o. +# + +""" +Simple text file statistics generator +""" + + +class StatsReporter(object): + """ + Generates a small statistics file containing the number of passing, + failing and blocked tests. It makes use of a Result instance as input. 
+ """ + + def __init__(self, filename): + self.filename = filename + + def __add_stat(self, test_result): + if test_result is not None: + if test_result[0] == "PASSED": + self.passed += 1 + if test_result[0] == "FAILED": + self.failed += 1 + if test_result[0] == "BLOCKED": + self.blocked += 1 + self.total += 1 + + def __count_stats(self): + for sut in self.result.all_suts(): + for target in self.result.all_targets(sut): + for suite in self.result.all_test_suites(sut, target): + for case in self.result.all_test_cases(sut, target, suite): + test_result = self.result.result_for(sut, target, suite, case) + if len(test_result): + self.__add_stat(test_result) + + def __write_stats(self): + sut_nodes = self.result.all_suts() + if len(sut_nodes) == 1: + self.stats_file.write( + "dpdk_version = {}\n".format( + self.result.current_dpdk_version(sut_nodes[0]) + ) + ) + else: + for sut in sut_nodes: + dpdk_version = self.result.current_dpdk_version(sut) + self.stats_file.write( + "{}.dpdk_version = {}\n".format(sut, dpdk_version) + ) + self.__count_stats() + self.stats_file.write("Passed = %d\n" % self.passed) + self.stats_file.write("Failed = %d\n" % self.failed) + self.stats_file.write("Blocked = %d\n" % self.blocked) + rate = 0 + if self.total > 0: + rate = self.passed * 100.0 / self.total + self.stats_file.write("Pass rate = %.1f\n" % rate) + + def save(self, result): + self.passed = 0 + self.failed = 0 + self.blocked = 0 + self.total = 0 + self.stats_file = open(self.filename, "w+") + self.result = result + self.__write_stats() + self.stats_file.close() From patchwork Wed Aug 24 16:24:53 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115393 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1594CA0543; Wed, 24 Aug 2022 18:26:03 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id BB16842B6C; Wed, 24 Aug 2022 18:25:11 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 0163C42B74 for ; Wed, 24 Aug 2022 18:25:08 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 0C90BCD274; Wed, 24 Aug 2022 18:25:07 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id 2WpABciMsF2V; Wed, 24 Aug 2022 18:25:06 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 36DA0CD276; Wed, 24 Aug 2022 18:25:00 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 09/10] dts: add hello world testplan Date: Wed, 24 Aug 2022 16:24:53 +0000 Message-Id: <20220824162454.394285-10-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list 
List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The test plan describes the capabilities of the tested application along with descriptions of the test cases that exercise it. Signed-off-by: Juraj Linkeš --- dts/test_plans/hello_world_test_plan.rst | 68 ++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 dts/test_plans/hello_world_test_plan.rst diff --git a/dts/test_plans/hello_world_test_plan.rst b/dts/test_plans/hello_world_test_plan.rst new file mode 100644 index 0000000000..566a9bb10c --- /dev/null +++ b/dts/test_plans/hello_world_test_plan.rst @@ -0,0 +1,68 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2010-2017 Intel Corporation + +============================================= +Sample Application Tests: Hello World Example +============================================= + +This example is one of the simplest DPDK applications that can be +written. The program simply prints a "helloworld" message on every +enabled lcore. + +Command Usage:: + + ./dpdk-helloworld -c COREMASK [-m NB] [-r NUM] [-n NUM] + + EAL option list: + -c COREMASK: hexadecimal bitmask of cores we are running on + -m MB : memory to allocate (default = size of hugemem) + -n NUM : force number of memory channels (don't detect) + -r NUM : force number of memory ranks (don't detect) + --huge-file: base filename for hugetlbfs entries + debug options: + --no-huge : use malloc instead of hugetlbfs + --no-pci : disable pci + --no-hpet : disable hpet + --no-shconf: no shared config (mmap'd files) + + +Prerequisites +============= + +Both the igb_uio and vfio drivers are supported. When using vfio, the kernel must be 3.6+ and VT-d must be enabled in the BIOS. +Load the vfio modules with "modprobe vfio" and "modprobe vfio-pci", then use +"./tools/dpdk_nic_bind.py --bind=vfio-pci device_bus_id" to bind the device under test to the vfio-pci driver. + +To find out the mapping of lcores (processor) to core id and socket (physical +id), the command below can be used:: + + $ grep "processor\|physical id\|core id\|^$" /proc/cpuinfo + +The total number of logical cores is used as the ``helloworld`` input parameter. + + +Test Case: run hello world on single lcores +=========================================== + +To run the example on a single lcore:: + + $ ./dpdk-helloworld -c 1 + hello from core 0 + +Check that the output contains the hello message from lcore 0 only. + + +Test Case: run hello world on every lcores +========================================== + +To run the example on all enabled lcores:: + + $ ./dpdk-helloworld -cffffff + hello from core 1 + hello from core 2 + hello from core 3 + ... + ... + hello from core 0 + +Verify that a hello message is printed for every core enabled in the core mask. 
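As a minimal illustration of the lcore-to-core/socket mapping described in the test plan prerequisites (not part of this patch; it assumes only the standard /proc/cpuinfo layout), the same information the grep command prints can be gathered in Python roughly like this:

    # Sketch only: collect (processor, core id, physical id) values from
    # /proc/cpuinfo, mirroring the grep command in the prerequisites above.
    def read_lcore_map(path="/proc/cpuinfo"):
        lcores, current = [], {}
        wanted = ("processor", "core id", "physical id")
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line:
                    # a blank line separates per-processor blocks
                    if current:
                        lcores.append(current)
                    current = {}
                    continue
                key, _, value = line.partition(":")
                if key.strip() in wanted:
                    current[key.strip()] = int(value)
        if current:
            lcores.append(current)
        return lcores

The total number of entries returned is the logical core count that the ``helloworld`` test cases use when building their core lists.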
From patchwork Wed Aug 24 16:24:54 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 115394 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 76428A0543; Wed, 24 Aug 2022 18:26:09 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9ECD942B99; Wed, 24 Aug 2022 18:25:12 +0200 (CEST) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 3A2D442B75 for ; Wed, 24 Aug 2022 18:25:08 +0200 (CEST) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 3E2E1CD275; Wed, 24 Aug 2022 18:25:07 +0200 (CEST) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id HiHyiWvtT858; Wed, 24 Aug 2022 18:25:06 +0200 (CEST) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 07487CD277; Wed, 24 Aug 2022 18:25:00 +0200 (CEST) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, david.marchand@redhat.com, ronan.randles@intel.com, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [RFC PATCH v1 10/10] dts: add hello world testsuite Date: Wed, 24 Aug 2022 16:24:54 +0000 Message-Id: <20220824162454.394285-11-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220824162454.394285-1-juraj.linkes@pantheon.tech> References: <20220824162454.394285-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The testsuite implements the testcases defined in the corresponding test plan. Signed-off-by: Juraj Linkeš --- dts/tests/TestSuite_hello_world.py | 80 ++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 dts/tests/TestSuite_hello_world.py diff --git a/dts/tests/TestSuite_hello_world.py b/dts/tests/TestSuite_hello_world.py new file mode 100644 index 0000000000..8be33330aa --- /dev/null +++ b/dts/tests/TestSuite_hello_world.py @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# + +""" +DPDK Test suite. +Test HelloWorld example. +""" + +import os.path +from framework.test_case import TestCase + + +class TestHelloWorld(TestCase): + def set_up_all(self): + """ + Run at the start of each test suite. + hello_world Prerequisites: + helloworld build pass + """ + out = self.sut_node.build_dpdk_apps("examples/helloworld") + self.app_helloworld_path = os.path.join(self.target, "examples", "dpdk-helloworld") + + self.verify("Error" not in out, "compilation error 1") + self.verify("No such file" not in out, "compilation error 2") + + def set_up(self): + """ + Run before each test case. + Nothing to do. 
+ """ + pass + + def test_hello_world_single_core(self): + """ + Run hello world on single lcores + Only received hello message from core0 + """ + + # get the mask for the first core + cores = self.sut_node.get_core_list("1S/1C/1T") + eal_para = self.sut_node.create_eal_parameters(cores="1S/1C/1T") + cmdline = "./%s %s" % (self.app_helloworld_path, eal_para) + out = self.sut_node.send_expect(cmdline, "# ", 30) + self.verify( + "hello from core %s" % cores[0] in out, + "EAL not started on core%s" % cores[0], + ) + + def test_hello_world_all_cores(self): + """ + Run hello world on all lcores + Received hello message from all lcores + """ + + # get the maximum logical core number + cores = self.sut_node.get_core_list("all") + eal_para = self.sut_node.create_eal_parameters(cores=cores) + + cmdline = "./%s %s " % (self.app_helloworld_path, eal_para) + out = self.sut_node.send_expect(cmdline, "# ", 50) + for core in cores: + self.verify( + "hello from core %s" % core in out, + "EAL not started on core%s" % core, + ) + + def tear_down(self): + """ + Run after each test case. + Nothing to do. + """ + pass + + def tear_down_all(self): + """ + Run after each test suite. + Nothing to do. + """ + pass