From patchwork Tue Mar 28 08:55:57 2023
X-Patchwork-Submitter: "Ling, WeiX"
X-Patchwork-Id: 125553
From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V1 2/2] tests/virtio_event_idx_interrupt: modify re-run times from 100 to 10
Date: Tue, 28 Mar 2023 16:55:57 +0800
Message-Id: <20230328085557.3796893-3-weix.ling@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230328085557.3796893-1-weix.ling@intel.com>
References: <20230328085557.3796893-1-weix.ling@intel.com>

1. Modify the re-run times from 100 to 10 to reduce the run time.
2. Use the PmdOutput API instead of send_expect() to start testpmd.

Signed-off-by: Wei Ling
Acked-by: Xingguang He
---
 tests/TestSuite_virtio_event_idx_interrupt.py | 67 ++++++++-----------
 1 file changed, 29 insertions(+), 38 deletions(-)

diff --git a/tests/TestSuite_virtio_event_idx_interrupt.py b/tests/TestSuite_virtio_event_idx_interrupt.py
index 620cf794..bfc44cb4 100644
--- a/tests/TestSuite_virtio_event_idx_interrupt.py
+++ b/tests/TestSuite_virtio_event_idx_interrupt.py
@@ -2,17 +2,12 @@
 # Copyright(c) 2019 Intel Corporation
 #
 
-"""
-DPDK Test suite.
-Virtio idx interrupt need test with l3fwd-power sample
-"""
-
 import _thread
 import re
 import time
 
-import framework.utils as utils
 from framework.pktgen import PacketGeneratorHelper
+from framework.pmd_output import PmdOutput
 from framework.test_case import TestCase
 from framework.virt_common import VM
 
@@ -22,8 +17,6 @@ class TestVirtioIdxInterrupt(TestCase):
         """
         Run at the start of each test suite.
         """
-        self.queues = 1
-        self.nb_cores = 1
         self.dut_ports = self.dut.get_ports()
         self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
@@ -32,7 +25,7 @@ class TestVirtioIdxInterrupt(TestCase):
         )
         self.dst_mac = self.dut.get_mac_address(self.dut_ports[0])
         self.base_dir = self.dut.base_dir.replace("~", "/root")
-        self.pf_pci = self.dut.ports_info[0]["pci"]
+        self.port_pci = self.dut.ports_info[0]["pci"]
         self.out_path = "/tmp"
         out = self.tester.send_expect("ls -d %s" % self.out_path, "# ")
         if "No such file or directory" in out:
@@ -41,7 +34,8 @@ class TestVirtioIdxInterrupt(TestCase):
         self.pktgen_helper = PacketGeneratorHelper()
         self.app_testpmd_path = self.dut.apps_name["test-pmd"]
         self.testpmd_name = self.app_testpmd_path.split("/")[-1]
-        self.device_str = None
+        self.vhost_user = self.dut.new_session(suite="vhost-user")
+        self.vhost_user_pmd = PmdOutput(self.dut, self.vhost_user)
 
     def set_up(self):
         """
@@ -52,7 +46,6 @@ class TestVirtioIdxInterrupt(TestCase):
         self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
         self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
         self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
-        self.vhost = self.dut.new_session(suite="vhost")
 
     def get_core_mask(self):
         self.core_config = "1S/%dC/1T" % (self.nb_cores + 1)
@@ -62,39 +55,38 @@ class TestVirtioIdxInterrupt(TestCase):
         )
         self.core_list = self.dut.get_core_list(self.core_config)
 
-    def start_vhost_testpmd(self, dmas=None, mode=False):
+    def start_vhost_testpmd(self):
         """
         start the testpmd on vhost side
         """
-        # get the core mask depend on the nb_cores number
         self.get_core_mask()
-        testcmd = self.app_testpmd_path + " "
-        vdev = [
-            "net_vhost,iface=%s/vhost-net,queues=%d " % (self.base_dir, self.queues)
-        ]
-        eal_params = self.dut.create_eal_parameters(
-            cores=self.core_list, prefix="vhost", ports=[self.pf_pci], vdevs=vdev
+        eal_param = "--vdev 'net_vhost,iface=%s/vhost-net,queues=%d'" % (
+            self.base_dir,
+            self.queues,
         )
-        para = " -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % (
+        param = "--nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d" % (
             self.nb_cores,
             self.queues,
             self.queues,
         )
-        command_line = testcmd + eal_params + para
-        self.vhost.send_expect(command_line, "testpmd> ", 30)
-        self.vhost.send_expect("start", "testpmd> ", 30)
+        self.vhost_user_pmd.start_testpmd(
+            cores=self.core_list,
+            eal_param=eal_param,
+            param=param,
+            prefix="vhost-user",
+            fixed_prefix=True,
+            ports=[self.port_pci],
+        )
+        self.vhost_user_pmd.execute_cmd("start")
 
-    def start_vms(self, packed=False, mode=False, set_target=False, bind_dev=False):
+    def start_vms(self, packed=False):
         """
         start qemus
         """
         self.vm = VM(self.dut, "vm0", "vhost_sample")
         vm_params = {}
         vm_params["driver"] = "vhost-user"
-        if mode:
-            vm_params["opt_path"] = "%s/vhost-net,%s" % (self.base_dir, mode)
-        else:
-            vm_params["opt_path"] = "%s/vhost-net" % self.base_dir
+        vm_params["opt_path"] = "%s/vhost-net" % self.base_dir
         vm_params["opt_mac"] = "00:11:22:33:44:55"
         opt_args = (
"mrg_rxbuf=on,csum=on,gso=on,guest_csum=on,host_tso4=on,guest_tso4=on" @@ -107,7 +99,7 @@ class TestVirtioIdxInterrupt(TestCase): vm_params["opt_settings"] = opt_args self.vm.set_vm_device(**vm_params) try: - self.vm_dut = self.vm.start(set_target=set_target, bind_dev=bind_dev) + self.vm_dut = self.vm.start(set_target=False, bind_dev=False) if self.vm_dut is None: raise Exception("Set up VM ENV failed") except Exception as e: @@ -202,7 +194,7 @@ class TestVirtioIdxInterrupt(TestCase): """ check each queue has receive packets on vhost side """ - out = self.vhost.send_expect("stop", "testpmd> ", 60) + out = self.vhost_user_pmd.execute_cmd("stop") print(out) for queue_index in range(0, self.queues): queue = re.search("Port= 0/Queue=\s*%d" % queue_index, out) @@ -217,14 +209,14 @@ class TestVirtioIdxInterrupt(TestCase): "The queue %d rx-packets or tx-packets is 0 about " % queue_index + "rx-packets:%d, tx-packets:%d" % (rx_packets, tx_packets), ) - self.vhost.send_expect("clear port stats all", "testpmd> ", 60) + self.vhost_user_pmd.execute_cmd("clear port stats all") def stop_all_apps(self): """ close all vms """ self.vm.stop() - self.vhost.send_expect("quit", "#", 20) + self.vhost_user_pmd.quit() def test_perf_split_ring_virito_pci_driver_reload(self): """ @@ -233,9 +225,9 @@ class TestVirtioIdxInterrupt(TestCase): self.queues = 1 self.nb_cores = 1 self.start_vhost_testpmd() - self.start_vms() + self.start_vms(packed=False) self.config_virito_net_in_vm() - res = self.check_packets_after_reload_virtio_device(reload_times=100) + res = self.check_packets_after_reload_virtio_device(reload_times=10) self.verify(res is True, "Should increase the wait times of ixia") self.stop_all_apps() @@ -248,7 +240,7 @@ class TestVirtioIdxInterrupt(TestCase): self.queues = 16 self.nb_cores = 16 self.start_vhost_testpmd() - self.start_vms() + self.start_vms(packed=False) self.config_virito_net_in_vm() self.start_to_send_packets(delay=15) self.check_each_queue_has_packets_info_on_vhost() @@ -263,7 +255,7 @@ class TestVirtioIdxInterrupt(TestCase): self.start_vhost_testpmd() self.start_vms(packed=True) self.config_virito_net_in_vm() - res = self.check_packets_after_reload_virtio_device(reload_times=100) + res = self.check_packets_after_reload_virtio_device(reload_times=10) self.verify(res is True, "Should increase the wait times of ixia") self.stop_all_apps() @@ -286,7 +278,6 @@ class TestVirtioIdxInterrupt(TestCase): """ Run after each test case. """ - self.dut.close_session(self.vhost) self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.dut.send_expect("killall -s INT qemu-system-x86_64", "#") @@ -294,4 +285,4 @@ class TestVirtioIdxInterrupt(TestCase): """ Run after each test suite. """ - pass + self.dut.close_session(self.vhost_user)