From patchwork Fri Dec 23 05:47:32 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ling, WeiX" X-Patchwork-Id: 121346 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5AB1FA0093; Fri, 23 Dec 2022 06:56:21 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 52014410F9; Fri, 23 Dec 2022 06:56:21 +0100 (CET) Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by mails.dpdk.org (Postfix) with ESMTP id ADC9D40141 for ; Fri, 23 Dec 2022 06:56:19 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1671774979; x=1703310979; h=from:to:cc:subject:date:message-id:mime-version: content-transfer-encoding; bh=QTWMnxFCd3fwpqbgU7s+hudZP46rZbpEwoI95rWvaXo=; b=jwIThc3b7ciUWWzokHgp+n1vd98rnYRS1wZK2zVSlSvUh6FwOTyu1WmP 38/5jNzv55e9o7YjzsEt1VradtME5AC/dcQHLbayal4Nn/sSQCuD3aaWJ UZRuq4Z1HDQ9A88vG7MQCEBO14VtTFYxWD89cAei4Ke+VEz13ufX0ChKI x2xqhBB+r2urKAbI7yAfRXGfeY5HEhPpnN4U/fTlsLyRWlFYlNgLO40mQ AAyALzd7kxmotQ8z2x5OQkLHXJI9ctiGNc7k3wHwxyyyBkjVBSKxa+cIe Fok5BNQ4aaRgye3P0fStJ9fhA0mTFUO1AMqKFmDGe4+fEFcFJRl55eO/e g==; X-IronPort-AV: E=McAfee;i="6500,9779,10569"; a="384666981" X-IronPort-AV: E=Sophos;i="5.96,267,1665471600"; d="scan'208";a="384666981" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 22 Dec 2022 21:56:18 -0800 X-IronPort-AV: E=McAfee;i="6500,9779,10569"; a="629736371" X-IronPort-AV: E=Sophos;i="5.96,267,1665471600"; d="scan'208";a="629736371" Received: from unknown (HELO localhost.localdomain) ([10.239.252.222]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 22 Dec 2022 21:56:17 -0800 From: Wei Ling To: dts@dpdk.org Cc: Wei Ling 
Subject: [dts][PATCH V1 2/2] tests/pvp_vhost_user_reconnect: split the packed ring cases Date: Fri, 23 Dec 2022 13:47:32 +0800 Message-Id: <20221223054732.754833-1-weix.ling@intel.com> X-Mailer: git-send-email 2.25.1 MIME-Version: 1.0 X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org As the packed ring does not support reconnect from the back-end side, but does support reconnect from the front-end side, split the packed ring cases into 2 cases: reconnect from the back-end and reconnect from the front-end. Signed-off-by: Wei Ling --- tests/TestSuite_pvp_vhost_user_reconnect.py | 173 +++++++++++++------- 1 file changed, 112 insertions(+), 61 deletions(-) diff --git a/tests/TestSuite_pvp_vhost_user_reconnect.py b/tests/TestSuite_pvp_vhost_user_reconnect.py index 93006413..f503da9b 100644 --- a/tests/TestSuite_pvp_vhost_user_reconnect.py +++ b/tests/TestSuite_pvp_vhost_user_reconnect.py @@ -2,13 +2,6 @@ # Copyright(c) 2019 Intel Corporation # -""" -DPDK Test suite. - -Vhost reconnect two VM test suite. 
-Becase this suite will use the reconnet feature, the VM will start as -server mode, so the qemu version should greater than 2.7 -""" import re import time @@ -21,11 +14,9 @@ from framework.virt_common import VM class TestPVPVhostUserReconnect(TestCase): def set_up_all(self): - # Get and verify the ports self.dut_ports = self.dut.get_ports() self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing") - # Get the port's socket self.pf = self.dut_ports[0] netdev = self.dut.ports_info[self.pf]["port"] @@ -38,7 +29,6 @@ class TestPVPVhostUserReconnect(TestCase): self.socket_mem = "1024" else: self.socket_mem = "1024,1024" - self.reconnect_times = 5 self.vm_num = 1 self.frame_sizes = [64, 1518] @@ -67,7 +57,7 @@ class TestPVPVhostUserReconnect(TestCase): self.dut.send_expect("rm -rf ./vhost-net*", "# ") self.vhost_user = self.dut.new_session(suite="vhost-user") - def launch_testpmd_as_vhost_user(self): + def launch_testpmd_as_vhost_user(self, no_pci=False): """ launch the testpmd as vhost user """ @@ -78,32 +68,20 @@ class TestPVPVhostUserReconnect(TestCase): i, ) testcmd = self.dut.base_dir + "/%s" % self.path - eal_params = self.dut.create_eal_parameters( - cores=self.cores, prefix="vhost", ports=[self.pci_info] - ) - para = " -- -i --port-topology=chained --nb-cores=1 --txd=1024 --rxd=1024" - self.vhostapp_testcmd = testcmd + eal_params + vdev_info + para - self.vhost_user.send_expect(self.vhostapp_testcmd, "testpmd> ", 40) - self.vhost_user.send_expect("set fwd mac", "testpmd> ", 40) - self.vhost_user.send_expect("start", "testpmd> ", 40) - def launch_testpmd_as_vhost_user_with_no_pci(self): - """ - launch the testpmd as vhost user - """ - vdev_info = "" - for i in range(self.vm_num): - vdev_info += "--vdev 'net_vhost%d,iface=vhost-net%d,client=1,queues=1' " % ( - i, - i, + if not no_pci: + eal_params = self.dut.create_eal_parameters( + cores=self.cores, prefix="vhost", ports=[self.pci_info] ) - testcmd = self.dut.base_dir + "/%s" % self.path - 
eal_params = self.dut.create_eal_parameters( - cores=self.cores, no_pci=True, prefix="vhost" - ) - para = " -- -i --nb-cores=1 --txd=1024 --rxd=1024" + else: + eal_params = self.dut.create_eal_parameters( + cores=self.cores, no_pci=True, prefix="vhost" + ) + para = " -- -i --port-topology=chained --nb-cores=1 --txd=1024 --rxd=1024" self.vhostapp_testcmd = testcmd + eal_params + vdev_info + para self.vhost_user.send_expect(self.vhostapp_testcmd, "testpmd> ", 40) + if not no_pci: + self.vhost_user.send_expect("set fwd mac", "testpmd> ", 40) self.vhost_user.send_expect("start", "testpmd> ", 40) def check_link_status_after_testpmd_start(self, dut_info): @@ -337,7 +315,9 @@ class TestPVPVhostUserReconnect(TestCase): def test_perf_split_ring_reconnet_one_vm(self): """ - test reconnect stability test of one vm + Test Case 1: vhost-user/virtio-pmd pvp split ring reconnect from vhost-user + Test Case 2: vhost-user/virtio-pmd pvp split ring reconnect from VM + Test Case 3: vhost-user/virtio-pmd pvp split ring reconnect stability test """ self.header_row = [ "Mode", @@ -358,7 +338,7 @@ class TestPVPVhostUserReconnect(TestCase): vm_cycle = 1 # reconnet from vhost self.logger.info("now reconnect from vhost") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") @@ -366,7 +346,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from qemu self.logger.info("now reconnect from vm") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms() self.vm_testpmd_start() @@ -376,7 +356,9 @@ class TestPVPVhostUserReconnect(TestCase): def test_perf_split_ring_reconnet_two_vms(self): """ - test reconnect stability test of two vms + Test Case 4: vhost-user/virtio-pmd pvp split ring 
with multi VMs reconnect from vhost-user + Test Case 5: vhost-user/virtio-pmd pvp split ring with multi VMs reconnect from VMs + Test Case 6: vhost-user/virtio-pmd pvp split ring with multi VMs reconnect stability test """ self.header_row = [ "Mode", @@ -397,7 +379,7 @@ class TestPVPVhostUserReconnect(TestCase): vm_cycle = 1 # reconnet from vhost self.logger.info("now reconnect from vhost") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") @@ -405,7 +387,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from qemu self.logger.info("now reconnect from vm") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms() self.vm_testpmd_start() @@ -415,13 +397,15 @@ class TestPVPVhostUserReconnect(TestCase): def test_perf_split_ring_vm2vm_virtio_net_reconnet_two_vms(self): """ - test the iperf traffice can resume after reconnet + Test Case 7: vhost-user/virtio-net VM2VM split ring reconnect from vhost-user + Test Case 8: vhost-user/virtio-net VM2VM split ring reconnect from VMs + Test Case 9: vhost-user/virtio-net VM2VM split ring reconnect stability test """ self.header_row = ["Mode", "[M|G]bits/sec", "Cycle"] self.result_table_create(self.header_row) self.vm_num = 2 vm_cycle = 0 - self.launch_testpmd_as_vhost_user_with_no_pci() + self.launch_testpmd_as_vhost_user(no_pci=True) self.start_vms(bind_dev=False) self.config_vm_intf() self.start_iperf() @@ -430,9 +414,9 @@ class TestPVPVhostUserReconnect(TestCase): vm_cycle = 1 # reconnet from vhost self.logger.info("now reconnect from vhost") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - 
self.launch_testpmd_as_vhost_user_with_no_pci() + self.launch_testpmd_as_vhost_user(no_pci=True) self.start_iperf() self.reconnect_data = self.iperf_result_verify( vm_cycle, "reconnet from vhost" @@ -442,7 +426,7 @@ class TestPVPVhostUserReconnect(TestCase): # reconnet from VM self.logger.info("now reconnect from vm") vm_tmp = list() - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.vm_dut[0].send_expect("rm iperf_server.log", "# ", 10) self.vm_dut[1].send_expect("rm iperf_client.log", "# ", 10) self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") @@ -453,9 +437,10 @@ class TestPVPVhostUserReconnect(TestCase): self.check_reconnect_perf() self.result_table_print() - def test_perf_packed_ring_reconnet_one_vm(self): + def test_perf_packed_ring_reconnet_one_vm_from_vhost_user(self): """ - test reconnect stability test of one vm + Test Case 10: vhost-user/virtio-pmd pvp packed ring reconnect from vhost-user + Test Case 12: vhost-user/virtio-pmd pvp packed ring reconnect stability test """ self.header_row = [ "Mode", @@ -476,15 +461,38 @@ class TestPVPVhostUserReconnect(TestCase): vm_cycle = 1 # reconnet from vhost self.logger.info("now reconnect from vhost") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") self.check_reconnect_perf() + self.result_table_print() + + def test_perf_packed_ring_reconnet_one_vm_from_vm(self): + """ + Test Case 11: vhost-user/virtio-pmd pvp packed ring reconnect from VM + Test Case 12: vhost-user/virtio-pmd pvp packed ring reconnect stability test + """ + self.header_row = [ + "Mode", + "FrameSize(B)", + "Throughput(Mpps)", + "LineRate(%)", + "Cycle", + "Queue Number", + ] + self.result_table_create(self.header_row) + vm_cycle = 0 + self.vm_num = 1 + self.launch_testpmd_as_vhost_user() + 
self.start_vms(packed=True) + self.vm_testpmd_start() + self.before_data = self.send_and_verify(vm_cycle, "reconnet one vm") + vm_cycle = 1 # reconnet from qemu self.logger.info("now reconnect from vm") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms(packed=True) self.vm_testpmd_start() @@ -492,9 +500,10 @@ class TestPVPVhostUserReconnect(TestCase): self.check_reconnect_perf() self.result_table_print() - def test_perf_packed_ring_reconnet_two_vms(self): + def test_perf_packed_ring_reconnet_two_vms_from_vhost_user(self): """ - test reconnect stability test of two vms + Test Case 13: vhost-user/virtio-pmd pvp packed ring with multi VMs reconnect from vhost-user + Test Case 15: vhost-user/virtio-pmd pvp packed ring with multi VMs reconnect stability test """ self.header_row = [ "Mode", @@ -515,14 +524,38 @@ class TestPVPVhostUserReconnect(TestCase): vm_cycle = 1 # reconnet from vhost self.logger.info("now reconnect from vhost") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") self.launch_testpmd_as_vhost_user() self.reconnect_data = self.send_and_verify(vm_cycle, "reconnet from vhost") self.check_reconnect_perf() + self.result_table_print() + + def test_perf_packed_ring_reconnet_two_vms_from_vms(self): + """ + Test Case 14: vhost-user/virtio-pmd pvp packed ring with multi VMs reconnect from VMs + Test Case 15: vhost-user/virtio-pmd pvp packed ring with multi VMs reconnect stability test + """ + self.header_row = [ + "Mode", + "FrameSize(B)", + "Throughput(Mpps)", + "LineRate(%)", + "Cycle", + "Queue Number", + ] + self.result_table_create(self.header_row) + vm_cycle = 0 + self.vm_num = 2 + self.launch_testpmd_as_vhost_user() + self.start_vms(packed=True) + self.vm_testpmd_start() + self.before_data = self.send_and_verify(vm_cycle, "reconnet two vm") + + vm_cycle 
= 1 # reconnet from qemu self.logger.info("now reconnect from vm") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") self.start_vms(packed=True) self.vm_testpmd_start() @@ -530,15 +563,16 @@ class TestPVPVhostUserReconnect(TestCase): self.check_reconnect_perf() self.result_table_print() - def test_perf_packed_ring_virtio_net_reconnet_two_vms(self): + def test_perf_packed_ring_virtio_net_reconnet_two_vms_from_vhost_user(self): """ - test the iperf traffice can resume after reconnet + Test Case 16: vhost-user/virtio-net VM2VM packed ring reconnect from vhost-user + Test Case 18: vhost-user/virtio-net VM2VM packed ring reconnect stability test """ self.header_row = ["Mode", "[M|G]bits/sec", "Cycle"] self.result_table_create(self.header_row) self.vm_num = 2 vm_cycle = 0 - self.launch_testpmd_as_vhost_user_with_no_pci() + self.launch_testpmd_as_vhost_user(no_pci=True) self.start_vms(packed=True, bind_dev=False) self.config_vm_intf() self.start_iperf() @@ -547,18 +581,35 @@ class TestPVPVhostUserReconnect(TestCase): vm_cycle = 1 # reconnet from vhost self.logger.info("now reconnect from vhost") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#") - self.launch_testpmd_as_vhost_user_with_no_pci() + self.launch_testpmd_as_vhost_user(no_pci=True) self.start_iperf() self.reconnect_data = self.iperf_result_verify( vm_cycle, "reconnet from vhost" ) self.check_reconnect_perf() + self.result_table_print() + def test_perf_packed_ring_virtio_net_reconnet_two_vms_from_vms(self): + """ + Test Case 17: vhost-user/virtio-net VM2VM packed ring reconnect from VMs + Test Case 18: vhost-user/virtio-net VM2VM packed ring reconnect stability test + """ + self.header_row = ["Mode", "[M|G]bits/sec", "Cycle"] + self.result_table_create(self.header_row) + self.vm_num = 2 + vm_cycle = 0 + 
self.launch_testpmd_as_vhost_user(no_pci=True) + self.start_vms(packed=True, bind_dev=False) + self.config_vm_intf() + self.start_iperf() + self.before_data = self.iperf_result_verify(vm_cycle, "before reconnet") + + vm_cycle = 1 # reconnet from VM self.logger.info("now reconnect from vm") - for i in range(self.reconnect_times): + for _ in range(self.reconnect_times): self.vm_dut[0].send_expect("rm iperf_server.log", "# ", 10) self.vm_dut[1].send_expect("rm iperf_client.log", "# ", 10) self.dut.send_expect("killall -s INT qemu-system-x86_64", "# ") @@ -570,9 +621,9 @@ class TestPVPVhostUserReconnect(TestCase): self.result_table_print() def tear_down(self): - # - # Run after each test case. - # + """ + Run after each test case. + """ try: self.stop_all_apps() except Exception as e: