get:
Show a patch.

patch:
Update a patch (partial update: only the supplied fields are changed).

put:
Update a patch (full update).

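The methods above are plain REST calls against the Patchwork API. As a minimal sketch (assuming the third-party `requests` package; `?format=json` selects the raw JSON rendering instead of the browsable "api" page shown below), the patch resource can be fetched and inspected like this:

    # Fetch patch 115227 from the DPDK Patchwork REST API as JSON.
    import requests

    PATCH_URL = "http://patches.dpdk.org/api/patches/115227/"

    resp = requests.get(PATCH_URL, params={"format": "json"}, timeout=30)
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])                # "[V6,2/2] tests/vswitch_sample_cbdma: ..."
    print(patch["state"])               # "accepted"
    print(patch["submitter"]["email"])  # "weix.ling@intel.com"

    # PUT/PATCH (the update methods above) require authentication; the
    # token header below is a hypothetical placeholder, not a real credential.
    # requests.patch(PATCH_URL, json={"archived": True},
    #                headers={"Authorization": "Token <api-token>"})
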
GET /api/patches/115227/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 115227,
    "url": "http://patches.dpdk.org/api/patches/115227/?format=api",
    "web_url": "http://patches.dpdk.org/project/dts/patch/20220818080849.420782-1-weix.ling@intel.com/",
    "project": {
        "id": 3,
        "url": "http://patches.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220818080849.420782-1-weix.ling@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20220818080849.420782-1-weix.ling@intel.com",
    "date": "2022-08-18T08:08:49",
    "name": "[V6,2/2] tests/vswitch_sample_cbdma: modify testsuite to test virito dequeue",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "cca8efa0273f137bb1ec8291464d221e3c36509c",
    "submitter": {
        "id": 1828,
        "url": "http://patches.dpdk.org/api/people/1828/?format=api",
        "name": "Ling, WeiX",
        "email": "weix.ling@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dts/patch/20220818080849.420782-1-weix.ling@intel.com/mbox/",
    "series": [
        {
            "id": 24351,
            "url": "http://patches.dpdk.org/api/series/24351/?format=api",
            "web_url": "http://patches.dpdk.org/project/dts/list/?series=24351",
            "date": "2022-08-18T08:08:27",
            "name": "modify vswitch_sample_cbdma to test virito dequeue",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/24351/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/115227/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/115227/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id EBA27A034C;\n\tThu, 18 Aug 2022 10:13:16 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E600040DDC;\n\tThu, 18 Aug 2022 10:13:16 +0200 (CEST)",
            "from mga06.intel.com (mga06b.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id C1F0A40156\n for <dts@dpdk.org>; Thu, 18 Aug 2022 10:13:15 +0200 (CEST)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Aug 2022 01:13:14 -0700",
            "from unknown (HELO localhost.localdomain) ([10.239.252.222])\n by fmsmga008-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Aug 2022 01:13:12 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1660810395; x=1692346395;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=62vTnOYwviYc5acUyVKXWSQ3C2EOe02449sz8t59CdI=;\n b=SrcJAjcIrkVuDniNotxoPR55fEpyYdg7nQIHYkuvsgFSiSONkN0kQ0Am\n 09K1Ig+13sXykPTRuA8uGMIYRHguSKGoq1Rnjp+Y5dN4oZdrdRZQVsnc2\n sLQVR6A0lF67C6CTq1in6LcQMCluIHOjYQGE8uVJ7pbfb7bS1QprxV1Ko\n UXg5B8zsyYdWRYImhKU11a5pkDZoDWhenByZmd27zrePDKwvSR6APFtDI\n FqUusBC1biDhH5an3DxBsoGTvaXPVxcT531NWgUbjpPFKErSa3Zf2C71V\n f6qBgspC+jvz++i4XuWa/WUSavjpFXiuDexpETloDZdD+D+gNq+yL2Kcw A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10442\"; a=\"354438123\"",
            "E=Sophos;i=\"5.93,245,1654585200\";\n d=\"scan'208,223\";a=\"354438123\"",
            "E=Sophos;i=\"5.93,245,1654585200\";\n d=\"scan'208,223\";a=\"668000677\""
        ],
        "From": "Wei Ling <weix.ling@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "Wei Ling <weix.ling@intel.com>",
        "Subject": "[dts][PATCH V6 2/2] tests/vswitch_sample_cbdma: modify testsuite to\n test virito dequeue",
        "Date": "Thu, 18 Aug 2022 04:08:49 -0400",
        "Message-Id": "<20220818080849.420782-1-weix.ling@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org"
    },
    "content": "From DPDK-22.07, virtio support async dequeue for split and packed ring\npath, so modify vswitch_sample_cbdma testsuite to test the split and\npacked ring async dequeue feature.\n\nSigned-off-by: Wei Ling <weix.ling@intel.com>\n---\n tests/TestSuite_vswitch_sample_cbdma.py | 731 ++++++++++--------------\n 1 file changed, 314 insertions(+), 417 deletions(-)",
    "diff": "diff --git a/tests/TestSuite_vswitch_sample_cbdma.py b/tests/TestSuite_vswitch_sample_cbdma.py\nindex 9fb6150b..d98a62ab 100644\n--- a/tests/TestSuite_vswitch_sample_cbdma.py\n+++ b/tests/TestSuite_vswitch_sample_cbdma.py\n@@ -33,14 +33,13 @@ class TestVswitchSampleCBDMA(TestCase):\n         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])\n         self.cores = self.dut.get_core_list(\"all\", socket=self.ports_socket)\n         self.vhost_core_list = self.cores[0:2]\n+        self.vhost_core_range = \"%s-%s\" % (\n+            self.vhost_core_list[0],\n+            self.vhost_core_list[-1],\n+        )\n         self.vuser0_core_list = self.cores[2:4]\n         self.vuser1_core_list = self.cores[4:6]\n-        self.vhost_core_mask = utils.create_mask(self.vhost_core_list)\n         self.mem_channels = self.dut.get_memory_channels()\n-        # get cbdma device\n-        self.cbdma_dev_infos = []\n-        self.dmas_info = None\n-        self.device_str = None\n         self.out_path = \"/tmp\"\n         out = self.tester.send_expect(\"ls -d %s\" % self.out_path, \"# \")\n         if \"No such file or directory\" in out:\n@@ -61,11 +60,6 @@ class TestVswitchSampleCBDMA(TestCase):\n         self.virtio_user1 = self.dut.new_session(suite=\"virtio-user1\")\n         self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0)\n         self.virtio_user1_pmd = PmdOutput(self.dut, self.virtio_user1)\n-        self.mrg_rxbuf = 0\n-        self.in_order = 0\n-        self.vectorized = 0\n-        self.packed_vq = 0\n-        self.server = 0\n         self.random_string = string.ascii_letters + string.digits\n         self.virtio_ip0 = \"1.1.1.2\"\n         self.virtio_ip1 = \"1.1.1.3\"\n@@ -75,6 +69,8 @@ class TestVswitchSampleCBDMA(TestCase):\n         \"\"\"\n         Run before each test case.\n         \"\"\"\n+        self.table_header = [\"Frame Size(Byte)\", \"Mode\", \"Throughput(Mpps)\"]\n+        self.result_table_create(self.table_header)\n         self.dut.send_expect(\"rm -rf %s/vhost-net*\" % self.base_dir, \"#\")\n         self.dut.send_expect(\"killall -I dpdk-vhost\", \"#\", 20)\n         self.dut.send_expect(\"killall -I dpdk-testpmd\", \"#\", 20)\n@@ -93,9 +89,7 @@ class TestVswitchSampleCBDMA(TestCase):\n         )\n         return True if out == \"2048\" else False\n \n-    def start_vhost_app(\n-        self, with_cbdma=True, cbdma_num=1, socket_num=1, client_mode=False\n-    ):\n+    def start_vhost_app(self, cbdma_num, socket_num, dmas_info, client_mode=False):\n         \"\"\"\n         launch the vhost app on vhost side\n         \"\"\"\n@@ -105,122 +99,86 @@ class TestVswitchSampleCBDMA(TestCase):\n             socket_file_param += \"--socket-file ./vhost-net{} \".format(item)\n         allow_pci = [self.dut.ports_info[0][\"pci\"]]\n         for item in range(cbdma_num):\n-            allow_pci.append(self.cbdma_dev_infos[item])\n+            allow_pci.append(self.cbdma_list[item])\n         allow_option = \"\"\n         for item in allow_pci:\n             allow_option += \" -a {}\".format(item)\n-        if with_cbdma:\n-            if client_mode:\n-                params = (\n-                    \" -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 \"\n-                    + socket_file_param\n-                    + \"--dmas [{}] --client --total-num-mbufs 600000\"\n-                ).format(\n-                    self.vhost_core_mask,\n-                    self.mem_channels,\n-                    allow_option,\n-                   
 self.dmas_info,\n-                )\n-            else:\n-                params = (\n-                    \" -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 \"\n-                    + socket_file_param\n-                    + \"--dmas [{}] --total-num-mbufs 600000\"\n-                ).format(\n-                    self.vhost_core_mask,\n-                    self.mem_channels,\n-                    allow_option,\n-                    self.dmas_info,\n-                )\n-        else:\n-            params = (\n-                \" -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 \"\n-                + socket_file_param\n-                + \"--total-num-mbufs 600000\"\n-            ).format(self.vhost_core_mask, self.mem_channels, allow_option)\n+        params = (\n+            \" -l {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 \"\n+            + socket_file_param\n+            + \"--dmas [{}] --total-num-mbufs 600000\"\n+        ).format(\n+            self.vhost_core_range,\n+            self.mem_channels,\n+            allow_option,\n+            dmas_info,\n+        )\n+        if client_mode:\n+            params = params + \" --client\"\n         self.command_line = self.app_path + params\n         self.vhost_user.send_command(self.command_line)\n-        # After started dpdk-vhost app, wait 3 seconds\n         time.sleep(3)\n \n-    def start_virtio_testpmd(\n-        self,\n-        pmd_session,\n-        dev_mac,\n-        dev_id,\n-        cores,\n-        prefix,\n-        enable_queues=1,\n-        nb_cores=1,\n-        used_queues=1,\n-        force_max_simd_bitwidth=False,\n-        power2=False,\n-    ):\n+    def start_virtio_testpmd_with_vhost_net0(self, eal_param=\"\", param=\"\"):\n         \"\"\"\n         launch the testpmd as virtio with vhost_net0\n         \"\"\"\n-        txd_rxd = 1024\n-        eal_params = \" --vdev=net_virtio_user0,mac={},path=./vhost-net{},queues={},mrg_rxbuf={},in_order={}\".format(\n-            dev_mac, dev_id, enable_queues, self.mrg_rxbuf, self.in_order\n-        )\n-        if self.vectorized == 1:\n-            eal_params += \",vectorized=1\"\n-        if self.packed_vq == 1:\n-            eal_params += \",packed_vq=1\"\n-        if self.server:\n-            eal_params += \",server=1\"\n-        if power2:\n-            txd_rxd += 1\n-            eal_params += \",queue_size={}\".format(txd_rxd)\n         if self.check_2M_env:\n-            eal_params += \" --single-file-segments\"\n-        if force_max_simd_bitwidth:\n-            eal_params += \" --force-max-simd-bitwidth=512\"\n-        params = \"--rxq={} --txq={} --txd={} --rxd={} --nb-cores={}\".format(\n-            used_queues, used_queues, txd_rxd, txd_rxd, nb_cores\n+            eal_param += \" --single-file-segments\"\n+        self.virtio_user0_pmd.start_testpmd(\n+            cores=self.vuser0_core_list,\n+            eal_param=eal_param,\n+            param=param,\n+            no_pci=True,\n+            prefix=\"virtio-user0\",\n+            fixed_prefix=True,\n         )\n-        pmd_session.start_testpmd(\n-            cores=cores,\n-            param=params,\n-            eal_param=eal_params,\n+\n+    def start_virtio_testpmd_with_vhost_net1(self, eal_param=\"\", param=\"\"):\n+        \"\"\"\n+        launch the testpmd as virtio with vhost_net1\n+        \"\"\"\n+        if self.check_2M_env:\n+            eal_param += \" --single-file-segments\"\n+        self.virtio_user1_pmd.start_testpmd(\n+            
cores=self.vuser1_core_list,\n+            eal_param=eal_param,\n+            param=param,\n             no_pci=True,\n-            ports=[],\n-            prefix=prefix,\n+            prefix=\"virtio-user1\",\n             fixed_prefix=True,\n         )\n \n     def start_vms(\n         self,\n-        mergeable=True,\n+        mrg_rxbuf=True,\n         packed=False,\n         server_mode=False,\n         set_target=True,\n         bind_dev=True,\n-        vm_diff_param=False,\n     ):\n         \"\"\"\n         start two VM, each VM has one virtio device\n         \"\"\"\n-        mergeable = \"on\" if mergeable else \"off\"\n-        setting_args = \"disable-modern=true,mrg_rxbuf={0},csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on\".format(\n-            mergeable\n-        )\n-        if packed:\n-            setting_args = setting_args + \",packed=on\"\n+        mrg_rxbuf = \"on\" if mrg_rxbuf else \"off\"\n         for i in range(self.vm_num):\n             vm_dut = None\n+            if not packed:\n+                modern = \"true\" if i == 0 else \"false\"\n+                setting_format = \"disable-modern=%s,mrg_rxbuf=%s,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on\"\n+            else:\n+                modern = \"false\"\n+                setting_format = \"disable-modern=%s,mrg_rxbuf=%s,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on\"\n+            setting_args = setting_format % (modern, mrg_rxbuf)\n             vm_info = VM(self.dut, \"vm%d\" % i, \"vhost_sample\")\n             vm_params = {}\n             vm_params[\"driver\"] = \"vhost-user\"\n             if server_mode:\n-                vm_params[\"opt_path\"] = self.base_dir + \"/vhost-net%d\" % i + \",server\"\n+                vm_params[\"opt_path\"] = \"./vhost-net%d\" % i + \",server\"\n             else:\n-                vm_params[\"opt_path\"] = self.base_dir + \"/vhost-net%d\" % i\n+                vm_params[\"opt_path\"] = \"./vhost-net%d\" % i\n             vm_params[\"opt_mac\"] = \"52:54:00:00:00:0%d\" % (i + 1)\n-            if vm_diff_param and i > 0:\n-                vm_params[\"opt_settings\"] = setting_args + \",packed=on\"\n-            else:\n-                vm_params[\"opt_settings\"] = setting_args\n+            vm_params[\"opt_settings\"] = setting_args\n             vm_info.set_vm_device(**vm_params)\n             time.sleep(3)\n             try:\n@@ -249,10 +207,13 @@ class TestVswitchSampleCBDMA(TestCase):\n             dut.bind_interfaces_linux(driver=\"vfio-pci\")\n             i += 1\n \n-    def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num):\n+    def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):\n         \"\"\"\n-        get all cbdma ports\n+        get and bind cbdma ports into DPDK driver\n         \"\"\"\n+        self.all_cbdma_list = []\n+        self.cbdma_list = []\n+        self.cbdma_str = \"\"\n         out = self.dut.send_expect(\n             \"./usertools/dpdk-devbind.py --status-dev dma\", \"# \", 30\n         )\n@@ -267,23 +228,19 @@ class TestVswitchSampleCBDMA(TestCase):\n                     cur_socket = 1\n                 else:\n                     cur_socket = 0\n-                if self.ports_socket == cur_socket:\n-                    self.cbdma_dev_infos.append(pci_info.group(1))\n+                if allow_diff_socket:\n+                    self.all_cbdma_list.append(pci_info.group(1))\n+                else:\n+                    if self.ports_socket == 
cur_socket:\n+                        self.all_cbdma_list.append(pci_info.group(1))\n         self.verify(\n-            len(self.cbdma_dev_infos) >= cbdma_num,\n-            \"There no enough cbdma device to run this suite\",\n+            len(self.all_cbdma_list) >= cbdma_num, \"There no enough cbdma device\"\n         )\n-        used_cbdma = self.cbdma_dev_infos[0:cbdma_num]\n-        dmas_info = \"\"\n-        for dmas in used_cbdma:\n-            number = used_cbdma.index(dmas)\n-            dmas = \"txd{}@{},\".format(number, dmas)\n-            dmas_info += dmas\n-        self.dmas_info = dmas_info[:-1]\n-        self.device_str = \" \".join(used_cbdma)\n+        self.cbdma_list = self.all_cbdma_list[0:cbdma_num]\n+        self.cbdma_str = \" \".join(self.cbdma_list)\n         self.dut.send_expect(\n             \"./usertools/dpdk-devbind.py --force --bind=%s %s\"\n-            % (self.drivername, self.device_str),\n+            % (self.drivername, self.cbdma_str),\n             \"# \",\n             60,\n         )\n@@ -306,14 +263,14 @@ class TestVswitchSampleCBDMA(TestCase):\n         )\n \n     def bind_cbdma_device_to_kernel(self):\n-        if self.device_str is not None:\n+        if self.cbdma_str is not None:\n             self.dut.send_expect(\"modprobe ioatdma\", \"# \")\n             self.dut.send_expect(\n-                \"./usertools/dpdk-devbind.py -u %s\" % self.device_str, \"# \", 30\n+                \"./usertools/dpdk-devbind.py -u %s\" % self.cbdma_str, \"# \", 30\n             )\n             self.dut.send_expect(\n                 \"./usertools/dpdk-devbind.py --force --bind=ioatdma  %s\"\n-                % self.device_str,\n+                % self.cbdma_str,\n                 \"# \",\n                 60,\n             )\n@@ -335,10 +292,6 @@ class TestVswitchSampleCBDMA(TestCase):\n         return tgen_input\n \n     def perf_test(self, frame_sizes, dst_mac_list):\n-        # Create test results table\n-        table_header = [\"Frame Size(Byte)\", \"Throughput(Mpps)\"]\n-        self.result_table_create(table_header)\n-        # Begin test perf\n         test_result = {}\n         for frame_size in frame_sizes:\n             self.logger.info(\n@@ -358,118 +311,64 @@ class TestVswitchSampleCBDMA(TestCase):\n             )\n             throughput = pps / 1000000.0\n             test_result[frame_size] = throughput\n-            self.result_table_add([frame_size, throughput])\n-        self.result_table_print()\n         return test_result\n \n     def pvp_test_with_cbdma(self):\n         frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518]\n-        self.virtio_user0_pmd.execute_cmd(\"set fwd mac\")\n-        self.virtio_user0_pmd.execute_cmd(\"start tx_first\")\n-        self.virtio_user0_pmd.execute_cmd(\"stop\")\n-        self.virtio_user0_pmd.execute_cmd(\"set fwd mac\")\n-        self.virtio_user0_pmd.execute_cmd(\"start\")\n         dst_mac_list = [self.virtio_dst_mac0]\n         perf_result = self.perf_test(frame_sizes, dst_mac_list)\n         return perf_result\n \n-    def test_perf_pvp_check_with_cbdma_channel_using_vhost_async_driver(self):\n+    def let_vswitch_know_mac(self, virtio_pmd, relaunch=False):\n+        if not relaunch:\n+            virtio_pmd.execute_cmd(\"set fwd mac\")\n+            virtio_pmd.execute_cmd(\"start tx_first\")\n+        else:\n+            virtio_pmd.execute_cmd(\"stop\")\n+            virtio_pmd.execute_cmd(\"start tx_first\")\n+\n+    def 
test_perf_pvp_perfromance_check_with_cbdma_channel_using_vhost_async_driver(\n+        self,\n+    ):\n         \"\"\"\n         Test Case1: PVP performance check with CBDMA channel using vhost async driver\n         \"\"\"\n         perf_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)\n-\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n+        dmas_info = \"txd0@%s,rxd0@%s\" % (self.cbdma_list[0], self.cbdma_list[1])\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=1, socket_num=1, client_mode=True\n+            cbdma_num=2, socket_num=1, dmas_info=dmas_info, client_mode=True\n         )\n-        # packed ring\n-        self.mrg_rxbuf = 0\n-        self.in_order = 1\n-        self.vectorized = 1\n-        self.packed_vq = 1\n-        self.server = 1\n-        self.start_virtio_testpmd(\n-            pmd_session=self.virtio_user0_pmd,\n-            dev_mac=self.virtio_dst_mac0,\n-            dev_id=0,\n-            cores=self.vuser0_core_list,\n-            prefix=\"testpmd0\",\n-            nb_cores=1,\n-            used_queues=1,\n-            force_max_simd_bitwidth=True,\n-            power2=False,\n-        )\n-        packed_ring_result = self.pvp_test_with_cbdma()\n \n-        # packed ring of power2\n-        self.virtio_user0_pmd.execute_cmd(\"quit\", \"# \")\n-        self.mrg_rxbuf = 0\n-        self.in_order = 1\n-        self.vectorized = 1\n-        self.packed_vq = 1\n-        self.server = 1\n-\n-        self.start_virtio_testpmd(\n-            pmd_session=self.virtio_user0_pmd,\n-            dev_mac=self.virtio_dst_mac0,\n-            dev_id=0,\n-            cores=self.vuser0_core_list,\n-            prefix=\"testpmd0\",\n-            nb_cores=1,\n-            used_queues=1,\n-            force_max_simd_bitwidth=True,\n-            power2=True,\n+        # packed ring path\n+        virtio0_eal_param = \"--force-max-simd-bitwidth=512 --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,server=1\"\n+        virtio0_param = \"--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\"\n+        self.start_virtio_testpmd_with_vhost_net0(\n+            eal_param=virtio0_eal_param, param=virtio0_param\n         )\n-        packed_ring_power2_result = self.pvp_test_with_cbdma()\n+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd, relaunch=False)\n+        packed_ring_result = self.pvp_test_with_cbdma()\n \n-        # split ring\n+        # split ring path\n         self.virtio_user0_pmd.execute_cmd(\"quit\", \"# \")\n-        self.mrg_rxbuf = 0\n-        self.in_order = 1\n-        self.vectorized = 1\n-        self.packed_vq = 0\n-        self.server = 1\n-\n-        self.start_virtio_testpmd(\n-            pmd_session=self.virtio_user0_pmd,\n-            dev_mac=self.virtio_dst_mac0,\n-            dev_id=0,\n-            cores=self.vuser0_core_list,\n-            prefix=\"testpmd0\",\n-            nb_cores=1,\n-            used_queues=1,\n-            force_max_simd_bitwidth=False,\n-            power2=False,\n+        virtio0_eal_param = \"--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,server=1\"\n+        virtio0_param = \"--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\"\n+        self.start_virtio_testpmd_with_vhost_net0(\n+            eal_param=virtio0_eal_param, param=virtio0_param\n         )\n+        
self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd)\n         split_ring_reult = self.pvp_test_with_cbdma()\n \n-        self.table_header = [\"Frame Size(Byte)\", \"Mode\", \"Throughput(Mpps)\"]\n-        self.result_table_create(self.table_header)\n         for key in packed_ring_result.keys():\n             perf_result.append([key, \"packed_ring\", packed_ring_result[key]])\n-        for key in packed_ring_power2_result.keys():\n-            perf_result.append(\n-                [key, \"packed_ring_power2\", packed_ring_power2_result[key]]\n-            )\n+\n         for key in split_ring_reult.keys():\n             perf_result.append([key, \"split_ring\", split_ring_reult[key]])\n+\n         for table_row in perf_result:\n             self.result_table_add(table_row)\n+\n         self.result_table_print()\n-        for key in packed_ring_result.keys():\n-            self.verify(\n-                packed_ring_result[key] > 1, \"The perf test result is lower than 1 Mpps\"\n-            )\n-        for key in packed_ring_power2_result.keys():\n-            self.verify(\n-                packed_ring_power2_result[key] > 1,\n-                \"The perf test result is lower than 1 Mpps\",\n-            )\n-        for key in split_ring_reult.keys():\n-            self.verify(\n-                split_ring_reult[key] > 1, \"The perf test result is lower than 1 Mpps\"\n-            )\n \n     def config_stream_imix(self, frame_sizes, dst_mac_list):\n         tgen_input = []\n@@ -496,10 +395,6 @@ class TestVswitchSampleCBDMA(TestCase):\n         return tgen_input\n \n     def perf_test_imix(self, frame_sizes, dst_mac_list):\n-        # Create test results table\n-        table_header = [\"Frame Size(Byte)\", \"Throughput(Mpps)\"]\n-        self.result_table_create(table_header)\n-        # Begin test perf\n         test_result = {}\n         tgenInput = self.config_stream_imix(frame_sizes, dst_mac_list)\n         fields_config = {\n@@ -520,83 +415,54 @@ class TestVswitchSampleCBDMA(TestCase):\n         )\n         throughput = pps / 1000000.0\n         test_result[\"imix\"] = throughput\n-        self.result_table_add([\"imix\", throughput])\n-        self.result_table_print()\n         return test_result\n \n-    def pvp_test_with_multi_cbdma(self, relaunch=False):\n+    def pvp_test_with_multi_cbdma(self):\n         frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518]\n-        if relaunch:\n-            self.virtio_user0_pmd.execute_cmd(\"stop\")\n-            self.virtio_user1_pmd.execute_cmd(\"stop\")\n-            self.virtio_user0_pmd.execute_cmd(\"clear port stats all\")\n-            self.virtio_user1_pmd.execute_cmd(\"clear port stats all\")\n-        self.virtio_user0_pmd.execute_cmd(\"set fwd mac\")\n-        self.virtio_user1_pmd.execute_cmd(\"set fwd mac\")\n-        self.virtio_user0_pmd.execute_cmd(\"start tx_first\")\n-        self.virtio_user1_pmd.execute_cmd(\"start tx_first\")\n         dst_mac_list = [self.virtio_dst_mac0, self.virtio_dst_mac1]\n         perf_result = self.perf_test_imix(frame_sizes, dst_mac_list)\n-        out0 = self.virtio_user0_pmd.execute_cmd(\"show port stats all\")\n-        out1 = self.virtio_user1_pmd.execute_cmd(\"show port stats all\")\n-        rx_num0 = re.compile(\"RX-packets: (.*?)\\s+?\").findall(out0, re.S)\n-        rx_num1 = re.compile(\"RX-packets: (.*?)\\s+?\").findall(out1, re.S)\n-        self.verify(int(rx_num0[0]) > 32, \"virtio-user0 not receive pkts from tester\")\n-        self.verify(int(rx_num1[0]) > 32, \"virtio-user1 
not receive pkts from tester\")\n         return perf_result\n \n-    def test_perf_pvp_test_with_two_vm_and_two_cbdma_channels_using_vhost_async_driver(\n-        self,\n-    ):\n+    def test_perf_pvp_test_with_2_vms_using_vhost_async_driver(self):\n         \"\"\"\n-        Test Case2: PVP test with two VM and two CBDMA channels using vhost async driver\n+        Test Case2: PVP test with two VMs using vhost async driver\n         \"\"\"\n         perf_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n-\n-        self.logger.info(\"Launch vhost app perf test\")\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)\n+        dmas_info = \"txd0@%s,rxd0@%s,txd1@%s,rxd1@%s\" % (\n+            self.cbdma_list[0],\n+            self.cbdma_list[1],\n+            self.cbdma_list[2],\n+            self.cbdma_list[3],\n+        )\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n         )\n-        self.mrg_rxbuf = 1\n-        self.in_order = 0\n-        self.vectorized = 0\n-        self.packed_vq = 1\n-        self.server = 1\n-        self.start_virtio_testpmd(\n-            pmd_session=self.virtio_user0_pmd,\n-            dev_mac=self.virtio_dst_mac0,\n-            dev_id=0,\n-            cores=self.vuser0_core_list,\n-            prefix=\"testpmd0\",\n-            nb_cores=1,\n-            used_queues=1,\n+        virtio0_eal_param = \"--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1\"\n+        virtio0_param = \"--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\"\n+        self.start_virtio_testpmd_with_vhost_net0(\n+            eal_param=virtio0_eal_param, param=virtio0_param\n         )\n-        self.mrg_rxbuf = 1\n-        self.in_order = 1\n-        self.vectorized = 1\n-        self.packed_vq = 0\n-        self.server = 1\n-        self.start_virtio_testpmd(\n-            pmd_session=self.virtio_user1_pmd,\n-            dev_mac=self.virtio_dst_mac1,\n-            dev_id=1,\n-            cores=self.vuser1_core_list,\n-            prefix=\"testpmd1\",\n-            nb_cores=1,\n-            used_queues=1,\n+\n+        virtio1_eal_param = \"--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=./vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1\"\n+        virtio1_param = \"--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\"\n+        self.start_virtio_testpmd_with_vhost_net1(\n+            eal_param=virtio1_eal_param, param=virtio1_param\n         )\n+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd, relaunch=False)\n+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user1_pmd, relaunch=False)\n+\n         before_relunch = self.pvp_test_with_multi_cbdma()\n \n-        self.logger.info(\"Relaunch vhost app perf test\")\n         self.vhost_user.send_expect(\"^C\", \"# \", 20)\n+        dmas_info = \"txd0@%s,rxd1@%s\" % (self.cbdma_list[0], self.cbdma_list[1])\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n         )\n-        after_relunch = self.pvp_test_with_multi_cbdma(relaunch=True)\n+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd, relaunch=True)\n+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user1_pmd, relaunch=True)\n+        
after_relunch = self.pvp_test_with_multi_cbdma()\n \n-        self.table_header = [\"Frame Size(Byte)\", \"Mode\", \"Throughput(Mpps)\"]\n-        self.result_table_create(self.table_header)\n         for key in before_relunch.keys():\n             perf_result.append([\"imix\", \"Before Re-launch vhost\", before_relunch[key]])\n         for key in after_relunch.keys():\n@@ -604,14 +470,15 @@ class TestVswitchSampleCBDMA(TestCase):\n         for table_row in perf_result:\n             self.result_table_add(table_row)\n         self.result_table_print()\n-        for key in before_relunch.keys():\n-            self.verify(\n-                before_relunch[key] > 1, \"The perf test result is lower than 1 Mpps\"\n-            )\n-        for key in after_relunch.keys():\n-            self.verify(\n-                after_relunch[key] > 1, \"The perf test result is lower than 1 Mpps\"\n+\n+        self.verify(\n+            (\n+                abs(before_relunch[\"imix\"] - after_relunch[\"imix\"])\n+                / before_relunch[\"imix\"]\n             )\n+            < 0.5,\n+            \"perf data have drop after re-launch vhost\",\n+        )\n \n     def get_receive_throughput(self, pmd_session, count=10):\n         i = 0\n@@ -636,7 +503,6 @@ class TestVswitchSampleCBDMA(TestCase):\n         pmd_session.execute_cmd(\"set eth-peer 0 %s\" % eth_peer_mac)\n \n     def send_pkts_from_testpmd1(self, pmd_session, pkt_len):\n-        pmd_session.execute_cmd(\"stop\")\n         if pkt_len in [64, 2000]:\n             pmd_session.execute_cmd(\"set txpkts %s\" % pkt_len)\n         elif pkt_len == 8000:\n@@ -645,15 +511,8 @@ class TestVswitchSampleCBDMA(TestCase):\n             pmd_session.execute_cmd(\"set txpkts 64,256,2000,64,256,2000\")\n         pmd_session.execute_cmd(\"start tx_first\")\n \n-    def vm2vm_check_with_two_cbdma(self, relaunch=False):\n+    def vm2vm_check_with_two_cbdma(self):\n         frame_sizes = [64, 2000, 8000, \"imix\"]\n-        if relaunch:\n-            self.virtio_user0_pmd.execute_cmd(\"stop\")\n-            self.virtio_user1_pmd.execute_cmd(\"stop\")\n-            self.virtio_user0_pmd.execute_cmd(\"clear port stats all\")\n-            self.virtio_user1_pmd.execute_cmd(\"clear port stats all\")\n-            self.virtio_user0_pmd.execute_cmd(\"show port stats all\")\n-            self.virtio_user1_pmd.execute_cmd(\"show port stats all\")\n         self.set_testpmd0_param(self.virtio_user0_pmd, self.virtio_dst_mac1)\n         self.set_testpmd1_param(self.virtio_user1_pmd, self.virtio_dst_mac0)\n \n@@ -662,65 +521,46 @@ class TestVswitchSampleCBDMA(TestCase):\n             self.send_pkts_from_testpmd1(\n                 pmd_session=self.virtio_user1_pmd, pkt_len=frame_size\n             )\n-            # Create test results table\n-            table_header = [\"Frame Size(Byte)\", \"Throughput(Mpps)\"]\n-            self.result_table_create(table_header)\n             rx_pps = self.get_receive_throughput(pmd_session=self.virtio_user1_pmd)\n-            self.result_table_add([frame_size, rx_pps])\n             rx_throughput[frame_size] = rx_pps\n-            self.result_table_print()\n         return rx_throughput\n \n-    def test_vm2vm_fwd_test_with_two_cbdma_channels(self):\n+    def test_vm2vm_virtio_user_forwarding_test_using_vhost_async_driver(self):\n         \"\"\"\n-        Test Case3: VM2VM forwarding test with two CBDMA channels\n+        Test Case3: VM2VM virtio-user forwarding test using vhost async driver\n         \"\"\"\n         perf_result = []\n- 
       self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n-\n-        self.logger.info(\"Launch vhost app perf test\")\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)\n+        dmas_info = \"txd0@%s,rxd0@%s,txd1@%s,rxd1@%s\" % (\n+            self.cbdma_list[0],\n+            self.cbdma_list[1],\n+            self.cbdma_list[2],\n+            self.cbdma_list[3],\n+        )\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n         )\n-        self.mrg_rxbuf = 1\n-        self.in_order = 0\n-        self.vectorized = 0\n-        self.packed_vq = 1\n-        self.server = 1\n-        self.start_virtio_testpmd(\n-            pmd_session=self.virtio_user0_pmd,\n-            dev_mac=self.virtio_dst_mac0,\n-            dev_id=0,\n-            cores=self.vuser0_core_list,\n-            prefix=\"testpmd0\",\n-            nb_cores=1,\n-            used_queues=1,\n+        virtio0_eal_param = \"--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1\"\n+        virtio0_param = \"--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\"\n+        self.start_virtio_testpmd_with_vhost_net0(\n+            eal_param=virtio0_eal_param, param=virtio0_param\n         )\n-        self.mrg_rxbuf = 1\n-        self.in_order = 1\n-        self.vectorized = 1\n-        self.packed_vq = 0\n-        self.server = 1\n-        self.start_virtio_testpmd(\n-            pmd_session=self.virtio_user1_pmd,\n-            dev_mac=self.virtio_dst_mac1,\n-            dev_id=1,\n-            cores=self.vuser1_core_list,\n-            prefix=\"testpmd1\",\n-            nb_cores=1,\n-            used_queues=1,\n+\n+        virtio1_eal_param = \"--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=./vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1\"\n+        virtio1_param = \"--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\"\n+        self.start_virtio_testpmd_with_vhost_net1(\n+            eal_param=virtio1_eal_param, param=virtio1_param\n         )\n         before_relunch_result = self.vm2vm_check_with_two_cbdma()\n \n-        self.logger.info(\"Relaunch vhost app perf test\")\n         self.vhost_user.send_expect(\"^C\", \"# \", 20)\n+        dmas_info = \"txd0@%s,rxd1@%s\" % (self.cbdma_list[0], self.cbdma_list[1])\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n         )\n-        after_relunch_result = self.vm2vm_check_with_two_cbdma(relaunch=True)\n+        self.virtio_user0_pmd.execute_cmd(\"stop\")\n+        after_relunch_result = self.vm2vm_check_with_two_cbdma()\n \n-        self.table_header = [\"Frame Size(Byte)\", \"Mode\", \"Throughput(Mpps)\"]\n-        self.result_table_create(self.table_header)\n         for key in before_relunch_result.keys():\n             perf_result.append(\n                 [key, \"Before Re-launch vhost\", before_relunch_result[key]]\n@@ -732,40 +572,26 @@ class TestVswitchSampleCBDMA(TestCase):\n         for table_row in perf_result:\n             self.result_table_add(table_row)\n         self.result_table_print()\n-        for key in before_relunch_result.keys():\n-            self.verify(\n-                before_relunch_result[key] > 0.1,\n-                \"The perf test result is lower than 0.1 Mpps\",\n-        
    )\n-        for key in after_relunch_result.keys():\n-            self.verify(\n-                after_relunch_result[key] > 0.1,\n-                \"The perf test result is lower than 0.1 Mpps\",\n-            )\n \n     def vm2vm_check_with_two_vhost_device(self):\n         rx_throughput = {}\n         self.frame_sizes = [64, 2000, 8000, \"imix\"]\n         for frame_size in self.frame_sizes:\n             self.send_pkts_from_testpmd1(pmd_session=self.vm1_pmd, pkt_len=frame_size)\n-            # Create test results table\n-            table_header = [\"Frame Size(Byte)\", \"Throughput(Mpps)\"]\n-            self.result_table_create(table_header)\n             rx_pps = self.get_receive_throughput(pmd_session=self.vm1_pmd)\n-            self.result_table_add([frame_size, rx_pps])\n             rx_throughput[frame_size] = rx_pps\n-            self.result_table_print()\n         return rx_throughput\n \n-    def start_vms_testpmd_and_test(self, need_start_vm=True):\n+    def start_vms_testpmd_and_test(\n+        self, mrg_rxbuf=True, need_start_vm=True, packed=False\n+    ):\n         if need_start_vm:\n             self.start_vms(\n-                mergeable=True,\n-                packed=False,\n+                mrg_rxbuf=mrg_rxbuf,\n+                packed=packed,\n                 server_mode=True,\n                 set_target=True,\n                 bind_dev=True,\n-                vm_diff_param=True,\n             )\n             self.vm0_pmd = PmdOutput(self.vm_dut[0])\n             self.vm1_pmd = PmdOutput(self.vm_dut[1])\n@@ -778,46 +604,53 @@ class TestVswitchSampleCBDMA(TestCase):\n         self.vm1_pmd.quit()\n         return perf_result\n \n-    def test_vm2vm_test_with_cbdma_channels_register_or_unregister_stable_check(self):\n+    def test_vm2vm_virtio_pmd_split_ring_test_with_cbdma_channels_register_and_unregister_stable_check(\n+        self,\n+    ):\n         \"\"\"\n-        Test Case4: VM2VM test with cbdma channels register/unregister stable check\n+        Test Case 4: VM2VM virtio-pmd split ring test with cbdma channels register/unregister stable check\n         \"\"\"\n         perf_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n-\n-        self.logger.info(\"Before rebind VM Driver perf test\")\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)\n+        dmas_info = \"txd0@%s,rxd0@%s,txd1@%s,rxd1@%s\" % (\n+            self.cbdma_list[0],\n+            self.cbdma_list[1],\n+            self.cbdma_list[2],\n+            self.cbdma_list[3],\n+        )\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n+        )\n+        before_rebind = self.start_vms_testpmd_and_test(\n+            mrg_rxbuf=True, need_start_vm=True, packed=False\n         )\n-        before_rebind = self.start_vms_testpmd_and_test(need_start_vm=True)\n \n-        self.logger.info(\"After rebind VM Driver perf test\")\n         # repeat bind 50 time from virtio-pci to vfio-pci\n         self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)\n         self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)\n+\n         self.vhost_user.send_expect(\"^C\", \"# \", 20)\n+        dmas_info = \"txd0@%s,rxd1@%s\" % (self.cbdma_list[0], self.cbdma_list[3])\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, 
dmas_info=dmas_info, client_mode=True\n+        )\n+        after_rebind = self.start_vms_testpmd_and_test(\n+            mrg_rxbuf=True, need_start_vm=False, packed=False\n         )\n-        after_bind = self.start_vms_testpmd_and_test(need_start_vm=False)\n-        # repeat bind 50 time from virtio-pci to vfio-pci\n-        self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)\n-        self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)\n \n-        self.table_header = [\n-            \"Frame Size(Byte)\",\n-            \"Before/After Bind VM Driver\",\n-            \"Throughput(Mpps)\",\n-        ]\n-        self.result_table_create(self.table_header)\n         for key in before_rebind.keys():\n             perf_result.append([key, \"Before rebind driver\", before_rebind[key]])\n-        for key in after_bind.keys():\n-            perf_result.append([key, \"After rebind driver\", after_bind[key]])\n+\n+        for key in after_rebind.keys():\n+            perf_result.append([key, \"After rebind driver\", after_rebind[key]])\n+\n         for table_row in perf_result:\n             self.result_table_add(table_row)\n         self.result_table_print()\n \n+        for i in perf_result:\n+            self.verify(i[2] > 0, \"%s Frame Size(Byte) is less than 0 Mpps\" % i[0])\n+\n     def config_vm_env(self):\n         \"\"\"\n         set virtio device IP and run arp protocal\n@@ -851,8 +684,6 @@ class TestVswitchSampleCBDMA(TestCase):\n         \"\"\"\n         get the iperf test result\n         \"\"\"\n-        self.table_header = [\"Mode\", \"[M|G]bits/sec\"]\n-        self.result_table_create(self.table_header)\n         self.vm_dut[0].send_expect(\"pkill iperf\", \"# \")\n         self.vm_dut[1].session.copy_file_from(\"%s/iperf_client.log\" % self.dut.base_dir)\n         fp = open(\"./iperf_client.log\")\n@@ -871,13 +702,6 @@ class TestVswitchSampleCBDMA(TestCase):\n             and float(iperfdata[-1].split()[0]) >= 1,\n             \"the throughput must be above 1Gbits/sec\",\n         )\n-\n-        # put the result to table\n-        results_row = [\"vm2vm\", iperfdata[-1]]\n-        self.result_table_add(results_row)\n-\n-        # print iperf resut\n-        self.result_table_print()\n         # rm the iperf log file in vm\n         self.vm_dut[0].send_expect(\"rm iperf_server.log\", \"#\", 10)\n         self.vm_dut[1].send_expect(\"rm iperf_client.log\", \"#\", 10)\n@@ -889,7 +713,7 @@ class TestVswitchSampleCBDMA(TestCase):\n         \"\"\"\n         # default file_size=1024K\n         data = \"\"\n-        for char in range(file_size * 1024):\n+        for _ in range(file_size * 1024):\n             data += random.choice(self.random_string)\n         self.vm_dut[0].send_expect('echo \"%s\" > /tmp/payload' % data, \"# \")\n         # scp this file to vm1\n@@ -909,11 +733,11 @@ class TestVswitchSampleCBDMA(TestCase):\n         )\n \n     def start_iperf_and_scp_test_in_vms(\n-        self, need_start_vm=True, mergeable=False, packed=False, server_mode=False\n+        self, mrg_rxbuf=False, need_start_vm=True, packed=False, server_mode=False\n     ):\n         if need_start_vm:\n             self.start_vms(\n-                mergeable=mergeable,\n+                mrg_rxbuf=mrg_rxbuf,\n                 packed=packed,\n                 server_mode=server_mode,\n                 set_target=True,\n@@ -927,74 +751,147 @@ class TestVswitchSampleCBDMA(TestCase):\n         iperfdata = self.get_iperf_result()\n         return iperfdata\n \n-    def 
test_vm2vm_split_ring_test_with_iperf_and_reconnect_stable_check(self):\n+    def test_vm2vm_virtio_pmd_packed_ring_test_with_cbdma_channels_register_and_unregister_stable_check(\n+        self,\n+    ):\n         \"\"\"\n-        Test Case5: VM2VM split ring test with iperf and reconnect stable check\n+        Test Case 5: VM2VM virtio-pmd packed ring test with cbdma channels register/unregister stable check\n         \"\"\"\n         perf_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)\n+        dmas_info = \"txd0@%s,rxd0@%s,txd1@%s,rxd1@%s\" % (\n+            self.cbdma_list[0],\n+            self.cbdma_list[1],\n+            self.cbdma_list[2],\n+            self.cbdma_list[3],\n+        )\n+        self.start_vhost_app(\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n+        )\n+        before_rebind = self.start_vms_testpmd_and_test(\n+            mrg_rxbuf=True, need_start_vm=True, packed=True\n+        )\n+\n+        # repeat bind 50 time from virtio-pci to vfio-pci\n+        self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)\n+        self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)\n+\n+        after_rebind = self.start_vms_testpmd_and_test(\n+            mrg_rxbuf=True, need_start_vm=False, packed=True\n+        )\n+\n+        for key in before_rebind.keys():\n+            perf_result.append([key, \"Before rebind driver\", before_rebind[key]])\n+\n+        for key in after_rebind.keys():\n+            perf_result.append([key, \"After rebind driver\", after_rebind[key]])\n \n-        self.logger.info(\"launch vhost\")\n+        for table_row in perf_result:\n+            self.result_table_add(table_row)\n+        self.result_table_print()\n+\n+        for i in perf_result:\n+            self.verify(i[2] > 0, \"%s Frame Size(Byte) is less than 0 Mpps\" % i[0])\n+\n+    def test_vm2vm_virtio_net_split_ring_test_with_4_cbdma_channels_and_iperf_stable_check(\n+        self,\n+    ):\n+        \"\"\"\n+        Test Case 6: VM2VM virtio-net split ring test with 4 cbdma channels and iperf stable check\n+        \"\"\"\n+        perf_result = []\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)\n+        dmas_info = \"txd0@%s,rxd0@%s,txd1@%s,rxd1@%s\" % (\n+            self.cbdma_list[0],\n+            self.cbdma_list[1],\n+            self.cbdma_list[2],\n+            self.cbdma_list[3],\n+        )\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n         )\n-        before_rerun = self.start_iperf_and_scp_test_in_vms(\n-            need_start_vm=True, mergeable=False, packed=False, server_mode=True\n+        before_relaunch = self.start_iperf_and_scp_test_in_vms(\n+            mrg_rxbuf=False, need_start_vm=True, packed=False, server_mode=True\n         )\n+        perf_result.append([\"split ring\", \"Before relaunch test\", before_relaunch])\n \n-        self.logger.info(\"relaunch vhost\")\n         self.vhost_user.send_expect(\"^C\", \"# \", 20)\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n         )\n-        self.logger.info(\"rerun scp and iperf test\")\n-        rerun_test_1 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-   
     rerun_test_2 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-        rerun_test_3 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-        rerun_test_4 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-        rerun_test_5 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-\n-        self.table_header = [\"Path\", \"Before/After rerun scp/iperf\", \"Throughput(Mpps)\"]\n-        self.result_table_create(self.table_header)\n-        perf_result.append([\"split ring\", \"Before rerun\", before_rerun])\n-        perf_result.append([\"split ring\", \"rerun test 1\", rerun_test_1])\n-        perf_result.append([\"split ring\", \"rerun test 2\", rerun_test_2])\n-        perf_result.append([\"split ring\", \"rerun test 3\", rerun_test_3])\n-        perf_result.append([\"split ring\", \"rerun test 4\", rerun_test_4])\n-        perf_result.append([\"split ring\", \"rerun test 5\", rerun_test_5])\n+\n+        for _ in range(5):\n+            rerun_result = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n+            perf_result.append([\"split ring\", \"After  rerun test\", rerun_result])\n+\n+        self.vhost_user.send_expect(\"^C\", \"# \", 20)\n+        dmas_info = \"txd0@%s,rxd1@%s\" % (self.cbdma_list[0], self.cbdma_list[1])\n+        self.start_vhost_app(\n+            cbdma_num=2, socket_num=2, dmas_info=dmas_info, client_mode=True\n+        )\n+\n+        after_relaunch = self.start_iperf_and_scp_test_in_vms(\n+            mrg_rxbuf=False, need_start_vm=False, packed=False, server_mode=True\n+        )\n+\n+        perf_result.append([\"split ring\", \"After relaunch test\", after_relaunch])\n+\n+        for table_row in perf_result:\n+            self.result_table_add(table_row)\n+        self.result_table_print()\n+\n+    def test_vm2vm_virtio_net_packed_ring_test_with_4_cbdma_channels_and_iperf_stable_check(\n+        self,\n+    ):\n+        \"\"\"\n+        Test Case 7: VM2VM virtio-net packed ring test with 4 cbdma channels and iperf stable check\n+        \"\"\"\n+        perf_result = []\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)\n+        dmas_info = \"txd0@%s,rxd0@%s,txd1@%s,rxd1@%s\" % (\n+            self.cbdma_list[0],\n+            self.cbdma_list[1],\n+            self.cbdma_list[2],\n+            self.cbdma_list[3],\n+        )\n+        self.start_vhost_app(\n+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True\n+        )\n+        before_relaunch = self.start_iperf_and_scp_test_in_vms(\n+            mrg_rxbuf=False, need_start_vm=True, packed=True, server_mode=True\n+        )\n+\n+        perf_result.append([\"packed ring\", \"Before rerun test\", before_relaunch])\n+\n+        for _ in range(5):\n+            rerun_result = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n+            perf_result.append([\"packed ring\", \"After  rerun test\", rerun_result])\n+\n         for table_row in perf_result:\n             self.result_table_add(table_row)\n         self.result_table_print()\n \n-    def test_vm2vm_packed_ring_test_with_iperf_and_reconnect_stable_check(self):\n+    def test_vm2vm_virtio_net_packed_ring_test_with_2_cbdma_channels_and_iperf_stable_check(\n+        self,\n+    ):\n         \"\"\"\n-        Test Case6: VM2VM packed ring test with iperf and reconnect stable test\n+        Test Case 8: VM2VM virtio-net packed ring test with 2 cbdma channels and iperf stable check\n         \"\"\"\n         perf_result = []\n         
self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n-\n+        dmas_info = \"txd0@%s,rxd1@%s\" % (self.cbdma_list[0], self.cbdma_list[1])\n         self.start_vhost_app(\n-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=False\n+            cbdma_num=2, socket_num=2, dmas_info=dmas_info, client_mode=False\n         )\n-        before_rerun = self.start_iperf_and_scp_test_in_vms(\n-            need_start_vm=True, mergeable=False, packed=True, server_mode=False\n+        before_relaunch = self.start_iperf_and_scp_test_in_vms(\n+            mrg_rxbuf=False, need_start_vm=True, packed=True, server_mode=False\n         )\n+        perf_result.append([\"packed ring\", \"Before rerun test\", before_relaunch])\n \n-        self.logger.info(\"rerun scp and iperf test\")\n-        rerun_test_1 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-        rerun_test_2 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-        rerun_test_3 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-        rerun_test_4 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n-        rerun_test_5 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n+        for _ in range(5):\n+            rerun_result = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)\n+            perf_result.append([\"packed ring\", \"After  rerun test\", rerun_result])\n \n-        self.table_header = [\"Path\", \"Before/After rerun scp/iperf\", \"Throughput(Mpps)\"]\n-        self.result_table_create(self.table_header)\n-        perf_result.append([\"packed ring\", \"Before rerun test\", before_rerun])\n-        perf_result.append([\"packed ring\", \"rerun test 1\", rerun_test_1])\n-        perf_result.append([\"packed ring\", \"rerun test 2\", rerun_test_2])\n-        perf_result.append([\"packed ring\", \"rerun test 3\", rerun_test_3])\n-        perf_result.append([\"packed ring\", \"rerun test 4\", rerun_test_4])\n-        perf_result.append([\"packed ring\", \"rerun test 5\", rerun_test_5])\n         for table_row in perf_result:\n             self.result_table_add(table_row)\n         self.result_table_print()\n@@ -1011,10 +908,10 @@ class TestVswitchSampleCBDMA(TestCase):\n         \"\"\"\n         Run after each test case.\n         \"\"\"\n-        self.dut.kill_all()\n         for i in range(len(self.vm)):\n             self.vm[i].stop()\n         self.vhost_user.send_expect(\"^C\", \"# \", 20)\n+        self.dut.kill_all()\n         self.bind_cbdma_device_to_kernel()\n \n     def tear_down_all(self):\n",
    "prefixes": [
        "V6",
        "2/2"
    ]
}
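
The "mbox" URL in the response points at the raw patch email, which is the usual way to apply the change locally. A hedged sketch of downloading it and applying it to a checkout of the DTS repository (the local directory name "dts" and the output file name are assumptions about the local setup):

    # Download the raw mbox referenced by the "mbox" field and apply it
    # with `git am` inside a local clone of git://dpdk.org/tools/dts.
    import os
    import subprocess

    import requests

    MBOX_URL = ("http://patches.dpdk.org/project/dts/patch/"
                "20220818080849.420782-1-weix.ling@intel.com/mbox/")

    mbox = requests.get(MBOX_URL, timeout=30)
    mbox.raise_for_status()

    # Write the mbox next to the script and pass an absolute path so the
    # git subprocess (running in the "dts" checkout) can find it.
    mbox_path = os.path.abspath("patch-115227.mbox")
    with open(mbox_path, "wb") as f:
        f.write(mbox.content)

    subprocess.run(["git", "am", mbox_path], cwd="dts", check=True)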