get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (the supplied representation replaces the existing one).

GET /api/patches/95342/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95342,
    "url": "https://patches.dpdk.org/api/patches/95342/?format=api",
    "web_url": "https://patches.dpdk.org/project/dts/patch/20210706181100.4374-1-leweix.yang@intel.com/",
    "project": {
        "id": 3,
        "url": "https://patches.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210706181100.4374-1-leweix.yang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20210706181100.4374-1-leweix.yang@intel.com",
    "date": "2021-07-06T18:11:00",
    "name": "[V1] tests/TestSuite_vhost_cbdma:change cases about cbdma",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": false,
    "hash": "37ae4f0c34587ac85421675151e899cd31ad350f",
    "submitter": {
        "id": 2225,
        "url": "https://patches.dpdk.org/api/people/2225/?format=api",
        "name": "Lewei Yang",
        "email": "leweix.yang@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dts/patch/20210706181100.4374-1-leweix.yang@intel.com/mbox/",
    "series": [
        {
            "id": 17656,
            "url": "https://patches.dpdk.org/api/series/17656/?format=api",
            "web_url": "https://patches.dpdk.org/project/dts/list/?series=17656",
            "date": "2021-07-06T18:11:00",
            "name": "[V1] tests/TestSuite_vhost_cbdma:change cases about cbdma",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/17656/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/95342/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/95342/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CB972A0C47;\n\tTue,  6 Jul 2021 11:36:50 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B0A2F4126C;\n\tTue,  6 Jul 2021 11:36:50 +0200 (CEST)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id 595E641264\n for <dts@dpdk.org>; Tue,  6 Jul 2021 11:36:48 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 06 Jul 2021 02:36:47 -0700",
            "from unknown (HELO localhost.localdomain) ([10.240.183.102])\n by fmsmga003-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 06 Jul 2021 02:36:44 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10036\"; a=\"270206742\"",
            "E=Sophos;i=\"5.83,328,1616482800\"; d=\"scan'208\";a=\"270206742\"",
            "E=Sophos;i=\"5.83,328,1616482800\"; d=\"scan'208\";a=\"486085442\""
        ],
        "From": "Yang Lewei <leweix.yang@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "YangLewei <leweix.yang@intel.com>",
        "Date": "Tue,  6 Jul 2021 18:11:00 +0000",
        "Message-Id": "<20210706181100.4374-1-leweix.yang@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "Subject": "[dts] [PATCH V1] tests/TestSuite_vhost_cbdma:change cases about\n cbdma",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org",
        "Sender": "\"dts\" <dts-bounces@dpdk.org>"
    },
    "content": "From: YangLewei <leweix.yang@intel.com>\n\n1. change cases using imix pkts for better coverage:\n   - test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations\n   - test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations\n2. add one cbdma performance case:\n   - test_perf_compare_pvp_split_ring_performance\n\nSigned-off-by: YangLewei <leweix.yang@intel.com>\n---\n tests/TestSuite_vhost_cbdma.py | 212 +++++++++++++++++++--------------\n 1 file changed, 125 insertions(+), 87 deletions(-)",
    "diff": "diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py\nindex 6bd5919b..3523c4b3 100644\n--- a/tests/TestSuite_vhost_cbdma.py\n+++ b/tests/TestSuite_vhost_cbdma.py\n@@ -183,11 +183,13 @@ class TestVirTioVhostCbdma(TestCase):\n         self.result_secondary = re.findall(r'TX-packets: (\\w+)', out)\n         self.verify(int(self.result_first[0]) > 1 and int(self.result_secondary[0]) > 1, \"forward packets no correctly\")\n \n+\n     @property\n     def check_2m_env(self):\n         out = self.dut.send_expect(\"cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'\", \"# \")\n         return True if out == '2048' else False\n \n+\n     def launch_testpmd_as_vhost_user(self, command, cores=\"Default\", dev=\"\", ports = \"\"):\n         self.pmdout_vhost_user.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix=\"vhost\")\n         self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 120)\n@@ -271,62 +273,49 @@ class TestVirTioVhostCbdma(TestCase):\n         \"\"\"\n         self.test_target = self.running_case\n         self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]\n-        used_cbdma_num = 4\n-        queue = 2\n+        used_cbdma_num = 8\n+        queue = 8\n         txd_rxd = 1024\n         dmathr = 1024\n         nb_cores = 1\n         virtio_path = \"/tmp/s0\"\n         path_mode = 'mrg_rxbuf=1,in_order=1'\n         self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n-        vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}\"\n-        eal_params = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \" % (nb_cores, txd_rxd, txd_rxd, queue, queue)\n-        dynamic_queue_number_cbdma_virtio_params = f\"  --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params}\"\n+        eal_params = \" --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d \"\n+        dynamic_queue_number_cbdma_virtio_params = f\"  
--tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue,queue)}\"\n         virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1\"\n-        vhost_dev = f\"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'\"\n+        vhost_dev = f\"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'\"\n         # launch vhost testpmd\n         allow_pci = [self.dut.ports_info[0]['pci']]\n         for index in range(used_cbdma_num):\n-            if index < used_cbdma_num / 2:\n-                allow_pci.append(self.cbdma_dev_infos[index])\n-        self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)\n-        #  queue 2 start virtio testpmd, check perforamnce and RX/TX\n-        mode = \"dynamic_queue2\"\n+            allow_pci.append(self.cbdma_dev_infos[index])\n+\n+        # no cbdma to launch vhost\n+        self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports=[allow_pci[0]])\n+        mode = \"no_cbdma\"\n         self.mode_list.append(mode)\n         self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)\n         self.send_and_verify(mode, queue_list=range(queue))\n-        # On virtio-user side, dynamic change rx/tx queue numbers from 2 queue to 1 queues\n-        self.vhost_or_virtio_set_one_queue(self.virtio_user)\n-        self.send_and_verify(\"virtio_user_\" + mode + \"_change_to_1\", queue_list=[0])\n-        self.mode_list.append(\"virtio_user_\" + mode + \"_change_to_1\")\n-        self.virtio_user.send_expect(\"stop\", \"testpmd> \")\n-        self.virtio_user.send_expect(\"quit\", \"# \")\n+        self.vhost_user.send_expect(\"quit\", \"#\")\n+\n+        # used 4 cbdma_num and 4 queue to launch vhost\n+\n+        vhost_dmas = 
f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}\"\n+        self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2], dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])\n+        self.send_and_verify(\"used_4_cbdma_num\", queue_list=range(int(queue/2)))\n+        self.mode_list.append(\"used_4_cbdma_num\")\n+        self.vhost_user.send_expect(\"quit\", \"#\")\n+\n+        #used 8 cbdma_num to launch vhost\n+        vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}\"\n+        self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],\n+                                          dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)\n+        self.send_and_verify(\"used_8_cbdma_num\", queue_list=range(queue))\n+        self.mode_list.append(\"used_8_cbdma_num\")\n+        self.send_and_verify(\"used_8_cbdma_num_1\", queue_list=range(queue))\n+        self.mode_list.append(\"used_8_cbdma_num_1\")\n+        self.virtio_user.send_expect(\"stop\", \"testpmd> \", 60)\n         time.sleep(5)\n-        self.dut.send_expect(f\"rm -rf {virtio_path}\", \"#\")\n-        # queue 2 start virtio testpmd, check perforamnce and RX/TX\n-        self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)\n-        mode = \"Relaunch_dynamic_queue2\"\n-        self.mode_list.append(mode)\n-        self.send_and_verify(mode, queue_list=range(queue))\n-        # On vhost side, dynamic change rx queue numbers from 2 queue to 1 queues\n-        self.vhost_or_virtio_set_one_queue(self.vhost_user)\n-        self.send_and_verify(\"vhost_user\" + mode + \"_change_to_1\")\n-        
self.mode_list.append(\"vhost_user\" + mode + \"_change_to_1\")\n-        self.vhost_user.send_expect(\"quit\", \"# \")\n-        time.sleep(2)\n-        # Relaunch vhost with another two cbdma channels\n-        mode = \"Relaunch_vhost_2_cbdma\"\n-        self.mode_list.append(mode)\n-        dmathr = 512\n-        vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}\"\n-        allow_pci = [self.dut.ports_info[0]['pci']]\n-        for index in range(used_cbdma_num):\n-            if index >= used_cbdma_num / 2:\n-                allow_pci.append(self.cbdma_dev_infos[index])\n-        self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)\n-        self.virtio_user.send_expect(\"clear port stats all\", \"testpmd> \", 30)\n-        self.send_and_verify(mode, queue_list=range(queue))\n-        self.check_port_stats_result(self.virtio_user)\n         self.virtio_user.send_expect(\"quit\", \"# \")\n         self.vhost_user.send_expect(\"quit\", \"# \")\n         self.result_table_print()\n@@ -427,8 +416,8 @@ class TestVirTioVhostCbdma(TestCase):\n         \"\"\"\n         self.test_target = self.running_case\n         self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]\n-        used_cbdma_num = 4\n-        queue = 2\n+        used_cbdma_num = 8\n+        queue = 8\n         txd_rxd = 1024\n         dmathr = 1024\n         nb_cores = 1\n@@ -436,53 +425,41 @@ class TestVirTioVhostCbdma(TestCase):\n         path_mode = 'mrg_rxbuf=1,in_order=1,packed_vq=1'\n         self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n         vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}\"\n-        eal_params = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \" % (nb_cores, txd_rxd, txd_rxd, queue, queue)\n-        dynamic_queue_number_cbdma_virtio_params = f\"  --tx-offloads=0x0 --enable-hw-vlan-strip 
{eal_params}\"\n+        eal_params = \" --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d \"\n+        dynamic_queue_number_cbdma_virtio_params = f\"  --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue, queue)}\"\n         virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1\"\n-        vhost_dev = f\"'net_vhost0,iface={virtio_path},queues={queue},client=1,%s'\"\n+        vhost_dev = f\"'net_vhost0,iface={virtio_path},queues=%s,client=1,%s'\"\n         # launch vhost testpmd\n         allow_pci = [self.dut.ports_info[0]['pci']]\n         for index in range(used_cbdma_num):\n-            if index < used_cbdma_num / 2:\n-                allow_pci.append(self.cbdma_dev_infos[index])\n-        self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)\n-        #  queue 2 start virtio testpmd, check perforamnce and RX/TX\n-        mode = \"dynamic_queue2\"\n+            allow_pci.append(self.cbdma_dev_infos[index])\n+\n+        # no cbdma to launch vhost\n+        self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports= [allow_pci[0]])\n+        mode = \"no_cbdma\"\n         self.mode_list.append(mode)\n         self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)\n         self.send_and_verify(mode, queue_list=range(queue))\n-        # On virtio-user side, dynamic change rx/tx queue numbers from 2 queue to 1 queues\n-        self.vhost_or_virtio_set_one_queue(self.virtio_user)\n-        self.send_and_verify(\"virtio_user_\" + mode + \"_change_to_1\", queue_list=[0])\n-        self.mode_list.append(\"virtio_user_\" + mode + \"_change_to_1\")\n-        self.virtio_user.send_expect(\"stop\", \"testpmd> \")\n-        self.virtio_user.send_expect(\"quit\", \"# \")\n+        self.vhost_user.send_expect(\"quit\", \"#\")\n+\n+        # used 4 
cbdma_num and 4 queue to launch vhost\n+        vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}\"\n+        self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2],\n+                dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])\n+        self.send_and_verify(\"used_4_cbdma_num\", queue_list=range(int(queue/2)))\n+        self.mode_list.append(\"used_4_cbdma_num\")\n+        self.vhost_user.send_expect(\"quit\", \"#\")\n+\n+        #used 8 cbdma_num to launch vhost\n+        vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}\"\n+        self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],\n+                                          dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)\n+        self.send_and_verify(\"used_8_cbdma_num\", queue_list=range(queue))\n+        self.mode_list.append(\"used_8_cbdma_num\")\n+        self.send_and_verify(\"used_8_cbdma_num_1\", queue_list=range(queue))\n+        self.mode_list.append(\"used_8_cbdma_num_1\")\n+        self.virtio_user.send_expect(\"stop\", \"testpmd> \", 60)\n         time.sleep(5)\n-        self.dut.send_expect(f\"rm -rf {virtio_path}\", \"#\")\n-        # queue 2 start virtio testpmd, check perforamnce and RX/TX\n-        self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)\n-        mode = \"Relaunch_dynamic_queue2\"\n-        self.mode_list.append(mode)\n-        self.send_and_verify(mode, queue_list=range(queue))\n-        # On vhost side, dynamic change rx queue numbers from 2 queue to 1 queues\n-        self.vhost_or_virtio_set_one_queue(self.vhost_user)\n-        
self.send_and_verify(\"vhost_user\" + mode + \"_change_to_1\")\n-        self.mode_list.append(\"vhost_user\" + mode + \"_change_to_1\")\n-        self.vhost_user.send_expect(\"quit\", \"# \")\n-        time.sleep(2)\n-        # Relaunch vhost with another two cbdma channels\n-        mode = \"Relaunch_vhost_2_cbdma\"\n-        self.mode_list.append(mode)\n-        dmathr = 512\n-        vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr}\"\n-        allow_pci = [self.dut.ports_info[0]['pci']]\n-        for index in range(used_cbdma_num):\n-            if index >= used_cbdma_num / 2:\n-                allow_pci.append(self.cbdma_dev_infos[index])\n-        self.launch_testpmd_as_vhost_user(eal_params, self.cores[0:2], dev=vhost_dev % vhost_dmas, ports=allow_pci)\n-        self.virtio_user.send_expect(\"clear port stats all\", \"testpmd> \", 30)\n-        self.send_and_verify(mode, queue_list=range(queue))\n-        self.check_port_stats_result(self.virtio_user)\n         self.virtio_user.send_expect(\"quit\", \"# \")\n         self.vhost_user.send_expect(\"quit\", \"# \")\n         self.result_table_print()\n@@ -491,6 +468,67 @@ class TestVirTioVhostCbdma(TestCase):\n         self.handle_expected(mode_list=self.mode_list)\n         self.handle_results(mode_list=self.mode_list)\n \n+\n+    \n+    def test_perf_compare_pvp_split_ring_performance(self):\n+        \"\"\"\n+        Test Case6: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy\n+        \"\"\"\n+        used_cbdma_num = 1\n+        queue = 1\n+        txd_rxd = 1024\n+        eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'\n+        path_mode = 'mrg_rxbuf=1,in_order=1,server=1'\n+        allow_pci = [self.dut.ports_info[0]['pci']]\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n+        for index in range(used_cbdma_num):\n+            allow_pci.append(self.cbdma_dev_infos[index])\n+        path_mode = 
'mrg_rxbuf=1,in_order=1'\n+        vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}],%s'\"\n+        compare_pvp_split_ring_performance = \"--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d\" % (queue, txd_rxd, txd_rxd)\n+        dev_path_mode_mapper = {\n+            \"sync_cbdma\": ['dmathr=1024', 'dmathr=2000'],\n+            \"cpu\": 'dmathr=0',\n+        }\n+        for key,dma_mode in dev_path_mode_mapper.items():\n+            if key == \"cpu\":\n+                vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=1'\"\n+                self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])\n+                vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % queue\n+                self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4], dev=vdevs)\n+                mode = \"cpu_copy_64\"\n+                self.mode_list.append(mode)\n+                self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode=None)\n+                perf_cpu_copy_64 = self.throughput[mode][64][self.nb_desc]\n+                self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)\n+                self.virtio_user.send_expect(\"quit\", \"# \")\n+                self.vhost_user.send_expect(\"quit\", \"# \")\n+            else:\n+                self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[0]), ports=allow_pci)\n+                vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d,server=1'\" % queue\n+                self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4],dev=vdevs)\n+                mode = \"sync_copy_64\"\n+                self.mode_list.append(mode)\n+                
self.send_and_verify(mode,frame_sizes=[64],pkt_length_mode=None)\n+                perf_sync_copy_64 = self.throughput[mode][64][self.nb_desc]\n+                mode = \"cbdma_copy_1518\"\n+                self.mode_list.append(mode)\n+                self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode=None)\n+                perf_cbdma_copy_1518 = self.throughput[mode][1518][self.nb_desc]\n+                self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)\n+                self.vhost_user.send_expect(\"quit\", \"# \")\n+                time.sleep(3)\n+                self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[1]), ports=allow_pci)\n+                mode = \"sync_copy_1518\"\n+                self.mode_list.append(mode)\n+                self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode=None)\n+                perf_sync_copy_1518 = self.throughput[mode][1518][self.nb_desc]\n+                self.check_port_stats_result(self.virtio_user)\n+                self.virtio_user.send_expect(\"quit\", \"# \")\n+                self.vhost_user.send_expect(\"quit\", \"# \")\n+        self.result_table_print()\n+        self.verify(abs(perf_cbdma_copy_1518 - perf_sync_copy_1518)/perf_sync_copy_1518 > 0.05 and abs(perf_sync_copy_64 - perf_cpu_copy_64)/perf_sync_copy_64 < 0.1, \"sync_copy_64 vs. 
cpu_copy_64 delta > 10% or cbdma_copy_1518 vs sync_copy_1518 delta < 5%\"  )\n+\n     @staticmethod\n     def vhost_or_virtio_set_one_queue(session):\n         session.send_expect('stop', 'testpmd> ', 120)\n@@ -556,16 +594,16 @@ class TestVirTioVhostCbdma(TestCase):\n             # check RX/TX can work normally in each queues\n             self.check_packets_of_each_queue(queue_list=queue_list)\n \n-    def send_and_verify(self, mode, multiple_queue=True, queue_list=[]):\n+    def send_and_verify(self, mode, multiple_queue=True, queue_list=[], frame_sizes=[],pkt_length_mode='imix'):\n         \"\"\"\n-        Send packet with packet generator and verify\n+        Send packet with packet generator and verif\n         \"\"\"\n-        if self.pkt_length_mode == 'imix':\n+        if self.pkt_length_mode == pkt_length_mode:\n             self.send_imix_and_verify(mode, multiple_queue, queue_list)\n             return\n \n         self.throughput[mode] = dict()\n-        for frame_size in self.frame_sizes:\n+        for frame_size in frame_sizes:\n             self.throughput[mode][frame_size] = dict()\n             payload_size = frame_size - self.headers_size\n             tgenInput = []\n",
    "prefixes": [
        "V1"
    ]
}