get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.
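
These three operations are ordinary HTTP calls against this endpoint. The sketch below uses Python's requests library and is illustrative only: the read-only GET needs no credentials, while PATCH and PUT require write access, assumed here to be a DRF-style token sent in an "Authorization: Token" header; the token value and the choice of "state" as the field to change are placeholders, not part of this response.

import requests

URL = "https://patches.dpdk.org/api/patches/110082/"

# GET: show the patch (read-only, no authentication required).
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# PATCH: partial update of selected fields; assumes token authentication is
# enabled on the server and that the caller is allowed to edit this patch.
headers = {"Authorization": "Token <your-api-token>"}   # placeholder token
resp = requests.patch(URL, json={"state": "superseded"}, headers=headers)
resp.raise_for_status()

# PUT works the same way but expects a complete representation of the patch.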

GET /api/patches/110082/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 110082,
    "url": "https://patches.dpdk.org/api/patches/110082/?format=api",
    "web_url": "https://patches.dpdk.org/project/dts/patch/20220422054838.1559225-1-weix.ling@intel.com/",
    "project": {
        "id": 3,
        "url": "https://patches.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220422054838.1559225-1-weix.ling@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20220422054838.1559225-1-weix.ling@intel.com",
    "date": "2022-04-22T05:48:38",
    "name": "[V1,2/6] test_plans/vswitch_sample_cbdma_test_plan: modify testplan with new format",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "d26b0ebcd85028d3c58c822a215bce692d62ed5f",
    "submitter": {
        "id": 1828,
        "url": "https://patches.dpdk.org/api/people/1828/?format=api",
        "name": "Ling, WeiX",
        "email": "weix.ling@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dts/patch/20220422054838.1559225-1-weix.ling@intel.com/mbox/",
    "series": [
        {
            "id": 22610,
            "url": "https://patches.dpdk.org/api/series/22610/?format=api",
            "web_url": "https://patches.dpdk.org/project/dts/list/?series=22610",
            "date": "2022-04-22T05:48:16",
            "name": "add new testsuite of DPDK-22.03",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/22610/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/110082/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/110082/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C0699A0093;\n\tFri, 22 Apr 2022 07:48:47 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B9D2E410D5;\n\tFri, 22 Apr 2022 07:48:47 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n by mails.dpdk.org (Postfix) with ESMTP id 8C0A040040\n for <dts@dpdk.org>; Fri, 22 Apr 2022 07:48:46 +0200 (CEST)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 21 Apr 2022 22:48:45 -0700",
            "from unknown (HELO localhost.localdomain) ([10.239.251.222])\n by orsmga005-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 21 Apr 2022 22:48:43 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1650606526; x=1682142526;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=d6dZNF9FzcsyJ92q+xxQA6+cYQjncO6Pjmn/b3hCeCQ=;\n b=HBqZG5hLkvLgPSd/+e0d140Dmw9qyXiejkCw7HPC0ThqrkFxrTAysZzt\n rEaqQ6UTj3eXacynMWV+n1PSbk0tiMm+Q0llSvA0NhNMgl8OpIK3BoKJU\n KdzJRzuIhq38QofMok+oD8nlMpFN3g3U0TEGomJMWXfYJiWr9t6eQPCE0\n 93aecTKaFs4nlDG68o8NVqEloJCn1uPpMbqIFP2kPmr1hNbUURWps2UMS\n nn36Njjc9ZMYUClJAeyGEby9+85JCpf4B+U+81lwtizxmXuaAq4ve77MV\n JljukGXkN8vT1sPYDjlECxFKuUgLlNCcwXdKzDSL2vW65VZZyK3URGHfc g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6400,9594,10324\"; a=\"262179417\"",
            "E=Sophos;i=\"5.90,280,1643702400\"; d=\"scan'208\";a=\"262179417\"",
            "E=Sophos;i=\"5.90,280,1643702400\"; d=\"scan'208\";a=\"728357398\""
        ],
        "From": "Wei Ling <weix.ling@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "Wei Ling <weix.ling@intel.com>",
        "Subject": "[dts][PATCH V1 2/6] test_plans/vswitch_sample_cbdma_test_plan: modify\n testplan with new format",
        "Date": "Fri, 22 Apr 2022 13:48:38 +0800",
        "Message-Id": "<20220422054838.1559225-1-weix.ling@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=y",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org"
    },
    "content": "Modify testplan with new format.\n\nSigned-off-by: Wei Ling <weix.ling@intel.com>\n---\n test_plans/vswitch_sample_cbdma_test_plan.rst | 294 ++++++++++++------\n 1 file changed, 193 insertions(+), 101 deletions(-)",
    "diff": "diff --git a/test_plans/vswitch_sample_cbdma_test_plan.rst b/test_plans/vswitch_sample_cbdma_test_plan.rst\nindex af2e62d1..e6fabe32 100644\n--- a/test_plans/vswitch_sample_cbdma_test_plan.rst\n+++ b/test_plans/vswitch_sample_cbdma_test_plan.rst\n@@ -37,68 +37,142 @@ Vswitch sample test with vhost async data path test plan\n Description\n ===========\n \n-Vswitch sample can leverage IOAT to accelerate vhost async data-path from dpdk 20.11. This plan test\n-vhost DMA operation callbacks for CBDMA PMD and vhost async data-path in vhost sample.\n+Vswitch sample can leverage IOAT to accelerate vhost async data-path from dpdk 20.11.\n+This plan test vhost DMA operation callbacks for CBDMA PMD and vhost async data-path in vhost sample.\n From 20.11 to 21.02, only split ring support cbdma copy with vhost enqueue direction;\n from 21.05,packed ring also can support cbdma copy with vhost enqueue direction.\n \n+For more about dpdk-testpmd sample, please refer to the DPDK docments:\n+https://doc.dpdk.org/guides/testpmd_app_ug/run_app.html\n+\n+For virtio-user vdev parameter, you can refer to the DPDK docments:\n+https://doc.dpdk.org/guides/nics/virtio.html#virtio-paths-selection-and-usage.\n+\n+For more about dpdk-vhost sample, please refer to the DPDK docments:\n+https://doc.dpdk.org/guides/sample_app_ug/vhost.html\n+\n Prerequisites\n =============\n \n+Topology\n+--------\n+\tTest flow: TG-->NIC-->VSwitch-->Virtio-->VSwitch-->NIC-->TG\n+\n+Hardware\n+--------\n+\tSupportted NICs: ALL\n+\n+Software\n+--------\n+\tTrex:http://trex-tgn.cisco.com/trex/release/v2.26.tar.gz\n+\n+General set up\n+--------------\n+1. Compile DPDK::\n+\n+\t# CC=gcc meson --werror -Denable_kmods=True -Dlibdir=lib -Dexamples=all --default-library=<dpdk build dir>\n+\t# ninja -C <dpdk build dir> -j 110\n+\n+2. Get the PCI device ID and DMA device ID of DUT, for example, 0000:18:00.0 is PCI device ID, 0000:00:04.0, 0000:00:04.1 is DMA device ID::\n+\n+\t<dpdk dir># ./usertools/dpdk-devbind.py -s\n+\n+\tNetwork devices using kernel driver\n+\t===================================\n+\t0000:18:00.0 'Device 159b' if=ens785f0 drv=ice unused=vfio-pci\n+\n+\tDMA devices using kernel driver\n+\t===============================\n+\t0000:00:04.0 'Sky Lake-E CBDMA Registers 2021' drv=ioatdma unused=vfio-pci\n+\t0000:00:04.1 'Sky Lake-E CBDMA Registers 2021' drv=ioatdma unused=vfio-pci\n+\n+Test case\n+=========\n+\n+Common steps\n+------------\n+1. Bind 1 NIC port and CBDMA channels to vfio-pci::\n+\n+\t<dpdk dir># ./usertools/dpdk-devbind.py -b vfio-pci <DUT port pci device id>\n+\t<dpdk dir># ./usertools/dpdk-devbind.py -b vfio-pci <DUT port DMA device id>\n+\n+\tFor example, Bind 1 NIC port and 2 CBDMA channels::\n+\t./usertools/dpdk-devbind.py -b vfio-pci 0000:00:18.0\n+\t./usertools/dpdk-devbind.py -b vfio-pci 0000:00:04.0,0000:00:04.1\n+\n+2. 
Inject imix packets to NIC by traffic generator::\n+\n+\tThe packet size include [64, 128, 256, 512, 1024, 1518], and the format of packet is as follows.\n+\t+-------------+-------------+-------------+-------------+\n+\t| MAC         | MAC         | IPV4        | IPV4        |\n+\t| Src address | Dst address | Src address | Dst address |\n+\t|-------------|-------------|-------------|-------------|\n+\t| Any MAC     | Virtio mac  | Any IP      | Any IP      |\n+\t+-------------+-------------+-------------+-------------+\n+\tAll the packets in this test plan use the Virtio mac:00:11:22:33:44:10.\n \n Test Case1: PVP performance check with CBDMA channel using vhost async driver\n-=============================================================================\n+-----------------------------------------------------------------------------\n+This case uses vhost, testpmd and Traffic Generator(For example, Trex) send imix packets to test performance with 1 CBDMA channel when using vhost async driver.\n+Include packed ring vectorized path, packed ring size not power of 2 path and split ring vectorized path have been tested.\n \n-1. Bind physical port to vfio-pci and CBDMA channel to vfio-pci.\n+1. Bind 1 NIC port and 1 CBDMA channel to vfio-pci, as common step 1.\n \n 2. On host, launch dpdk-vhost by below command::\n \n-\t./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 31-32 -n 4 -- \\\n-\t-p 0x1 --mergeable 1 --vm2vm 1 --dma-type ioat --stats 1 --socket-file /tmp/vhost-net --dmas [txd0@0000:00:04.0] --client --total-num-mbufs 600000\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 31-32 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 \\\n+\t--stats 1 --socket-file /tmp/vhost-net --dmas [txd0@0000:00:04.0] --client --total-num-mbufs 600000\n \n 3. Launch virtio-user with packed ring::\n \n-\t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 --force-max-simd-bitwidth=512 \\\n-\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,server=1 -- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 --force-max-simd-bitwidth=512 \\\n+\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,server=1 \\\n+\t-- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n \n 4. Start pkts from virtio-user side to let vswitch know the mac addr::\n \n-\ttestpmd>set fwd mac\n-\ttestpmd>start tx_first\n+\ttestpmd> set fwd mac\n+\ttestpmd> start tx_first\n \n 5. Inject pkts (packets length=64...1518) separately with dest_mac=virtio_mac_address (specific in above cmd with 00:11:22:33:44:10) to NIC using packet generator, record pvp (PG>nic>vswitch>virtio-user>vswitch>nic>PG) performance number can get expected.\n \n 6. 
Quit and re-launch virtio-user with packed ring size not power of 2::\n \n-\t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 --force-max-simd-bitwidth=512 \\\n-\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,server=1,queue_size=1025 -- -i --rxq=1 --txq=1 --txd=1025 --rxd=1025 --nb-cores=1\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 --force-max-simd-bitwidth=512 \\\n+\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,server=1,queue_size=1025 \\\n+\t-- -i --rxq=1 --txq=1 --txd=1025 --rxd=1025 --nb-cores=1\n \n 7. Re-test step 4-5, record performance of different packet length.\n \n 8. Quit and re-launch virtio-user with split ring::\n \n-\t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 \\\n-\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,server=1 -- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 \\\n+\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,server=1 \\\n+\t-- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n \n 9. Re-test step 4-5, record performance of different packet length.\n \n Test Case2: PVP test with two VM and two CBDMA channels using vhost async driver\n-=================================================================================\n+--------------------------------------------------------------------------------\n+This case uses vhost, testpmd and Traffic Generator(For example, Trex) send imix packets to test 2 virtio-user performance with 2 CBDMA channels when using vhost async driver.\n+And also have tested relaunch vhost-user to send packets to get the performance.\n \n-1. Bind one physical ports to vfio-pci and two CBDMA channels to vfio-pci.\n+1. Bind 1 NIC port and 2 CBDMA channel to vfio-pci, as common step 1.\n \n 2. On host, launch dpdk-vhost by below command::\n \n-\t./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- \\\n-\t-p 0x1 --mergeable 1 --vm2vm 1 --dma-type ioat --stats 1 --socket-file /tmp/vhost-net0 --socket-file /tmp/vhost-net1 --dmas [txd0@0000:00:01.0,txd1@0000:00:01.1] --client--total-num-mbufs 600000\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 \\\n+\t--stats 1 --socket-file /tmp/vhost-net0 --socket-file /tmp/vhost-net1 --dmas [txd0@0000:00:01.0,txd1@0000:00:01.1] --client--total-num-mbufs 600000\n \n 3. 
launch two virtio-user ports::\n \n-\t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 \\\n-\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1 -- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n-\t\n-\t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 31-32 -n 4 --no-pci --file-prefix=testpmd1 \\\n-\t--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=/tmp/vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1 -- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 \\\n+\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1 \\\n+\t-- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n+\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 31-32 -n 4 --no-pci --file-prefix=testpmd1 \\\n+\t--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=/tmp/vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1 \\\n+\t-- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n \n 4. Start pkts from two virtio-user side individually to let vswitch know the mac addr::\n \n@@ -107,36 +181,41 @@ Test Case2: PVP test with two VM and two CBDMA channels using vhost async driver\n \ttestpmd1>start tx_first\n \ttestpmd1>start tx_first\n \n-5. Inject IMIX packets (64b...1518b) with dest_mac=virtio_mac_address (00:11:22:33:44:10 and 00:11:22:33:44:11) to NIC using packet generator,record performance number can get expected from Packet generator rx side.\n+5. Send imix packets [64,1518] from packet generator as common step2, and check the throughput can get expected data.\n \n 6. Stop dpdk-vhost side and relaunch it with same cmd as step2.\n \n 7. Start pkts from two virtio-user side individually to let vswitch know the mac addr::\n \n-    testpmd0>stop\n-    testpmd0>start tx_first\n-    testpmd1>stop\n-    testpmd1>start tx_first\n+\ttestpmd0>stop\n+\ttestpmd0>start tx_first\n+\ttestpmd1>stop\n+\ttestpmd1>start tx_first\n \n-8. Inject IMIX packets (64b...1518b) with dest_mac=virtio_mac_address (00:11:22:33:44:10 and 00:11:22:33:44:11) to NIC using packet generator, ensure get same throughput as step5.\n+8. Send imix packets [64,1518] from packet generator as common step2, and check the throughput can get expected data.\n \n Test Case3: VM2VM forwarding test with two CBDMA channels\n-=========================================================\n+---------------------------------------------------------\n+This case uses vhost, testpmd  to test virtio-user0 to virtio-user1 forwarding 64Byte/2000Byte/8000Byte packets by testpmd with 2 CBDMA channels.\n+Virtio-user0 start with packed ring mergeable path and virtio-user1 start with split ring vectorized path.\n+And also have tested relaunch vhost-user to send packets to get the performance.\n \n-1.Bind one physical ports to vfio-pci and two CBDMA channels to vfio-pci.\n+1. Bind 1 NIC port and 2 CBDMA channel to vfio-pci, as common step 1.\n \n 2. 
On host, launch dpdk-vhost by below command::\n \n-\t./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 --dma-type ioat \\\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 \\\n \t--socket-file /tmp/vhost-net0 --socket-file /tmp/vhost-net1 --dmas [txd0@0000:00:04.0,txd1@0000:00:04.1]  --client --total-num-mbufs 600000\n \n 3. Launch virtio-user::\n \n-\t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 \\\n-\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1 -- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 29-30 -n 4 --no-pci --file-prefix=testpmd0 \\\n+\t--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=/tmp/vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1 \\\n+\t-- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n \n \t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 31-32 -n 4 --no-pci --file-prefix=testpmd1 \\\n-\t--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=/tmp/vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1 -- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n+\t--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=/tmp/vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1 \\\n+\t-- -i --rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1\n \n 4. Loop pkts between two virtio-user sides, record performance number with 64b/2000b/8000b/IMIX pkts can get expected::\n \n@@ -168,40 +247,45 @@ Test Case3: VM2VM forwarding test with two CBDMA channels\n 6. Rerun step 4.\n \n Test Case4: VM2VM test with cbdma channels register/unregister stable check\n-============================================================================\n+---------------------------------------------------------------------------\n+This case uses vhost, QEMU to test VM0 to VM1 forwarding 64Byte/2000Byte/8000Byte packets by testpmd with 2 CBDMA channels.\n+2 VMs start with split ring mergeable path, and to test stable after re-bind PCI in VMs 50 times then forwarding\n+64Byte/2000Byte/8000Byte packets by testpmd.\n \n-1. Bind one physical ports to vfio-pci and two CBDMA channels to vfio-pci.\n+1. Bind 1 NIC port and 2 CBDMA channel to vfio-pci, as common step 1.\n \n 2. On host, launch dpdk-vhost by below command::\n \n-    ./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 --dma-type ioat \\\n+    <dpdk dir># ./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 \\\n     --socket-file /tmp/vhost-net0 --socket-file /tmp/vhost-net1 --dmas [txd0@0000:00:04.0,txd1@0000:00:04.1] --client --total-num-mbufs 600000\n \n 3. 
Start VM0 with qemu-5.2.0::\n \n  \tqemu-system-x86_64 -name vm1 -enable-kvm -cpu host -smp 4 -m 4096 \\\n-        -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n-        -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04.img  \\\n-        -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n-        -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n-        -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n-        -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6002-:22 \\\n-        -chardev socket,id=char0,path=/tmp/vhost-net0,server \\\n-        -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n-        -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01,disable-modern=true,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on -vnc :10\n+    -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n+    -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04.img  \\\n+    -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n+    -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n+    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n+    -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6002-:22 \\\n+    -chardev socket,id=char0,path=/tmp/vhost-net0,server \\\n+    -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n+    -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01,disable-modern=true,mrg_rxbuf=on,\\\n+    csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on -vnc :10\n \n 4. Start VM1 with qemu-5.2.0::\n \n \tqemu-system-x86_64 -name vm2 -enable-kvm -cpu host -smp 4 -m 4096 \\\n-        -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n-        -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04-2.img  \\\n-        -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n-        -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n-        -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n-        -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6003-:22 \\\n-        -chardev socket,id=char0,path=/tmp/vhost-net1,server \\\n-        -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n-        -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:02,disable-modern=true,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on -vnc :12\n+    -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n+    -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04-2.img  \\\n+    -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n+    -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n+    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n+    -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6003-:22 \\\n+    -chardev socket,id=char0,path=/tmp/vhost-net1,server \\\n+    -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n+    -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:02,disable-modern=true,mrg_rxbuf=on,\\\n+    csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on -vnc 
:12\n \n 5. Bind virtio port to vfio-pci in both two VMs::\n \n@@ -212,7 +296,7 @@ Test Case4: VM2VM test with cbdma channels register/unregister stable check\n \n 6. Start testpmd in VMs seperately::\n \n-\t./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1-2 -n 4 -- -i --rxq=1 --txq=1 --nb-cores=1 --txd=1024 --rxd=1024\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 1-2 -n 4 -- -i --rxq=1 --txq=1 --nb-cores=1 --txd=1024 --rxd=1024\n \n 7. Loop pkts between two virtio-user sides, record performance number with 64b/2000b/8000b/IMIX pkts can get expected::\n \n@@ -248,40 +332,44 @@ Test Case4: VM2VM test with cbdma channels register/unregister stable check\n 9. Restart vhost, then rerun step 7,check vhost can stable work and get expected throughput.\n \n Test Case5: VM2VM split ring test with iperf and reconnect stable check\n-=======================================================================\n+-----------------------------------------------------------------------\n+This case uses vhost, QEMU to test VM0 to VM1 forwarding packets by iperf and scp tools with 2 CBDMA channels.\n+2 VMs start with split ring non-mergeable path, and to test relaunch vhost-user stable.\n \n-1. Bind one physical ports to vfio-pci and two CBDMA channels to vfio-pci.\n+1. Bind 1 NIC port and 2 CBDMA channel to vfio-pci, as common step 1.\n \n 2. On host, launch dpdk-vhost by below command::\n \n-\t./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 --dma-type ioat \\\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 \\\n \t--socket-file /tmp/vhost-net0 --socket-file /tmp/vhost-net1 --dmas [txd0@0000:00:04.0,txd1@0000:00:04.1] --client --total-num-mbufs 600000\n \n 3. 
Start VM0 with qemu-5.2.0::\n \n  \tqemu-system-x86_64 -name vm1 -enable-kvm -cpu host -smp 4 -m 4096 \\\n-        -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n-        -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04.img  \\\n-        -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n-        -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n-        -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n-        -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6002-:22 \\\n-        -chardev socket,id=char0,path=/tmp/vhost-net0,server \\\n-        -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n-        -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01,disable-modern=true,mrg_rxbuf=off,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on -vnc :10\n+    -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n+    -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04.img  \\\n+    -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n+    -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n+    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n+    -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6002-:22 \\\n+    -chardev socket,id=char0,path=/tmp/vhost-net0,server \\\n+    -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n+    -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01,disable-modern=true,mrg_rxbuf=off,\\\n+    csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on -vnc :10\n \n 4. Start VM1 with qemu-5.2.0::\n \n \tqemu-system-x86_64 -name vm2 -enable-kvm -cpu host -smp 4 -m 4096 \\\n-        -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n-        -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04-2.img  \\\n-        -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n-        -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n-        -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n-        -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6003-:22 \\\n-        -chardev socket,id=char0,path=/tmp/vhost-net1,server \\\n-        -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n-        -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:02,disable-modern=true,mrg_rxbuf=off,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on -vnc :12\n+    -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n+    -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04-2.img  \\\n+    -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n+    -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n+    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n+    -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6003-:22 \\\n+    -chardev socket,id=char0,path=/tmp/vhost-net1,server \\\n+    -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n+    -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:02,disable-modern=true,mrg_rxbuf=off,\\\n+    csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on -vnc :12\n \n 5. 
On VM1, set virtio device IP and run arp protocal::\n \n@@ -302,45 +390,49 @@ Test Case5: VM2VM split ring test with iperf and reconnect stable check\n \n 9. Scp 1MB file form VM1 to VM2M, check packets can be forwarding success by scp::\n \n-     Under VM1, run: `scp [xxx] root@1.1.1.8:/`   [xxx] is the file name\n+    Under VM1, run: `scp [xxx] root@1.1.1.8:/`   [xxx] is the file name\n \n 10. Relaunch vhost-dpdk, then rerun step 7-9 five times.\n \n Test Case6: VM2VM packed ring test with iperf and reconnect stable test\n-=======================================================================\n+-----------------------------------------------------------------------\n+This case uses vhost, QEMU to test VM0 to VM1 forwarding packets by iperf and scp tools with 2 CBDMA channels.\n+2 VMs start with packed ring non-mergeable path, and to test relaunch vhost-user stable.\n \n-1. Bind one physical ports to vfio-pci and two CBDMA channels to vfio-pci.\n+1. Bind 1 NIC port and 2 CBDMA channel to vfio-pci, as common step 1.\n \n 2. On host, launch dpdk-vhost by below command::\n \n-\t./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 --dma-type ioat \\\n+\t<dpdk dir># ./x86_64-native-linuxapp-gcc/examples/dpdk-vhost -l 26-28 -n 4 -- -p 0x1 --mergeable 1 --vm2vm 1 \\\n \t--socket-file /tmp/vhost-net0 --socket-file /tmp/vhost-net1 --dmas [txd0@0000:00:04.0,txd1@0000:00:04.1] --total-num-mbufs 600000\n \n 3. Start VM0 with qemu-5.2.0::\n \n  \tqemu-system-x86_64 -name vm1 -enable-kvm -cpu host -smp 4 -m 4096 \\\n-        -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n-        -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04.img  \\\n-        -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n-        -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n-        -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n-        -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6002-:22 \\\n-        -chardev socket,id=char0,path=/tmp/vhost-net0 \\\n-        -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n-        -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01,disable-modern=true,mrg_rxbuf=off,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on -vnc :10\n+    -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n+    -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04.img  \\\n+    -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n+    -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n+    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n+    -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6002-:22 \\\n+    -chardev socket,id=char0,path=/tmp/vhost-net0 \\\n+    -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n+    -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:01,disable-modern=true,mrg_rxbuf=off,\\\n+    csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on -vnc :10\n \n 4. 
Start VM1 with qemu-5.2.0::\n \n \tqemu-system-x86_64 -name vm2 -enable-kvm -cpu host -smp 4 -m 4096 \\\n-        -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n-        -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04-2.img  \\\n-        -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n-        -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n-        -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n-        -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6003-:22 \\\n-        -chardev socket,id=char0,path=/tmp/vhost-net1 \\\n-        -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n-        -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:02,disable-modern=true,mrg_rxbuf=off,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on -vnc :12\n+    -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on \\\n+    -numa node,memdev=mem -mem-prealloc -drive file=/home/osimg/ubuntu20-04-2.img  \\\n+    -chardev socket,path=/tmp/vm2_qga0.sock,server,nowait,id=vm2_qga0 -device virtio-serial \\\n+    -device virtserialport,chardev=vm2_qga0,name=org.qemu.guest_agent.2 -daemonize \\\n+    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -device e1000,netdev=nttsip1 \\\n+    -netdev user,id=nttsip1,hostfwd=tcp:127.0.0.1:6003-:22 \\\n+    -chardev socket,id=char0,path=/tmp/vhost-net1 \\\n+    -netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce \\\n+    -device virtio-net-pci,netdev=netdev0,mac=52:54:00:00:00:02,disable-modern=true,mrg_rxbuf=off,\\\n+    csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on -vnc :12\n \n 5. On VM1, set virtio device IP and run arp protocal::\n \n@@ -361,6 +453,6 @@ Test Case6: VM2VM packed ring test with iperf and reconnect stable test\n \n 9. Scp 1MB file form VM1 to VM2M, check packets can be forwarding success by scp::\n \n-     Under VM1, run: `scp [xxx] root@1.1.1.8:/`   [xxx] is the file name\n+    Under VM1, run: `scp [xxx] root@1.1.1.8:/`   [xxx] is the file name\n \n 10. Rerun step 7-9 five times.\n",
    "prefixes": [
        "V1",
        "2/6"
    ]
}
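
The "mbox" field above points to the raw patch in mbox form, which is the format git am consumes. A minimal sketch for downloading it (the local filename is arbitrary):

import requests

# URL taken verbatim from the "mbox" field of the response above.
MBOX_URL = "https://patches.dpdk.org/project/dts/patch/20220422054838.1559225-1-weix.ling@intel.com/mbox/"

resp = requests.get(MBOX_URL)
resp.raise_for_status()

# Save locally; the patch can then be applied with: git am 110082.mbox
with open("110082.mbox", "wb") as f:
    f.write(resp.content)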