get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
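As a sketch of how a client might exercise these methods, the snippet below fetches this patch with GET and, assuming token-based authentication for write access, updates one field with PATCH. The token placeholder and the choice of the `state` field are illustrative assumptions, not part of the response shown below.

    import requests

    BASE = "http://patches.dpdk.org/api"
    PATCH_ID = 96389

    # Read-only GET needs no authentication.
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # PUT/PATCH are assumed to require a maintainer API token
    # (placeholder value below; sketch only).
    headers = {"Authorization": "Token <your-api-token>"}
    update = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                            json={"state": "accepted"},
                            headers=headers)
    print(update.status_code)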

GET /api/patches/96389/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96389,
    "url": "http://patches.dpdk.org/api/patches/96389/?format=api",
    "web_url": "http://patches.dpdk.org/project/dts/patch/20210729180620.1265512-1-yinan.wang@intel.com/",
    "project": {
        "id": 3,
        "url": "http://patches.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210729180620.1265512-1-yinan.wang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20210729180620.1265512-1-yinan.wang@intel.com",
    "date": "2021-07-29T18:06:20",
    "name": "[v1] test_plans/vm2vm_virtio_pmd_test_plan.rst",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "e7604aae8a2474196ed8fcf33871b373b49a8abd",
    "submitter": {
        "id": 1081,
        "url": "http://patches.dpdk.org/api/people/1081/?format=api",
        "name": "Wang, Yinan",
        "email": "yinan.wang@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dts/patch/20210729180620.1265512-1-yinan.wang@intel.com/mbox/",
    "series": [
        {
            "id": 18058,
            "url": "http://patches.dpdk.org/api/series/18058/?format=api",
            "web_url": "http://patches.dpdk.org/project/dts/list/?series=18058",
            "date": "2021-07-29T18:06:20",
            "name": "[v1] test_plans/vm2vm_virtio_pmd_test_plan.rst",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/18058/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/96389/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/96389/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 717E1A0C40;\n\tThu, 29 Jul 2021 11:24:36 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 64D4940687;\n\tThu, 29 Jul 2021 11:24:36 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 0D65A40041\n for <dts@dpdk.org>; Thu, 29 Jul 2021 11:24:33 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 29 Jul 2021 02:24:33 -0700",
            "from dpdk-yinan-ntb1.sh.intel.com ([10.67.119.39])\n by FMSMGA003.fm.intel.com with ESMTP; 29 Jul 2021 02:24:31 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10059\"; a=\"200017110\"",
            "E=Sophos;i=\"5.84,278,1620716400\"; d=\"scan'208\";a=\"200017110\"",
            "E=Sophos;i=\"5.84,278,1620716400\"; d=\"scan'208\";a=\"507029221\""
        ],
        "X-ExtLoop1": "1",
        "From": "Yinan Wang <yinan.wang@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "Yinan Wang <yinan.wang@intel.com>",
        "Date": "Thu, 29 Jul 2021 14:06:20 -0400",
        "Message-Id": "<20210729180620.1265512-1-yinan.wang@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dts] [PATCH v1] test_plans/vm2vm_virtio_pmd_test_plan.rst",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org",
        "Sender": "\"dts\" <dts-bounces@dpdk.org>"
    },
    "content": "1. Correct test app name.\n2. Add whole BDF name of cbdma devices.\n3. Add a tip that cbdma case need special dpdk code.\n\nSigned-off-by: Yinan Wang <yinan.wang@intel.com>\n---\n test_plans/vm2vm_virtio_pmd_test_plan.rst | 111 +++++++++++-----------\n 1 file changed, 56 insertions(+), 55 deletions(-)",
    "diff": "diff --git a/test_plans/vm2vm_virtio_pmd_test_plan.rst b/test_plans/vm2vm_virtio_pmd_test_plan.rst\nindex 0b1d4a7f..6b826f81 100644\n--- a/test_plans/vm2vm_virtio_pmd_test_plan.rst\n+++ b/test_plans/vm2vm_virtio_pmd_test_plan.rst\n@@ -37,6 +37,7 @@ vm2vm vhost-user/virtio-pmd test plan\n This test plan includes vm2vm mergeable, normal and vector_rx path test with virtio 0.95 and virtio 1.0,\n also add mergeable and normal path test with virtio 1.1. Specially, three mergeable path cases check the\n payload of each packets are valid by using pdump.\n+Note: Blow cases 9-11 which have cbdma usage should add dpdk local path to support async vhostpmd.\n \n Test flow\n =========\n@@ -48,7 +49,7 @@ Test Case 1: VM2VM vhost-user/virtio-pmd with vector_rx path\n 1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::\n \n      rm -rf vhost-net*\n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -78,13 +79,13 @@ Test Case 1: VM2VM vhost-user/virtio-pmd with vector_rx path\n \n 3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1, [0000:xx.00] is [Bus,Device,Function] of virtio-net::\n \n-    ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2 and send 64B packets, [0000:xx.00] is [Bus,Device,Function] of virtio-net::\n \n-    ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n     testpmd>set fwd txonly\n     testpmd>set txpkts 64\n     testpmd>start tx_first 32\n@@ -103,7 +104,7 @@ Test Case 2: VM2VM vhost-user/virtio-pmd with normal path\n 1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::\n \n      rm -rf vhost-net*\n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -133,13 +134,13 @@ Test Case 2: VM2VM vhost-user/virtio-pmd with normal path\n \n 3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1 ::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 4. 
On VM2, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio2 and send 64B packets ::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n     testpmd>set fwd txonly\n     testpmd>set txpkts 64\n     testpmd>start tx_first 32\n@@ -158,7 +159,7 @@ Test Case 3: VM2VM vhost-user/virtio1.0-pmd with vector_rx path\n 1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::\n \n      rm -rf vhost-net*\n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -188,13 +189,13 @@ Test Case 3: VM2VM vhost-user/virtio1.0-pmd with vector_rx path\n \n 3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1, [0000:xx.00] is [Bus,Device,Function] of virtio-net::\n \n-    ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2, [0000:xx.00] is [Bus,Device,Function] of virtio-net::\n \n-    ./testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -w 0000:xx.00,vectorized=1 -- -i --txd=1024 --rxd=1024\n     testpmd>set fwd txonly\n     testpmd>set txpkts 64\n     testpmd>start tx_first 32\n@@ -213,7 +214,7 @@ Test Case 4: VM2VM vhost-user/virtio1.0-pmd with normal path\n 1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::\n \n      rm -rf vhost-net*\n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -243,13 +244,13 @@ Test Case 4: VM2VM vhost-user/virtio1.0-pmd with normal path\n \n 3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1 ::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2 ::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n     testpmd>set fwd txonly\n     testpmd>set txpkts 64\n     testpmd>start tx_first 32\n@@ -267,7 +268,7 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check\n \n 1. 
Bind virtio with igb_uio driver, launch the testpmd by below commands::\n \n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -310,17 +311,18 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check\n \n 4. Bind virtio with igb_uio driver,then run testpmd, set rxonly mode for virtio-pmd on VM1::\n \n-    ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 5. Bootup pdump in VM1::\n \n-    ./x86_64-native-linuxapp-gcc/app/dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx.pcap,mbuf-size=8000'\n+    ./dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx.pcap,mbuf-size=8000'\n \n 6. On VM2, bind virtio with igb_uio driver,then run testpmd, config tx_packets to 8k length with chain mode::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n+\n     testpmd>set fwd mac\n     testpmd>set txpkts 2000,2000,2000,2000\n \n@@ -333,17 +335,17 @@ Test Case 5: VM2VM vhost-user/virtio-pmd mergeable path with payload valid check\n \n 9. Relaunch testpmd in VM1::\n \n-    ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 10. Bootup pdump in VM1::\n \n-    ./x86_64-native-linuxapp-gcc/app/dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx-small.pcap,mbuf-size=8000'\n+    ./dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx-small.pcap,mbuf-size=8000'\n \n 11. Relaunch testpmd on VM2, send ten 64B packets from virtio-pmd on VM2::\n \n-     ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024\n+     ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024\n      testpmd>set fwd mac\n      testpmd>set burst 1\n      testpmd>start tx_first 10\n@@ -355,7 +357,7 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch\n \n 1. Bind virtio with igb_uio driver, launch the testpmd by below commands::\n \n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -398,17 +400,16 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch\n \n 4. 
Bind virtio with igb_uio driver,then run testpmd, set rxonly mode for virtio-pmd on VM1::\n \n-    ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n-    testpmd>set fwd rxonly\n+    ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>start\n \n 5. Bootup pdump in VM1::\n \n-    ./x86_64-native-linuxapp-gcc/app/dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx.pcap,mbuf-size=8000'\n+    ./dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx.pcap,mbuf-size=8000'\n \n 6. On VM2, bind virtio with igb_uio driver,then run testpmd, config tx_packets to 8k length with chain mode::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set fwd mac\n     testpmd>set txpkts 2000,2000,2000,2000\n \n@@ -421,17 +422,17 @@ Test Case 6: VM2VM vhost-user/virtio1.0-pmd mergeable path with payload valid ch\n \n 9. Relaunch testpmd in VM1::\n \n-    ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 10. Bootup pdump in VM1::\n \n-    ./x86_64-native-linuxapp-gcc/app/dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx-small.pcap'\n+    ./dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx-small.pcap'\n \n 11. Relaunch testpmd On VM2, send ten 64B packets from virtio-pmd on VM2::\n \n-     ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n+     ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n      testpmd>set fwd mac\n      testpmd>set burst 1\n      testpmd>start tx_first 10\n@@ -443,7 +444,7 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch\n \n 1. Bind virtio with igb_uio driver, launch the testpmd by below commands::\n \n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -486,17 +487,17 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch\n \n 4. Bind virtio with igb_uio driver,then run testpmd, set rxonly mode for virtio-pmd on VM1::\n \n-    ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 5. Bootup pdump in VM1::\n \n-    ./x86_64-native-linuxapp-gcc/app/dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx.pcap,mbuf-size=8000'\n+    ./dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx.pcap,mbuf-size=8000'\n \n 6. 
On VM2, bind virtio with igb_uio driver,then run testpmd, config tx_packets to 8k length with chain mode::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set fwd mac\n     testpmd>set txpkts 2000,2000,2000,2000\n \n@@ -509,17 +510,17 @@ Test Case 7: VM2VM vhost-user/virtio1.1-pmd mergeable path with payload valid ch\n \n 9. Relaunch testpmd in VM1::\n \n-    ./testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 --file-prefix=test -- -i --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 10. Bootup pdump in VM1::\n \n-    ./x86_64-native-linuxapp-gcc/app/dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx-small.pcap'\n+    ./dpdk-pdump -v --file-prefix=test -- --pdump  'port=0,queue=*,rx-dev=/root/pdump-rx-small.pcap'\n \n 11. Relaunch testpmd On VM2, send ten 64B packets from virtio-pmd on VM2::\n \n-     ./testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600\n+     ./dpdk-testpmd -c 0x3 -n 4 -- -i --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n      testpmd>set fwd mac\n      testpmd>set burst 1\n      testpmd>start tx_first 10\n@@ -532,7 +533,7 @@ Test Case 8: VM2VM vhost-user/virtio1.1-pmd with normal path\n 1. Bind one physical nic port to igb_uio, then launch the testpmd by below commands::\n \n      rm -rf vhost-net*\n-    ./testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0xc0000 -n 4 --no-pci --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=1' --vdev 'net_vhost1,iface=vhost-net1,queues=1'  -- -i --nb-cores=1 --txd=1024 --rxd=1024\n     testpmd>set fwd mac\n     testpmd>start\n \n@@ -562,13 +563,13 @@ Test Case 8: VM2VM vhost-user/virtio1.1-pmd with normal path\n \n 3. On VM1, bind vdev with igb_uio driver,then run testpmd, set rxonly for virtio1 ::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n     testpmd>set fwd rxonly\n     testpmd>start\n \n 4. On VM2, bind vdev with igb_uio driver,then run testpmd, set txonly for virtio2 ::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txd=1024 --rxd=1024\n     testpmd>set fwd txonly\n     testpmd>set txpkts 64\n     testpmd>start tx_first 32\n@@ -586,8 +587,8 @@ Test Case 9: VM2VM virtio-pmd split ring mergeable path 8 queues CBDMA enable wi\n \n 1. 
Bind 16 cbdma channels to igb_uio driver, then launch the testpmd with 2 vhost port and 8 queues by below commands::\n \n-    ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@00:04.0;txq1@00:04.1;txq2@00:04.2;txq3@00:04.3;txq4@00:04.4;txq5@00:04.5;txq6@00:04.6;txq7@00:04.7],dmathr=512' \\\n-    --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3;txq4@80:04.4;txq5@80:04.5;txq6@80:04.6;txq7@80:04.7],dmathr=512'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n+    ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3;txq4@0000:00:04.4;txq5@0000:00:04.5;txq6@0000:00:04.6;txq7@0000:00:04.7],dmathr=512' \\\n+    --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3;txq4@0000:80:04.4;txq5@0000:80:04.5;txq6@0000:80:04.6;txq7@0000:80:04.7],dmathr=512'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n     testpmd>vhost enable tx all\n     testpmd>start\n \n@@ -624,13 +625,13 @@ Test Case 9: VM2VM virtio-pmd split ring mergeable path 8 queues CBDMA enable wi\n \n 4. Launch testpmd in VM1::\n \n-    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set mac fwd\n     testpmd>start\n \n 5. Launch testpmd in VM2, sent imix pkts from VM2::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set mac fwd\n     testpmd>set txpkts 64,256,512,1024,2000,64,256,512,1024,2000\n     testpmd>start tx_first 1\n@@ -642,8 +643,8 @@ Test Case 9: VM2VM virtio-pmd split ring mergeable path 8 queues CBDMA enable wi\n \n 7. Relaunch and start vhost side testpmd with below cmd, change cbdma threshold for one vhost port's cbdma channels::\n \n-  ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@00:04.0;txq1@00:04.1;txq2@00:04.2;txq3@00:04.3;txq4@00:04.4;txq5@00:04.5;txq6@00:04.6;txq7@00:04.7],dmathr=512' \\\n-   --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3;txq4@80:04.4;txq5@80:04.5;txq6@80:04.6;txq7@80:04.7],dmathr=64'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n+  ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3;txq4@0000:00:04.4;txq5@0000:00:04.5;txq6@0000:00:04.6;txq7@0000:00:04.7],dmathr=512' \\\n+   --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3;txq4@0000:80:04.4;txq5@0000:80:04.5;txq6@0000:80:04.6;txq7@0000:80:04.7],dmathr=64'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n    testpmd>start\n \n 8. 
Send pkts by testpmd in VM2, check imix packets can looped between two VMs for 1 mins and 8 queues all have packets rx/tx::\n@@ -660,8 +661,8 @@ Test Case 10: VM2VM virtio-pmd split ring mergeable path dynamic queue size CBDM\n \n 1. Bind 16 cbdma channels to igb_uio driver, then launch the testpmd with 2 vhost ports below commands::\n \n-    ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@00:04.0;txq1@00:04.1;txq2@00:04.2;txq3@00:04.3;txq4@00:04.4;txq5@00:04.5;txq6@00:04.6;txq7@00:04.7],dmathr=512' \\\n-    --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3;txq4@80:04.4;txq5@80:04.5;txq6@80:04.6;txq7@80:04.7],dmathr=512'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4\n+    ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3;txq4@0000:00:04.4;txq5@0000:00:04.5;txq6@0000:00:04.6;txq7@0000:00:04.7],dmathr=512' \\\n+    --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3;txq4@0000:80:04.4;txq5@0000:80:04.5;txq6@0000:80:04.6;txq7@0000:80:04.7],dmathr=512'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4\n     testpmd>vhost enable tx all\n     testpmd>start\n \n@@ -698,14 +699,14 @@ Test Case 10: VM2VM virtio-pmd split ring mergeable path dynamic queue size CBDM\n \n 4. Launch testpmd in VM1::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set mac fwd\n     testpmd>start\n \n 5. Launch testpmd in VM2 and send imix pkts, check imix packets can looped between two VMs for 1 mins and 4 queues (queue0 to queue3) have packets rx/tx::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600\n-    testpmd>set mac fwd\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n+    testpmd>set mac fwd \n     testpmd>set txpkts 64,256,512,1024,2000,64,256,512,1024,2000\n     testpmd>start tx_first 32\n     testpmd>show port stats all\n@@ -713,8 +714,8 @@ Test Case 10: VM2VM virtio-pmd split ring mergeable path dynamic queue size CBDM\n \n 6. 
Relaunch and start vhost side testpmd with eight queues, change cbdma threshold for one vhost port's cbdma channels::\n \n-  ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@00:04.0;txq1@00:04.1;txq2@00:04.2;txq3@00:04.3;txq4@00:04.4;txq5@00:04.5;txq6@00:04.6;txq7@00:04.7],dmathr=512' \\\n-   --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3;txq4@80:04.4;txq5@80:04.5;txq6@80:04.6;txq7@80:04.7],dmathr=64'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n+  ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,client=1,queues=8,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3;txq4@0000:00:04.4;txq5@0000:00:04.5;txq6@0000:00:04.6;txq7@0000:00:04.7],dmathr=512' \\\n+   --vdev 'net_vhost1,iface=vhost-net1,client=1,queues=8,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3;txq4@0000:80:04.4;txq5@0000:80:04.5;txq6@0000:80:04.6;txq7@0000:80:04.7],dmathr=64'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n    testpmd>start\n \n 7. Send pkts by testpmd in VM2, check imix packets can looped between two VMs for 1 mins and 8 queues all have packets rx/tx::\n@@ -732,8 +733,8 @@ Test Case 11: VM2VM virtio-pmd packed ring mergeable path 8 queues CBDMA enable\n 1. Bind 16 cbdma channels to igb_uio driver, then launch the testpmd with 2 vhost port and 8 queues by below commands::\n \n     rm -rf vhost-net*\n-    ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=8,dmas=[txq0@00:04.0;txq1@00:04.1;txq2@00:04.2;txq3@00:04.3;txq4@00:04.4;txq5@00:04.5;txq6@00:04.6;txq7@00:04.7],dmathr=512' \\\n-    --vdev 'net_vhost1,iface=vhost-net1,queues=8,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3;txq4@80:04.4;txq5@80:04.5;txq6@80:04.6;txq7@80:04.7],dmathr=512'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n+    ./dpdk-testpmd -l 1-5 -n 4 --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net0,queues=8,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3;txq4@0000:00:04.4;txq5@0000:00:04.5;txq6@0000:00:04.6;txq7@0000:00:04.7],dmathr=512' \\\n+    --vdev 'net_vhost1,iface=vhost-net1,queues=8,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3;txq4@0000:80:04.4;txq5@0000:80:04.5;txq6@0000:80:04.6;txq7@0000:80:04.7],dmathr=512'  -- -i --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8\n     testpmd>vhost enable tx all\n     testpmd>start\n \n@@ -770,13 +771,13 @@ Test Case 11: VM2VM virtio-pmd packed ring mergeable path 8 queues CBDMA enable\n \n 4. Launch testpmd in VM1::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set mac fwd\n     testpmd>start\n \n 5. 
Launch testpmd in VM2 and send imix pkts, check imix packets can looped between two VMs for 1 mins and 8 queues all have packets rx/tx::\n \n-    ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600\n+    ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n     testpmd>set mac fwd\n     testpmd>set txpkts 64,256,512,1024,20000,64,256,512,1024,20000\n     testpmd>start tx_first 32\n@@ -802,7 +803,7 @@ Test Case 11: VM2VM virtio-pmd packed ring mergeable path 8 queues CBDMA enable\n    modprobe vfio-pci\n    echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode\n    ./usertools/dpdk-devbind.py --force --bind=vfio-pci 0000:00:05.0\n-   ./testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600\n+   ./dpdk-testpmd -c 0x3 -n 4 -- -i --tx-offloads=0x00 --enable-hw-vlan-strip --txq=8 --rxq=8 --txd=1024 --rxd=1024 --max-pkt-len=9600 --rx-offloads=0x00002000\n    testpmd>set mac fwd\n    testpmd>set txpkts 64,256,512,1024,20000,64,256,512,1024,20000\n    testpmd>start tx_first 32\n",
    "prefixes": [
        "v1"
    ]
}
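A minimal sketch of how the fields in this response might be consumed: read the submitter, state, and series links, then download the raw patch from the mbox URL. The field names come from the JSON above; the output filename is an arbitrary assumption.

    import requests

    resp = requests.get("http://patches.dpdk.org/api/patches/96389/")
    resp.raise_for_status()
    patch = resp.json()

    # Fields below are taken directly from the response shown above.
    print("submitter:", patch["submitter"]["name"])
    print("state:    ", patch["state"])
    for series in patch["series"]:
        print("series:   ", series["web_url"])

    # The raw patch is served as an mbox; the local filename is arbitrary.
    mbox = requests.get(patch["mbox"])
    with open("96389.mbox", "wb") as f:
        f.write(mbox.content)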