Patch Detail

GET: show a patch.
PATCH: update a patch.
PUT: update a patch.

GET /api/patches/90161/?format=api
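The browsable view above renders the record shown below. As a minimal sketch, the same record can be fetched programmatically (assuming Python with the `requests` package installed; dropping the `?format=api` suffix, which selects the browsable renderer, returns plain JSON):

    # Minimal sketch: fetch this patch record from the Patchwork REST API.
    # Assumes `requests` is installed; the URL is the endpoint shown above.
    import requests

    resp = requests.get("http://patches.dpdk.org/api/patches/90161/", timeout=30)
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields that appear in the response body below.
    print(patch["name"])    # patch subject
    print(patch["state"])   # e.g. "accepted"
    print(patch["mbox"])    # URL of the patch as an applyable mbox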
Patch metadata

  id:           90161
  web_url:      http://patches.dpdk.org/project/dts/patch/20210331052405.602588-1-weix.ling@intel.com/
  project:      DTS (list dts.dpdk.org, dts@dpdk.org)
                scm: git://dpdk.org/tools/dts
                webscm: http://git.dpdk.org/tools/dts/
                archive: https://inbox.dpdk.org/dts
  msgid:        <20210331052405.602588-1-weix.ling@intel.com>
  archived at:  https://inbox.dpdk.org/dts/20210331052405.602588-1-weix.ling@intel.com
  date:         2021-03-31T05:24:05
  name:         [V2] tests/vhost_dequeue_zero_copy: delete dpdk unsupported testsuite and testplan
  state:        accepted
  archived:     false
  hash:         2aa8a8ef66d98d5208ed93d544e41c5a0644499b
  submitter:    Ling, WeiX <weix.ling@intel.com>
  delegate:     none
  prefixes:     V2
  mbox:         http://patches.dpdk.org/project/dts/patch/20210331052405.602588-1-weix.ling@intel.com/mbox/
  series:       15993, version 2: http://patches.dpdk.org/project/dts/list/?series=15993
                series mbox: http://patches.dpdk.org/series/15993/mbox/
  comments:     http://patches.dpdk.org/api/patches/90161/comments/
  check:        pending (checks: http://patches.dpdk.org/api/patches/90161/checks/)

Message headers

  From: Ling Wei <weix.ling@intel.com>
  To: dts@dpdk.org
  Cc: Ling Wei <weix.ling@intel.com>
  Date: Wed, 31 Mar 2021 13:24:05 +0800
  Message-Id: <20210331052405.602588-1-weix.ling@intel.com>
  X-Mailer: git-send-email 2.25.1
  MIME-Version: 1.0
  Content-Transfer-Encoding: 8bit
  Subject: [dts] [PATCH V2] tests/vhost_dequeue_zero_copy: delete dpdk unsupported
testsuite and testplan", "X-BeenThere": "dts@dpdk.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "test suite reviews and discussions <dts.dpdk.org>", "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>", "List-Archive": "<http://mails.dpdk.org/archives/dts/>", "List-Post": "<mailto:dts@dpdk.org>", "List-Help": "<mailto:dts-request@dpdk.org?subject=help>", "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>", "Errors-To": "dts-bounces@dpdk.org", "Sender": "\"dts\" <dts-bounces@dpdk.org>" }, "content": "As DPDK community has removed support dequeue zero copy feature in\n20.11,so remove this testsuite and testplan.\n\nSigned-off-by: Ling Wei <weix.ling@intel.com>\n---\n test_plans/index.rst | 1 -\n .../vhost_dequeue_zero_copy_test_plan.rst | 417 --------------\n tests/TestSuite_vhost_dequeue_zero_copy.py | 532 ------------------\n 3 files changed, 950 deletions(-)\n delete mode 100644 test_plans/vhost_dequeue_zero_copy_test_plan.rst\n delete mode 100644 tests/TestSuite_vhost_dequeue_zero_copy.py", "diff": "diff --git a/test_plans/index.rst b/test_plans/index.rst\nindex 2be687a9..b35ddb00 100644\n--- a/test_plans/index.rst\n+++ b/test_plans/index.rst\n@@ -228,7 +228,6 @@ The following are the test plans for the DPDK DTS automated test system.\n vm2vm_virtio_pmd_test_plan\n dpdk_gro_lib_test_plan\n dpdk_gso_lib_test_plan\n- vhost_dequeue_zero_copy_test_plan\n vswitch_sample_cbdma_test_plan\n vxlan_gpe_support_in_i40e_test_plan\n pvp_diff_qemu_version_test_plan\ndiff --git a/test_plans/vhost_dequeue_zero_copy_test_plan.rst b/test_plans/vhost_dequeue_zero_copy_test_plan.rst\ndeleted file mode 100644\nindex 29fba85f..00000000\n--- a/test_plans/vhost_dequeue_zero_copy_test_plan.rst\n+++ /dev/null\n@@ -1,417 +0,0 @@\n-.. Copyright (c) <2019>, Intel Corporation\n- All rights reserved.\n-\n- Redistribution and use in source and binary forms, with or without\n- modification, are permitted provided that the following conditions\n- are met:\n-\n- - Redistributions of source code must retain the above copyright\n- notice, this list of conditions and the following disclaimer.\n-\n- - Redistributions in binary form must reproduce the above copyright\n- notice, this list of conditions and the following disclaimer in\n- the documentation and/or other materials provided with the\n- distribution.\n-\n- - Neither the name of Intel Corporation nor the names of its\n- contributors may be used to endorse or promote products derived\n- from this software without specific prior written permission.\n-\n- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n- FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-   COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-   INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-   SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-   HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-   STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-   OF THE POSSIBILITY OF SUCH DAMAGE.
-
-=================================
-vhost dequeue zero-copy test plan
-=================================
-
-Description
-===========
-
-Vhost dequeue zero-copy is a performance optimization for vhost, the copy in the dequeue path is avoided in order to improve the performance. The test cases cover split ring and packed ring.
-Notice:
-
-* All packed ring case need special qemu version.
-* In the PVP case, when packet size is 1518B, 10G nic could be the performance bottleneck, so we use 40G traffic genarator and 40G nic.
-* Also as vhost zero copy mbufs should be consumed as soon as possible, don't start send packets at vhost side before VM and virtio-pmd launched.
-
-Test flow
-=========
-
-TG --> NIC --> Vhost --> Virtio --> Vhost --> NIC --> TG
-
-Test Case 1: pvp split ring dequeue zero-copy test
-==================================================
-
-1. Bind one 40G port to igb_uio, then launch testpmd by below command::
-
-    rm -rf vhost-net*
-    ./testpmd -c 0xe -n 4 \
-    --vdev 'eth_vhost0,iface=vhost-net,queues=1,dequeue-zero-copy=1' -- \
-    -i --nb-cores=1 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-
-2. Launch VM with mrg_rxbuf feature on, note that qemu_version need > qemu_2.10 for support adjusting parameter rx_queue_size::
-
-    qemu-system-x86_64 -name vm1 \
-    -cpu host -enable-kvm -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc \
-    -smp cores=5,sockets=1 -drive file=/home/osimg/ubuntu16.img \
-    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -net nic,macaddr=00:00:00:08:e8:aa,addr=1f \
-    -net user,hostfwd=tcp:127.0.0.1:6002-:22 \
-    -chardev socket,id=char0,path=./vhost-net \
-    -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce \
-    -device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,mrg_rxbuf=on,rx_queue_size=1024,tx_queue_size=1024 \
-    -vnc :10
-
-3. On VM, bind virtio net to igb_uio and run testpmd::
-
-    ./dpdk-devbind.py --bind=igb_uio xx:xx.x
-    ./testpmd -c 0x3 -n 4 -- -i --nb-cores=1 --txd=1024 --rxd=1024
-    testpmd>set fwd mac
-    testpmd>start
-
-4. Start testpmd at host side after VM and virtio-pmd launched::
-
-    testpmd>start
-
-5. Send packets by packet generator with different packet sizes (64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-6. Repeat the test with dequeue-zero-copy=0, compare the performance gains or degradation. For small packet, we may expect ~20% performance drop, but for big packet, we expect ~20% performance gains.
-
-Test Case 2: pvp split ring dequeue zero-copy test with 2 queues
-================================================================
-
-1. Bind one 40G port to igb_uio, then launch testpmd by below command::
-
-    rm -rf vhost-net*
-    ./testpmd -l 2-4 -n 4 \
-    --vdev 'eth_vhost0,iface=vhost-net,queues=2,dequeue-zero-copy=1' -- \
-    -i --nb-cores=2 --rxq=2 --txq=2 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-
-2. Launch VM with vectors=2*queue_num+2 and mrg_rxbuf/mq feature on, note that qemu_version need > qemu_2.10 for support adjusting parameter rx_queue_size::
-
-    qemu-system-x86_64 -name vm1 \
-    -cpu host -enable-kvm -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc \
-    -smp cores=5,sockets=1 -drive file=/home/osimg/ubuntu16.img \
-    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -net nic,macaddr=00:00:00:08:e8:aa,addr=1f \
-    -net user,hostfwd=tcp:127.0.0.1:6002-:22 \
-    -chardev socket,id=char0,path=./vhost-net \
-    -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce,queues=2 \
-    -device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,mrg_rxbuf=on,mq=on,vectors=8,rx_queue_size=1024,tx_queue_size=1024 \
-    -vnc :10
-
-3. On VM, bind vdev to igb_uio and run testpmd::
-
-    ./usertools/dpdk-devbind.py --bind=igb_uio xx:xx.x
-    ./testpmd -c 0x07 -n 4 -- -i \
-    --rxq=2 --txq=2 --txd=1024 --rxd=1024 --nb-cores=2
-    testpmd>set fwd mac
-    testpmd>start
-
-4. Start testpmd at host side after VM and virtio-pmd launched::
-
-    testpmd>start
-
-5. Send packets by packet generator with different packet sizes (64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-6. Check each queue's rx/tx packet numbers at vhost side::
-
-    testpmd>stop
-
-Test Case 3: pvp split ring dequeue zero-copy test with driver reload test
-==========================================================================
-
-1. Bind one 40G port to igb_uio, then launch testpmd by below command::
-
-    rm -rf vhost-net*
-    ./testpmd -l 1-5 -n 4 \
-    --vdev 'eth_vhost0,iface=vhost-net,queues=16,dequeue-zero-copy=1,client=1' -- \
-    -i --nb-cores=4 --rxq=16 --txq=16 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-
-2. Launch VM with vectors=2*queue_num+2 and mrg_rxbuf/mq feature on, note that qemu_version need > qemu_2.10 for support adjusting parameter rx_queue_size::
-
-    qemu-system-x86_64 -name vm1 \
-    -cpu host -enable-kvm -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc \
-    -smp cores=5,sockets=1 -drive file=/home/osimg/ubuntu16.img \
-    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -net nic,macaddr=00:00:00:08:e8:aa,addr=1f \
-    -net user,hostfwd=tcp:127.0.0.1:6002-:22 \
-    -chardev socket,id=char0,path=./vhost-net,server \
-    -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce,queues=16 \
-    -device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,mrg_rxbuf=on,mq=on,vectors=40,rx_queue_size=1024,tx_queue_size=1024 \
-    -vnc :10
-
-3. On VM, bind virtio net to igb_uio and run testpmd::
-
-    ./usertools/dpdk-devbind.py --bind=igb_uio xx:xx.x
-    ./testpmd -l 0-4 -n 4 -- -i --nb-cores=4 --rxq=16 --txq=16 --txd=1024 --rxd=1024
-    testpmd>set fwd rxonly
-    testpmd>start
-
-4. Start testpmd at host side after VM launched::
-
-    testpmd>start
-
-5. Send packets by packet generator with different packet sizes(64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-6. Relaunch testpmd at virtio side in VM for driver reloading::
-
-    testpmd>quit
-    ./testpmd -l 0-4 -n 4 -- -i --nb-cores=4 --rxq=16 --txq=16 --txd=1024 --rxd=1024
-    testpmd>set fwd mac
-    testpmd>start
-
-7. Send packets by packet generator with different packet sizes (64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-8. Check each queue's rx/tx packet numbers at vhost side::
-
-    testpmd>stop
-
-Test Case 4: pvp split ring dequeue zero-copy test with maximum txfreet
-=======================================================================
-
-1. Bind one 40G port to igb_uio, then launch testpmd by below command::
-
-    ./testpmd -l 1-5 -n 4 \
-    --vdev 'eth_vhost0,iface=vhost-net,queues=16,dequeue-zero-copy=1,client=1' -- \
-    -i --nb-cores=4 --rxq=16 --txq=16 --txfreet=988 --txrs=4 --txd=992 --rxd=992
-    testpmd>set fwd mac
-
-2. Launch VM with vectors=2*queue_num+2 and mrg_rxbuf/mq feature on, note that qemu_version need>qemu_2.10 for support adjusting parameter rx_queue_size::
-
-    qemu-system-x86_64 -name vm1 \
-    -cpu host -enable-kvm -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc \
-    -smp cores=5,sockets=1 -drive file=/home/osimg/ubuntu16.img \
-    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -net nic,macaddr=00:00:00:08:e8:aa,addr=1f \
-    -net user,hostfwd=tcp:127.0.0.1:6002-:22 \
-    -chardev socket,id=char0,path=./vhost-net,server \
-    -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce,queues=16 \
-    -device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,mrg_rxbuf=on,mq=on,vectors=40,rx_queue_size=1024,tx_queue_size=1024 \
-    -vnc :10
-
-3. On VM, bind virtio net to igb_uio and run testpmd::
-
-    ./usertools/dpdk-devbind.py --bind=igb_uio xx:xx.x
-    ./testpmd -l 0-4 -n 4 -- -i --nb-cores=4 --rxq=16 --txq=16 --txd=1024 --rxd=1024
-    testpmd>set fwd mac
-    testpmd>start
-
-4. Start testpmd at host side after VM launched::
-
-    testpmd>start
-
-5. Send packets by packet generator with different packet sizes(64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-6. Check each queue's rx/tx packet numbers at vhost side::
-
-    testpmd>stop
-
-Test Case 5: pvp split ring dequeue zero-copy test with vector_rx path
-======================================================================
-
-1. Bind one port to igb_uio, then launch vhost by below command::
-
-    rm -rf vhost-net*
-    ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -l 2-4 \
-    --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net,queues=1,client=1,dequeue-zero-copy=1' \
-    -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-    testpmd>start
-
-2. Launch virtio-user by below command::
-
-    ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -l 5-6 \
-    --no-pci --file-prefix=virtio \
-    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,in_order=0,mrg_rxbuf=0,vectorized=1,queue_size=1024,server=1 \
-    -- -i --tx-offloads=0x0 --nb-cores=1 --txd=1024 --rxd=1024
-    >set fwd mac
-    >start
-
-3. Send packet with packet generator with different packet size,includes [64, 128, 256, 512, 1024, 1518], check the throughput with below command::
-
-    testpmd>show port stats all
-
-4. Repeat the test with dequeue-zero-copy=0, compare the performance gains or degradation. For small packet, we may expect ~20% performance drop, but for big packet, we expect ~20% performance gains.
-
-Test Case 6: pvp packed ring dequeue zero-copy test
-===================================================
-
-1. Bind one 40G port to igb_uio, then launch testpmd by below command::
-
-    rm -rf vhost-net*
-    ./testpmd -c 0xe -n 4 \
-    --vdev 'eth_vhost0,iface=vhost-net,queues=1,dequeue-zero-copy=1' -- \
-    -i --nb-cores=1 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-
-2. Launch VM with mrg_rxbuf feature on, note that qemu_version need > qemu_2.10 for support adjusting parameter rx_queue_size::
-
-    qemu-system-x86_64 -name vm1 \
-    -cpu host -enable-kvm -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc \
-    -smp cores=5,sockets=1 -drive file=/home/osimg/ubuntu16.img \
-    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -net nic,macaddr=00:00:00:08:e8:aa,addr=1f \
-    -net user,hostfwd=tcp:127.0.0.1:6002-:22 \
-    -chardev socket,id=char0,path=./vhost-net \
-    -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce \
-    -device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,mrg_rxbuf=on,rx_queue_size=1024,tx_queue_size=1024,packed=on \
-    -vnc :10
-
-3. On VM, bind virtio net to igb_uio and run testpmd::
-
-    ./dpdk-devbind.py --bind=igb_uio xx:xx.x
-    ./testpmd -c 0x3 -n 4 -- -i --nb-cores=1 --txd=1024 --rxd=1024
-    testpmd>set fwd mac
-    testpmd>start
-
-4. Start testpmd at host side after VM and virtio-pmd launched::
-
-    testpmd>start
-
-5. Send packets by packet generator with different packet sizes (64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-6. Repeat the test with dequeue-zero-copy=0, compare the performance gains or degradation. For small packet, we may expect ~20% performance drop, but for big packet, we expect ~20% performance gains.
-
-Test Case 7: pvp packed ring dequeue zero-copy test with 2 queues
-=================================================================
-
-1. Bind one 40G port to igb_uio, then launch testpmd by below command::
-
-    rm -rf vhost-net*
-    ./testpmd -l 2-4 -n 4 \
-    --vdev 'eth_vhost0,iface=vhost-net,queues=2,dequeue-zero-copy=1' -- \
-    -i --nb-cores=2 --rxq=2 --txq=2 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-
-2. Launch VM with vectors=2*queue_num+2 and mrg_rxbuf/mq feature on, note that qemu_version need > qemu_2.10 for support adjusting parameter rx_queue_size::
-
-    qemu-system-x86_64 -name vm1 \
-    -cpu host -enable-kvm -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc \
-    -smp cores=5,sockets=1 -drive file=/home/osimg/ubuntu16.img \
-    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -net nic,macaddr=00:00:00:08:e8:aa,addr=1f \
-    -net user,hostfwd=tcp:127.0.0.1:6002-:22 \
-    -chardev socket,id=char0,path=./vhost-net \
-    -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce,queues=2 \
-    -device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,mrg_rxbuf=on,mq=on,vectors=8,rx_queue_size=1024,tx_queue_size=1024,packed=on \
-    -vnc :10
-
-3. On VM, bind vdev to igb_uio and run testpmd::
-
-    ./usertools/dpdk-devbind.py --bind=igb_uio xx:xx.x
-    ./testpmd -c 0x07 -n 4 -- -i \
-    --rxq=2 --txq=2 --txd=1024 --rxd=1024 --nb-cores=2
-    testpmd>set fwd mac
-    testpmd>start
-
-4. Start testpmd at host side after VM and virtio-pmd launched::
-
-    testpmd>start
-
-5. Send packets by packet generator with different packet sizes (64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-6. Check each queue's rx/tx packet numbers at vhost side::
-
-    testpmd>stop
-
-Test Case 8: pvp packed ring dequeue zero-copy test with driver reload test
-===========================================================================
-
-1. Bind one 40G port to igb_uio, then launch testpmd by below command::
-
-    rm -rf vhost-net*
-    ./testpmd -l 1-5 -n 4 \
-    --vdev 'eth_vhost0,iface=vhost-net,queues=16,dequeue-zero-copy=1,client=1' -- \
-    -i --nb-cores=4 --rxq=16 --txq=16 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-
-2. Launch VM with vectors=2*queue_num+2 and mrg_rxbuf/mq feature on, note that qemu_version need > qemu_2.10 for support adjusting parameter rx_queue_size::
-
-    qemu-system-x86_64 -name vm1 \
-    -cpu host -enable-kvm -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc \
-    -smp cores=5,sockets=1 -drive file=/home/osimg/ubuntu16.img \
-    -monitor unix:/tmp/vm2_monitor.sock,server,nowait -net nic,macaddr=00:00:00:08:e8:aa,addr=1f \
-    -net user,hostfwd=tcp:127.0.0.1:6002-:22 \
-    -chardev socket,id=char0,path=./vhost-net,server \
-    -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce,queues=16 \
-    -device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,mrg_rxbuf=on,mq=on,vectors=40,rx_queue_size=1024,tx_queue_size=1024,packed=on \
-    -vnc :10
-
-3. On VM, bind virtio net to igb_uio and run testpmd::
-
-    ./usertools/dpdk-devbind.py --bind=igb_uio xx:xx.x
-    ./testpmd -l 0-4 -n 4 -- -i --nb-cores=4 --rxq=16 --txq=16 --txd=1024 --rxd=1024
-    testpmd>set fwd rxonly
-    testpmd>start
-
-4. Start testpmd at host side after VM launched::
-
-    testpmd>start
-
-5. Send packets by packet generator with different packet sizes(64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-6. Relaunch testpmd at virtio side in VM for driver reloading::
-
-    testpmd>quit
-    ./testpmd -l 0-4 -n 4 -- -i --nb-cores=4 --rxq=16 --txq=16 --txd=1024 --rxd=1024
-    testpmd>set fwd mac
-    testpmd>start
-
-7. Send packets by packet generator with different packet sizes (64,128,256,512,1024,1518), show throughput with below command::
-
-    testpmd>show port stats all
-
-8. Check each queue's rx/tx packet numbers at vhost side::
-
-    testpmd>stop
-
-Test Case 9: pvp packed ring dequeue zero-copy test with ring size is not power of 2
-====================================================================================
-
-1. Bind one port to igb_uio, then launch vhost by below command::
-
-    rm -rf vhost-net*
-    ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -l 2-4 \
-    --file-prefix=vhost --vdev 'net_vhost0,iface=vhost-net,queues=1,client=1,dequeue-zero-copy=1' \
-    -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txfreet=992
-    testpmd>set fwd mac
-    testpmd>start
-
-2. Launch virtio-user by below command::
-
-    ./x86_64-native-linuxapp-gcc/app/testpmd -n 4 -l 5-6 \
-    --no-pci --file-prefix=virtio \
-    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,in_order=0,mrg_rxbuf=1,packed_vq=1,queue_size=1025,server=1 \
-    -- -i --rx-offloads=0x10 --nb-cores=1 --txd=1025 --rxd=1025
-    >set fwd mac
-    >start
-
-3. Send packet with packet generator with different packet size,includes [64, 128, 256, 512, 1024, 1518], check the throughput with below command::
-
-    testpmd>show port stats all
-
-4. Repeat the test with dequeue-zero-copy=0, compare the performance gains or degradation. For small packet, we may expect ~20% performance drop, but for big packet, we expect ~20% performance gains.
diff --git a/tests/TestSuite_vhost_dequeue_zero_copy.py b/tests/TestSuite_vhost_dequeue_zero_copy.py
deleted file mode 100644
index 0779edcb..00000000
--- a/tests/TestSuite_vhost_dequeue_zero_copy.py
+++ /dev/null
@@ -1,532 +0,0 @@
-#
-# BSD LICENSE
-#
-# Copyright(c) <2019> Intel Corporation.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-#   * Redistributions of source code must retain the above copyright
-#     notice, this list of conditions and the following disclaimer.
-#   * Redistributions in binary form must reproduce the above copyright
-#     notice, this list of conditions and the following disclaimer in
-#     the documentation and/or other materials provided with the
-#     distribution.
-#   * Neither the name of Intel Corporation nor the names of its
-#     contributors may be used to endorse or promote products derived
-#     from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-"""
-DPDK Test suite.
-Test the performance of dequeue zero-copy.
-There are three topology test (PVP/VM2VM/VM2NIC) for this feature.
-And this testsuite implement the topology of PVP.
-Testsuite vm2vm_net_perf implement the topology VM2VM
-Testsuite gso implement the topology VM2NIC
-To run this suite, the qemu version should support packed ring.
-"""
-import utils
-import time
-import re
-from settings import HEADER_SIZE
-from virt_common import VM
-from test_case import TestCase
-from packet import Packet
-from pktgen import TRANSMIT_CONT
-
-
-class TestVhostDequeueZeroCopy(TestCase):
-
-    def set_up_all(self):
-        """
-        Run at the start of each test suite.
-        """
-        self.tester.extend_external_packet_generator(TestVhostDequeueZeroCopy, self)
-        self.frame_sizes = [64, 128, 256, 512, 1024, 1518]
-        self.dut_ports = self.dut.get_ports()
-        self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
-        self.def_driver = self.dut.ports_info[self.dut_ports[0]]["port"].get_nic_driver()
-        if self.def_driver != "igb_uio":
-            self.dut.setup_modules_linux(self.target, 'igb_uio', '')
-            self.dut.bind_interfaces_linux('igb_uio', nics_to_bind=self.dut_ports)
-            self.driver_chg = True
-        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
-        self.dst_mac = self.dut.get_mac_address(self.dut_ports[0])
-        self.tx_port = self.tester.get_local_port(self.dut_ports[0])
-        self.port_pci = self.dut.ports_info[self.dut_ports[0]]['pci']
-        self.vm_dut = None
-        self.virtio_user = None
-        self.virtio1_mac = "52:54:00:00:00:01"
-        self.header_size = HEADER_SIZE['eth'] + HEADER_SIZE['ip'] + HEADER_SIZE['udp']
-
-        self.logger.info("you can config packet_size in file %s.cfg," % self.suite_name + \
-                         "in region 'suite' like packet_sizes=[64, 128, 256]")
-        # get the frame_sizes from cfg file
-        if 'packet_sizes' in self.get_suite_cfg():
-            self.frame_sizes = self.get_suite_cfg()['packet_sizes']
-
-        self.base_dir = self.dut.base_dir.replace('~', '/root')
-        self.vhost_user = self.dut.new_session(suite="vhost-user")
-        self.number_of_ports = 1
-
-    def set_up(self):
-        """
-        Run before each test case.
-        """
-        # Clean the execution ENV
-        self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
-        # Prepare the result table
-        self.table_header = ["FrameSize(B)", "Throughput(Mpps)",
-                             "% linerate", "Queue number", "Cycle"]
-        self.result_table_create(self.table_header)
-        self.vm_dut = None
-        self.big_pkt_record = {}
-
-    @property
-    def check_2M_env(self):
-        out = self.dut.send_expect("cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# ")
-        return True if out == '2048' else False
-
-    def get_core_list(self):
-        """
-        check whether the server has enough cores to run case
-        if want to get the best perf of the vhost, the vhost tesptmd at least
-        should have 3 cores to start testpmd
-        """
-        if self.nb_cores == 1:
-            cores_num = 2
-        else:
-            cores_num = 1
-        core_config = "1S/%dC/1T" % (self.nb_cores + cores_num)
-        self.core_list = self.dut.get_core_list(
-            core_config, socket=self.ports_socket)
-        self.verify(len(self.core_list) >= (self.nb_cores + cores_num),
-                    "There has not enought cores to test this case %s" % self.running_case)
-
-    def launch_testpmd_as_vhost(self, txfreet, zero_copy=True, client_mode=False):
-        """
-        launch testpmd on vhost
-        """
-        self.get_core_list()
-
-        mode_info = ""
-        if client_mode is True:
-            mode_info = ',client=1'
-        zero_copy_info = 1
-        if zero_copy is False:
-            zero_copy_info = 0
-        if txfreet == "normal":
-            txfreet_args = "--txd=1024 --rxd=1024 --txfreet=992"
-        elif txfreet == "maximum":
-            txfreet_args = "--txrs=4 --txd=992 --rxd=992 --txfreet=988"
-        elif txfreet == "vector_rx":
-            txfreet_args = "--txd=1024 --rxd=1024 --txfreet=992 --txrs=32"
-
-        testcmd = self.dut.target + "/app/testpmd "
-        vdev = [r"'eth_vhost0,iface=%s/vhost-net,queues=%d,dequeue-zero-copy=%d%s'" % (self.base_dir, self.queue_number, zero_copy_info, mode_info)]
-        para = " -- -i --nb-cores=%d --rxq=%d --txq=%d %s" % (self.nb_cores, self.queue_number, self.queue_number, txfreet_args)
-        eal_params = self.dut.create_eal_parameters(cores=self.core_list, prefix='vhost', ports=[self.port_pci], vdevs=vdev)
-        command_line_client = testcmd + eal_params + para
-        self.vhost_user.send_expect(command_line_client, "testpmd> ", 120)
-        self.vhost_user.send_expect("set fwd mac", "testpmd> ", 120)
-
-    def launch_testpmd_as_virtio_user(self, path_mode):
-        """
-        launch testpmd use vhost-net with path mode
-        """
-        # To get the best perf, the vhost and virtio testpmd should not use same cores,
-        # so get the additional 3 cores to start virtio testpmd
-        core_config = "1S/%dC/1T" % (len(self.core_list) + 3)
\"1S/%dC/1T\" % (len(self.core_list) + 3)\n- core_list = self.dut.get_core_list(\n- core_config, socket=self.ports_socket)\n- self.verify(len(core_list) >= (len(self.core_list) + 3),\n- \"There has not enought cores to test this case %s\" % self.running_case)\n- testcmd = self.dut.target + \"/app/testpmd \"\n- vdev = \" --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net,queue_size=1024,%s\" % path_mode\n- para = \" -- -i --tx-offloads=0x0 --nb-cores=%d --txd=1024 --rxd=1024\" % self.nb_cores\n- eal_params = self.dut.create_eal_parameters(cores=core_list[len(self.core_list):],\n- prefix='virtio', no_pci=True)\n- if self.check_2M_env:\n- eal_params += \" --single-file-segments\"\n- command_line = testcmd + eal_params + vdev + para\n- self.virtio_user.send_expect(command_line, 'testpmd> ', 120)\n- self.virtio_user.send_expect('set fwd mac', 'testpmd> ', 120)\n- self.virtio_user.send_expect('start', 'testpmd> ', 120)\n-\n- def start_testpmd_on_vm(self, fwd_mode='mac'):\n- \"\"\"\n- start testpmd in vm depend on different path\n- \"\"\"\n- command = self.dut.target + \"/app/testpmd \" + \\\n- \"-c 0x1f -n 3 -- -i \" + \\\n- \"--nb-cores=%d --rxq=%d --txq=%d \" + \\\n- \"--txd=1024 --rxd=1024\"\n- command_line = command % (self.nb_cores,\n- self.queue_number, self.queue_number)\n- self.vm_dut.send_expect(command_line, \"testpmd> \", 30)\n- self.vm_dut.send_expect('set fwd %s' % fwd_mode, \"testpmd> \", 30)\n- self.vm_dut.send_expect('start', \"testpmd> \", 30)\n-\n- def restart_testpmd_on_vm(self, fwd_mode):\n- self.vm_dut.send_expect(\"quit\", \"# \", 30)\n- self.start_testpmd_on_vm(fwd_mode)\n-\n- def set_vm_vcpu(self):\n- \"\"\"\n- config the vcpu numbers\n- remove the cpupin param from vm_params\n- when the cores in cpupin is the isolcpus, it will reduce the\n- performance of dequeue zero copy\n- And if we not use the cpupin params(taskset -c xxx), it will use\n- the cpu which not set in isolcpus, and it number equal to the vcpus\n- \"\"\"\n- params_number = len(self.vm.params)\n- for i in range(params_number):\n- if list(self.vm.params[i].keys())[0] == 'cpu':\n- if 'number' in list(self.vm.params[i]['cpu'][0].keys()):\n- self.vm.params[i]['cpu'][0]['number'] = 5\n- if 'cpupin' in list(self.vm.params[i]['cpu'][0].keys()):\n- self.vm.params[i]['cpu'][0].pop('cpupin')\n-\n- def start_one_vm(self, mode='client', packed=False):\n- \"\"\"\n- start qemu\n- \"\"\"\n- self.vm = VM(self.dut, 'vm0', 'vhost_sample')\n- self.vm.load_config()\n- # use igb_uio driver in vm to get better performance\n- for i in self.vm.params:\n- if 'def_driver' in i:\n- i['def_driver'] = [{'driver_name': 'igb_uio'}]\n- break\n- vm_params = {}\n- vm_params['driver'] = 'vhost-user'\n- vm_params['opt_path'] = '%s/vhost-net' % self.base_dir\n- vm_params['opt_mac'] = self.virtio1_mac\n- if mode == 'server':\n- vm_params['opt_server'] = 'server'\n- opt_args = \"mrg_rxbuf=on,rx_queue_size=1024,tx_queue_size=1024\"\n- if self.queue_number > 1:\n- vm_params['opt_queue'] = self.queue_number\n- opt_args += \",mq=on,vectors=%d\" % (2*self.queue_number + 2)\n- if packed is True:\n- opt_args += ',packed=on'\n- vm_params['opt_settings'] = opt_args\n- self.vm.set_vm_device(**vm_params)\n- self.set_vm_vcpu()\n- try:\n- # Due to we have change the params info before,\n- # so need to start vm with load_config=False\n- self.vm_dut = self.vm.start(load_config=False)\n- if self.vm_dut is None:\n- raise Exception(\"Set up VM ENV failed\")\n- except Exception as e:\n- self.logger.error(\"ERROR: Failure for %s, \" % str(e) + \\\n- 
\"if 'packed not found' in output of start qemu log, \" + \\\n- \"please use the qemu version which support packed ring\")\n- raise e\n-\n- def prepare_test_evn(self, vhost_txfreet_mode, vhost_zero_copy, vhost_client_mode,\n- vm_testpmd_fwd_mode, packed_mode):\n- \"\"\"\n- start vhost testpmd and launch qemu, start testpmd on vm\n- \"\"\"\n- if vhost_client_mode is True:\n- vm_mode = 'server'\n- else:\n- vm_mode = 'client'\n- self.launch_testpmd_as_vhost(txfreet=vhost_txfreet_mode, zero_copy=vhost_zero_copy,\n- client_mode=vhost_client_mode)\n- self.start_one_vm(mode=vm_mode, packed=packed_mode)\n- self.start_testpmd_on_vm(fwd_mode=vm_testpmd_fwd_mode)\n- # start testpmd at host side after VM and virtio-pmd launched\n- self.vhost_user.send_expect(\"start\", \"testpmd> \", 120)\n-\n- def update_table_info(self, frame_size, Mpps, throughtput, cycle):\n- results_row = [frame_size]\n- results_row.append(Mpps)\n- results_row.append(throughtput)\n- results_row.append(self.queue_number)\n- results_row.append(cycle)\n- self.result_table_add(results_row)\n-\n- # record the big pkt Mpps\n- if frame_size == 1518:\n- self.big_pkt_record[cycle] = Mpps\n-\n- def calculate_avg_throughput(self, frame_size, fwd_mode):\n- \"\"\"\n- start to send packet and get the throughput\n- \"\"\"\n- payload = frame_size - self.header_size\n- flow = 'Ether(dst=\"%s\")/IP(src=\"192.168.4.1\",proto=255)/UDP(sport=33,dport=34)/(\"X\"*%d)' % (\n- self.dst_mac, payload)\n- pkt = Packet(pkt_str=flow)\n- pkt.save_pcapfile(self.tester, \"%s/zero_copy.pcap\" % self.tester.tmp_file)\n- stream_option = {\n- 'pcap': \"%s/zero_copy.pcap\" % self.tester.tmp_file,\n- 'fields_config': {\n- 'ip': {'src': {'action': 'random', 'start': '16.0.0.1', 'step': 1, 'end': '16.0.0.64'}}},\n- 'stream_config': {\n- 'rate': 100,\n- 'transmit_mode': TRANSMIT_CONT,\n- }\n- }\n- self.tester.pktgen.clear_streams()\n- stream_id = self.tester.pktgen.add_stream(self.tx_port, self.tx_port,\n- \"%s/zero_copy.pcap\" % self.tester.tmp_file)\n- self.tester.pktgen.config_stream(stream_id, stream_option)\n- traffic_opt = {\n- 'method': 'throughput',\n- 'rate': 100,\n- 'interval': 6,\n- 'duration': 30}\n- stats = self.tester.pktgen.measure([stream_id], traffic_opt)\n-\n- if isinstance(stats, list):\n- # if get multi result, ignore the first one, because it may not stable\n- num = len(stats)\n- Mpps = 0\n- for index in range(1, num):\n- Mpps += stats[index][1]\n- Mpps = Mpps / 1000000.0 / (num-1)\n- else:\n- Mpps = stats[1] / 1000000.0\n- # when the fwd mode is rxonly, we can not receive data, so should not verify it\n- if fwd_mode != \"rxonly\":\n- self.verify(Mpps > self.check_value[frame_size],\n- \"%s of frame size %d speed verify failed, expect %s, result %s\" % (\n- self.running_case, frame_size, self.check_value[frame_size], Mpps))\n- throughput = Mpps * 100 / \\\n- float(self.wirespeed(self.nic, frame_size, 1))\n- return Mpps, throughput\n-\n- def check_packets_of_each_queue(self, frame_size, fwd_mode):\n- \"\"\"\n- check each queue has receive packets\n- \"\"\"\n- if fwd_mode == \"rxonly\":\n- verify_port = 1\n- else:\n- verify_port = 2\n- out = self.vhost_user.send_expect(\"stop\", \"testpmd> \", 60)\n- for port_index in range(0, verify_port):\n- for queue_index in range(0, self.queue_number):\n- queue_info = re.findall(\"RX\\s*Port=\\s*%d/Queue=\\s*%d\" %\n- (port_index, queue_index), out)\n- queue = queue_info[0]\n- index = out.find(queue)\n- rx = re.search(\"RX-packets:\\s*(\\d*)\", out[index:])\n- tx = re.search(\"TX-packets:\\s*(\\d*)\", 
-                rx_packets = int(rx.group(1))
-                tx_packets = int(tx.group(1))
-                self.verify(rx_packets > 0 and tx_packets > 0,
-                            "The queue %d rx-packets or tx-packets is 0 about " %
-                            queue_index + \
-                            "frame_size:%d, rx-packets:%d, tx-packets:%d" %
-                            (frame_size, rx_packets, tx_packets))
-
-        self.vhost_user.send_expect("start", "testpmd> ", 60)
-
-    @property
-    def check_value(self):
-        check_dict = dict.fromkeys(self.frame_sizes)
-        linerate = {64: 0.07, 128: 0.10, 256: 0.20, 512: 0.18, 1024: 0.10, 1280: 0.10, 1518: 0.10}
-        for size in self.frame_sizes:
-            speed = self.wirespeed(self.nic, size, self.number_of_ports)
-            check_dict[size] = round(speed * linerate[size], 2)
-        return check_dict
-
-    def send_and_verify_throughput(self, cycle="", fwd_mode=""):
-        """
-        start to send packets and verify it
-        """
-        for frame_size in self.frame_sizes:
-            info = "Running test %s, and %d frame size." % (self.running_case, frame_size)
-            self.logger.info(info)
-
-            Mpps, throughput = self.calculate_avg_throughput(frame_size, fwd_mode)
-            if fwd_mode != "rxonly":
-                self.update_table_info(frame_size, Mpps, throughput, cycle)
-            # when multi queues, check each queue can receive packets
-            if self.queue_number > 1:
-                self.check_packets_of_each_queue(frame_size, fwd_mode)
-
-    def check_perf_drop_between_with_and_without_zero_copy(self):
-        """
-        for dequeue-zero-copy=0, about the small pkts we expect ~10% gain
-        compare to dequeue-zero-copy=1
-        """
-        value_with_zero_copy = 0
-        value_without_zero_copy = 0
-        if 'dequeue-zero-copy=1' in list(self.big_pkt_record.keys()):
-            value_with_zero_copy = self.big_pkt_record['dequeue-zero-copy=1']
-        if 'dequeue-zero-copy=0' in list(self.big_pkt_record.keys()):
-            value_without_zero_copy = self.big_pkt_record['dequeue-zero-copy=0']
-        self.verify(value_with_zero_copy != 0 and value_without_zero_copy != 0,
-                    'can not get the value of big pkts, please check self.frame_sizes')
-        self.verify(value_with_zero_copy - value_without_zero_copy >= value_with_zero_copy*0.05,
-                    'the drop with dequeue-zero-copy=0 is not as expected')
-
-    def close_all_testpmd_and_vm(self):
-        """
-        close testpmd about vhost-user and vm_testpmd
-        """
-        if getattr(self, 'vhost_user', None):
-            self.vhost_user.send_expect("quit", "#", 60)
-        if getattr(self, 'virtio_user', None):
-            self.virtio_user.send_expect("quit", "#", 60)
-            self.dut.close_session(self.virtio_user)
-            self.virtio_user = None
-        if getattr(self, 'vm_dut', None):
-            self.vm_dut.send_expect("quit", "#", 60)
-            self.vm.stop()
-
-    def test_perf_pvp_split_ring_dequeue_zero_copy(self):
-        """
-        pvp split ring dequeue zero-copy test
-        """
-        self.nb_cores = 1
-        self.queue_number = 1
-        self.logger.info('start vhost testpmd with dequeue-zero-copy=1 to test')
-        self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=True,
-                              vhost_client_mode=False, vm_testpmd_fwd_mode='mac', packed_mode=False)
-        self.send_and_verify_throughput(cycle='dequeue-zero-copy=1')
-
-        self.close_all_testpmd_and_vm()
-        self.logger.info('start vhost testpmd with dequeue-zero-copy=0 to test')
-        self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=False,
-                              vhost_client_mode=False, vm_testpmd_fwd_mode='mac', packed_mode=False)
-        self.send_and_verify_throughput(cycle='dequeue-zero-copy=0')
-        self.result_table_print()
-        self.check_perf_drop_between_with_and_without_zero_copy()
-
-    def test_perf_pvp_packed_ring_dequeue_zero_copy(self):
\"\"\"\n- pvp packed ring dequeue zero-copy test\n- \"\"\"\n- self.nb_cores = 1\n- self.queue_number = 1\n- self.logger.info('start vhost testpmd with dequeue-zero-copy=1 to test')\n- self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=True,\n- vhost_client_mode=False, vm_testpmd_fwd_mode='mac', packed_mode=True)\n- self.send_and_verify_throughput(cycle='dequeue-zero-copy=1')\n-\n- self.close_all_testpmd_and_vm()\n- self.logger.info('start vhost testpmd with dequeue-zero-copy=0 to test')\n- self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=False,\n- vhost_client_mode=False, vm_testpmd_fwd_mode='mac', packed_mode=True)\n- self.send_and_verify_throughput(cycle='dequeue-zero-copy=0')\n- self.result_table_print()\n- self.check_perf_drop_between_with_and_without_zero_copy()\n-\n- def test_perf_pvp_split_ring_dequeue_zero_copy_with_2_queue(self):\n- \"\"\"\n- pvp split ring dequeue zero-copy test with 2 queues\n- \"\"\"\n- self.nb_cores = 2\n- self.queue_number = 2\n- self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=True,\n- vhost_client_mode=False, vm_testpmd_fwd_mode='mac', packed_mode=False)\n- self.send_and_verify_throughput(cycle='dequeue-zero-copy=1')\n- self.result_table_print()\n-\n- def test_perf_pvp_packed_ring_dequeue_zero_copy_with_2_queue(self):\n- \"\"\"\n- pvp packed ring dequeue zero-copy test with 2 queues\n- \"\"\"\n- self.nb_cores = 2\n- self.queue_number = 2\n- self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=True,\n- vhost_client_mode=False, vm_testpmd_fwd_mode='mac', packed_mode=True)\n- self.send_and_verify_throughput(cycle='dequeue-zero-copy=1')\n- self.result_table_print()\n-\n- def test_perf_pvp_split_ring_dequeue_zero_copy_with_driver_unload(self):\n- \"\"\"\n- pvp split ring dequeue zero-copy test with driver reload test\n- \"\"\"\n- self.nb_cores = 4\n- self.queue_number = 16\n- self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=True,\n- vhost_client_mode=False, vm_testpmd_fwd_mode='rxonly', packed_mode=False)\n- self.send_and_verify_throughput(cycle=\"before relaunch\", fwd_mode=\"rxonly\")\n-\n- # relaunch testpmd at virtio side in VM for driver reloading\n- self.restart_testpmd_on_vm(fwd_mode='mac')\n- self.send_and_verify_throughput(cycle=\"after relaunch\")\n- self.result_table_print()\n-\n- def test_perf_pvp_packed_ring_dequeue_zero_copy_with_driver_unload(self):\n- \"\"\"\n- pvp packed ring dequeue zero-copy test with driver reload test\n- \"\"\"\n- self.nb_cores = 4\n- self.queue_number = 16\n- self.prepare_test_evn(vhost_txfreet_mode='normal', vhost_zero_copy=True,\n- vhost_client_mode=False, vm_testpmd_fwd_mode='rxonly', packed_mode=True)\n- self.send_and_verify_throughput(cycle=\"before relaunch\", fwd_mode=\"rxonly\")\n-\n- # relaunch testpmd at virtio side in VM for driver reloading\n- self.restart_testpmd_on_vm(fwd_mode='mac')\n- self.send_and_verify_throughput(cycle=\"after relaunch\")\n- self.result_table_print()\n-\n- def test_perf_pvp_split_ring_dequeue_zero_copy_with_maximum_txfreet(self):\n- \"\"\"\n- pvp split ring dequeue zero-copy test with maximum txfreet\n- \"\"\"\n- self.nb_cores = 4\n- self.queue_number = 16\n- self.prepare_test_evn(vhost_txfreet_mode='maximum', vhost_zero_copy=True,\n- vhost_client_mode=False, vm_testpmd_fwd_mode='mac', packed_mode=False)\n- self.send_and_verify_throughput(cycle='dequeue-zero-copy=1')\n- self.result_table_print()\n-\n- def test_perf_pvp_split_ring_dequeue_zero_copy_with_vector_rx(self):\n- \"\"\"\n- pvp split ring dequeue 
-        """
-        self.nb_cores = 1
-        self.queue_number = 1
-        path_mode = 'packed_vq=0,in_order=0,mrg_rxbuf=0'
-        self.virtio_user = self.dut.new_session(suite="virtio-user")
-
-        self.logger.info('start vhost testpmd with dequeue-zero-copy=1 to test')
-        self.launch_testpmd_as_vhost(txfreet="vector_rx", zero_copy=True, client_mode=False)
-        self.vhost_user.send_expect("start", "testpmd> ", 120)
-        self.launch_testpmd_as_virtio_user(path_mode)
-        self.send_and_verify_throughput(cycle='dequeue-zero-copy=1')
-        self.result_table_print()
-
-    def tear_down(self):
-        """
-        Run after each test case.
-        """
-        self.close_all_testpmd_and_vm()
-        self.dut.kill_all()
-        time.sleep(2)
-
-    def tear_down_all(self):
-        """
-        Run after each test suite.
-        """
-        if getattr(self, "driver_chg", None):
-            self.dut.bind_interfaces_linux(self.def_driver, nics_to_bind=self.dut_ports)
-        if getattr(self, 'vhost_user', None):
-            self.dut.close_session(self.vhost_user)
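Since the patch is recorded as accepted, anyone reproducing it locally would apply the mbox listed in the metadata above to a checkout of the DTS tree. A minimal sketch of one way to do that (assuming `git` is on PATH and the current working directory is a clone of git://dpdk.org/tools/dts; the mbox URL is the one from this record):

    # Minimal sketch: download the patch mbox and apply it with `git am`.
    import subprocess
    import urllib.request

    MBOX = ("http://patches.dpdk.org/project/dts/patch/"
            "20210331052405.602588-1-weix.ling@intel.com/mbox/")

    mbox_bytes = urllib.request.urlopen(MBOX).read()
    # `git am` reads the mbox from stdin and creates the commit,
    # preserving the author, date, and Signed-off-by trailer.
    subprocess.run(["git", "am"], input=mbox_bytes, check=True)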