Patch Detail
get:
Show a patch.
patch:
Update a patch.
put:
Update a patch.
GET /api/patches/109894/?format=api
http://patches.dpdk.org/api/patches/109894/?format=api", "web_url": "http://patches.dpdk.org/project/dts/patch/20220420152256.23175-1-zhiminx.huang@intel.com/", "project": { "id": 3, "url": "http://patches.dpdk.org/api/projects/3/?format=api", "name": "DTS", "link_name": "dts", "list_id": "dts.dpdk.org", "list_email": "dts@dpdk.org", "web_url": "", "scm_url": "git://dpdk.org/tools/dts", "webscm_url": "http://git.dpdk.org/tools/dts/", "list_archive_url": "https://inbox.dpdk.org/dts", "list_archive_url_format": "https://inbox.dpdk.org/dts/{}", "commit_url_format": "" }, "msgid": "<20220420152256.23175-1-zhiminx.huang@intel.com>", "list_archive_url": "https://inbox.dpdk.org/dts/20220420152256.23175-1-zhiminx.huang@intel.com", "date": "2022-04-20T15:22:56", "name": "[V2] test_plans/multiprocess_iavf_test_plan:add vf multiprocess test case", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": false, "hash": "8608a5a5de840b23c375987f1ee0e2683f0a5131", "submitter": { "id": 1685, "url": "http://patches.dpdk.org/api/people/1685/?format=api", "name": "Huang, ZhiminX", "email": "zhiminx.huang@intel.com" }, "delegate": null, "mbox": "http://patches.dpdk.org/project/dts/patch/20220420152256.23175-1-zhiminx.huang@intel.com/mbox/", "series": [ { "id": 22563, "url": "http://patches.dpdk.org/api/series/22563/?format=api", "web_url": "http://patches.dpdk.org/project/dts/list/?series=22563", "date": "2022-04-20T15:22:56", "name": "[V2] test_plans/multiprocess_iavf_test_plan:add vf multiprocess test case", "version": 2, "mbox": "http://patches.dpdk.org/series/22563/mbox/" } ], "comments": "http://patches.dpdk.org/api/patches/109894/comments/", "check": "warning", "checks": "http://patches.dpdk.org/api/patches/109894/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<dts-bounces@dpdk.org>", "X-Original-To": "patchwork@inbox.dpdk.org", "Delivered-To": "patchwork@inbox.dpdk.org", "Received": [ "from mails.dpdk.org (mails.dpdk.org 
[217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1FAC1A00BE;\n\tWed, 20 Apr 2022 08:57:24 +0200 (CEST)", "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 183C8410FB;\n\tWed, 20 Apr 2022 08:57:24 +0200 (CEST)", "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by mails.dpdk.org (Postfix) with ESMTP id CEC554068E\n for <dts@dpdk.org>; Wed, 20 Apr 2022 08:57:21 +0200 (CEST)", "from orsmga008.jf.intel.com ([10.7.209.65])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 19 Apr 2022 23:57:20 -0700", "from unknown (HELO localhost.localdomain) ([10.239.251.93])\n by orsmga008-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 19 Apr 2022 23:57:18 -0700" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1650437842; x=1681973842;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=Go77zjaltaHio6iN2MEKcxmlS80pDhHhFGer2XexYZc=;\n b=V04fOCRBnrngyakEDaAF4Dqzqt/MT5s3gCG9Me9x2K6y6t92F7UVSHdB\n rsBq4YIeeBFCEdPhKCTD6ZpdHmIiqpSNXGS/Xl4iyY7kSDvBlvYTCQcsG\n uapH0O/JaZyPWrKdwjlMqTtlr7t7DFpzQmYEYBtf/J0BoWjVLfBKoR8pZ\n 3N5NT+/MrLey0LNvpGA2+hytu6p9i3ny2vu6LPRYmyKJTq7zfiCN2b08Q\n cFIiJMT1YqbRR7QfVQCEktXMJNQlayVOz86R9oi5SrM5ZgZU2vnvmKC+5\n 3dsVgubkIuPZSaikBAiwVWctkFpfKXllyGpcOccXL1UVURIbRDK5e9PL6 w==;", "X-IronPort-AV": [ "E=McAfee;i=\"6400,9594,10322\"; a=\"243888454\"", "E=Sophos;i=\"5.90,274,1643702400\"; d=\"scan'208\";a=\"243888454\"", "E=Sophos;i=\"5.90,274,1643702400\"; d=\"scan'208\";a=\"576452504\"" ], "From": "Zhimin Huang <zhiminx.huang@intel.com>", "To": "dts@dpdk.org", "Cc": "Zhimin Huang <zhiminx.huang@intel.com>", "Subject": "[dts][PATCH V2] test_plans/multiprocess_iavf_test_plan:add vf\n multiprocess test case", "Date": "Wed, 20 Apr 2022 23:22:56 +0800", "Message-Id": "<20220420152256.23175-1-zhiminx.huang@intel.com>", "X-Mailer": "git-send-email 2.17.1", "MIME-Version": "1.0", 
"Content-Type": "text/plain; charset=UTF-8", "Content-Transfer-Encoding": "8bit", "X-BeenThere": "dts@dpdk.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "test suite reviews and discussions <dts.dpdk.org>", "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>", "List-Archive": "<http://mails.dpdk.org/archives/dts/>", "List-Post": "<mailto:dts@dpdk.org>", "List-Help": "<mailto:dts-request@dpdk.org?subject=help>", "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>", "Errors-To": "dts-bounces@dpdk.org" }, "content": "we not have iavf multiprocess cases in dts.\nadd new vf multiprocess test suite and 14 new test cases.\n\nSigned-off-by: Zhimin Huang <zhiminx.huang@intel.com>\n---\n test_plans/index.rst | 1 +\n test_plans/multiprocess_iavf_test_plan.rst | 948 +++++++++++++++++++++\n 2 files changed, 949 insertions(+)\n create mode 100644 test_plans/multiprocess_iavf_test_plan.rst", "diff": "diff --git a/test_plans/index.rst b/test_plans/index.rst\nindex f8118d14..7171e081 100644\n--- a/test_plans/index.rst\n+++ b/test_plans/index.rst\n@@ -272,6 +272,7 @@ The following are the test plans for the DPDK DTS automated test system.\n hello_world_test_plan\n keep_alive_test_plan\n multiprocess_test_plan\n+ multiprocess_iavf_test_plan\n rxtx_callbacks_test_plan\n skeleton_test_plan\n timer_test_plan\ndiff --git a/test_plans/multiprocess_iavf_test_plan.rst b/test_plans/multiprocess_iavf_test_plan.rst\nnew file mode 100644\nindex 00000000..8d597444\n--- /dev/null\n+++ b/test_plans/multiprocess_iavf_test_plan.rst\n@@ -0,0 +1,948 @@\n+.. 
Copyright (c) <2022>, Intel Corporation\n+ All rights reserved.\n+\n+ Redistribution and use in source and binary forms, with or without\n+ modification, are permitted provided that the following conditions\n+ are met:\n+\n+ - Redistributions of source code must retain the above copyright\n+ notice, this list of conditions and the following disclaimer.\n+\n+ - Redistributions in binary form must reproduce the above copyright\n+ notice, this list of conditions and the following disclaimer in\n+ the documentation and/or other materials provided with the\n+ distribution.\n+\n+ - Neither the name of Intel Corporation nor the names of its\n+ contributors may be used to endorse or promote products derived\n+ from this software without specific prior written permission.\n+\n+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n+ FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n+ COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\n+ OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+\n+=======================================\n+Sample Application Tests: Multi-Process\n+=======================================\n+\n+Simple MP Application Test\n+==========================\n+\n+Description\n+-----------\n+\n+This test is a basic multi-process test for iavf which demonstrates the basics of sharing\n+information between DPDK processes. 
The same application binary is run\n+twice - once as a primary instance, and once as a secondary instance. Messages\n+are sent from primary to secondary and vice versa, demonstrating the processes\n+are sharing memory and can communicate using rte_ring structures.\n+\n+Prerequisites\n+-------------\n+\n+If using vfio the kernel must be >= 3.6+ and VT-d must be enabled in bios.When\n+using vfio, use the following commands to load the vfio driver and bind it\n+to the device under test::\n+\n+ modprobe vfio\n+ modprobe vfio-pci\n+ usertools/dpdk-devbind.py --bind=vfio-pci device_bus_id\n+\n+Assuming that a DPDK build has been set up and the multi-process sample\n+applications have been built.\n+\n+Test Case: Basic operation\n+--------------------------\n+\n+1. To run the application, start one copy of the simple_mp binary in one terminal,\n+ passing at least two cores in the coremask, as follows::\n+\n+ ./build/simple_mp -c 3 --proc-type=primary\n+\n+ The process should start successfully and display a command prompt as follows::\n+\n+ $ ./build/simple_mp -c 3 --proc-type=primary\n+ EAL: coremask set to 3\n+ EAL: Detected lcore 0 on socket 0\n+ EAL: Detected lcore 1 on socket 0\n+ EAL: Detected lcore 2 on socket 0\n+ EAL: Detected lcore 3 on socket 0\n+ ...\n+ EAL: Requesting 2 pages of size 1073741824\n+ EAL: Requesting 768 pages of size 2097152\n+ EAL: Ask a virtual area of 0x40000000 bytes\n+ EAL: Virtual area found at 0x7ff200000000 (size = 0x40000000)\n+ ...\n+ EAL: check igb_uio module\n+ EAL: check module finished\n+ EAL: Master core 0 is ready (tid=54e41820)\n+ EAL: Core 1 is ready (tid=53b32700)\n+ Starting core 1\n+\n+ simple_mp >\n+\n+2. 
To run the secondary process to communicate with the primary process, again run the\n+ same binary setting at least two cores in the coremask.::\n+\n+ ./build/simple_mp -c C --proc-type=secondary\n+\n+ Once the process type is specified correctly, the process starts up, displaying largely\n+ similar status messages to the primary instance as it initializes. Once again, you will be\n+ presented with a command prompt.\n+\n+3. Once both processes are running, messages can be sent between them using the send\n+ command. At any stage, either process can be terminated using the quit command.\n+\n+ Validate that this is working by sending a message between each process, both from\n+ primary to secondary and back again. This is shown below.\n+\n+ Transcript from the primary - text entered by used shown in ``{}``::\n+\n+ EAL: Master core 10 is ready (tid=b5f89820)\n+ EAL: Core 11 is ready (tid=84ffe700)\n+ Starting core 11\n+ simple_mp > {send hello_secondary}\n+ simple_mp > core 11: Received 'hello_primary'\n+ simple_mp > {quit}\n+\n+ Transcript from the secondary - text entered by the user is shown in ``{}``::\n+\n+ EAL: Master core 8 is ready (tid=864a3820)\n+ EAL: Core 9 is ready (tid=85995700)\n+ Starting core 9\n+ simple_mp > core 9: Received 'hello_secondary'\n+ simple_mp > {send hello_primary}\n+ simple_mp > {quit}\n+\n+Test Case: Load test of Simple MP application\n+---------------------------------------------\n+\n+1. Start up the sample application using the commands outlined in steps 1 & 2\n+ above.\n+\n+2. To load test, send a large number of strings (>5000), from the primary instance\n+ to the secondary instance, and then from the secondary instance to the primary.\n+ [NOTE: A good source of strings to use is /usr/share/dict/words which contains\n+ >400000 ascii strings on Fedora 14]\n+\n+Test Case: Test use of Auto for Application Startup\n+---------------------------------------------------\n+\n+1. 
Start the primary application as in Test 1, Step 1, except replace\n+ ``--proc-type=primary`` with ``--proc-type=auto``\n+\n+2. Validate that the application prints the line:\n+ ``EAL: Auto-detected process type: PRIMARY`` on startup.\n+\n+3. Start the secondary application as in Test 1, Step 2, except replace\n+ ``--proc-type=secondary`` with ``--proc-type=auto``.\n+\n+4. Validate that the application prints the line:\n+ ``EAL: Auto-detected process type: SECONDARY`` on startup.\n+\n+5. Verify that processes can communicate by sending strings, as in Test 1,\n+ Step 3.\n+\n+Test Case: Test running multiple processes without \"--proc-type\" flag\n+---------------------------------------------------------------------\n+\n+1. Start up the primary process as in Test 1, Step 1, except omit the\n+ ``--proc-type`` flag completely.\n+\n+2. Validate that process starts up as normal, and returns the ``simple_mp>``\n+ prompt.\n+\n+3. Start up the secondary process as in Test 1, Step 2, except omit the\n+ ``--proc-type`` flag.\n+\n+4. Verify that the process *fails* to start and prints an error message as\n+ below::\n+\n+ \"PANIC in rte_eal_config_create():\n+ Cannot create lock on '/path/to/.rte_config'. Is another primary process running?\"\n+\n+Symmetric MP Application Test\n+=============================\n+\n+Description\n+-----------\n+\n+This test is a multi-process test which demonstrates how multiple processes can\n+work together to perform packet I/O and packet processing in parallel, much as\n+other example application work by using multiple threads. In this example, each\n+process reads packets from all network ports being used - though from a different\n+RX queue in each case. Those packets are then forwarded by each process which\n+sends them out by writing them directly to a suitable TX queue.\n+\n+Prerequisites\n+-------------\n+\n+Assuming that an Intel DPDK build has been set up and the multi-process sample\n+applications have been built. 
It is also assumed that a traffic generator has\n+been configured and plugged in to the NIC ports 0 and 1.\n+\n+Test Methodology\n+----------------\n+\n+As with the simple_mp example, the first instance of the symmetric_mp process\n+must be run as the primary instance, though with a number of other application\n+specific parameters also provided after the EAL arguments. These additional\n+parameters are:\n+\n+* -p <portmask>, where portmask is a hexadecimal bitmask of what ports on the\n+ system are to be used. For example: -p 3 to use ports 0 and 1 only.\n+* --num-procs <N>, where N is the total number of symmetric_mp instances that\n+ will be run side-by-side to perform packet processing. This parameter is used to\n+ configure the appropriate number of receive queues on each network port.\n+* --proc-id <n>, where n is a numeric value in the range 0 <= n < N (number of\n+ processes, specified above). This identifies which symmetric_mp instance is being\n+ run, so that each process can read a unique receive queue on each network port.\n+\n+The secondary symmetric_mp instances must also have these parameters specified,\n+and the first two must be the same as those passed to the primary instance, or errors\n+result.\n+\n+For example, to run a set of four symmetric_mp instances, running on lcores 1-4, all\n+performing level-2 forwarding of packets between ports 0 and 1, the following\n+commands can be used (assuming run as root)::\n+\n+ ./build/symmetric_mp -c 2 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=0\n+ ./build/symmetric_mp -c 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=1\n+ ./build/symmetric_mp -c 8 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=2\n+ ./build/symmetric_mp -c 10 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=3\n+\n+To run only 1 or 2 instances, the above parameters to the 1 or 2 instances being\n+run should remain the same, except for the ``num-procs`` value, which should be\n+adjusted appropriately.\n+\n+Test Case: Function 
Tests\n+-------------------------\n+start 2 symmetric_mp process, send some packets, the number of packets is a random value between 20 and 256.\n+summarize all received packets and check whether it is bigger than or equal to the number of sent packets\n+\n+1. start 2 process::\n+\n+ /dpdk-symmetric_mp -l 1 -n 4 --proc-type=auto -a 0000:05:01.0 -a 0000:05:01.1 -- -p 0x3 --num-procs=2 --proc-id=0\n+ /dpdk-symmetric_mp -l 2 -n 4 --proc-type=auto -a 0000:05:01.0 -a 0000:05:01.1 -- -p 0x3 --num-procs=2 --proc-id=1\n+\n+2. send some packets,the number of packets is a random value between 20 and 256, packet type including IPV6/4,TCP/UDP,\n+ refer to Random_Packet\n+\n+3. stop all process and check output::\n+\n+ the number of received packets for each process should bigger than 0.\n+ summarize all received packets for all process should bigger than or equal to the number of sent packets\n+\n+\n+Client Server Multiprocess Tests\n+================================\n+\n+Description\n+-----------\n+\n+The client-server sample application demonstrates the ability of Intel� DPDK\n+to use multiple processes in which a server process performs packet I/O and one\n+or multiple client processes perform packet processing. The server process\n+controls load balancing on the traffic received from a number of input ports to\n+a user-specified number of clients. 
The client processes forward the received\n+traffic, outputting the packets directly by writing them to the TX rings of the\n+outgoing ports.\n+\n+Prerequisites\n+-------------\n+\n+Assuming that an Intel� DPDK build has been set up and the multi-process\n+sample application has been built.\n+Also assuming a traffic generator is connected to the ports \"0\" and \"1\".\n+\n+It is important to run the server application before the client application,\n+as the server application manages both the NIC ports with packet transmission\n+and reception, as well as shared memory areas and client queues.\n+\n+Run the Server Application:\n+\n+- Provide the core mask on which the server process is to run using -c, e.g. -c 3 (bitmask number).\n+- Set the number of ports to be engaged using -p, e.g. -p 3 refers to ports 0 & 1.\n+- Define the maximum number of clients using -n, e.g. -n 8.\n+\n+The command line below is an example on how to start the server process on\n+logical core 2 to handle a maximum of 8 client processes configured to\n+run on socket 0 to handle traffic from NIC ports 0 and 1::\n+\n+ root@host:mp_server# ./build/mp_server -c 2 -- -p 3 -n 8\n+\n+NOTE: If an additional second core is given in the coremask to the server process\n+that second core will be used to print statistics. 
When benchmarking, only a\n+single lcore is needed for the server process\n+\n+Run the Client application:\n+\n+- In another terminal run the client application.\n+- Give each client a distinct core mask with -c.\n+- Give each client a unique client-id with -n.\n+\n+An example commands to run 8 client processes is as follows::\n+\n+ root@host:mp_client# ./build/mp_client -c 40 --proc-type=secondary -- -n 0 &\n+ root@host:mp_client# ./build/mp_client -c 100 --proc-type=secondary -- -n 1 &\n+ root@host:mp_client# ./build/mp_client -c 400 --proc-type=secondary -- -n 2 &\n+ root@host:mp_client# ./build/mp_client -c 1000 --proc-type=secondary -- -n 3 &\n+ root@host:mp_client# ./build/mp_client -c 4000 --proc-type=secondary -- -n 4 &\n+ root@host:mp_client# ./build/mp_client -c 10000 --proc-type=secondary -- -n 5 &\n+ root@host:mp_client# ./build/mp_client -c 40000 --proc-type=secondary -- -n 6 &\n+ root@host:mp_client# ./build/mp_client -c 100000 --proc-type=secondary -- -n 7 &\n+\n+Test Case: Function Tests\n+-------------------------\n+start server process and 2 client process, send some packets, the number of packets is a random value between 20 and 256.\n+summarize all received packets and check whether it is bigger than or equal to the number of sent packets\n+\n+1. start server process::\n+\n+ ./dpdk-mp_server -l 1,2 -n 4 -a 0000:05:01.0 -a 0000:05:01.1 -- -p 0x3 -n 2\n+\n+2. start 2 client process::\n+\n+ ./dpdk-mp_client -l 3 -n 4 -a 0000:05:01.0 -a 0000:05:01.1 --proc-type=auto -- -n 0\n+ ./dpdk-mp_client -l 4 -n 4 -a 0000:05:01.0 -a 0000:05:01.1 --proc-type=auto -- -n 1\n+\n+3. send some packets,the number of packets is a random value between 20 and 256, packet type include IPV6/4,TCP/UDP,\n+ refer to Random_Packet\n+\n+4. 
stop all process and check output::\n+\n+ the number of received packets for each client should bigger than 0.\n+ summarize all received packets for all clients should bigger than or equal to the number of sent packets\n+\n+Testpmd Multi-Process Test\n+==========================\n+\n+Description\n+-----------\n+\n+This is a multi-process test for Testpmd application, which demonstrates how multiple processes can\n+work together to perform packet in parallel.\n+\n+Test Methodology\n+----------------\n+Testpmd support to specify total number of processes and current process ID.\n+Each process owns subset of Rx and Tx queues\n+The following are the command-line options for testpmd multi-process support::\n+\n+ primary process:\n+ ./dpdk-testpmd -a xxx --proc-type=auto -l 0-1 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=0\n+\n+ secondary process:\n+ ./dpdk-testpmd -a xxx --proc-type=auto -l 2-3 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=1\n+\n+ --num-procs:\n+ The number of processes which will be used\n+ --proc-id:\n+ The ID of the current process (ID < num-procs),ID should be different in primary process and secondary\n+ process, which starts from ‘0’.\n+\n+All queues are allocated to different processes based on proc_num and proc_id\n+Calculation rule for queue::\n+\n+ start(queue start id) = proc_id * nb_q / num_procs\n+ end(queue end id) = start + nb_q / num_procs\n+\n+For example, if testpmd is configured to have 4 Tx and Rx queues, queues 0 and 1 will be used by the primary process and\n+queues 2 and 3 will be used by the secondary process.\n+\n+Note::\n+\n+ nb_q is the number of queue\n+ The number of queues should be a multiple of the number of processes. If not, redundant queues will exist after\n+ queues are allocated to processes. If RSS is enabled, packet loss occurs when traffic is sent to all processes at the\n+ same time.Some traffic goes to redundant queues and cannot be forwarded.\n+ All the dev ops is supported in primary process. 
While secondary process is not permitted to allocate or release\n+ shared memory.\n+ When secondary is running, port in primary is not permitted to be stopped.\n+ Reconfigure operation is only valid in primary.\n+ Stats is supported, stats will not change when one quits and starts, as they share the same buffer to store the stats.\n+ Flow rules are maintained in process level:\n+ primary and secondary has its own flow list (but one flow list in HW). The two can see all the queues, so setting\n+ the flow rules for the other is OK. But in the testpmd primary process receiving or transmitting packets from the\n+ queue allocated for secondary process is not permitted, and same for secondary process\n+\n+ Flow API and RSS are supported\n+\n+Prerequisites\n+-------------\n+\n+1. Hardware:\n+ columbiaville_25g/columbiaville_100g\n+\n+2. Software:\n+ DPDK: http://dpdk.org/git/dpdk\n+ scapy: http://www.secdev.org/projects/scapy/\n+\n+3. Copy specific ice package to /lib/firmware/intel/ice/ddp/ice.pkg\n+\n+4. Generate 2 VFs on PF and set mac address for vf0::\n+\n+ echo 2 > /sys/bus/pci/devices/0000:af:00.0/sriov_numvfs\n+ ip link set eth7 vf 0 mac 00:11:22:33:44:55\n+\n+ 0000:05:00.0 generate 0000:05:01.0 and 0000:05:01.1\n+\n+4. 
Bind VFs to dpdk driver::\n+\n+ ./usertools/dpdk-devbind.py -b vfio-pci 0000:05:01.0 0000:05:01.1\n+\n+Default parameters\n+------------------\n+\n+ MAC::\n+\n+ [Dest MAC]: 00:11:22:33:44:55\n+\n+ IPv4::\n+\n+ [Source IP]: 192.168.0.20\n+ [Dest IP]: 192.168.0.21\n+ [IP protocol]: 255\n+ [TTL]: 2\n+ [DSCP]: 4\n+\n+ TCP::\n+\n+ [Source Port]: 22\n+ [Dest Port]: 23\n+\n+ Random_Packet::\n+\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IPv6(src='::192.168.0.1', version=6, tc=0, fl=0, dst='::192.168.1.1', hlim=64)/TCP(sport=65535, dport=65535, flags=0)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IP(frag=0, src='192.168.0.1', tos=0, dst='192.168.1.2', version=4, ttl=64, id=1)/UDP(sport=65535, dport=65535)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IPv6(src='::192.168.0.1', version=6, tc=0, fl=0, dst='::192.168.1.3', hlim=64)/UDP(sport=65535, dport=65535)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IPv6(src='::192.168.0.1', version=6, tc=0, fl=0, dst='::192.168.1.4', hlim=64)/UDP(sport=65535, dport=65535)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IPv6(src='::192.168.0.1', version=6, tc=0, fl=0, dst='::192.168.1.5', hlim=64)/TCP(sport=65535, dport=65535, flags=0)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IP(frag=0, src='192.168.0.1', tos=0, dst='192.168.1.15', version=4, ttl=64, id=1)/UDP(sport=65535, dport=65535)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IPv6(src='::192.168.0.1', version=6, tc=0, fl=0, dst='::192.168.1.16', hlim=64)/TCP(sport=65535, dport=65535, flags=0)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IPv6(src='::192.168.0.1', version=6, tc=0, fl=0, dst='::192.168.1.27', hlim=64)/TCP(sport=65535, dport=65535, flags=0)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IP(frag=0, src='192.168.0.1', tos=0, dst='192.168.1.28', version=4, ttl=64, id=1)/TCP(sport=65535, 
dport=65535, flags=0)/Raw(),\n+ Ether(dst='00:11:22:33:44:55', src='00:00:20:00:00:00')/IPv6(src='::192.168.0.1', version=6, tc=0, fl=0, dst='::192.168.1.30', hlim=64)/TCP(sport=65535, dport=65535, flags=0)/Raw()\n+\n+Test Case: multiprocess proc_type random packet\n+===============================================\n+\n+Subcase 1: proc_type_auto_4_process\n+-----------------------------------\n+1. Launch the app ``testpmd``, start 4 process with rxq/txq set as 16 (proc_id:0~3, queue id:0~15) with the following arguments::\n+\n+ ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=4 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=4 --proc-id=1\n+ ./dpdk-testpmd -l 5,6 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=4 --proc-id=2\n+ ./dpdk-testpmd -l 7,8 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=4 --proc-id=3\n+\n+2. Send 20 random packets::\n+\n+ packets generated by script, packet type including 'TCP', 'UDP', 'IPv6_TCP', 'IPv6_UDP', like as: Random_Packet\n+\n+3. Check whether each process receives 5 packets with the corresponding queue::\n+\n+ process 0 should receive 5 packets with queue 0~3\n+ process 1 should receive 5 packets with queue 4~7\n+ process 2 should receive 5 packets with queue 8~11\n+ process 3 should receive 5 packets with queue 12~15\n+\n+4. Check the statistics is correctly, the total number of packets received is 20\n+\n+Subcase 2: proc_type_primary_secondary_2_process\n+------------------------------------------------\n+1. 
Launch the app ``testpmd``, start 2 process with rxq/txq set as 4 (proc_id:0~1, queue id:0~3) with the following arguments::\n+\n+ ./dpdk-testpmd -l 1,2 --proc-type=primary -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 --proc-type=secondary -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=1\n+\n+2. Send 20 random packets::\n+\n+ packets generated by script, packet type including 'TCP', 'TCP', 'IPv6_TCP', 'IPv6_UDP', such as: Random_Packet\n+\n+3. Check whether each process receives 10 packets with the corresponding queue::\n+\n+ process 0 should receive 10 packets with queue 0~1\n+ process 1 should receive 10 packets with queue 2~3\n+\n+\n+4. Check the statistics is correctly, the total number of packets received is 20\n+\n+Test Case: multiprocess proc_type specify packet\n+================================================\n+\n+Subcase 1: proc_type_auto_2_process\n+-----------------------------------\n+1. Launch the app ``testpmd``, start 2 process with rxq/txq set as 8 (proc_id:0~1, queue id:0~7) with the following arguments::\n+\n+ ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=1\n+\n+2. 
Create rule to set queue as one of each process queues::\n+\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 / end actions queue index 0 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20 / end actions queue index 1 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.2.20 / end actions queue index 2 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.3.20 / end actions queue index 3 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.4.20 / end actions queue index 4 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.5.20 / end actions queue index 5 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.6.20 / end actions queue index 6 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.7.20 / end actions queue index 7 / end\n+\n+3. Send 1 matched packet for each rule::\n+\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.0.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.1.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.2.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.3.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.4.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.5.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.6.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.7.20\")/(\"X\"*46)\n+\n+4. Check whether each process receives 4 packets with the corresponding queue::\n+\n+ process 0 should receive 4 packets with queue 0~3\n+ process 1 should receive 4 packets with queue 4~7\n+\n+5. Check the statistics is correctly, the total number of packets received is 8\n+\n+Subcase 2: proc_type_primary_secondary_3_process\n+------------------------------------------------\n+1. 
Launch the app ``testpmd``, start 3 process with rxq/txq set as 6 (proc_id:0~2, queue id:0~5) with the following arguments::\n+\n+ ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=6 --txq=6 --num-procs=3 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=6 --txq=6 --num-procs=3 --proc-id=1\n+ ./dpdk-testpmd -l 5,6 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=6 --txq=6 --num-procs=3 --proc-id=2\n+\n+2. Create rule to set queue as one of each process queues::\n+\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 / end actions queue index 0 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20 / end actions queue index 1 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.2.20 / end actions queue index 2 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.3.20 / end actions queue index 3 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.4.20 / end actions queue index 4 / end\n+ flow create 0 ingress pattern eth / ipv4 src is 192.168.5.20 / end actions queue index 5 / end\n+\n+3. Send 1 matched packet for each rule::\n+\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.0.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.1.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.2.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.3.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.4.20\")/(\"X\"*46)\n+ Ether(dst=\"00:11:22:33:44:55\")/IP(src=\"192.168.5.20\")/(\"X\"*46)\n+\n+4. Check whether each process receives 2 packets with the corresponding queue::\n+\n+ process 0 should receive 2 packets with queue 0~1\n+ process 1 should receive 2 packets with queue 2~3\n+ process 2 should receive 2 packets with queue 4~5\n+\n+5. 
Check the statistics is correctly, the total number of packets received is 6\n+\n+Test Case: test_multiprocess_with_fdir_rule\n+===========================================\n+\n+Launch the app ``testpmd``, start 2 process with rxq/txq set as 16 (proc_id:0~1, queue id:0~15) with the following arguments::\n+\n+ ./dpdk-testpmd -l 1,2 -n 4 -a 0000:05:01.0 --proc-type=auto --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 -n 4 -a 0000:05:01.0 --proc-type=auto --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=2 --proc-id=1\n+\n+Subcase 1: mac_ipv4_pay_queue_index\n+-----------------------------------\n+\n+1. Create rule::\n+\n+ flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions queue index 6 / mark id 4 / end\n+\n+2. Send matched packets, check the packets is distributed to queue 6 with FDIR matched ID=0x4.\n+ Send unmatched packets, check the packets are distributed by RSS without FDIR matched ID\n+\n+3. Verify rules can be listed and destroyed::\n+\n+ testpmd> flow list 0\n+\n+ check the rule listed.\n+ destroy the rule::\n+\n+ testpmd> flow destroy 0 rule 0\n+\n+4. Verify matched packet is distributed by RSS without FDIR matched ID.\n+ check there is no rule listed.\n+\n+Subcase 2: mac_ipv4_pay_rss_queues\n+----------------------------------\n+1. Create rule::\n+\n+ flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions rss queues 10 11 end / mark / end\n+\n+2. Send matched packets, check the packets is distributed to queue 10 or 11.\n+ Send unmatched packets, check the packets are distributed by RSS\n+\n+3. Repeat step 3 of subcase 1\n+\n+4. Verify matched packet is distributed by RSS.\n+ check there is no rule listed.\n+\n+Subcase 3: mac_ipv4_pay_drop\n+----------------------------\n+\n+1. 
Create rule::\n+\n+ flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions drop / mark / end\n+\n+2. Send matched packets, check the packets are dropped.\n+ Send unmatched packets, check the packets are not dropped\n+\n+3. Repeat step 3 of subcase 1\n+\n+4. Verify matched packets are not dropped.\n+ check there is no rule listed.\n+\n+Subcase 4: mac_ipv4_pay_mark_rss\n+--------------------------------\n+1. Create rule::\n+\n+ flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions mark / rss / end\n+\n+2. Send matched packets, check the packets are distributed by RSS with FDIR matched ID=0x0.\n+ Send unmatched packets, check the packets are distributed by RSS without FDIR matched ID\n+\n+3. Repeat step 3 of subcase 1\n+\n+4. Verify matched packets are distributed to the same queue without FDIR matched ID.\n+ check there is no rule listed.\n+\n+Note: step2 and step4 need to check whether all received packets of each process are distributed by RSS\n+\n+Test Case: test_multiprocess_with_rss_toeplitz\n+==============================================\n+Launch the app ``testpmd``,start 2 process with queue num set as 16 (proc_id: 0~1, queue id: 0~15) with the following arguments::\n+\n+ ./dpdk-testpmd -l 1,2 -n 4 -a 0000:05:01.0 --proc-type=auto --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 -n 4 -a 0000:05:01.0 --proc-type=auto --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=2 --proc-id=1\n+\n+all the test cases run the same test steps as below::\n+\n+ 1. validate rule.\n+ 2. create rule and list rule.\n+ 3. send a basic hit pattern packet,record the hash value,\n+ check the packet is distributed to queues by RSS.\n+ 4. 
send hit pattern packet with changed input set in the rule.\n+ check the received packet have different hash value with basic packet.\n+ check the packet is distributed to queues by rss.\n+ 5. send hit pattern packet with changed input set not in the rule.\n+ check the received packet have same hash value with the basic packet.\n+ check the packet is distributed to queues by rss.\n+ 6. destroy the rule and list rule.\n+ 7. send same packet with step 3.\n+ check the received packets have no hash value, and distributed to queue 0.\n+\n+ Note: step3, step4 and step5 need to check whether all received packets of each process are distributed by RSS\n+\n+basic hit pattern packets are the same in this test case.\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase 1: mac_ipv4_tcp_l2_src\n+------------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types eth l2-src-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E1\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.0.3\", src=\"192.168.0.5\")/TCP(sport=25,dport=99)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l2_dst\n+----------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types eth l2-dst-only end key_len 0 queues end / end\n+\n+2. 
hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.3\", src=\"192.168.0.5\")/TCP(sport=25,dport=99)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l2src_l2dst\n+---------------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types eth end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E1\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.3\", src=\"192.168.0.5\")/TCP(sport=25,dport=99)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l3_src\n+----------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=32,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l3_dst\n+----------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only end key_len 0 queues end / end\n+\n+2. 
hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=32,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l3src_l4src\n+---------------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l4-src-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=32,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l3src_l4dst\n+---------------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-src-only l4-dst-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. 
hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=32,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l3dst_l4src\n+---------------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only l4-src-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=32,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=22,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l3dst_l4dst\n+---------------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l3-dst-only l4-dst-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. 
hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=32,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l4_src\n+----------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l4-src-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=32,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.1.2\")/TCP(sport=22,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_l4_dst\n+----------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp l4-dst-only end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.1.2\")/TCP(sport=32,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_ipv4\n+--------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4 end key_len 0 queues end / end\n+\n+2. 
hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=32,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Subcase: mac_ipv4_tcp_all\n+-------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end key_len 0 queues end / end\n+\n+2. hit pattern/defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=33)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=32,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.1.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.1.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+3. 
hit pattern/not defined input set:\n+ipv4-tcp packets::\n+\n+ sendp([Ether(src=\"00:11:22:33:44:55\", dst=\"68:05:CA:BB:27:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Test Case: test_multiprocess_with_rss_symmetric\n+===============================================\n+Launch the app ``testpmd``, start 2 process with queue num set as 16(proc_id: 0~1, queue id: 0~15) with the following arguments::\n+\n+ ./dpdk-testpmd -l 1,2 -n 4 -a 0000:05:01.0 --proc-type=auto --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 -n 4 -a 0000:05:01.0 --proc-type=auto --log-level=ice,7 -- -i --rxq=16 --txq=16 --num-procs=2 --proc-id=1\n+\n+test steps as below::\n+\n+ 1. validate and create rule.\n+ 2. set \"port config all rss all\".\n+ 3. send hit pattern packets with switched value of input set in the rule.\n+ check the received packets have the same hash value.\n+ check all the packets are distributed to queues by rss\n+ 4. destroy the rule and list rule.\n+ 5. send same packets with step 3\n+ check the received packets have no hash value, or have different hash value.\n+\n+ Note: step3 needs to check whether all received packets of each process are distributed by RSS\n+\n+Subcase: mac_ipv4_symmetric\n+---------------------------\n+1. create rss rule::\n+\n+ flow create 0 ingress pattern eth / ipv4 / end actions rss func symmetric_toeplitz types ipv4 end key_len 0 queues end / end\n+\n+2. 
hit pattern/defined input set:\n+ipv4-nonfrag packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.2\", src=\"192.168.0.1\")/(\"X\"*480)],iface=\"ens786f0\")\n+\n+ipv4-frag packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\",frag=6)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.2\", src=\"192.168.0.1\",frag=6)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+ipv4-tcp packets::\n+\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.1\", src=\"192.168.0.2\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+ sendp([Ether(dst=\"00:11:22:33:44:55\", src=\"68:05:CA:BB:26:E0\")/IP(dst=\"192.168.0.2\", src=\"192.168.0.1\")/TCP(sport=22,dport=23)/(\"X\"*480)],iface=\"ens786f0\")\n+\n+Test Case: test_multiprocess_auto_process_type_detected\n+=======================================================\n+1. start 2 processes with queue num set as 8 (proc_id:0~1,queue id:0~7)::\n+\n+ ./dpdk-testpmd -l 1,2 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 --proc-type=auto -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=1\n+\n+2. check the output of each process::\n+\n+ process 1 output contains 'Auto-detected process type: PRIMARY'\n+ process 2 output contains 'Auto-detected process type: SECONDARY'\n+\n+Test Case: test_multiprocess_negative_2_primary_process\n+=======================================================\n+1. 
start 2 processes with queue num set as 4 (proc_id:0~1,queue id:0~3)::\n+\n+ ./dpdk-testpmd -l 1,2 --proc-type=primary -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 --proc-type=primary -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=4 --txq=4 --num-procs=2 --proc-id=1\n+\n+2. check the output of each process::\n+\n+ process 1 launches successfully\n+ process 2 fails to launch and output contains 'Is another primary process running?'\n+\n+Test Case: test_multiprocess_negative_exceed_process_num\n+========================================================\n+1. start 3 processes exceeding the specified num 2::\n+\n+ ./dpdk-testpmd -l 1,2 --proc-type=primary -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=0\n+ ./dpdk-testpmd -l 3,4 --proc-type=primary -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=1\n+ ./dpdk-testpmd -l 5,6 --proc-type=primary -a 0000:05:01.0 --log-level=ice,7 -- -i --rxq=8 --txq=8 --num-procs=2 --proc-id=2\n+\n+2. check the output of each process::\n+\n+ the first and second processes should be launched successfully\n+ the third process should fail to launch and output should contain the following string:\n+ 'multi-process option proc-id(2) should be less than num-procs(2)'\n", "prefixes": [ "V2" ] }{ "id": 109894, "url": "