get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

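For example, the patch detail endpoint can be exercised from Python with the requests library (a minimal sketch; the requests dependency, the API token placeholder, and the maintainer/delegate permissions needed for write operations are assumptions, not part of the response shown below):

import requests

BASE_URL = "http://patches.dpdk.org/api"

# GET: show a patch (read-only, no authentication needed)
patch = requests.get(BASE_URL + "/patches/83940/").json()
print(patch["name"], patch["state"])

# PATCH: update a patch (e.g. change its state). This is a write operation,
# so it is left commented out here -- it assumes a valid API token and
# sufficient rights on the project.
# requests.patch(
#     BASE_URL + "/patches/83940/",
#     headers={"Authorization": "Token <your-api-token>"},
#     json={"state": "accepted"},
# )
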
GET /api/patches/83940/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83940,
    "url": "http://patches.dpdk.org/api/patches/83940/?format=api",
    "web_url": "http://patches.dpdk.org/project/dts/patch/20201111062701.36217-4-yingyax.han@intel.com/",
    "project": {
        "id": 3,
        "url": "http://patches.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201111062701.36217-4-yingyax.han@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20201111062701.36217-4-yingyax.han@intel.com",
    "date": "2020-11-11T06:27:01",
    "name": "[V1,3/3] tests: add vf single core test script",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "129fb4de160a27ac8949160b60320aabfd6d6723",
    "submitter": {
        "id": 1645,
        "url": "http://patches.dpdk.org/api/people/1645/?format=api",
        "name": "Yingya Han",
        "email": "yingyax.han@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dts/patch/20201111062701.36217-4-yingyax.han@intel.com/mbox/",
    "series": [
        {
            "id": 13796,
            "url": "http://patches.dpdk.org/api/series/13796/?format=api",
            "web_url": "http://patches.dpdk.org/project/dts/list/?series=13796",
            "date": "2020-11-11T06:26:58",
            "name": "add vf single core perf test suite",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/13796/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/83940/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/83940/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B238FA09D3;\n\tWed, 11 Nov 2020 07:31:25 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id ADDBC567C;\n\tWed, 11 Nov 2020 07:31:24 +0100 (CET)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n by dpdk.org (Postfix) with ESMTP id 2CE7E2BAB\n for <dts@dpdk.org>; Wed, 11 Nov 2020 07:31:23 +0100 (CET)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 10 Nov 2020 22:31:22 -0800",
            "from dpdk-lijuan-purley2.sh.intel.com ([10.67.119.168])\n by fmsmga008.fm.intel.com with ESMTP; 10 Nov 2020 22:31:21 -0800"
        ],
        "IronPort-SDR": [
            "\n 3CoIZN/kSltW39g296WeIIv9L8a72X8VrTukwL+71BQXGu2HOntJhN6/sKCdhEs722wMmXBv6t\n Utn3XQmmjI4A==",
            "\n 0bhBrc4CNoyWJAlHRz1/JHnBkciiHCZvQfzeyeXw5UadvzUlJ9YgTIyUVr32d2qJlSicP9/NcZ\n s93hLbaUqTHQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9801\"; a=\"149951486\"",
            "E=Sophos;i=\"5.77,468,1596524400\"; d=\"scan'208\";a=\"149951486\"",
            "E=Sophos;i=\"5.77,468,1596524400\"; d=\"scan'208\";a=\"308345750\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "hanyingya <yingyax.han@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "hanyingya <yingyax.han@intel.com>",
        "Date": "Wed, 11 Nov 2020 14:27:01 +0800",
        "Message-Id": "<20201111062701.36217-4-yingyax.han@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20201111062701.36217-1-yingyax.han@intel.com>",
        "References": "<20201111062701.36217-1-yingyax.han@intel.com>",
        "Subject": "[dts] [PATCH V1 3/3]tests: add vf single core test script",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org",
        "Sender": "\"dts\" <dts-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: hanyingya <yingyax.han@intel.com>\n---\n tests/TestSuite_vf_single_core_perf.py | 399 +++++++++++++++++++++++++\n 1 file changed, 399 insertions(+)\n create mode 100644 tests/TestSuite_vf_single_core_perf.py",
    "diff": "diff --git a/tests/TestSuite_vf_single_core_perf.py b/tests/TestSuite_vf_single_core_perf.py\nnew file mode 100644\nindex 00000000..57ab7240\n--- /dev/null\n+++ b/tests/TestSuite_vf_single_core_perf.py\n@@ -0,0 +1,399 @@\n+#\n+# Copyright(c) 2020 Intel Corporation. All rights reserved.\n+# All rights reserved.\n+#\n+# Redistribution and use in source and binary forms, with or without\n+# modification, are permitted provided that the following conditions\n+# are met:\n+#\n+#   * Redistributions of source code must retain the above copyright\n+#     notice, this list of conditions and the following disclaimer.\n+#   * Redistributions in binary form must reproduce the above copyright\n+#     notice, this list of conditions and the following disclaimer in\n+#     the documentation and/or other materials provided with the\n+#     distribution.\n+#   * Neither the name of Intel Corporation nor the names of its\n+#     contributors may be used to endorse or promote products derived\n+#     from this software without specific prior written permission.\n+#\n+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+\"\"\"\n+DPDK Test suite.\n+\"\"\"\n+\n+import utils\n+import json\n+import os\n+from test_case import TestCase\n+from settings import HEADER_SIZE, UPDATE_EXPECTED, load_global_setting\n+from pmd_output import PmdOutput\n+from copy import deepcopy\n+import rst\n+from pktgen import PacketGeneratorHelper\n+\n+\n+class TestVfSingleCorePerf(TestCase):\n+\n+    def set_up_all(self):\n+        \"\"\"\n+        Run at the start of each test suite.\n+        PMD prerequisites.\n+        \"\"\"\n+        # Based on h/w type, choose how many ports to use\n+        self.dut_ports = self.dut.get_ports()\n+        self.verify(len(self.dut_ports) >= 1, \"At least 1 port is required to test\")\n+        self.socket = self.dut.get_numa_id(self.dut_ports[0])\n+        self.vfs_mac = [\"00:12:34:56:78:0%d\" % (i + 1) for i in self.dut_ports]\n+        self.pmdout = PmdOutput(self.dut)\n+\n+        # set vf assign method and vf driver\n+        self.vf_driver = self.get_suite_cfg()['vf_driver']\n+        if self.vf_driver is None:\n+            self.vf_driver = self.drivername\n+\n+        # get dts output path\n+        if self.logger.log_path.startswith(os.sep):\n+            self.output_path = self.logger.log_path\n+        else:\n+            cur_path = os.path.dirname(\n+                os.path.dirname(os.path.realpath(__file__)))\n+            self.output_path = os.sep.join([cur_path, self.logger.log_path])\n+        # create an instance to set stream field setting\n+        self.pktgen_helper = PacketGeneratorHelper()\n+\n+        # determine if to save test result as a separated file\n+        self.save_result_flag = True\n+\n+    def set_up(self):\n+    
    \"\"\"\n+        Run before each test case.\n+        It's more convenient to load suite configuration here than\n+        set_up_all in debug mode.\n+        \"\"\"\n+        # test parameters include: frames size, descriptor numbers\n+        self.test_parameters = self.get_suite_cfg()['test_parameters']\n+\n+        # traffic duraion in second\n+        self.test_duration = self.get_suite_cfg()['test_duration']\n+        self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.nic]\n+\n+        # Accepted tolerance in Mpps\n+        self.gap = self.get_suite_cfg()['accepted_tolerance']\n+\n+        # header to print test result table\n+        self.table_header = ['Fwd_core', 'Frame Size', 'TXD/RXD', 'Throughput', 'Rate',\n+                             'Expected Throughput', 'Throughput Difference']\n+        self.test_result = {'header': [], 'data': []}\n+\n+        # initilize throughput attribution\n+        self.throughput = {}\n+\n+    def setup_vf_env(self):\n+        \"\"\"\n+        require enough PF ports,using kernel or dpdk driver, create 1 VF from each PF.\n+        \"\"\"\n+        self.used_dut_port = [port for port in self.dut_ports]\n+        self.sriov_vfs_port = []\n+        for i in self.dut_ports:\n+            host_driver = self.dut.ports_info[i]['port'].default_driver\n+            self.dut.generate_sriov_vfs_by_port(self.used_dut_port[i], 1, driver=host_driver)\n+            sriov_vfs_port = self.dut.ports_info[self.used_dut_port[i]]['vfs_port']\n+            self.sriov_vfs_port.append(sriov_vfs_port)\n+\n+        # set vf mac address.\n+        for i in self.dut_ports:\n+            pf_intf = self.dut.ports_info[i]['port'].get_interface_name()\n+            self.dut.send_expect(\"ip link set %s vf 0 mac %s\" % (pf_intf, self.vfs_mac[i]), \"#\")\n+\n+        # bind vf to vf driver\n+        try:\n+            for i in self.dut_ports:\n+                for port in self.sriov_vfs_port[i]:\n+                    port.bind_driver(self.vf_driver)\n+        except Exception as e:\n+            self.destroy_vf_env()\n+            raise Exception(e)\n+\n+    def destroy_vf_env(self):\n+        \"\"\"\n+        destroy the setup VFs\n+        \"\"\"\n+        for i in self.dut_ports:\n+            self.dut.destroy_sriov_vfs_by_port(self.dut_ports[i])\n+\n+    def flows(self):\n+        \"\"\"\n+        Return a list of packets that implements the flows described in l3fwd.\n+        \"\"\"\n+        return [\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.1.0\")',\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.1.1\")',\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.0.0\")',\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.0.1\")',\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.3.0\")',\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.3.1\")',\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.2.0\")',\n+            'IP(src=\"1.2.3.4\",dst=\"192.18.2.1\")']\n+\n+    def create_pacap_file(self, frame_size, port_num):\n+        \"\"\"\n+        Prepare traffic flow\n+        \"\"\"\n+        payload_size = frame_size - HEADER_SIZE['ip'] - HEADER_SIZE['eth']\n+        pcaps = {}\n+        for _port in self.dut_ports:\n+            if 1 == port_num:\n+                flow = ['Ether(dst=\"%s\")/%s/(\"X\"*%d)' % (self.vfs_mac[_port], self.flows()[_port], payload_size)]\n+                pcap = os.sep.join([self.output_path, \"dst{0}.pcap\".format(_port)])\n+                self.tester.scapy_append('wrpcap(\"%s\", [%s])' % (pcap, ','.join(flow)))\n+                
self.tester.scapy_execute()\n+                pcaps[_port] = []\n+                pcaps[_port].append(pcap)\n+            else:\n+                index = self.dut_ports[_port]\n+                cnt = 0\n+                for layer in self.flows()[_port * 2:(_port + 1) * 2]:\n+                    flow = ['Ether(dst=\"%s\")/%s/(\"X\"*%d)' % (self.vfs_mac[_port], layer, payload_size)]\n+                    pcap = os.sep.join([self.output_path, \"dst{0}_{1}.pcap\".format(index, cnt)])\n+                    self.tester.scapy_append('wrpcap(\"%s\", [%s])' % (pcap, ','.join(flow)))\n+                    self.tester.scapy_execute()\n+                    if index not in pcaps:\n+                        pcaps[index] = []\n+                    pcaps[index].append(pcap)\n+                    cnt += 1\n+        return pcaps\n+\n+    def prepare_stream(self, pcaps, port_num):\n+        \"\"\"\n+        create streams for ports,one port one stream\n+        \"\"\"\n+        tgen_input = []\n+        if 1 == port_num:\n+            txIntf = self.tester.get_local_port(self.dut_ports[0])\n+            rxIntf = txIntf\n+            for pcap in pcaps[0]:\n+                tgen_input.append((txIntf, rxIntf, pcap))\n+        else:\n+            for rxPort in range(port_num):\n+                if rxPort % port_num == 0 or rxPort ** 2 == port_num:\n+                    txIntf = self.tester.get_local_port(self.dut_ports[rxPort + 1])\n+                    port_id = self.dut_ports[rxPort + 1]\n+                else:\n+                    txIntf = self.tester.get_local_port(self.dut_ports[rxPort - 1])\n+                    port_id = self.dut_ports[rxPort - 1]\n+                rxIntf = self.tester.get_local_port(self.dut_ports[rxPort])\n+                for pcap in pcaps[port_id]:\n+                    tgen_input.append((txIntf, rxIntf, pcap))\n+        return tgen_input\n+\n+    def test_perf_vf_single_core(self):\n+        \"\"\"\n+        Run nic single core performance\n+        \"\"\"\n+        self.setup_vf_env()\n+        port_num = len(self.dut_ports)\n+        self.perf_test(port_num)\n+        self.handle_expected()\n+        self.handle_results()\n+\n+    def handle_expected(self):\n+        \"\"\"\n+        Update expected numbers to configurate file: conf/$suite_name.cfg\n+        \"\"\"\n+        if load_global_setting(UPDATE_EXPECTED) == \"yes\":\n+            for fwd_config in list(self.test_parameters.keys()):\n+                for frame_size in list(self.test_parameters[fwd_config].keys()):\n+                    for nb_desc in self.test_parameters[fwd_config][frame_size]:\n+                        self.expected_throughput[fwd_config][frame_size][nb_desc] = \\\n+                            round(self.throughput[fwd_config][frame_size][nb_desc], 3)\n+\n+    def perf_test(self, port_num):\n+        \"\"\"\n+        Single core Performance Benchmarking test\n+        \"\"\"\n+        # ports whitelist\n+        eal_para = \"\"\n+        for i in range(port_num):\n+            eal_para += \" -w \" + self.sriov_vfs_port[i][0].pci\n+        port_mask = utils.create_mask(self.dut_ports)\n+        # parameters for application/testpmd\n+        param = \" --portmask=%s\" % (port_mask)\n+\n+        for fwd_config in list(self.test_parameters.keys()):\n+            # the fwd_config just the config for fwd core\n+            # to start testpmd should add 1C to it\n+            core_config = '1S/%s' % fwd_config\n+            thread_num = int(fwd_config[fwd_config.find('/')+1: fwd_config.find('T')])\n+            
core_list = self.dut.get_core_list(core_config, socket=self.socket)\n+            self.verify(len(core_list) >= thread_num, \"the Hyper-threading not open, please open it to test\")\n+\n+            # need add one more core for start testpmd\n+            core_list = [core_list[0]] + [str(int(i) + 1) for i in core_list]\n+\n+            self.logger.info(\"Executing Test Using cores: %s of config %s\" % (core_list, fwd_config))\n+\n+            nb_cores = thread_num\n+\n+            # fortville has to use 2 queues at least to get the best performance\n+            if self.nic in [\"fortville_25g\", \"fortville_spirit\"] or thread_num == 2:\n+                param += \" --rxq=2 --txq=2\"\n+            # columbiaville use one queue per port for best performance.\n+            elif self.nic in [\"columbiaville_100g\", \"columbiaville_25g\"]:\n+                param += \" --rxq=1 --txq=1\"\n+                # workaround for that testpmd can't forward packets in io forward mode\n+                param += \" --port-topology=loop\"\n+\n+            self.throughput[fwd_config] = dict()\n+            for frame_size in list(self.test_parameters[fwd_config].keys()):\n+                self.throughput[fwd_config][frame_size] = dict()\n+                pcaps = self.create_pacap_file(frame_size, port_num)\n+                tgenInput = self.prepare_stream(pcaps, port_num)\n+                for nb_desc in self.test_parameters[fwd_config][frame_size]:\n+                    self.logger.info(\"Test running at parameters: \" + \"framesize: {}, rxd/txd: {}\".format(frame_size, nb_desc))\n+                    parameter = param + \" --txd=%d --rxd=%d --nb-cores=%d\" % (nb_desc, nb_desc, nb_cores)\n+                    self.pmdout.start_testpmd(\n+                        core_list, parameter, eal_para, socket=self.socket)\n+                    self.dut.send_expect(\"set fwd mac\", \"testpmd> \", 15)\n+                    self.dut.send_expect(\"start\", \"testpmd> \", 15)\n+\n+                    vm_config = self.set_fields()\n+                    # clear streams before add new streams\n+                    self.tester.pktgen.clear_streams()\n+\n+                    # run packet generator\n+                    streams = self.pktgen_helper.prepare_stream_from_tginput(tgenInput, 100, vm_config, self.tester.pktgen)\n+                    # set traffic option\n+                    traffic_opt = {'duration': self.test_duration}\n+                    _, packets_received = self.tester.pktgen.measure_throughput(stream_ids=streams, options=traffic_opt)\n+                    self.verify(packets_received > 0, \"No traffic detected\")\n+                    throughput = packets_received / 1000000.0\n+                    self.throughput[fwd_config][frame_size][nb_desc] = throughput\n+\n+                    self.dut.send_expect(\"stop\", \"testpmd> \")\n+                    self.dut.send_expect(\"quit\", \"# \", 30)\n+\n+                    self.verify(throughput, \"No traffic detected, please check your configuration\")\n+                    self.logger.info(\"Trouthput of \" + \"framesize: {}, rxd/txd: {} is :{} Mpps\".format(\n+                            frame_size, nb_desc, throughput))\n+\n+        return self.throughput\n+\n+    def handle_results(self):\n+        \"\"\"\n+        results handled process:\n+        1, save to self.test_results\n+        2, create test results table\n+        3, save to json file for Open Lab\n+        \"\"\"\n+        # save test results to self.test_result\n+        header = 
self.table_header\n+        for fwd_config in list(self.test_parameters.keys()):\n+            ret_datas = {}\n+            for frame_size in list(self.test_parameters[fwd_config].keys()):\n+                wirespeed = self.wirespeed(self.nic, frame_size, len(self.dut_ports))\n+                ret_datas[frame_size] = {}\n+                for nb_desc in self.test_parameters[fwd_config][frame_size]:\n+                    ret_data = {}\n+                    ret_data[header[0]] = fwd_config\n+                    ret_data[header[1]] = frame_size\n+                    ret_data[header[2]] = nb_desc\n+                    ret_data[header[3]] = \"{:.3f} Mpps\".format(\n+                        self.throughput[fwd_config][frame_size][nb_desc])\n+                    ret_data[header[4]] = \"{:.3f}%\".format(\n+                        self.throughput[fwd_config][frame_size][nb_desc] * 100 / wirespeed)\n+                    ret_data[header[5]] = \"{:.3f} Mpps\".format(\n+                        self.expected_throughput[fwd_config][frame_size][nb_desc])\n+                    ret_data[header[6]] = \"{:.3f} Mpps\".format(\n+                        self.throughput[fwd_config][frame_size][nb_desc] -\n+                        self.expected_throughput[fwd_config][frame_size][nb_desc])\n+\n+                    ret_datas[frame_size][nb_desc] = deepcopy(ret_data)\n+                self.test_result[fwd_config] = deepcopy(ret_datas)\n+\n+        # Create test results table\n+        self.result_table_create(header)\n+        for fwd_config in list(self.test_parameters.keys()):\n+            for frame_size in list(self.test_parameters[fwd_config].keys()):\n+                for nb_desc in self.test_parameters[fwd_config][frame_size]:\n+                    table_row = list()\n+                    for i in range(len(header)):\n+                        table_row.append(\n+                            self.test_result[fwd_config][frame_size][nb_desc][header[i]])\n+                    self.result_table_add(table_row)\n+        # present test results to screen\n+        self.result_table_print()\n+\n+        # save test results as a file\n+        if self.save_result_flag:\n+            self.save_result(self.test_result)\n+\n+    def save_result(self, data):\n+        \"\"\"\n+        Saves the test results as a separated file named with\n+        self.nic+_single_core_perf.json in output folder\n+        if self.save_result_flag is True\n+        \"\"\"\n+        json_obj = dict()\n+        case_name = self.running_case\n+        json_obj[case_name] = list()\n+        status_result = []\n+        for fwd_config in list(self.test_parameters.keys()):\n+            for frame_size in list(self.test_parameters[fwd_config].keys()):\n+                for nb_desc in self.test_parameters[fwd_config][frame_size]:\n+                    row_in = self.test_result[fwd_config][frame_size][nb_desc]\n+                    row_dict0 = dict()\n+                    row_dict0['performance'] = list()\n+                    row_dict0['parameters'] = list()\n+                    result_throughput = float(row_in['Throughput'].split()[0])\n+                    expected_throughput = float(row_in['Expected Throughput'].split()[0])\n+                    # delta value and accepted tolerance in percentage\n+                    delta = result_throughput - expected_throughput\n+                    if delta > -self.gap:\n+                        row_dict0['status'] = 'PASS'\n+                    else:\n+                        row_dict0['status'] = 'FAIL'\n+  
                  row_dict1 = dict(name=\"Throughput\", value=result_throughput, unit=\"Mpps\", delta=delta)\n+                    row_dict2 = dict(name=\"Txd/Rxd\", value=row_in[\"TXD/RXD\"], unit=\"descriptor\")\n+                    row_dict3 = dict(name=\"frame_size\", value=row_in[\"Frame Size\"], unit=\"bytes\")\n+                    row_dict4 = dict(name=\"Fwd_core\", value=row_in[\"Fwd_core\"])\n+                    row_dict0['performance'].append(row_dict1)\n+                    row_dict0['parameters'].append(row_dict2)\n+                    row_dict0['parameters'].append(row_dict3)\n+                    row_dict0['parameters'].append(row_dict4)\n+                    json_obj[case_name].append(row_dict0)\n+                    status_result.append(row_dict0['status'])\n+        with open(os.path.join(rst.path2Result,\n+                               '{0:s}_single_core_perf.json'.format(\n+                                   self.nic)), 'w') as fp:\n+            json.dump(json_obj, fp)\n+        self.verify(\"FAIL\" not in status_result, \"Excessive gap between test results and expectations\")\n+\n+    def set_fields(self):\n+        \"\"\"\n+        set ip protocol field behavior\n+        \"\"\"\n+        fields_config = {'ip': {'src': {'action': 'random'}, }, }\n+        return fields_config\n+\n+    def tear_down(self):\n+        \"\"\"\n+        Run after each test case.\n+        \"\"\"\n+        self.destroy_vf_env()\n+\n+    def tear_down_all(self):\n+        \"\"\"\n+        Run after each test suite.\n+        \"\"\"\n+        self.dut.kill_all()\n",
    "prefixes": [
        "V1",
        "3/3"
    ]
}
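
The JSON above can be consumed directly; the following short sketch (again assuming the requests library) pulls out fields shown in this response, such as the mbox link and the series the patch belongs to:

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/83940/").json()

# "mbox" points at the raw patch email, suitable for feeding to `git am`
if patch["state"] == "accepted" and not patch["archived"]:
    mbox = requests.get(patch["mbox"]).text
    with open("83940.mbox", "w") as mbox_file:
        mbox_file.write(mbox)

# each patch lists the series it was posted as part of
for series in patch["series"]:
    print(series["id"], series["version"], series["name"])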