get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/89536/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 89536,
    "url": "https://patches.dpdk.org/api/patches/89536/?format=api",
    "web_url": "https://patches.dpdk.org/project/dts/patch/20210319064705.4834-1-weix.ling@intel.com/",
    "project": {
        "id": 3,
        "url": "https://patches.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210319064705.4834-1-weix.ling@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20210319064705.4834-1-weix.ling@intel.com",
    "date": "2021-03-19T06:47:05",
    "name": "[V1,8/9] tests/vm2vm_virtio_net_perf:modify hard code bind cbdma device to igb_uio by drivername in execution.cfg",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "f06226297dd4bd7c900b15e3a0631574b1a1a553",
    "submitter": {
        "id": 1828,
        "url": "https://patches.dpdk.org/api/people/1828/?format=api",
        "name": "Ling, WeiX",
        "email": "weix.ling@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dts/patch/20210319064705.4834-1-weix.ling@intel.com/mbox/",
    "series": [
        {
            "id": 15786,
            "url": "https://patches.dpdk.org/api/series/15786/?format=api",
            "web_url": "https://patches.dpdk.org/project/dts/list/?series=15786",
            "date": "2021-03-19T06:45:58",
            "name": "modify hard code bind cbdma device to igb_uio by drivername in execution.cfg",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/15786/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/89536/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/89536/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9343BA0562;\n\tFri, 19 Mar 2021 07:48:11 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 87CEB140EA6;\n\tFri, 19 Mar 2021 07:48:11 +0100 (CET)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by mails.dpdk.org (Postfix) with ESMTP id 0C0604003F\n for <dts@dpdk.org>; Fri, 19 Mar 2021 07:48:09 +0100 (CET)",
            "from fmsmga005.fm.intel.com ([10.253.24.32])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Mar 2021 23:48:08 -0700",
            "from unknown (HELO localhost.localdomain) ([10.240.183.222])\n by fmsmga005-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Mar 2021 23:48:06 -0700"
        ],
        "IronPort-SDR": [
            "\n 4D97KESZC/Z42LqMjxVB2NniuQQgsc53YOrkm1F9LoBhxnIIkPMBGQjkKL/4gg1IrrERfRq3GI\n +If1q23ifcfA==",
            "\n NJwoXe4pvytVprPIgCooaIxmfvZK5DYsL+uXesxdJwVVsP5q0fHZsvH3DLt5LjMXxRgVCS05vA\n cbmdr+R23OLw=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9927\"; a=\"189878348\"",
            "E=Sophos;i=\"5.81,261,1610438400\"; d=\"scan'208\";a=\"189878348\"",
            "E=Sophos;i=\"5.81,261,1610438400\"; d=\"scan'208\";a=\"606504833\""
        ],
        "From": "Ling Wei <weix.ling@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "Ling Wei <weix.ling@intel.com>",
        "Date": "Fri, 19 Mar 2021 14:47:05 +0800",
        "Message-Id": "<20210319064705.4834-1-weix.ling@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dts] [PATCH V1 8/9] tests/vm2vm_virtio_net_perf:modify hard code\n bind cbdma device to igb_uio by drivername in execution.cfg",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org",
        "Sender": "\"dts\" <dts-bounces@dpdk.org>"
    },
    "content": "1.Modify hard code bind cbdma device to igb_uio by drivername\n in execution.cfg.\n2.Adjust code format.\n3.Use the same socket of dut's nic to start testpmd.\n\nSigned-off-by: Ling Wei <weix.ling@intel.com>\n---\n tests/TestSuite_vm2vm_virtio_net_perf.py | 79 ++++++++++++++----------\n 1 file changed, 47 insertions(+), 32 deletions(-)",
    "diff": "diff --git a/tests/TestSuite_vm2vm_virtio_net_perf.py b/tests/TestSuite_vm2vm_virtio_net_perf.py\nindex 89dad610..83035b07 100644\n--- a/tests/TestSuite_vm2vm_virtio_net_perf.py\n+++ b/tests/TestSuite_vm2vm_virtio_net_perf.py\n@@ -49,8 +49,10 @@ from pmd_output import PmdOutput\n \n class TestVM2VMVirtioNetPerf(TestCase):\n     def set_up_all(self):\n+        self.dut_ports = self.dut.get_ports()\n+        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])\n         core_config = \"1S/5C/1T\"\n-        self.cores_list = self.dut.get_core_list(core_config, socket=0)\n+        self.cores_list = self.dut.get_core_list(core_config, socket=self.ports_socket)\n         self.verify(len(self.cores_list) >= 4,\n                     \"There has not enough cores to test this suite %s\" %\n                     self.suite_name)\n@@ -86,6 +88,9 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         \"\"\"\n         get all cbdma ports\n         \"\"\"\n+        # check driver name in execution.cfg\n+        self.verify(self.drivername == 'igb_uio',\n+                    \"CBDMA test case only use igb_uio driver, need config drivername=igb_uio in execution.cfg\")\n         str_info = 'Misc (rawdev) devices using kernel driver'\n         out = self.dut.send_expect('./usertools/dpdk-devbind.py --status-dev misc', '# ', 30)\n         device_info = out.split('\\n')\n@@ -117,8 +122,7 @@ class TestVM2VMVirtioNetPerf(TestCase):\n             dmas_info += dmas\n         self.dmas_info = dmas_info[:-1]\n         self.device_str = ' '.join(used_cbdma)\n-        self.dut.setup_modules(self.target, \"igb_uio\",\"None\")\n-        self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s' % (\"igb_uio\", self.device_str), '# ', 60)\n+        self.dut.send_expect('./usertools/dpdk-devbind.py --force --bind=%s %s' % (self.drivername, self.device_str), '# ', 60)\n \n     def bind_cbdma_device_to_kernel(self):\n         if self.device_str is not None:\n@@ -155,6 
+159,7 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         params = \" -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d\" % (nb_cores, used_queues, used_queues)\n         self.command_line = testcmd + eal_params + vdev1 + vdev2 + params\n         self.pmd_vhost.execute_cmd(self.command_line, timeout=30)\n+        self.pmd_vhost.execute_cmd('vhost enable tx all', timeout=30)\n         self.pmd_vhost.execute_cmd('start', timeout=30)\n \n     def start_vms(self, path_mode, server_mode=False, opt_queue=1):\n@@ -217,11 +222,13 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         self.vm_dut[0].send_expect(\"arp -s %s %s\" % (self.virtio_ip2, self.virtio_mac2), \"#\", 10)\n         self.vm_dut[1].send_expect(\"arp -s %s %s\" % (self.virtio_ip1, self.virtio_mac1), \"#\", 10)\n \n-    def prepare_test_env(self, path_mode, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, server_mode=False, opt_queue=1, combined=False, used_queues=1):\n+    def prepare_test_env(self, path_mode, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,\n+                         server_mode=False, opt_queue=1, combined=False, used_queues=1):\n         \"\"\"\n         start vhost testpmd and qemu, and config the vm env\n         \"\"\"\n-        self.start_vhost_testpmd(cbdma=cbdma, no_pci=no_pci, client_mode=client_mode, enable_queues=enable_queues, nb_cores=nb_cores, used_queues=used_queues)\n+        self.start_vhost_testpmd(cbdma=cbdma, no_pci=no_pci, client_mode=client_mode, enable_queues=enable_queues,\n+                                 nb_cores=nb_cores, used_queues=used_queues)\n         self.start_vms(path_mode=path_mode, server_mode=server_mode, opt_queue=opt_queue)\n         self.config_vm_env(combined=combined, used_queues=used_queues)\n \n@@ -232,12 +239,13 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         # clear the port xstats before iperf\n         self.vhost.send_expect(\"clear port xstats all\", \"testpmd> \", 10)\n 
\n+        # add -f g param, use Gbits/sec report teste result\n         if iperf_mode == \"tso\":\n-            iperf_server = \"iperf -s -i 1\"\n-            iperf_client = \"iperf -c 1.1.1.2 -i 1 -t 60\"\n+            iperf_server = \"iperf -f g -s -i 1\"\n+            iperf_client = \"iperf -f g -c 1.1.1.2 -i 1 -t 60\"\n         else:\n-            iperf_server = \"iperf -s -u -i 1\"\n-            iperf_client = \"iperf -c 1.1.1.2 -i 1 -t 30 -P 4 -u -b 1G -l 9000\"\n+            iperf_server = \"iperf -f g -s -u -i 1\"\n+            iperf_client = \"iperf -f g -c 1.1.1.2 -i 1 -t 30 -P 4 -u -b 1G -l 9000\"\n         self.vm_dut[0].send_expect(\"%s > iperf_server.log &\" % iperf_server, \"\", 10)\n         self.vm_dut[1].send_expect(\"%s > iperf_client.log &\" % iperf_client, \"\", 60)\n         time.sleep(90)\n@@ -349,7 +357,8 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         \"\"\"\n         TestCase1: VM2VM split ring vhost-user/virtio-net test with tcp traffic\n         \"\"\"\n-        self.prepare_test_env(path_mode=1, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, server_mode=False, opt_queue=1, combined=False, used_queues=1)\n+        self.prepare_test_env(path_mode=1, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,\n+                              server_mode=False, opt_queue=1, combined=False, used_queues=1)\n         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n \n     def test_vm2vm_split_ring_with_tso_and_cbdma_enable(self):\n@@ -357,7 +366,8 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         TestCase2: VM2VM split ring vhost-user/virtio-net CBDMA enable test with tcp traffic\n         \"\"\"\n         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n-        self.prepare_test_env(path_mode=1, cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2, server_mode=False, opt_queue=1, combined=False, used_queues=1)\n+        
self.prepare_test_env(path_mode=1, cbdma=True, no_pci=False, client_mode=False, enable_queues=1, nb_cores=2,\n+                              server_mode=False, opt_queue=1, combined=False, used_queues=1)\n         cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n         expect_value = self.get_suite_cfg()['expected_throughput'][self.running_case]\n         self.verify(cbdma_value > expect_value, \"CBDMA enable performance: %s is lower than CBDMA disable: %s.\" %(cbdma_value, expect_value))\n@@ -366,21 +376,24 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         \"\"\"\n         TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic\n         \"\"\"\n-        self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, server_mode=False, opt_queue=1, combined=False, used_queues=1)\n+        self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,\n+                              server_mode=False, opt_queue=1, combined=False, used_queues=1)\n         self.start_iperf_and_verify_vhost_xstats_info()\n \n     def test_vm2vm_split_ring_iperf_with_ufo(self):\n         \"\"\"\n         TestCase3: VM2VM split ring vhost-user/virtio-net test with udp traffic\n         \"\"\"\n-        self.prepare_test_env(path_mode=2, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=1, server_mode=False, opt_queue=1, combined=False, used_queues=1)\n+        self.prepare_test_env(path_mode=2, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=1,\n+                              server_mode=False, opt_queue=1, combined=False, used_queues=1)\n         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='ufo')\n \n     def test_vm2vm_packed_ring_iperf_with_ufo(self):\n         \"\"\"\n         TestCase8: VM2VM packed ring vhost-user/virtio-net test with udp traffic\n         \"\"\"\n-        
self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2, server_mode=False, opt_queue=1, combined=False, used_queues=1)\n+        self.prepare_test_env(path_mode=4, cbdma=False, no_pci=True, client_mode=False, enable_queues=1, nb_cores=2,\n+                              server_mode=False, opt_queue=1, combined=False, used_queues=1)\n         self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='other')\n \n     def test_vm2vm_split_ring_device_capbility(self):\n@@ -410,27 +423,28 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n \n         self.logger.info(\"Launch vhost-testpmd with CBDMA and used 8 queue\")\n-        self.prepare_test_env(path_mode=5, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, server_mode=True, opt_queue=8, combined=True, used_queues=8)\n-        self.check_scp_file_valid_between_vms(file_size=1024)\n+        self.prepare_test_env(path_mode=5, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,\n+                              server_mode=True, opt_queue=8, combined=True, used_queues=8)\n+        self.check_scp_file_valid_between_vms(file_size=1)\n         iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n-        ipef_result.append(['Eabled', 'mergeable path', 8, iperf_data_cbdma_enable_8_queue])\n+        ipef_result.append(['Enable', 'mergeable path', 8, iperf_data_cbdma_enable_8_queue])\n \n         self.logger.info(\"Re-launch without CBDMA and used 8 queue\")\n         self.vhost.send_expect(\"quit\", \"# \", 30)\n         self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=8)\n-        self.check_scp_file_valid_between_vms(file_size=1024)\n+        self.check_scp_file_valid_between_vms(file_size=1)\n         iperf_data_cbdma_disable_8_queue = 
self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n-        ipef_result.append(['Disabled','mergeable path', 8, iperf_data_cbdma_disable_8_queue])\n+        ipef_result.append(['Disable','mergeable path', 8, iperf_data_cbdma_disable_8_queue])\n \n         self.logger.info(\"Re-launch without CBDMA and used 1 queue\")\n         self.vhost.send_expect(\"quit\", \"# \", 30)\n         self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=1)\n         self.config_vm_env(combined=True, used_queues=1)\n-        self.check_scp_file_valid_between_vms(file_size=1024)\n+        self.check_scp_file_valid_between_vms(file_size=1)\n         iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n-        ipef_result.append(['Disabled', 'mergeable path', 1, iperf_data_cbdma_disable_1_queue])\n+        ipef_result.append(['Disable', 'mergeable path', 1, iperf_data_cbdma_disable_1_queue])\n \n-        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', '[M|G]bits/sec']\n+        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', 'Gbits/sec']\n         self.result_table_create(self.table_header)\n         for table_row in ipef_result:\n             self.result_table_add(table_row)\n@@ -448,17 +462,18 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n \n         self.logger.info(\"Launch vhost-testpmd with CBDMA and used 8 queue\")\n-        self.prepare_test_env(path_mode=6, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, server_mode=True, opt_queue=8, combined=True, used_queues=8)\n+        self.prepare_test_env(path_mode=6, cbdma=True, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4,\n+                              server_mode=True, opt_queue=8, combined=True, used_queues=8)\n         
self.check_scp_file_valid_between_vms(file_size=1024)\n         iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n-        ipef_result.append(['Eabled', 'no-mergeable path', 8, iperf_data_cbdma_enable_8_queue])\n+        ipef_result.append(['Enable', 'no-mergeable path', 8, iperf_data_cbdma_enable_8_queue])\n \n         self.logger.info(\"Re-launch without CBDMA and used 8 queue\")\n         self.vhost.send_expect(\"quit\", \"# \", 30)\n         self.start_vhost_testpmd(cbdma=False, no_pci=False, client_mode=True, enable_queues=8, nb_cores=4, used_queues=8)\n         self.check_scp_file_valid_between_vms(file_size=1024)\n         iperf_data_cbdma_disable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n-        ipef_result.append(['Disabled','no-mergeable path', 8, iperf_data_cbdma_disable_8_queue])\n+        ipef_result.append(['Disable','no-mergeable path', 8, iperf_data_cbdma_disable_8_queue])\n \n         self.logger.info(\"Re-launch without CBDMA and used 1 queue\")\n         self.vhost.send_expect(\"quit\", \"# \", 30)\n@@ -466,9 +481,9 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         self.config_vm_env(combined=True, used_queues=1)\n         self.check_scp_file_valid_between_vms(file_size=1024)\n         iperf_data_cbdma_disable_1_queue = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode='tso')\n-        ipef_result.append(['Disabled','no-mergeable path', 1, iperf_data_cbdma_disable_1_queue])\n+        ipef_result.append(['Disable','no-mergeable path', 1, iperf_data_cbdma_disable_1_queue])\n \n-        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', '[M|G]bits/sec']\n+        self.table_header = ['CBDMA Enable/Disable', 'Mode', 'rxq/txq', 'Gbits/sec']\n         self.result_table_create(self.table_header)\n         for table_row in ipef_result:\n             self.result_table_add(table_row)\n@@ -481,25 +496,25 @@ class TestVM2VMVirtioNetPerf(TestCase):\n      
   \"\"\"\n         TestCase10: VM2VM packed ring virtio-net mergeable with large packet payload valid check\n         \"\"\"\n-        self.prepare_test_env(path_mode=10, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4, server_mode=True, opt_queue=8, combined=True, used_queues=8)\n+        self.prepare_test_env(path_mode=10, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4,\n+                              server_mode=True, opt_queue=8, combined=True, used_queues=8)\n         self.check_scp_file_valid_between_vms(file_size=1024)\n \n     def test_vm2vm_packed_ring_no_mergeable_path_check_large_packet(self):\n         \"\"\"\n         TestCase11: VM2VM packed ring virtio-net non-mergeable with large packet payload valid check\n         \"\"\"\n-        self.prepare_test_env(path_mode=11, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4, server_mode=True, opt_queue=8, combined=True, used_queues=8)\n+        self.prepare_test_env(path_mode=11, cbdma=False, no_pci=True, client_mode=True, enable_queues=8, nb_cores=4,\n+                              server_mode=True, opt_queue=8, combined=True, used_queues=8)\n         self.check_scp_file_valid_between_vms(file_size=1024)\n \n     def tear_down(self):\n         \"\"\"\n         run after each test case.\n         \"\"\"\n-        if \"cbdma_enable\" in self.running_case:\n-            self.bind_cbdma_device_to_kernel()\n         self.stop_all_apps()\n         self.dut.kill_all()\n-        time.sleep(2)\n+        self.bind_cbdma_device_to_kernel()\n \n     def tear_down_all(self):\n         \"\"\"\n",
    "prefixes": [
        "V1",
        "8/9"
    ]
}