get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/89066/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 89066,
    "url": "https://patches.dpdk.org/api/patches/89066/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210314095427.10101-2-wisamm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210314095427.10101-2-wisamm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210314095427.10101-2-wisamm@nvidia.com",
    "date": "2021-03-14T09:54:21",
    "name": "[v4,1/7] app/flow-perf: start using more generic wrapper for cycles",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8e2d3c24316a2a2f7fe4b735d06b63e94cd4e66e",
    "submitter": {
        "id": 1963,
        "url": "https://patches.dpdk.org/api/people/1963/?format=api",
        "name": "Wisam Monther",
        "email": "wisamm@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210314095427.10101-2-wisamm@nvidia.com/mbox/",
    "series": [
        {
            "id": 15641,
            "url": "https://patches.dpdk.org/api/series/15641/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=15641",
            "date": "2021-03-14T09:54:21",
            "name": "[v4,1/7] app/flow-perf: start using more generic wrapper for cycles",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/15641/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/89066/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/89066/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 52F8BA0524;\n\tSun, 14 Mar 2021 10:54:54 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id DF48C160844;\n\tSun, 14 Mar 2021 10:54:47 +0100 (CET)",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12on2078.outbound.protection.outlook.com [40.107.244.78])\n by mails.dpdk.org (Postfix) with ESMTP id 46C62160842\n for <dev@dpdk.org>; Sun, 14 Mar 2021 10:54:46 +0100 (CET)",
            "from BN8PR04CA0027.namprd04.prod.outlook.com (2603:10b6:408:70::40)\n by BL0PR12MB4866.namprd12.prod.outlook.com (2603:10b6:208:1cf::18)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.3933.31; Sun, 14 Mar\n 2021 09:54:44 +0000",
            "from BN8NAM11FT048.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:70:cafe::f7) by BN8PR04CA0027.outlook.office365.com\n (2603:10b6:408:70::40) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.3933.32 via Frontend\n Transport; Sun, 14 Mar 2021 09:54:43 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT048.mail.protection.outlook.com (10.13.177.117) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.3933.31 via Frontend Transport; Sun, 14 Mar 2021 09:54:43 +0000",
            "from nvidia.com (172.20.145.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Sun, 14 Mar\n 2021 09:54:41 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=YjG2w2HBOJ3FuREM8OaGUXzy7wtZdHqLEiVd6Wr2VeiDun29TeIV88C8iAyCE2g30al2CJDIpVL+dFv5rBU0UKi+FGgPYbg7bvVAmpQJ2d2vgsWt02YbP9f9gN4Ms46LSD7LNz4BfQcv2hK5xFpQm1qol+TIOF75q+hda8z/whs++26qqI/4v1cLTfrMBV9uTdxW85ztVg8MdFyWWFvoQDfT9eO36b4xkYbVabgBlMpU+INAsIzDknT7IZ45hdtEtaKlH18UzJGB7XqNELTIzGSn93Be25VqOLL/n40/lc2y7Qf+4Tq0dRkJ2CZOINZyRz/m3j3xmQJY7c5z0L4C4Q==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=3WMCO2qQIWfSwcuTODuizyg4GU5OxCnCsdpVkAgZZ7I=;\n b=aN3lB/Hk0ox8yLNu2JkD2xHtayv+hpnxnGHi3T2slt7xxD+qiaureWMaM95CziQcuI3a+tmgUcOqqAaomLVkYMRenlIfOnJENvOVtfH9FPHeJjNnw9r/g6BfvrJWZePeeNFWEI3GEyyPkfRtBnTzEsNKTaTscLccfi7SCgNrsBT1Ehl4ia/OKQntPDPmg7gjxNqPrd75QGMJW0CjGmBSYDJAOequdl8kAPG+Z7Ve+1V7sjhzo9DLDBSfCl8liflLVZsnbpvfH5KruMJzRBWB4i6ppkhBkU3/UvpAPWd/HxzEEvbVH/cr9nv6kRmhbJ3sxEBVVhpHaSNE5nWJNrgSjA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=3WMCO2qQIWfSwcuTODuizyg4GU5OxCnCsdpVkAgZZ7I=;\n b=ohA76wM+79c6ABoA4u9R6Mu8dy75RfhG5zLIEJ4Q1bRDVWhSV/dEjvXhLpL3q2NGHnDzQFIG67uA7LYQKg55octkAmhjwNGncZj9QPijaUbNk0g+ld1rrc0vUKwvMsScwyEEyZsU4LceLUjIvUgt2ZMEZAXgOW7mnpSEnSSliF7JoU4eVLMlgst8adE8jcE1IguBdWl+ufwbbFqT95ryBlgSLFEsREVmfCjOwNZBsQ+MOJRsm6OOB5mBfTU/C4X3lhlUwTWy6i/imWUnQkcU4WiKzEqWmJlxsvdpIqz56lk9cW870ejiQd0TZNjUfHlVeGQTanjPIb/CJxqb0MfIwA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Wisam Jaddo <wisamm@nvidia.com>",
        "To": "<arybchenko@solarflare.com>, <thomas@monjalon.net>, <akozyrev@nvidia.com>,\n <rasland@nvidia.com>, <dev@dpdk.org>",
        "Date": "Sun, 14 Mar 2021 11:54:21 +0200",
        "Message-ID": "<20210314095427.10101-2-wisamm@nvidia.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210314095427.10101-1-wisamm@nvidia.com>",
        "References": "<20210310135546.8680-2-wisamm@nvidia.com>\n <20210314095427.10101-1-wisamm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.145.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "c80706a2-63d4-48f3-99ae-08d8e6cf31af",
        "X-MS-TrafficTypeDiagnostic": "BL0PR12MB4866:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <BL0PR12MB486690C7AAD0B7570E970E43A46D9@BL0PR12MB4866.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:1051;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n WHEtUU6as3TOipCZMYdrzEHb+yuRF6J3vU3OxS0S84h06PMwwCEesLdPG7hXEz6PFayztFIIXw82HLZ1gN7ZzoOgXiQ7vO+8Y2H5pE4qr0tdoKNEa5Gqu9C88VMZggNhm7Vh6zJuPgsM1ivj9QeWc+0y33PfWqYVaae4Ij7JApKUsn4QVHUuhw57cY8BJHZGpB+BoddjqqURK7WgUvSuPkSAEVJGg33mBcNFa13HeS/QkIpzaMogYMTTRxGvsPKw46R8hlUz7zNe2Lg9fkCuX2vsig1Uuo3EzOfqIOSfbAO6kDbcwRGkmzF15wrKhPuOaTPiypJFkAdvp8PUiM/L1qyc1biy/La1o0kvMdCNLYbCYZ9ALyjX9fAlTscThzb3ZuZVHDnIB2QIqunhqjY0ndhSpNfvWXuiKbmOnvs05rQi+C3lC3BS6B5GpyWxhYcVnIZmLLeD/BlZWmI6lWw6KHAIsyJ0OR9ohtl+rAnpqhO3LLVM4lBNKuh0tI0DEIzm3RqkBzH8LLKtvkmoAlbLkSa8qEzSjxIldDL/LDRXwCm0n18iaV7oPkBdt1E8mp+MvCAvG001Q9j24X9QkkNcH0Gy2QR2bAbH3i1WCIRNj/Ok0kkIhsZdPF0kZau/StQ+SSk46jY9nC7OTPKSNyQDjAkofEGaJlu4v6ZYbrX2ipKVgPUholUK2hlmXQnuVaV9fqcJ3Bs6ujzGzDwNlZmKzg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(376002)(39860400002)(136003)(346002)(396003)(46966006)(36840700001)(36756003)(356005)(2906002)(8676002)(82740400003)(8936002)(47076005)(55016002)(86362001)(7636003)(6666004)(36860700001)(36906005)(70586007)(110136005)(2616005)(316002)(336012)(186003)(1076003)(82310400003)(34020700004)(5660300002)(478600001)(426003)(16526019)(83380400001)(6286002)(26005)(7696005)(70206006)(41533002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "14 Mar 2021 09:54:43.7546 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n c80706a2-63d4-48f3-99ae-08d8e6cf31af",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT048.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BL0PR12MB4866",
        "Subject": "[dpdk-dev] [PATCH v4 1/7] app/flow-perf: start using more generic\n wrapper for cycles",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "rdtsc() is x86 related, while this might fail for other archs,\nso it's better to use more generic API for cycles measurement.\n\nSigned-off-by: Wisam Jaddo <wisamm@nvidia.com>\nAcked-by: Alexander Kozyrev <akozyrev@nvidia.com>\n---\n app/test-flow-perf/main.c | 24 ++++++++++++------------\n 1 file changed, 12 insertions(+), 12 deletions(-)",
    "diff": "diff --git a/app/test-flow-perf/main.c b/app/test-flow-perf/main.c\nindex 99d0463456..8b5a11c15e 100644\n--- a/app/test-flow-perf/main.c\n+++ b/app/test-flow-perf/main.c\n@@ -969,7 +969,7 @@ meters_handler(int port_id, uint8_t core_id, uint8_t ops)\n \tend_counter = (core_id + 1) * rules_count_per_core;\n \n \tcpu_time_used = 0;\n-\tstart_batch = rte_rdtsc();\n+\tstart_batch = rte_get_timer_cycles();\n \tfor (counter = start_counter; counter < end_counter; counter++) {\n \t\tif (ops == METER_CREATE)\n \t\t\tcreate_meter_rule(port_id, counter);\n@@ -984,10 +984,10 @@ meters_handler(int port_id, uint8_t core_id, uint8_t ops)\n \t\tif (!((counter + 1) % rules_batch)) {\n \t\t\trules_batch_idx = ((counter + 1) / rules_batch) - 1;\n \t\t\tcpu_time_per_batch[rules_batch_idx] =\n-\t\t\t\t((double)(rte_rdtsc() - start_batch))\n-\t\t\t\t/ rte_get_tsc_hz();\n+\t\t\t\t((double)(rte_get_timer_cycles() - start_batch))\n+\t\t\t\t/ rte_get_timer_hz();\n \t\t\tcpu_time_used += cpu_time_per_batch[rules_batch_idx];\n-\t\t\tstart_batch = rte_rdtsc();\n+\t\t\tstart_batch = rte_get_timer_cycles();\n \t\t}\n \t}\n \n@@ -1089,7 +1089,7 @@ destroy_flows(int port_id, uint8_t core_id, struct rte_flow **flows_list)\n \tif (flow_group > 0 && core_id == 0)\n \t\trules_count_per_core++;\n \n-\tstart_batch = rte_rdtsc();\n+\tstart_batch = rte_get_timer_cycles();\n \tfor (i = 0; i < (uint32_t) rules_count_per_core; i++) {\n \t\tif (flows_list[i] == 0)\n \t\t\tbreak;\n@@ -1107,12 +1107,12 @@ destroy_flows(int port_id, uint8_t core_id, struct rte_flow **flows_list)\n \t\t * for this batch.\n \t\t */\n \t\tif (!((i + 1) % rules_batch)) {\n-\t\t\tend_batch = rte_rdtsc();\n+\t\t\tend_batch = rte_get_timer_cycles();\n \t\t\tdelta = (double) (end_batch - start_batch);\n \t\t\trules_batch_idx = ((i + 1) / rules_batch) - 1;\n-\t\t\tcpu_time_per_batch[rules_batch_idx] = delta / rte_get_tsc_hz();\n+\t\t\tcpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();\n \t\t\tcpu_time_used += 
cpu_time_per_batch[rules_batch_idx];\n-\t\t\tstart_batch = rte_rdtsc();\n+\t\t\tstart_batch = rte_get_timer_cycles();\n \t\t}\n \t}\n \n@@ -1185,7 +1185,7 @@ insert_flows(int port_id, uint8_t core_id)\n \t\tflows_list[flow_index++] = flow;\n \t}\n \n-\tstart_batch = rte_rdtsc();\n+\tstart_batch = rte_get_timer_cycles();\n \tfor (counter = start_counter; counter < end_counter; counter++) {\n \t\tflow = generate_flow(port_id, flow_group,\n \t\t\tflow_attrs, flow_items, flow_actions,\n@@ -1211,12 +1211,12 @@ insert_flows(int port_id, uint8_t core_id)\n \t\t * for this batch.\n \t\t */\n \t\tif (!((counter + 1) % rules_batch)) {\n-\t\t\tend_batch = rte_rdtsc();\n+\t\t\tend_batch = rte_get_timer_cycles();\n \t\t\tdelta = (double) (end_batch - start_batch);\n \t\t\trules_batch_idx = ((counter + 1) / rules_batch) - 1;\n-\t\t\tcpu_time_per_batch[rules_batch_idx] = delta / rte_get_tsc_hz();\n+\t\t\tcpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();\n \t\t\tcpu_time_used += cpu_time_per_batch[rules_batch_idx];\n-\t\t\tstart_batch = rte_rdtsc();\n+\t\t\tstart_batch = rte_get_timer_cycles();\n \t\t}\n \t}\n \n",
    "prefixes": [
        "v4",
        "1/7"
    ]
}