get:
Show a patch.

patch:
Update a patch (partial update).

put:
Update a patch (full update).
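
The read-only GET shown below can be reproduced with any HTTP client; the sketch that follows is purely illustrative and assumes libcurl (the library choice is not part of the Patchwork API), with the URL taken verbatim from the exchange below.

    /* fetch_patch.c: GET one patch from the Patchwork REST API (illustrative only) */
    #include <stdio.h>
    #include <curl/curl.h>

    int main(void)
    {
        CURL *curl;
        CURLcode res;

        curl_global_init(CURL_GLOBAL_DEFAULT);
        curl = curl_easy_init();
        if (curl == NULL)
            return 1;

        /* same resource as the GET request shown below */
        curl_easy_setopt(curl, CURLOPT_URL,
                "http://patches.dpdk.org/api/patches/81952/?format=api");
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

        /* the default write callback prints the response body to stdout */
        res = curl_easy_perform(curl);
        if (res != CURLE_OK)
            fprintf(stderr, "request failed: %s\n", curl_easy_strerror(res));

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return res == CURLE_OK ? 0 : 1;
    }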

GET /api/patches/81952/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 81952,
    "url": "http://patches.dpdk.org/api/patches/81952/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1603473615-11935-1-git-send-email-liang.j.ma@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1603473615-11935-1-git-send-email-liang.j.ma@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1603473615-11935-1-git-send-email-liang.j.ma@intel.com",
    "date": "2020-10-23T17:20:15",
    "name": "[v8,02/10] eal: add power management intrinsics",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "327a6a5c9829be764767ff15901eb856deee2e31",
    "submitter": {
        "id": 904,
        "url": "http://patches.dpdk.org/api/people/904/?format=api",
        "name": "Liang, Ma",
        "email": "liang.j.ma@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1603473615-11935-1-git-send-email-liang.j.ma@intel.com/mbox/",
    "series": [
        {
            "id": 13274,
            "url": "http://patches.dpdk.org/api/series/13274/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13274",
            "date": "2020-10-23T17:20:15",
            "name": null,
            "version": 8,
            "mbox": "http://patches.dpdk.org/series/13274/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/81952/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/81952/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 002D1A04DD;\n\tFri, 23 Oct 2020 19:20:24 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id B1DE35AAE;\n\tFri, 23 Oct 2020 19:20:23 +0200 (CEST)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by dpdk.org (Postfix) with ESMTP id 208FD5AA4\n for <dev@dpdk.org>; Fri, 23 Oct 2020 19:20:20 +0200 (CEST)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 23 Oct 2020 10:20:20 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n by orsmga008.jf.intel.com with ESMTP; 23 Oct 2020 10:20:17 -0700",
            "from sivswdev09.ir.intel.com (sivswdev09.ir.intel.com\n [10.237.217.48])\n by irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n 09NHKHsu013921; Fri, 23 Oct 2020 18:20:17 +0100",
            "from sivswdev09.ir.intel.com (localhost [127.0.0.1])\n by sivswdev09.ir.intel.com with ESMTP id 09NHKGPq011992;\n Fri, 23 Oct 2020 18:20:16 +0100",
            "(from lma25@localhost)\n by sivswdev09.ir.intel.com with LOCAL id 09NHKGYv011984;\n Fri, 23 Oct 2020 18:20:16 +0100"
        ],
        "IronPort-SDR": [
            "\n yOLz8yQd2fM+RROmEtjMzfcOZcTPi2RajVfvOQowsqGtW/zr/mWm6VokR+Fl0x7VTnRlfN2mDW\n MDBliuXkB9iA==",
            "\n fmuOVDaNfm5sQipnwOirG//nNKAVt3qFBPVN31hlHExLgnsumaxFTfdbU7bG+reNioCuT+0kJL\n 5I+zf+42Qzag=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9782\"; a=\"155491942\"",
            "E=Sophos;i=\"5.77,409,1596524400\"; d=\"scan'208\";a=\"155491942\"",
            "E=Sophos;i=\"5.77,409,1596524400\"; d=\"scan'208\";a=\"349262479\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Liang Ma <liang.j.ma@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "anatoly.burakov@intel.com, viktorin@rehivetech.com, ruifeng.wang@arm.com,\n bruce.richardson@intel.com, konstantin.ananyev@intel.com,\n david.hunt@intel.com, jerinjacobk@gmail.com, thomas@monjalon.net,\n timothy.mcdaniel@intel.com, gage.eads@intel.com,\n drc@linux.vnet.ibm.com, Liang Ma <liang.j.ma@intel.com>",
        "Date": "Fri, 23 Oct 2020 18:20:15 +0100",
        "Message-Id": "<1603473615-11935-1-git-send-email-liang.j.ma@intel.com>",
        "X-Mailer": "git-send-email 1.7.7.4",
        "In-Reply-To": "\n <da3fe4e5bbe976882c56a71d20a9055c1600e6aa.1602763439.git.anatoly.burakov@intel.com>",
        "References": "\n <da3fe4e5bbe976882c56a71d20a9055c1600e6aa.1602763439.git.anatoly.burakov@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v8 02/10] eal: add power management intrinsics",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add two new power management intrinsics, and provide an implementation\nin eal/x86 based on UMONITOR/UMWAIT instructions. The instructions\nare implemented as raw byte opcodes because there is not yet widespread\ncompiler support for these instructions.\n\nThe power management instructions provide an architecture-specific\nfunction to either wait until a specified TSC timestamp is reached, or\noptionally wait until either a TSC timestamp is reached or a memory\nlocation is written to. The monitor function also provides an optional\ncomparison, to avoid sleeping when the expected write has already\nhappened, and no more writes are expected.\n\nFor more details, please refer to Intel(R) 64 and IA-32 Architectures\nSoftware Developer's Manual, Volume 2.\n\nSigned-off-by: Liang Ma <liang.j.ma@intel.com>\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\nAcked-by: David Christensen <drc@linux.vnet.ibm.com>\nAcked-by: Jerin Jacob <jerinj@marvell.com>\nAcked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\nAcked-by: Ruifeng Wang <ruifeng.wang@arm.com>\n---\n lib/librte_eal/arm/include/meson.build        |   1 +\n .../arm/include/rte_power_intrinsics.h        |  60 ++++++++\n .../include/generic/rte_power_intrinsics.h    | 111 ++++++++++++++\n lib/librte_eal/include/meson.build            |   1 +\n lib/librte_eal/ppc/include/meson.build        |   1 +\n .../ppc/include/rte_power_intrinsics.h        |  60 ++++++++\n lib/librte_eal/x86/include/meson.build        |   1 +\n .../x86/include/rte_power_intrinsics.h        | 135 ++++++++++++++++++\n 8 files changed, 370 insertions(+)\n create mode 100644 lib/librte_eal/arm/include/rte_power_intrinsics.h\n create mode 100644 lib/librte_eal/include/generic/rte_power_intrinsics.h\n create mode 100644 lib/librte_eal/ppc/include/rte_power_intrinsics.h\n create mode 100644 lib/librte_eal/x86/include/rte_power_intrinsics.h",
    "diff": "diff --git a/lib/librte_eal/arm/include/meson.build b/lib/librte_eal/arm/include/meson.build\nindex 73b750a18f..c6a9f70d73 100644\n--- a/lib/librte_eal/arm/include/meson.build\n+++ b/lib/librte_eal/arm/include/meson.build\n@@ -20,6 +20,7 @@ arch_headers = files(\n \t'rte_pause_32.h',\n \t'rte_pause_64.h',\n \t'rte_pause.h',\n+\t'rte_power_intrinsics.h',\n \t'rte_prefetch_32.h',\n \t'rte_prefetch_64.h',\n \t'rte_prefetch.h',\ndiff --git a/lib/librte_eal/arm/include/rte_power_intrinsics.h b/lib/librte_eal/arm/include/rte_power_intrinsics.h\nnew file mode 100644\nindex 0000000000..a4a1bc1159\n--- /dev/null\n+++ b/lib/librte_eal/arm/include/rte_power_intrinsics.h\n@@ -0,0 +1,60 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#ifndef _RTE_POWER_INTRINSIC_ARM_H_\n+#define _RTE_POWER_INTRINSIC_ARM_H_\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <rte_common.h>\n+\n+#include \"generic/rte_power_intrinsics.h\"\n+\n+/**\n+ * This function is not supported on ARM.\n+ */\n+static inline void\n+rte_power_monitor(const volatile void *p, const uint64_t expected_value,\n+\t\tconst uint64_t value_mask, const uint64_t tsc_timestamp,\n+\t\tconst uint8_t data_sz)\n+{\n+\tRTE_SET_USED(p);\n+\tRTE_SET_USED(expected_value);\n+\tRTE_SET_USED(value_mask);\n+\tRTE_SET_USED(tsc_timestamp);\n+\tRTE_SET_USED(data_sz);\n+}\n+\n+/**\n+ * This function is not supported on ARM.\n+ */\n+static inline void\n+rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,\n+\t\tconst uint64_t value_mask, const uint64_t tsc_timestamp,\n+\t\tconst uint8_t data_sz, rte_spinlock_t *lck)\n+{\n+\tRTE_SET_USED(p);\n+\tRTE_SET_USED(expected_value);\n+\tRTE_SET_USED(value_mask);\n+\tRTE_SET_USED(tsc_timestamp);\n+\tRTE_SET_USED(lck);\n+\tRTE_SET_USED(data_sz);\n+}\n+\n+/**\n+ * This function is not supported on ARM.\n+ */\n+static inline void\n+rte_power_pause(const uint64_t tsc_timestamp)\n+{\n+\tRTE_SET_USED(tsc_timestamp);\n+}\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_POWER_INTRINSIC_ARM_H_ */\ndiff --git a/lib/librte_eal/include/generic/rte_power_intrinsics.h b/lib/librte_eal/include/generic/rte_power_intrinsics.h\nnew file mode 100644\nindex 0000000000..fb897d9060\n--- /dev/null\n+++ b/lib/librte_eal/include/generic/rte_power_intrinsics.h\n@@ -0,0 +1,111 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#ifndef _RTE_POWER_INTRINSIC_H_\n+#define _RTE_POWER_INTRINSIC_H_\n+\n+#include <inttypes.h>\n+\n+#include <rte_compat.h>\n+#include <rte_spinlock.h>\n+\n+/**\n+ * @file\n+ * Advanced power management operations.\n+ *\n+ * This file define APIs for advanced power management,\n+ * which are architecture-dependent.\n+ */\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Monitor specific address for changes. This will cause the CPU to enter an\n+ * architecture-defined optimized power state until either the specified\n+ * memory address is written to, a certain TSC timestamp is reached, or other\n+ * reasons cause the CPU to wake up.\n+ *\n+ * Additionally, an `expected` 64-bit value and 64-bit mask are provided. 
If\n+ * mask is non-zero, the current value pointed to by the `p` pointer will be\n+ * checked against the expected value, and if they match, the entering of\n+ * optimized power state may be aborted.\n+ *\n+ * @param p\n+ *   Address to monitor for changes.\n+ * @param expected_value\n+ *   Before attempting the monitoring, the `p` address may be read and compared\n+ *   against this value. If `value_mask` is zero, this step will be skipped.\n+ * @param value_mask\n+ *   The 64-bit mask to use to extract current value from `p`.\n+ * @param tsc_timestamp\n+ *   Maximum TSC timestamp to wait for. Note that the wait behavior is\n+ *   architecture-dependent.\n+ * @param data_sz\n+ *   Data size (in bytes) that will be used to compare expected value with the\n+ *   memory address. Can be 1, 2, 4 or 8. Supplying any other value will lead\n+ *   to undefined result.\n+ */\n+__rte_experimental\n+static inline void rte_power_monitor(const volatile void *p,\n+\t\tconst uint64_t expected_value, const uint64_t value_mask,\n+\t\tconst uint64_t tsc_timestamp, const uint8_t data_sz);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Monitor specific address for changes. This will cause the CPU to enter an\n+ * architecture-defined optimized power state until either the specified\n+ * memory address is written to, a certain TSC timestamp is reached, or other\n+ * reasons cause the CPU to wake up.\n+ *\n+ * Additionally, an `expected` 64-bit value and 64-bit mask are provided. If\n+ * mask is non-zero, the current value pointed to by the `p` pointer will be\n+ * checked against the expected value, and if they match, the entering of\n+ * optimized power state may be aborted.\n+ *\n+ * This call will also lock a spinlock on entering sleep, and release it on\n+ * waking up the CPU.\n+ *\n+ * @param p\n+ *   Address to monitor for changes.\n+ * @param expected_value\n+ *   Before attempting the monitoring, the `p` address may be read and compared\n+ *   against this value. If `value_mask` is zero, this step will be skipped.\n+ * @param value_mask\n+ *   The 64-bit mask to use to extract current value from `p`.\n+ * @param tsc_timestamp\n+ *   Maximum TSC timestamp to wait for. Note that the wait behavior is\n+ *   architecture-dependent.\n+ * @param data_sz\n+ *   Data size (in bytes) that will be used to compare expected value with the\n+ *   memory address. Can be 1, 2, 4 or 8. Supplying any other value will lead\n+ *   to undefined result.\n+ * @param lck\n+ *   A spinlock that must be locked before entering the function, will be\n+ *   unlocked while the CPU is sleeping, and will be locked again once the CPU\n+ *   wakes up.\n+ */\n+__rte_experimental\n+static inline void rte_power_monitor_sync(const volatile void *p,\n+\t\tconst uint64_t expected_value, const uint64_t value_mask,\n+\t\tconst uint64_t tsc_timestamp, const uint8_t data_sz,\n+\t\trte_spinlock_t *lck);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Enter an architecture-defined optimized power state until a certain TSC\n+ * timestamp is reached.\n+ *\n+ * @param tsc_timestamp\n+ *   Maximum TSC timestamp to wait for. 
Note that the wait behavior is\n+ *   architecture-dependent.\n+ */\n+__rte_experimental\n+static inline void rte_power_pause(const uint64_t tsc_timestamp);\n+\n+#endif /* _RTE_POWER_INTRINSIC_H_ */\ndiff --git a/lib/librte_eal/include/meson.build b/lib/librte_eal/include/meson.build\nindex cd09027958..3a12e87e19 100644\n--- a/lib/librte_eal/include/meson.build\n+++ b/lib/librte_eal/include/meson.build\n@@ -60,6 +60,7 @@ generic_headers = files(\n \t'generic/rte_memcpy.h',\n \t'generic/rte_pause.h',\n \t'generic/rte_prefetch.h',\n+\t'generic/rte_power_intrinsics.h',\n \t'generic/rte_rwlock.h',\n \t'generic/rte_spinlock.h',\n \t'generic/rte_ticketlock.h',\ndiff --git a/lib/librte_eal/ppc/include/meson.build b/lib/librte_eal/ppc/include/meson.build\nindex ab4bd28092..0873b2aecb 100644\n--- a/lib/librte_eal/ppc/include/meson.build\n+++ b/lib/librte_eal/ppc/include/meson.build\n@@ -10,6 +10,7 @@ arch_headers = files(\n \t'rte_io.h',\n \t'rte_memcpy.h',\n \t'rte_pause.h',\n+\t'rte_power_intrinsics.h',\n \t'rte_prefetch.h',\n \t'rte_rwlock.h',\n \t'rte_spinlock.h',\ndiff --git a/lib/librte_eal/ppc/include/rte_power_intrinsics.h b/lib/librte_eal/ppc/include/rte_power_intrinsics.h\nnew file mode 100644\nindex 0000000000..4ed03d521f\n--- /dev/null\n+++ b/lib/librte_eal/ppc/include/rte_power_intrinsics.h\n@@ -0,0 +1,60 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#ifndef _RTE_POWER_INTRINSIC_PPC_H_\n+#define _RTE_POWER_INTRINSIC_PPC_H_\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <rte_common.h>\n+\n+#include \"generic/rte_power_intrinsics.h\"\n+\n+/**\n+ * This function is not supported on PPC64.\n+ */\n+static inline void\n+rte_power_monitor(const volatile void *p, const uint64_t expected_value,\n+\t\tconst uint64_t value_mask, const uint64_t tsc_timestamp,\n+\t\tconst uint8_t data_sz)\n+{\n+\tRTE_SET_USED(p);\n+\tRTE_SET_USED(expected_value);\n+\tRTE_SET_USED(value_mask);\n+\tRTE_SET_USED(tsc_timestamp);\n+\tRTE_SET_USED(data_sz);\n+}\n+\n+/**\n+ * This function is not supported on PPC64.\n+ */\n+static inline void\n+rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,\n+\t\tconst uint64_t value_mask, const uint64_t tsc_timestamp,\n+\t\tconst uint8_t data_sz, rte_spinlock_t *lck)\n+{\n+\tRTE_SET_USED(p);\n+\tRTE_SET_USED(expected_value);\n+\tRTE_SET_USED(value_mask);\n+\tRTE_SET_USED(tsc_timestamp);\n+\tRTE_SET_USED(lck);\n+\tRTE_SET_USED(data_sz);\n+}\n+\n+/**\n+ * This function is not supported on PPC64.\n+ */\n+static inline void\n+rte_power_pause(const uint64_t tsc_timestamp)\n+{\n+\tRTE_SET_USED(tsc_timestamp);\n+}\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_POWER_INTRINSIC_PPC_H_ */\ndiff --git a/lib/librte_eal/x86/include/meson.build b/lib/librte_eal/x86/include/meson.build\nindex f0e998c2fe..494a8142a2 100644\n--- a/lib/librte_eal/x86/include/meson.build\n+++ b/lib/librte_eal/x86/include/meson.build\n@@ -13,6 +13,7 @@ arch_headers = files(\n \t'rte_io.h',\n \t'rte_memcpy.h',\n \t'rte_prefetch.h',\n+\t'rte_power_intrinsics.h',\n \t'rte_pause.h',\n \t'rte_rtm.h',\n \t'rte_rwlock.h',\ndiff --git a/lib/librte_eal/x86/include/rte_power_intrinsics.h b/lib/librte_eal/x86/include/rte_power_intrinsics.h\nnew file mode 100644\nindex 0000000000..f9b761d796\n--- /dev/null\n+++ b/lib/librte_eal/x86/include/rte_power_intrinsics.h\n@@ -0,0 +1,135 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#ifndef _RTE_POWER_INTRINSIC_X86_H_\n+#define 
_RTE_POWER_INTRINSIC_X86_H_\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <rte_common.h>\n+\n+#include \"generic/rte_power_intrinsics.h\"\n+\n+static inline uint64_t\n+__get_umwait_val(const volatile void *p, const uint8_t sz)\n+{\n+\tswitch (sz) {\n+\tcase sizeof(uint8_t):\n+\t\treturn *(const volatile uint8_t *)p;\n+\tcase sizeof(uint16_t):\n+\t\treturn *(const volatile uint16_t *)p;\n+\tcase sizeof(uint32_t):\n+\t\treturn *(const volatile uint32_t *)p;\n+\tcase sizeof(uint64_t):\n+\t\treturn *(const volatile uint64_t *)p;\n+\tdefault:\n+\t\t/* this is an intrinsic, so we can't have any error handling */\n+\t\tRTE_ASSERT(0);\n+\t\treturn 0;\n+\t}\n+}\n+\n+/**\n+ * This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.\n+ * For more information about usage of these instructions, please refer to\n+ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual.\n+ */\n+static inline void\n+rte_power_monitor(const volatile void *p, const uint64_t expected_value,\n+\t\tconst uint64_t value_mask, const uint64_t tsc_timestamp,\n+\t\tconst uint8_t data_sz)\n+{\n+\tconst uint32_t tsc_l = (uint32_t)tsc_timestamp;\n+\tconst uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);\n+\t/*\n+\t * we're using raw byte codes for now as only the newest compiler\n+\t * versions support this instruction natively.\n+\t */\n+\n+\t/* set address for UMONITOR */\n+\tasm volatile(\".byte 0xf3, 0x0f, 0xae, 0xf7;\"\n+\t\t\t:\n+\t\t\t: \"D\"(p));\n+\n+\tif (value_mask) {\n+\t\tconst uint64_t cur_value = __get_umwait_val(p, data_sz);\n+\t\tconst uint64_t masked = cur_value & value_mask;\n+\n+\t\t/* if the masked value is already matching, abort */\n+\t\tif (masked == expected_value)\n+\t\t\treturn;\n+\t}\n+\t/* execute UMWAIT */\n+\tasm volatile(\".byte 0xf2, 0x0f, 0xae, 0xf7;\"\n+\t\t\t: /* ignore rflags */\n+\t\t\t: \"D\"(0), /* enter C0.2 */\n+\t\t\t  \"a\"(tsc_l), \"d\"(tsc_h));\n+}\n+\n+/**\n+ * This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.\n+ * For more information about usage of these instructions, please refer to\n+ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual.\n+ */\n+static inline void\n+rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,\n+\t\tconst uint64_t value_mask, const uint64_t tsc_timestamp,\n+\t\tconst uint8_t data_sz, rte_spinlock_t *lck)\n+{\n+\tconst uint32_t tsc_l = (uint32_t)tsc_timestamp;\n+\tconst uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);\n+\t/*\n+\t * we're using raw byte codes for now as only the newest compiler\n+\t * versions support this instruction natively.\n+\t */\n+\n+\t/* set address for UMONITOR */\n+\tasm volatile(\".byte 0xf3, 0x0f, 0xae, 0xf7;\"\n+\t\t\t:\n+\t\t\t: \"D\"(p));\n+\n+\tif (value_mask) {\n+\t\tconst uint64_t cur_value = __get_umwait_val(p, data_sz);\n+\t\tconst uint64_t masked = cur_value & value_mask;\n+\n+\t\t/* if the masked value is already matching, abort */\n+\t\tif (masked == expected_value)\n+\t\t\treturn;\n+\t}\n+\trte_spinlock_unlock(lck);\n+\n+\t/* execute UMWAIT */\n+\tasm volatile(\".byte 0xf2, 0x0f, 0xae, 0xf7;\"\n+\t\t\t: /* ignore rflags */\n+\t\t\t: \"D\"(0), /* enter C0.2 */\n+\t\t\t  \"a\"(tsc_l), \"d\"(tsc_h));\n+\n+\trte_spinlock_lock(lck);\n+}\n+\n+/**\n+ * This function uses TPAUSE instruction  and will enter C0.2 state. 
For more\n+ * information about usage of this instruction, please refer to Intel(R) 64 and\n+ * IA-32 Architectures Software Developer's Manual.\n+ */\n+static inline void\n+rte_power_pause(const uint64_t tsc_timestamp)\n+{\n+\tconst uint32_t tsc_l = (uint32_t)tsc_timestamp;\n+\tconst uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);\n+\n+\t/* execute TPAUSE */\n+\tasm volatile(\".byte 0x66, 0x0f, 0xae, 0xf7;\"\n+\t\t: /* ignore rflags */\n+\t\t: \"D\"(0), /* enter C0.2 */\n+\t\t  \"a\"(tsc_l), \"d\"(tsc_h));\n+}\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_POWER_INTRINSIC_X86_H_ */\n",
    "prefixes": [
        "v8",
        "02/10"
    ]
}
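
The commit message and the generic header in the diff above describe how the intrinsics are meant to be called: optionally compare the monitored address against an expected value under a mask (to skip sleeping when the awaited write already happened), then wait until the address is written or a TSC deadline passes. The fragment below is a minimal usage sketch, not part of the patch: the wakeup_flag variable, the one-millisecond budget and the producer/consumer split are hypothetical, rte_rdtsc()/rte_get_tsc_hz() come from DPDK's <rte_cycles.h>, and the signatures follow this v8 revision of the series (later revisions may differ). On x86 the calls map to UMONITOR/UMWAIT/TPAUSE and need a CPU with those instructions; the ARM and PPC64 stubs in the diff make them no-ops.

    /* illustrative only: park a polling lcore until work arrives or ~1 ms passes */
    #include <stdint.h>
    #include <rte_cycles.h>             /* rte_rdtsc(), rte_get_tsc_hz() */
    #include <rte_power_intrinsics.h>   /* added by this patch */

    static volatile uint16_t wakeup_flag;   /* hypothetical; written by a producer lcore */

    static void
    wait_for_work(void)
    {
        const uint64_t tsc_deadline = rte_rdtsc() + rte_get_tsc_hz() / 1000;

        /*
         * Sleep until wakeup_flag is written, the deadline passes, or another
         * wakeup reason occurs. expected_value/value_mask = 1/1 means: if the
         * low bit is already set, work is pending, so skip sleeping entirely.
         */
        rte_power_monitor(&wakeup_flag, 1 /* expected_value */,
                1 /* value_mask */, tsc_deadline, sizeof(wakeup_flag));
    }

    static void
    throttle(void)
    {
        /* plain timed pause (TPAUSE on x86), no address monitoring */
        rte_power_pause(rte_rdtsc() + rte_get_tsc_hz() / 1000);
    }

The rte_power_monitor_sync() variant takes an additional rte_spinlock_t that the caller must hold on entry; per the generic header above, it is released while the CPU sleeps and re-taken on wake-up.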