get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
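These verbs are plain REST on the patch detail endpoint: an anonymous GET returns the JSON record reproduced below, while PUT and PATCH change fields such as state, archived or delegate and normally require an authenticated account with update rights on the patch. The following minimal libcurl sketch performs the same GET; it is an illustration only (the build line, the error handling and any Authorization header needed for write access are assumptions, not something this page documents).

/* Minimal sketch (assumption: build with `cc fetch_patch.c -lcurl`).
 * Fetches this patch record; libcurl's default write callback prints the
 * JSON body to stdout, a real client would install CURLOPT_WRITEFUNCTION
 * and parse the response instead. */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
	const char *url = "http://patches.dpdk.org/api/patches/61869/";
	CURL *curl;
	CURLcode res;

	curl_global_init(CURL_GLOBAL_DEFAULT);
	curl = curl_easy_init();
	if (curl == NULL)
		return 1;
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
	res = curl_easy_perform(curl);
	if (res != CURLE_OK)
		fprintf(stderr, "GET failed: %s\n", curl_easy_strerror(res));
	curl_easy_cleanup(curl);
	curl_global_cleanup();
	return res == CURLE_OK ? 0 : 1;
}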

GET /api/patches/61869/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 61869,
    "url": "http://patches.dpdk.org/api/patches/61869/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1571913748-51735-3-git-send-email-gavin.hu@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1571913748-51735-3-git-send-email-gavin.hu@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1571913748-51735-3-git-send-email-gavin.hu@arm.com",
    "date": "2019-10-24T10:42:25",
    "name": "[v9,2/5] eal: add the APIs to wait until equal",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a7ea46ce0d8198fe8b9ecb45fafe62f56cdacef3",
    "submitter": {
        "id": 1018,
        "url": "http://patches.dpdk.org/api/people/1018/?format=api",
        "name": "Gavin Hu",
        "email": "gavin.hu@arm.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1571913748-51735-3-git-send-email-gavin.hu@arm.com/mbox/",
    "series": [
        {
            "id": 7037,
            "url": "http://patches.dpdk.org/api/series/7037/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7037",
            "date": "2019-10-24T10:42:23",
            "name": "use WFE for aarch64",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/7037/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/61869/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/61869/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 3BBD61E931;\n\tThu, 24 Oct 2019 12:43:01 +0200 (CEST)",
            "from foss.arm.com (unknown [217.140.110.172])\n\tby dpdk.org (Postfix) with ESMTP id A51341E931\n\tfor <dev@dpdk.org>; Thu, 24 Oct 2019 12:42:58 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n\tby usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id E32F94AC;\n\tThu, 24 Oct 2019 03:42:50 -0700 (PDT)",
            "from net-arm-thunderx2-01.test.ast.arm.com\n\t(net-arm-thunderx2-01.shanghai.arm.com [10.169.40.40])\n\tby usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id\n\t9484A3F71F; Thu, 24 Oct 2019 03:42:47 -0700 (PDT)"
        ],
        "From": "Gavin Hu <gavin.hu@arm.com>",
        "To": "dev@dpdk.org",
        "Cc": "nd@arm.com, david.marchand@redhat.com, konstantin.ananyev@intel.com,\n\tthomas@monjalon.net, stephen@networkplumber.org, hemant.agrawal@nxp.com, \n\tjerinj@marvell.com, pbhagavatula@marvell.com,\n\tHonnappa.Nagarahalli@arm.com, \n\truifeng.wang@arm.com, phil.yang@arm.com, steve.capper@arm.com",
        "Date": "Thu, 24 Oct 2019 18:42:25 +0800",
        "Message-Id": "<1571913748-51735-3-git-send-email-gavin.hu@arm.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": [
            "<1571913748-51735-1-git-send-email-gavin.hu@arm.com>",
            "<1561911676-37718-1-git-send-email-gavin.hu@arm.com>"
        ],
        "References": [
            "<1571913748-51735-1-git-send-email-gavin.hu@arm.com>",
            "<1561911676-37718-1-git-send-email-gavin.hu@arm.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH v9 2/5] eal: add the APIs to wait until equal",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The rte_wait_until_equal_xx APIs abstract the functionality of\n'polling for a memory location to become equal to a given value'.\n\nAdd the RTE_ARM_USE_WFE configuration entry for aarch64, disabled\nby default. When it is enabled, the above APIs will call WFE instruction\nto save CPU cycles and power.\n\nFrom a VM, when calling this API on aarch64, it may trap in and out to\nrelease vCPUs whereas cause high exit latency. Since kernel 4.18.20 an\nadaptive trapping mechanism is introduced to balance the latency and\nworkload.\n\nSigned-off-by: Gavin Hu <gavin.hu@arm.com>\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\nReviewed-by: Steve Capper <steve.capper@arm.com>\nReviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>\nReviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\nReviewed-by: Phil Yang <phil.yang@arm.com>\nAcked-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\nAcked-by: Jerin Jacob <jerinj@marvell.com>\n---\n config/arm/meson.build                             |   1 +\n config/common_base                                 |   5 +\n .../common/include/arch/arm/rte_pause_64.h         |  70 +++++++\n lib/librte_eal/common/include/generic/rte_pause.h  | 217 +++++++++++++++++++++\n 4 files changed, 293 insertions(+)",
    "diff": "diff --git a/config/arm/meson.build b/config/arm/meson.build\nindex 979018e..b4b4cac 100644\n--- a/config/arm/meson.build\n+++ b/config/arm/meson.build\n@@ -26,6 +26,7 @@ flags_common_default = [\n \t['RTE_LIBRTE_AVP_PMD', false],\n \n \t['RTE_SCHED_VECTOR', false],\n+\t['RTE_ARM_USE_WFE', false],\n ]\n \n flags_generic = [\ndiff --git a/config/common_base b/config/common_base\nindex e843a21..c812156 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -111,6 +111,11 @@ CONFIG_RTE_MAX_VFIO_CONTAINERS=64\n CONFIG_RTE_MALLOC_DEBUG=n\n CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n\n CONFIG_RTE_USE_LIBBSD=n\n+# Use WFE instructions to implement the rte_wait_for_equal_xxx APIs,\n+# calling these APIs put the cores in low power state while waiting\n+# for the memory address to become equal to the expected value.\n+# This is supported only by aarch64.\n+CONFIG_RTE_ARM_USE_WFE=n\n \n #\n # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.\ndiff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h\nindex 93895d3..7bc8efb 100644\n--- a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h\n+++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h\n@@ -1,5 +1,6 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n  * Copyright(c) 2017 Cavium, Inc\n+ * Copyright(c) 2019 Arm Limited\n  */\n \n #ifndef _RTE_PAUSE_ARM64_H_\n@@ -17,6 +18,75 @@ static inline void rte_pause(void)\n \tasm volatile(\"yield\" ::: \"memory\");\n }\n \n+#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED\n+static inline void rte_sevl(void)\n+{\n+\tasm volatile(\"sevl\" : : : \"memory\");\n+}\n+\n+static inline void rte_wfe(void)\n+{\n+\tasm volatile(\"wfe\" : : : \"memory\");\n+}\n+\n+static __rte_always_inline uint16_t\n+__atomic_load_ex_16(volatile uint16_t *addr, int memorder)\n+{\n+\tuint16_t tmp;\n+\tassert((memorder == __ATOMIC_ACQUIRE)\n+\t\t\t|| (memorder == __ATOMIC_RELAXED));\n+\tif (memorder == __ATOMIC_ACQUIRE)\n+\t\tasm volatile(\"ldaxrh %w[tmp], [%x[addr]]\"\n+\t\t\t: [tmp] \"=&r\" (tmp)\n+\t\t\t: [addr] \"r\"(addr)\n+\t\t\t: \"memory\");\n+\telse if (memorder == __ATOMIC_RELAXED)\n+\t\tasm volatile(\"ldxrh %w[tmp], [%x[addr]]\"\n+\t\t\t: [tmp] \"=&r\" (tmp)\n+\t\t\t: [addr] \"r\"(addr)\n+\t\t\t: \"memory\");\n+\treturn tmp;\n+}\n+\n+static __rte_always_inline uint32_t\n+__atomic_load_ex_32(volatile uint32_t *addr, int memorder)\n+{\n+\tuint32_t tmp;\n+\tassert((memorder == __ATOMIC_ACQUIRE)\n+\t\t\t|| (memorder == __ATOMIC_RELAXED));\n+\tif (memorder == __ATOMIC_ACQUIRE)\n+\t\tasm volatile(\"ldaxr %w[tmp], [%x[addr]]\"\n+\t\t\t: [tmp] \"=&r\" (tmp)\n+\t\t\t: [addr] \"r\"(addr)\n+\t\t\t: \"memory\");\n+\telse if (memorder == __ATOMIC_RELAXED)\n+\t\tasm volatile(\"ldxr %w[tmp], [%x[addr]]\"\n+\t\t\t: [tmp] \"=&r\" (tmp)\n+\t\t\t: [addr] \"r\"(addr)\n+\t\t\t: \"memory\");\n+\treturn tmp;\n+}\n+\n+static __rte_always_inline uint64_t\n+__atomic_load_ex_64(volatile uint64_t *addr, int memorder)\n+{\n+\tuint64_t tmp;\n+\tassert((memorder == __ATOMIC_ACQUIRE)\n+\t\t\t|| (memorder == __ATOMIC_RELAXED));\n+\tif (memorder == __ATOMIC_ACQUIRE)\n+\t\tasm volatile(\"ldaxr %x[tmp], [%x[addr]]\"\n+\t\t\t: [tmp] \"=&r\" (tmp)\n+\t\t\t: [addr] \"r\"(addr)\n+\t\t\t: \"memory\");\n+\telse if (memorder == __ATOMIC_RELAXED)\n+\t\tasm volatile(\"ldxr %x[tmp], [%x[addr]]\"\n+\t\t\t: [tmp] \"=&r\" (tmp)\n+\t\t\t: [addr] \"r\"(addr)\n+\t\t\t: \"memory\");\n+\treturn tmp;\n+}\n+#endif\n+\n #ifdef __cplusplus\n }\n #endif\ndiff --git 
a/lib/librte_eal/common/include/generic/rte_pause.h b/lib/librte_eal/common/include/generic/rte_pause.h\nindex 52bd4db..4db44f9 100644\n--- a/lib/librte_eal/common/include/generic/rte_pause.h\n+++ b/lib/librte_eal/common/include/generic/rte_pause.h\n@@ -1,5 +1,6 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n  * Copyright(c) 2017 Cavium, Inc\n+ * Copyright(c) 2019 Arm Limited\n  */\n \n #ifndef _RTE_PAUSE_H_\n@@ -12,6 +13,12 @@\n  *\n  */\n \n+#include <stdint.h>\n+#include <rte_common.h>\n+#include <rte_atomic.h>\n+#include <rte_compat.h>\n+#include <assert.h>\n+\n /**\n  * Pause CPU execution for a short while\n  *\n@@ -20,4 +27,214 @@\n  */\n static inline void rte_pause(void);\n \n+static inline void rte_sevl(void);\n+static inline void rte_wfe(void);\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Atomic load from addr, it returns the 16-bit content of *addr.\n+ *\n+ * @param addr\n+ *  A pointer to the memory location.\n+ * @param memorder\n+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.\n+ *  These map to C++11 memory orders with the same names, see the C++11 standard\n+ *  the GCC wiki on atomic synchronization for detailed definitions.\n+ */\n+static __rte_always_inline uint16_t\n+__atomic_load_ex_16(volatile uint16_t *addr, int memorder);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Atomic load from addr, it returns the 32-bit content of *addr.\n+ *\n+ * @param addr\n+ *  A pointer to the memory location.\n+ * @param memorder\n+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.\n+ *  These map to C++11 memory orders with the same names, see the C++11 standard\n+ *  the GCC wiki on atomic synchronization for detailed definitions.\n+ */\n+static __rte_always_inline uint32_t\n+__atomic_load_ex_32(volatile uint32_t *addr, int memorder);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Atomic load from addr, it returns the 64-bit content of *addr.\n+ *\n+ * @param addr\n+ *  A pointer to the memory location.\n+ * @param memorder\n+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.\n+ *  These map to C++11 memory orders with the same names, see the C++11 standard\n+ *  the GCC wiki on atomic synchronization for detailed definitions.\n+ */\n+static __rte_always_inline uint64_t\n+__atomic_load_ex_64(volatile uint64_t *addr, int memorder);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Wait for *addr to be updated with a 16-bit expected value, with a relaxed\n+ * memory ordering model meaning the loads around this API can be reordered.\n+ *\n+ * @param addr\n+ *  A pointer to the memory location.\n+ * @param expected\n+ *  A 16-bit expected value to be in the memory location.\n+ * @param memorder\n+ *  Two different memory orders that can be specified:\n+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. 
These map to\n+ *  C++11 memory orders with the same names, see the C++11 standard or\n+ *  the GCC wiki on atomic synchronization for detailed definition.\n+ */\n+__rte_experimental\n+static __rte_always_inline void\n+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,\n+int memorder);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Wait for *addr to be updated with a 32-bit expected value, with a relaxed\n+ * memory ordering model meaning the loads around this API can be reordered.\n+ *\n+ * @param addr\n+ *  A pointer to the memory location.\n+ * @param expected\n+ *  A 32-bit expected value to be in the memory location.\n+ * @param memorder\n+ *  Two different memory orders that can be specified:\n+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n+ *  C++11 memory orders with the same names, see the C++11 standard or\n+ *  the GCC wiki on atomic synchronization for detailed definition.\n+ */\n+__rte_experimental\n+static __rte_always_inline void\n+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,\n+int memorder);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Wait for *addr to be updated with a 64-bit expected value, with a relaxed\n+ * memory ordering model meaning the loads around this API can be reordered.\n+ *\n+ * @param addr\n+ *  A pointer to the memory location.\n+ * @param expected\n+ *  A 64-bit expected value to be in the memory location.\n+ * @param memorder\n+ *  Two different memory orders that can be specified:\n+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n+ *  C++11 memory orders with the same names, see the C++11 standard or\n+ *  the GCC wiki on atomic synchronization for detailed definition.\n+ */\n+__rte_experimental\n+static __rte_always_inline void\n+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,\n+int memorder);\n+\n+#ifdef RTE_ARM_USE_WFE\n+#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED\n+#endif\n+\n+#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED\n+static inline void rte_sevl(void)\n+{\n+}\n+\n+static inline void rte_wfe(void)\n+{\n+\trte_pause();\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Atomic load from addr, it returns the 16-bit content of *addr.\n+ *\n+ * @param addr\n+ *  A pointer to the memory location.\n+ * @param memorder\n+ *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.\n+ *  These map to C++11 memory orders with the same names, see the C++11 standard\n+ *  the GCC wiki on atomic synchronization for detailed definitions.\n+ */\n+static __rte_always_inline uint16_t\n+__atomic_load_ex_16(volatile uint16_t *addr, int memorder)\n+{\n+\tuint16_t tmp;\n+\tassert((memorder == __ATOMIC_ACQUIRE)\n+\t\t\t|| (memorder == __ATOMIC_RELAXED));\n+\ttmp = __atomic_load_n(addr, memorder);\n+\treturn tmp;\n+}\n+\n+static __rte_always_inline uint32_t\n+__atomic_load_ex_32(volatile uint32_t *addr, int memorder)\n+{\n+\tuint32_t tmp;\n+\tassert((memorder == __ATOMIC_ACQUIRE)\n+\t\t\t|| (memorder == __ATOMIC_RELAXED));\n+\ttmp = __atomic_load_n(addr, memorder);\n+\treturn tmp;\n+}\n+\n+static __rte_always_inline uint64_t\n+__atomic_load_ex_64(volatile uint64_t *addr, int memorder)\n+{\n+\tuint64_t tmp;\n+\tassert((memorder == __ATOMIC_ACQUIRE)\n+\t\t\t|| (memorder == __ATOMIC_RELAXED));\n+\ttmp = __atomic_load_n(addr, memorder);\n+\treturn tmp;\n+}\n+\n+static 
__rte_always_inline void\n+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,\n+int memorder)\n+{\n+\tif (__atomic_load_n(addr, memorder) != expected) {\n+\t\trte_sevl();\n+\t\tdo {\n+\t\t\trte_wfe();\n+\t\t} while (__atomic_load_ex_16(addr, memorder) != expected);\n+\t}\n+}\n+\n+static __rte_always_inline void\n+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,\n+int memorder)\n+{\n+\tif (__atomic_load_ex_32(addr, memorder) != expected) {\n+\t\trte_sevl();\n+\t\tdo {\n+\t\t\trte_wfe();\n+\t\t} while (__atomic_load_ex_32(addr, memorder) != expected);\n+\t}\n+}\n+\n+static __rte_always_inline void\n+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,\n+int memorder)\n+{\n+\tif (__atomic_load_ex_64(addr, memorder) != expected) {\n+\t\trte_sevl();\n+\t\tdo {\n+\t\t\trte_wfe();\n+\t\t} while (__atomic_load_ex_64(addr, memorder) != expected);\n+\t}\n+}\n+#endif\n+\n #endif /* _RTE_PAUSE_H_ */\n",
    "prefixes": [
        "v9",
        "2/5"
    ]
}
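For readers of the record above: the diff adds rte_wait_until_equal_16/32/64(), which wait for a memory location to reach an expected value, spinning with rte_pause() by default and, when CONFIG_RTE_ARM_USE_WFE=y is enabled on aarch64, sleeping in WFE between exclusive loads. The fragment below is a usage sketch built only from the signatures shown in the diff; the start-flag scenario, the <rte_pause.h> include and the ALLOW_EXPERIMENTAL_API define are assumptions for illustration, not code from the patch.

/* Illustrative sketch: one lcore waits for a flag that another lcore sets.
 * rte_wait_until_equal_32() is the helper added by this patch; with
 * CONFIG_RTE_ARM_USE_WFE=y on aarch64 the wait uses SEVL/WFE, otherwise it
 * falls back to rte_pause() spinning. */
#define ALLOW_EXPERIMENTAL_API	/* assumption: the helpers are experimental */
#include <stdint.h>
#include <rte_pause.h>

static volatile uint32_t start_flag;	/* shared between producer and waiter */

/* Producer: publish the flag with release semantics. */
static void
signal_start(void)
{
	__atomic_store_n(&start_flag, 1, __ATOMIC_RELEASE);
}

/* Waiter: block cheaply until the flag becomes 1. */
static void
wait_for_start(void)
{
	rte_wait_until_equal_32(&start_flag, 1, __ATOMIC_ACQUIRE);
}

Acquire ordering on the waiting side pairs with the release store in signal_start(), so loads issued after wait_for_start() returns cannot be reordered ahead of the point where the flag is observed.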