get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
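
For illustration, a minimal client sketch of the read-only GET endpoint, shown ahead of the example exchange below. This is a hedged sketch, not part of the API page: it assumes libcurl is installed, and it uses Django REST Framework's generic ?format=json selector to request raw JSON instead of the browsable ?format=api view shown below. Build with: cc fetch_patch.c -lcurl

/* fetch_patch.c: GET one patch record from the Patchwork REST API. */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
	CURL *curl;
	CURLcode res;

	curl_global_init(CURL_GLOBAL_DEFAULT);
	curl = curl_easy_init();
	if (curl == NULL) {
		curl_global_cleanup();
		return 1;
	}

	/* Request the patch detail endpoint; with no write callback set,
	 * libcurl writes the JSON response body to stdout.
	 */
	curl_easy_setopt(curl, CURLOPT_URL,
		"http://patches.dpdk.org/api/patches/132807/?format=json");
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

	res = curl_easy_perform(curl);
	if (res != CURLE_OK)
		fprintf(stderr, "GET failed: %s\n", curl_easy_strerror(res));

	curl_easy_cleanup(curl);
	curl_global_cleanup();
	return res == CURLE_OK ? 0 : 1;
}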

GET /api/patches/132807/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132807,
    "url": "http://patches.dpdk.org/api/patches/132807/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1697574677-16578-4-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1697574677-16578-4-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1697574677-16578-4-git-send-email-roretzla@linux.microsoft.com",
    "date": "2023-10-17T20:31:01",
    "name": "[v2,03/19] eal: use rte optional stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "897de5ab8d698b7e5b8f47521d22059fe4dd5ec1",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1697574677-16578-4-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 29892,
            "url": "http://patches.dpdk.org/api/series/29892/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29892",
            "date": "2023-10-17T20:30:58",
            "name": "use rte optional stdatomic API",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/29892/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/132807/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/132807/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 94D0943190;\n\tTue, 17 Oct 2023 22:31:50 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 03AC742E14;\n\tTue, 17 Oct 2023 22:31:30 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 6810840A75\n for <dev@dpdk.org>; Tue, 17 Oct 2023 22:31:19 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id 8EB8B20B74C3; Tue, 17 Oct 2023 13:31:18 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com 8EB8B20B74C3",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1697574678;\n bh=dCxUZBtq3/V86NFcWw7F7aaKcfJwC/v0P68A8nSv4Gc=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=lz1V3SAY0krsU2wtDYU7BjgvV7hfp/OJM7iqA486KUmNR+VpQ+p7Ih1lmYsiMTF6G\n bHn2/YoV7KeYS+dD2yGN8C3iKgkah98cyDXte0JkN1bvs22N4J6gDNZPxh4lTucGI/\n bgiXnQYfNVjF2ksAEI+g5SX63tXklSi8jfECkryw=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "Akhil Goyal <gakhil@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Chenbo Xia <chenbo.xia@intel.com>, Ciara Power <ciara.power@intel.com>,\n David Christensen <drc@linux.vnet.ibm.com>,\n David Hunt <david.hunt@intel.com>,\n Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,\n Dmitry Malloy <dmitrym@microsoft.com>,\n Elena Agostini <eagostini@nvidia.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Fan Zhang <fanzhang.oss@gmail.com>, Ferruh Yigit <ferruh.yigit@amd.com>,\n Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jerin Jacob <jerinj@marvell.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>,\n Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,\n Pallavi Kadam <pallavi.kadam@intel.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Reshma Pattan <reshma.pattan@intel.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Shijith Thotton <sthotton@marvell.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Yipeng Wang <yipeng1.wang@intel.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v2 03/19] eal: use rte optional stdatomic API",
        "Date": "Tue, 17 Oct 2023 13:31:01 -0700",
        "Message-Id": "<1697574677-16578-4-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1697574677-16578-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>\n <1697574677-16578-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding\nrte_atomic_xxx optional stdatomic API\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\n---\n lib/eal/common/eal_common_launch.c    |  10 +--\n lib/eal/common/eal_common_mcfg.c      |   2 +-\n lib/eal/common/eal_common_proc.c      |  14 ++--\n lib/eal/common/eal_common_thread.c    |  26 ++++----\n lib/eal/common/eal_common_trace.c     |   8 +--\n lib/eal/common/eal_common_trace_ctf.c |   4 +-\n lib/eal/common/eal_memcfg.h           |   2 +-\n lib/eal/common/eal_private.h          |   4 +-\n lib/eal/common/eal_trace.h            |   4 +-\n lib/eal/common/rte_service.c          | 122 +++++++++++++++++-----------------\n lib/eal/freebsd/eal.c                 |  20 +++---\n lib/eal/include/rte_epoll.h           |   3 +-\n lib/eal/linux/eal.c                   |  26 ++++----\n lib/eal/linux/eal_interrupts.c        |  42 ++++++------\n lib/eal/ppc/include/rte_atomic.h      |   6 +-\n lib/eal/windows/rte_thread.c          |   8 ++-\n 16 files changed, 152 insertions(+), 149 deletions(-)",
    "diff": "diff --git a/lib/eal/common/eal_common_launch.c b/lib/eal/common/eal_common_launch.c\nindex 0504598..5320c3b 100644\n--- a/lib/eal/common/eal_common_launch.c\n+++ b/lib/eal/common/eal_common_launch.c\n@@ -18,8 +18,8 @@\n int\n rte_eal_wait_lcore(unsigned worker_id)\n {\n-\twhile (__atomic_load_n(&lcore_config[worker_id].state,\n-\t\t\t__ATOMIC_ACQUIRE) != WAIT)\n+\twhile (rte_atomic_load_explicit(&lcore_config[worker_id].state,\n+\t\t\trte_memory_order_acquire) != WAIT)\n \t\trte_pause();\n \n \treturn lcore_config[worker_id].ret;\n@@ -38,8 +38,8 @@\n \t/* Check if the worker is in 'WAIT' state. Use acquire order\n \t * since 'state' variable is used as the guard variable.\n \t */\n-\tif (__atomic_load_n(&lcore_config[worker_id].state,\n-\t\t\t__ATOMIC_ACQUIRE) != WAIT)\n+\tif (rte_atomic_load_explicit(&lcore_config[worker_id].state,\n+\t\t\trte_memory_order_acquire) != WAIT)\n \t\tgoto finish;\n \n \tlcore_config[worker_id].arg = arg;\n@@ -47,7 +47,7 @@\n \t * before the worker thread starts running the function.\n \t * Use worker thread function as the guard variable.\n \t */\n-\t__atomic_store_n(&lcore_config[worker_id].f, f, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&lcore_config[worker_id].f, f, rte_memory_order_release);\n \n \trc = eal_thread_wake_worker(worker_id);\n \ndiff --git a/lib/eal/common/eal_common_mcfg.c b/lib/eal/common/eal_common_mcfg.c\nindex 2a785e7..dabb80e 100644\n--- a/lib/eal/common/eal_common_mcfg.c\n+++ b/lib/eal/common/eal_common_mcfg.c\n@@ -30,7 +30,7 @@\n \tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n \n \t/* wait until shared mem_config finish initialising */\n-\trte_wait_until_equal_32(&mcfg->magic, RTE_MAGIC, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_32(&mcfg->magic, RTE_MAGIC, rte_memory_order_relaxed);\n }\n \n int\ndiff --git a/lib/eal/common/eal_common_proc.c b/lib/eal/common/eal_common_proc.c\nindex f20a348..728815c 100644\n--- a/lib/eal/common/eal_common_proc.c\n+++ b/lib/eal/common/eal_common_proc.c\n@@ -33,7 +33,7 @@\n #include \"eal_filesystem.h\"\n #include \"eal_internal_cfg.h\"\n \n-static int mp_fd = -1;\n+static RTE_ATOMIC(int) mp_fd = -1;\n static rte_thread_t mp_handle_tid;\n static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */\n static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */\n@@ -404,7 +404,7 @@ struct pending_request {\n \tstruct sockaddr_un sa;\n \tint fd;\n \n-\twhile ((fd = __atomic_load_n(&mp_fd, __ATOMIC_RELAXED)) >= 0) {\n+\twhile ((fd = rte_atomic_load_explicit(&mp_fd, rte_memory_order_relaxed)) >= 0) {\n \t\tint ret;\n \n \t\tret = read_msg(fd, &msg, &sa);\n@@ -652,7 +652,7 @@ enum async_action {\n \t\tRTE_LOG(ERR, EAL, \"failed to create mp thread: %s\\n\",\n \t\t\tstrerror(errno));\n \t\tclose(dir_fd);\n-\t\tclose(__atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED));\n+\t\tclose(rte_atomic_exchange_explicit(&mp_fd, -1, rte_memory_order_relaxed));\n \t\treturn -1;\n \t}\n \n@@ -668,7 +668,7 @@ enum async_action {\n {\n \tint fd;\n \n-\tfd = __atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED);\n+\tfd = rte_atomic_exchange_explicit(&mp_fd, -1, rte_memory_order_relaxed);\n \tif (fd < 0)\n \t\treturn;\n \n@@ -1282,11 +1282,11 @@ enum mp_status {\n \n \texpected = MP_STATUS_UNKNOWN;\n \tdesired = status;\n-\tif (__atomic_compare_exchange_n(&mcfg->mp_status, &expected, desired,\n-\t\t\tfalse, __ATOMIC_RELAXED, __ATOMIC_RELAXED))\n+\tif (rte_atomic_compare_exchange_strong_explicit(&mcfg->mp_status, &expected, 
desired,\n+\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed))\n \t\treturn true;\n \n-\treturn __atomic_load_n(&mcfg->mp_status, __ATOMIC_RELAXED) == desired;\n+\treturn rte_atomic_load_explicit(&mcfg->mp_status, rte_memory_order_relaxed) == desired;\n }\n \n bool\ndiff --git a/lib/eal/common/eal_common_thread.c b/lib/eal/common/eal_common_thread.c\nindex 668b1ed..c422ea8 100644\n--- a/lib/eal/common/eal_common_thread.c\n+++ b/lib/eal/common/eal_common_thread.c\n@@ -191,8 +191,8 @@ unsigned rte_socket_id(void)\n \t\t/* Set the state to 'RUNNING'. Use release order\n \t\t * since 'state' variable is used as the guard variable.\n \t\t */\n-\t\t__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&lcore_config[lcore_id].state, RUNNING,\n+\t\t\trte_memory_order_release);\n \n \t\teal_thread_ack_command();\n \n@@ -201,8 +201,8 @@ unsigned rte_socket_id(void)\n \t\t * are accessed only after update to 'f' is visible.\n \t\t * Wait till the update to 'f' is visible to the worker.\n \t\t */\n-\t\twhile ((f = __atomic_load_n(&lcore_config[lcore_id].f,\n-\t\t\t\t__ATOMIC_ACQUIRE)) == NULL)\n+\t\twhile ((f = rte_atomic_load_explicit(&lcore_config[lcore_id].f,\n+\t\t\t\trte_memory_order_acquire)) == NULL)\n \t\t\trte_pause();\n \n \t\trte_eal_trace_thread_lcore_running(lcore_id, f);\n@@ -219,8 +219,8 @@ unsigned rte_socket_id(void)\n \t\t * are completed before the state is updated.\n \t\t * Use 'state' as the guard variable.\n \t\t */\n-\t\t__atomic_store_n(&lcore_config[lcore_id].state, WAIT,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&lcore_config[lcore_id].state, WAIT,\n+\t\t\trte_memory_order_release);\n \n \t\trte_eal_trace_thread_lcore_stopped(lcore_id);\n \t}\n@@ -242,7 +242,7 @@ struct control_thread_params {\n \t/* Control thread status.\n \t * If the status is CTRL_THREAD_ERROR, 'ret' has the error code.\n \t */\n-\tenum __rte_ctrl_thread_status status;\n+\tRTE_ATOMIC(enum __rte_ctrl_thread_status) status;\n };\n \n static int control_thread_init(void *arg)\n@@ -259,13 +259,13 @@ static int control_thread_init(void *arg)\n \tRTE_PER_LCORE(_socket_id) = SOCKET_ID_ANY;\n \tparams->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset);\n \tif (params->ret != 0) {\n-\t\t__atomic_store_n(&params->status,\n-\t\t\tCTRL_THREAD_ERROR, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&params->status,\n+\t\t\tCTRL_THREAD_ERROR, rte_memory_order_release);\n \t\treturn 1;\n \t}\n \n-\t__atomic_store_n(&params->status,\n-\t\tCTRL_THREAD_RUNNING, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&params->status,\n+\t\tCTRL_THREAD_RUNNING, rte_memory_order_release);\n \n \treturn 0;\n }\n@@ -310,8 +310,8 @@ static uint32_t control_thread_start(void *arg)\n \n \t/* Wait for the control thread to initialize successfully */\n \twhile ((ctrl_thread_status =\n-\t\t\t__atomic_load_n(&params->status,\n-\t\t\t__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {\n+\t\t\trte_atomic_load_explicit(&params->status,\n+\t\t\trte_memory_order_acquire)) == CTRL_THREAD_LAUNCHING) {\n \t\trte_delay_us_sleep(1);\n \t}\n \ndiff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c\nindex d2eac2d..6ad87fc 100644\n--- a/lib/eal/common/eal_common_trace.c\n+++ b/lib/eal/common/eal_common_trace.c\n@@ -97,7 +97,7 @@ struct trace_point_head *\n bool\n rte_trace_is_enabled(void)\n {\n-\treturn __atomic_load_n(&trace.status, __ATOMIC_ACQUIRE) != 0;\n+\treturn rte_atomic_load_explicit(&trace.status, 
rte_memory_order_acquire) != 0;\n }\n \n static void\n@@ -157,7 +157,7 @@ rte_trace_mode rte_trace_mode_get(void)\n \tprev = rte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_MASK,\n \t\trte_memory_order_release);\n \tif ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0)\n-\t\t__atomic_fetch_add(&trace.status, 1, __ATOMIC_RELEASE);\n+\t\trte_atomic_fetch_add_explicit(&trace.status, 1, rte_memory_order_release);\n \treturn 0;\n }\n \n@@ -172,7 +172,7 @@ rte_trace_mode rte_trace_mode_get(void)\n \tprev = rte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_MASK,\n \t\trte_memory_order_release);\n \tif ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0)\n-\t\t__atomic_fetch_sub(&trace.status, 1, __ATOMIC_RELEASE);\n+\t\trte_atomic_fetch_sub_explicit(&trace.status, 1, rte_memory_order_release);\n \treturn 0;\n }\n \n@@ -526,7 +526,7 @@ rte_trace_mode rte_trace_mode_get(void)\n \n \t/* Add the trace point at tail */\n \tSTAILQ_INSERT_TAIL(&tp_list, tp, next);\n-\t__atomic_thread_fence(__ATOMIC_RELEASE);\n+\t__atomic_thread_fence(rte_memory_order_release);\n \n \t/* All Good !!! */\n \treturn 0;\ndiff --git a/lib/eal/common/eal_common_trace_ctf.c b/lib/eal/common/eal_common_trace_ctf.c\nindex c6775c3..04c4f71 100644\n--- a/lib/eal/common/eal_common_trace_ctf.c\n+++ b/lib/eal/common/eal_common_trace_ctf.c\n@@ -361,10 +361,10 @@\n \tif (ctf_meta == NULL)\n \t\treturn -EINVAL;\n \n-\tif (!__atomic_load_n(&trace->ctf_fixup_done, __ATOMIC_SEQ_CST) &&\n+\tif (!rte_atomic_load_explicit(&trace->ctf_fixup_done, rte_memory_order_seq_cst) &&\n \t\t\t\trte_get_timer_hz()) {\n \t\tmeta_fixup(trace, ctf_meta);\n-\t\t__atomic_store_n(&trace->ctf_fixup_done, 1, __ATOMIC_SEQ_CST);\n+\t\trte_atomic_store_explicit(&trace->ctf_fixup_done, 1, rte_memory_order_seq_cst);\n \t}\n \n \trc = fprintf(f, \"%s\", ctf_meta);\ndiff --git a/lib/eal/common/eal_memcfg.h b/lib/eal/common/eal_memcfg.h\nindex d5c63e2..60e2089 100644\n--- a/lib/eal/common/eal_memcfg.h\n+++ b/lib/eal/common/eal_memcfg.h\n@@ -42,7 +42,7 @@ struct rte_mem_config {\n \trte_rwlock_t memory_hotplug_lock;\n \t/**< Indicates whether memory hotplug request is in progress. */\n \n-\tuint8_t mp_status; /**< Multiprocess status. */\n+\tRTE_ATOMIC(uint8_t) mp_status; /**< Multiprocess status. */\n \n \t/* memory segments and zones */\n \tstruct rte_fbarray memzones; /**< Memzone descriptors. 
*/\ndiff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h\nindex ebd496b..4d2e806 100644\n--- a/lib/eal/common/eal_private.h\n+++ b/lib/eal/common/eal_private.h\n@@ -24,11 +24,11 @@ struct lcore_config {\n \tint pipe_main2worker[2];   /**< communication pipe with main */\n \tint pipe_worker2main[2];   /**< communication pipe with main */\n \n-\tlcore_function_t * volatile f; /**< function to call */\n+\tRTE_ATOMIC(lcore_function_t *) volatile f; /**< function to call */\n \tvoid * volatile arg;       /**< argument of function */\n \tvolatile int ret;          /**< return value of function */\n \n-\tvolatile enum rte_lcore_state_t state; /**< lcore state */\n+\tvolatile RTE_ATOMIC(enum rte_lcore_state_t) state; /**< lcore state */\n \tunsigned int socket_id;    /**< physical socket id for this lcore */\n \tunsigned int core_id;      /**< core number on socket for this lcore */\n \tint core_index;            /**< relative index, starting from 0 */\ndiff --git a/lib/eal/common/eal_trace.h b/lib/eal/common/eal_trace.h\nindex d66bcfe..ace2ef3 100644\n--- a/lib/eal/common/eal_trace.h\n+++ b/lib/eal/common/eal_trace.h\n@@ -50,7 +50,7 @@ struct trace_arg {\n struct trace {\n \tchar *dir;\n \tint register_errno;\n-\tuint32_t status;\n+\tRTE_ATOMIC(uint32_t) status;\n \tenum rte_trace_mode mode;\n \trte_uuid_t uuid;\n \tuint32_t buff_len;\n@@ -65,7 +65,7 @@ struct trace {\n \tuint32_t ctf_meta_offset_freq;\n \tuint32_t ctf_meta_offset_freq_off_s;\n \tuint32_t ctf_meta_offset_freq_off;\n-\tuint16_t ctf_fixup_done;\n+\tRTE_ATOMIC(uint16_t) ctf_fixup_done;\n \trte_spinlock_t lock;\n };\n \ndiff --git a/lib/eal/common/rte_service.c b/lib/eal/common/rte_service.c\nindex 9e2aa4a..3fc2b9a 100644\n--- a/lib/eal/common/rte_service.c\n+++ b/lib/eal/common/rte_service.c\n@@ -43,8 +43,8 @@ struct rte_service_spec_impl {\n \trte_spinlock_t execute_lock;\n \n \t/* API set/get-able variables */\n-\tint8_t app_runstate;\n-\tint8_t comp_runstate;\n+\tRTE_ATOMIC(int8_t) app_runstate;\n+\tRTE_ATOMIC(int8_t) comp_runstate;\n \tuint8_t internal_flags;\n \n \t/* per service statistics */\n@@ -52,24 +52,24 @@ struct rte_service_spec_impl {\n \t * It does not indicate the number of cores the service is running\n \t * on currently.\n \t */\n-\tuint32_t num_mapped_cores;\n+\tRTE_ATOMIC(uint32_t) num_mapped_cores;\n } __rte_cache_aligned;\n \n struct service_stats {\n-\tuint64_t calls;\n-\tuint64_t cycles;\n+\tRTE_ATOMIC(uint64_t) calls;\n+\tRTE_ATOMIC(uint64_t) cycles;\n };\n \n /* the internal values of a service core */\n struct core_state {\n \t/* map of services IDs are run on this core */\n \tuint64_t service_mask;\n-\tuint8_t runstate; /* running or stopped */\n-\tuint8_t thread_active; /* indicates when thread is in service_run() */\n+\tRTE_ATOMIC(uint8_t) runstate; /* running or stopped */\n+\tRTE_ATOMIC(uint8_t) thread_active; /* indicates when thread is in service_run() */\n \tuint8_t is_service_core; /* set if core is currently a service core */\n \tuint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];\n-\tuint64_t loops;\n-\tuint64_t cycles;\n+\tRTE_ATOMIC(uint64_t) loops;\n+\tRTE_ATOMIC(uint64_t) cycles;\n \tstruct service_stats service_stats[RTE_SERVICE_NUM_MAX];\n } __rte_cache_aligned;\n \n@@ -314,11 +314,11 @@ struct core_state {\n \t * service_run and service_runstate_get function.\n \t */\n \tif (runstate)\n-\t\t__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&s->comp_runstate, 
RUNSTATE_RUNNING,\n+\t\t\trte_memory_order_release);\n \telse\n-\t\t__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_STOPPED,\n+\t\t\trte_memory_order_release);\n \n \treturn 0;\n }\n@@ -334,11 +334,11 @@ struct core_state {\n \t * service_run runstate_get function.\n \t */\n \tif (runstate)\n-\t\t__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&s->app_runstate, RUNSTATE_RUNNING,\n+\t\t\trte_memory_order_release);\n \telse\n-\t\t__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&s->app_runstate, RUNSTATE_STOPPED,\n+\t\t\trte_memory_order_release);\n \n \trte_eal_trace_service_runstate_set(id, runstate);\n \treturn 0;\n@@ -354,14 +354,14 @@ struct core_state {\n \t * Use load-acquire memory order. This synchronizes with\n \t * store-release in service state set functions.\n \t */\n-\tif (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==\n+\tif (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) ==\n \t\t\tRUNSTATE_RUNNING &&\n-\t    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==\n+\t    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) ==\n \t\t\tRUNSTATE_RUNNING) {\n \t\tint check_disabled = !(s->internal_flags &\n \t\t\tSERVICE_F_START_CHECK);\n-\t\tint lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,\n-\t\t\t__ATOMIC_RELAXED) > 0);\n+\t\tint lcore_mapped = (rte_atomic_load_explicit(&s->num_mapped_cores,\n+\t\t\trte_memory_order_relaxed) > 0);\n \n \t\treturn (check_disabled | lcore_mapped);\n \t} else\n@@ -392,15 +392,15 @@ struct core_state {\n \t\t\tuint64_t end = rte_rdtsc();\n \t\t\tuint64_t cycles = end - start;\n \n-\t\t\t__atomic_store_n(&cs->cycles, cs->cycles + cycles,\n-\t\t\t\t__ATOMIC_RELAXED);\n-\t\t\t__atomic_store_n(&service_stats->cycles,\n+\t\t\trte_atomic_store_explicit(&cs->cycles, cs->cycles + cycles,\n+\t\t\t\trte_memory_order_relaxed);\n+\t\t\trte_atomic_store_explicit(&service_stats->cycles,\n \t\t\t\tservice_stats->cycles + cycles,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\trte_memory_order_relaxed);\n \t\t}\n \n-\t\t__atomic_store_n(&service_stats->calls,\n-\t\t\tservice_stats->calls + 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&service_stats->calls,\n+\t\t\tservice_stats->calls + 1, rte_memory_order_relaxed);\n \t} else {\n \t\ts->spec.callback(userdata);\n \t}\n@@ -420,9 +420,9 @@ struct core_state {\n \t * Use load-acquire memory order. 
This synchronizes with\n \t * store-release in service state set functions.\n \t */\n-\tif (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=\n+\tif (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) !=\n \t\t\tRUNSTATE_RUNNING ||\n-\t    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=\n+\t    rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) !=\n \t\t\tRUNSTATE_RUNNING ||\n \t    !(service_mask & (UINT64_C(1) << i))) {\n \t\tcs->service_active_on_lcore[i] = 0;\n@@ -472,11 +472,11 @@ struct core_state {\n \t/* Increment num_mapped_cores to reflect that this core is\n \t * now mapped capable of running the service.\n \t */\n-\t__atomic_fetch_add(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);\n \n \tint ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);\n \n-\t__atomic_fetch_sub(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_sub_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);\n \n \treturn ret;\n }\n@@ -489,13 +489,13 @@ struct core_state {\n \tconst int lcore = rte_lcore_id();\n \tstruct core_state *cs = &lcore_states[lcore];\n \n-\t__atomic_store_n(&cs->thread_active, 1, __ATOMIC_SEQ_CST);\n+\trte_atomic_store_explicit(&cs->thread_active, 1, rte_memory_order_seq_cst);\n \n \t/* runstate act as the guard variable. Use load-acquire\n \t * memory order here to synchronize with store-release\n \t * in runstate update functions.\n \t */\n-\twhile (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==\n+\twhile (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==\n \t\t\tRUNSTATE_RUNNING) {\n \n \t\tconst uint64_t service_mask = cs->service_mask;\n@@ -513,7 +513,7 @@ struct core_state {\n \t\t\tservice_run(i, cs, service_mask, service_get(i), 1);\n \t\t}\n \n-\t\t__atomic_store_n(&cs->loops, cs->loops + 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&cs->loops, cs->loops + 1, rte_memory_order_relaxed);\n \t}\n \n \t/* Switch off this core for all services, to ensure that future\n@@ -526,7 +526,7 @@ struct core_state {\n \t * this store, ensuring that once this store is visible, the service\n \t * lcore thread really is done in service cores code.\n \t */\n-\t__atomic_store_n(&cs->thread_active, 0, __ATOMIC_SEQ_CST);\n+\trte_atomic_store_explicit(&cs->thread_active, 0, rte_memory_order_seq_cst);\n \treturn 0;\n }\n \n@@ -539,8 +539,8 @@ struct core_state {\n \t/* Load thread_active using ACQUIRE to avoid instructions dependent on\n \t * the result being re-ordered before this load completes.\n \t */\n-\treturn __atomic_load_n(&lcore_states[lcore].thread_active,\n-\t\t\t       __ATOMIC_ACQUIRE);\n+\treturn rte_atomic_load_explicit(&lcore_states[lcore].thread_active,\n+\t\t\t       rte_memory_order_acquire);\n }\n \n int32_t\n@@ -646,13 +646,13 @@ struct core_state {\n \n \t\tif (*set && !lcore_mapped) {\n \t\t\tlcore_states[lcore].service_mask |= sid_mask;\n-\t\t\t__atomic_fetch_add(&rte_services[sid].num_mapped_cores,\n-\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&rte_services[sid].num_mapped_cores,\n+\t\t\t\t1, rte_memory_order_relaxed);\n \t\t}\n \t\tif (!*set && lcore_mapped) {\n \t\t\tlcore_states[lcore].service_mask &= ~(sid_mask);\n-\t\t\t__atomic_fetch_sub(&rte_services[sid].num_mapped_cores,\n-\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_sub_explicit(&rte_services[sid].num_mapped_cores,\n+\t\t\t\t1, rte_memory_order_relaxed);\n \t\t}\n \t}\n \n@@ -709,13 
+709,13 @@ struct core_state {\n \t\t\t * store-release memory order here to synchronize\n \t\t\t * with load-acquire in runstate read functions.\n \t\t\t */\n-\t\t\t__atomic_store_n(&lcore_states[i].runstate,\n-\t\t\t\tRUNSTATE_STOPPED, __ATOMIC_RELEASE);\n+\t\t\trte_atomic_store_explicit(&lcore_states[i].runstate,\n+\t\t\t\tRUNSTATE_STOPPED, rte_memory_order_release);\n \t\t}\n \t}\n \tfor (i = 0; i < RTE_SERVICE_NUM_MAX; i++)\n-\t\t__atomic_store_n(&rte_services[i].num_mapped_cores, 0,\n-\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&rte_services[i].num_mapped_cores, 0,\n+\t\t\trte_memory_order_relaxed);\n \n \treturn 0;\n }\n@@ -735,8 +735,8 @@ struct core_state {\n \t/* Use store-release memory order here to synchronize with\n \t * load-acquire in runstate read functions.\n \t */\n-\t__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,\n-\t\t__ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,\n+\t\trte_memory_order_release);\n \n \treturn rte_eal_wait_lcore(lcore);\n }\n@@ -755,7 +755,7 @@ struct core_state {\n \t * memory order here to synchronize with store-release\n \t * in runstate update functions.\n \t */\n-\tif (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=\n+\tif (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) !=\n \t\t\tRUNSTATE_STOPPED)\n \t\treturn -EBUSY;\n \n@@ -779,7 +779,7 @@ struct core_state {\n \t * memory order here to synchronize with store-release\n \t * in runstate update functions.\n \t */\n-\tif (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==\n+\tif (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==\n \t\t\tRUNSTATE_RUNNING)\n \t\treturn -EALREADY;\n \n@@ -789,7 +789,7 @@ struct core_state {\n \t/* Use load-acquire memory order here to synchronize with\n \t * store-release in runstate update functions.\n \t */\n-\t__atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&cs->runstate, RUNSTATE_RUNNING, rte_memory_order_release);\n \n \trte_eal_trace_service_lcore_start(lcore);\n \n@@ -808,7 +808,7 @@ struct core_state {\n \t * memory order here to synchronize with store-release\n \t * in runstate update functions.\n \t */\n-\tif (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==\n+\tif (rte_atomic_load_explicit(&lcore_states[lcore].runstate, rte_memory_order_acquire) ==\n \t\t\tRUNSTATE_STOPPED)\n \t\treturn -EALREADY;\n \n@@ -820,8 +820,8 @@ struct core_state {\n \t\tint32_t enabled = service_mask & (UINT64_C(1) << i);\n \t\tint32_t service_running = rte_service_runstate_get(i);\n \t\tint32_t only_core = (1 ==\n-\t\t\t__atomic_load_n(&rte_services[i].num_mapped_cores,\n-\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\trte_atomic_load_explicit(&rte_services[i].num_mapped_cores,\n+\t\t\t\trte_memory_order_relaxed));\n \n \t\t/* if the core is mapped, and the service is running, and this\n \t\t * is the only core that is mapped, the service would cease to\n@@ -834,8 +834,8 @@ struct core_state {\n \t/* Use store-release memory order here to synchronize with\n \t * load-acquire in runstate read functions.\n \t */\n-\t__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,\n-\t\t__ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,\n+\t\trte_memory_order_release);\n \n \trte_eal_trace_service_lcore_stop(lcore);\n \n@@ -847,7 +847,7 @@ struct core_state {\n {\n \tstruct core_state *cs = &lcore_states[lcore];\n \n-\treturn __atomic_load_n(&cs->loops, 
__ATOMIC_RELAXED);\n+\treturn rte_atomic_load_explicit(&cs->loops, rte_memory_order_relaxed);\n }\n \n static uint64_t\n@@ -855,7 +855,7 @@ struct core_state {\n {\n \tstruct core_state *cs = &lcore_states[lcore];\n \n-\treturn __atomic_load_n(&cs->cycles, __ATOMIC_RELAXED);\n+\treturn rte_atomic_load_explicit(&cs->cycles, rte_memory_order_relaxed);\n }\n \n static uint64_t\n@@ -863,8 +863,8 @@ struct core_state {\n {\n \tstruct core_state *cs = &lcore_states[lcore];\n \n-\treturn __atomic_load_n(&cs->service_stats[service_id].calls,\n-\t\t__ATOMIC_RELAXED);\n+\treturn rte_atomic_load_explicit(&cs->service_stats[service_id].calls,\n+\t\trte_memory_order_relaxed);\n }\n \n static uint64_t\n@@ -872,8 +872,8 @@ struct core_state {\n {\n \tstruct core_state *cs = &lcore_states[lcore];\n \n-\treturn __atomic_load_n(&cs->service_stats[service_id].cycles,\n-\t\t__ATOMIC_RELAXED);\n+\treturn rte_atomic_load_explicit(&cs->service_stats[service_id].cycles,\n+\t\trte_memory_order_relaxed);\n }\n \n typedef uint64_t (*lcore_attr_get_fun)(uint32_t service_id,\ndiff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c\nindex 39a2868..568e06e 100644\n--- a/lib/eal/freebsd/eal.c\n+++ b/lib/eal/freebsd/eal.c\n@@ -597,8 +597,8 @@ static void rte_eal_init_alert(const char *msg)\n \t\treturn -1;\n \t}\n \n-\tif (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,\n-\t\t\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,\n+\t\t\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed)) {\n \t\trte_eal_init_alert(\"already called initialization.\");\n \t\trte_errno = EALREADY;\n \t\treturn -1;\n@@ -622,7 +622,7 @@ static void rte_eal_init_alert(const char *msg)\n \tif (fctret < 0) {\n \t\trte_eal_init_alert(\"Invalid 'command line' arguments.\");\n \t\trte_errno = EINVAL;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n@@ -636,20 +636,20 @@ static void rte_eal_init_alert(const char *msg)\n \tif (eal_plugins_init() < 0) {\n \t\trte_eal_init_alert(\"Cannot init plugins\");\n \t\trte_errno = EINVAL;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n \tif (eal_trace_init() < 0) {\n \t\trte_eal_init_alert(\"Cannot init trace\");\n \t\trte_errno = EFAULT;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n \tif (eal_option_device_parse()) {\n \t\trte_errno = ENODEV;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n@@ -683,7 +683,7 @@ static void rte_eal_init_alert(const char *msg)\n \tif (rte_bus_scan()) {\n \t\trte_eal_init_alert(\"Cannot scan the buses for devices\");\n \t\trte_errno = ENODEV;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n@@ -736,7 +736,7 @@ static void rte_eal_init_alert(const char *msg)\n \t\tif (ret < 0) {\n \t\t\trte_eal_init_alert(\"Cannot get hugepage information.\");\n \t\t\trte_errno = EACCES;\n-\t\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\t\treturn -1;\n \t\t}\n \t}\n@@ -915,8 +915,8 @@ static void 
rte_eal_init_alert(const char *msg)\n \tstatic uint32_t run_once;\n \tuint32_t has_run = 0;\n \n-\tif (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,\n-\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,\n+\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed)) {\n \t\tRTE_LOG(WARNING, EAL, \"Already called cleanup\\n\");\n \t\trte_errno = EALREADY;\n \t\treturn -1;\ndiff --git a/lib/eal/include/rte_epoll.h b/lib/eal/include/rte_epoll.h\nindex 01525f5..ae0cf20 100644\n--- a/lib/eal/include/rte_epoll.h\n+++ b/lib/eal/include/rte_epoll.h\n@@ -13,6 +13,7 @@\n \n #include <stdint.h>\n \n+#include <rte_stdatomic.h>\n \n #ifdef __cplusplus\n extern \"C\" {\n@@ -38,7 +39,7 @@ enum {\n \n /** interrupt epoll event obj, taken by epoll_event.ptr */\n struct rte_epoll_event {\n-\tuint32_t status;           /**< OUT: event status */\n+\tRTE_ATOMIC(uint32_t) status;           /**< OUT: event status */\n \tint fd;                    /**< OUT: event fd */\n \tint epfd;       /**< OUT: epoll instance the ev associated with */\n \tstruct rte_epoll_data epdata;\ndiff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c\nindex 5f4b2fb..57da058 100644\n--- a/lib/eal/linux/eal.c\n+++ b/lib/eal/linux/eal.c\n@@ -967,7 +967,7 @@ static void rte_eal_init_alert(const char *msg)\n rte_eal_init(int argc, char **argv)\n {\n \tint i, fctret, ret;\n-\tstatic uint32_t run_once;\n+\tstatic RTE_ATOMIC(uint32_t) run_once;\n \tuint32_t has_run = 0;\n \tchar cpuset[RTE_CPU_AFFINITY_STR_LEN];\n \tchar thread_name[RTE_THREAD_NAME_SIZE];\n@@ -983,8 +983,8 @@ static void rte_eal_init_alert(const char *msg)\n \t\treturn -1;\n \t}\n \n-\tif (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,\n-\t\t\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,\n+\t\t\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed)) {\n \t\trte_eal_init_alert(\"already called initialization.\");\n \t\trte_errno = EALREADY;\n \t\treturn -1;\n@@ -1008,14 +1008,14 @@ static void rte_eal_init_alert(const char *msg)\n \tif (fctret < 0) {\n \t\trte_eal_init_alert(\"Invalid 'command line' arguments.\");\n \t\trte_errno = EINVAL;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n \tif (eal_plugins_init() < 0) {\n \t\trte_eal_init_alert(\"Cannot init plugins\");\n \t\trte_errno = EINVAL;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n@@ -1027,7 +1027,7 @@ static void rte_eal_init_alert(const char *msg)\n \n \tif (eal_option_device_parse()) {\n \t\trte_errno = ENODEV;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n@@ -1061,7 +1061,7 @@ static void rte_eal_init_alert(const char *msg)\n \tif (rte_bus_scan()) {\n \t\trte_eal_init_alert(\"Cannot scan the buses for devices\");\n \t\trte_errno = ENODEV;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n@@ -1125,7 +1125,7 @@ static void rte_eal_init_alert(const char *msg)\n \t\tif (ret < 0) {\n \t\t\trte_eal_init_alert(\"Cannot get hugepage information.\");\n \t\t\trte_errno = EACCES;\n-\t\t\t__atomic_store_n(&run_once, 0, 
__ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\t\treturn -1;\n \t\t}\n \t}\n@@ -1150,7 +1150,7 @@ static void rte_eal_init_alert(const char *msg)\n \t\t\t internal_conf->syslog_facility) < 0) {\n \t\trte_eal_init_alert(\"Cannot init logging.\");\n \t\trte_errno = ENOMEM;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n@@ -1158,7 +1158,7 @@ static void rte_eal_init_alert(const char *msg)\n \tif (rte_eal_vfio_setup() < 0) {\n \t\trte_eal_init_alert(\"Cannot init VFIO\");\n \t\trte_errno = EAGAIN;\n-\t\t__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n #endif\n@@ -1345,11 +1345,11 @@ static void rte_eal_init_alert(const char *msg)\n int\n rte_eal_cleanup(void)\n {\n-\tstatic uint32_t run_once;\n+\tstatic RTE_ATOMIC(uint32_t) run_once;\n \tuint32_t has_run = 0;\n \n-\tif (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,\n-\t\t\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,\n+\t\t\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed)) {\n \t\tRTE_LOG(WARNING, EAL, \"Already called cleanup\\n\");\n \t\trte_errno = EALREADY;\n \t\treturn -1;\ndiff --git a/lib/eal/linux/eal_interrupts.c b/lib/eal/linux/eal_interrupts.c\nindex 24fff3d..d4919df 100644\n--- a/lib/eal/linux/eal_interrupts.c\n+++ b/lib/eal/linux/eal_interrupts.c\n@@ -1266,9 +1266,9 @@ struct rte_intr_source {\n \t\t * ordering below acting as a lock to synchronize\n \t\t * the event data updating.\n \t\t */\n-\t\tif (!rev || !__atomic_compare_exchange_n(&rev->status,\n-\t\t\t\t    &valid_status, RTE_EPOLL_EXEC, 0,\n-\t\t\t\t    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n+\t\tif (!rev || !rte_atomic_compare_exchange_strong_explicit(&rev->status,\n+\t\t\t\t    &valid_status, RTE_EPOLL_EXEC,\n+\t\t\t\t    rte_memory_order_acquire, rte_memory_order_relaxed))\n \t\t\tcontinue;\n \n \t\tevents[count].status        = RTE_EPOLL_VALID;\n@@ -1283,8 +1283,8 @@ struct rte_intr_source {\n \t\t/* the status update should be observed after\n \t\t * the other fields change.\n \t\t */\n-\t\t__atomic_store_n(&rev->status, RTE_EPOLL_VALID,\n-\t\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&rev->status, RTE_EPOLL_VALID,\n+\t\t\t\trte_memory_order_release);\n \t\tcount++;\n \t}\n \treturn count;\n@@ -1374,10 +1374,10 @@ struct rte_intr_source {\n {\n \tuint32_t valid_status = RTE_EPOLL_VALID;\n \n-\twhile (!__atomic_compare_exchange_n(&ev->status, &valid_status,\n-\t\t    RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {\n-\t\twhile (__atomic_load_n(&ev->status,\n-\t\t\t\t__ATOMIC_RELAXED) != RTE_EPOLL_VALID)\n+\twhile (!rte_atomic_compare_exchange_strong_explicit(&ev->status, &valid_status,\n+\t\t    RTE_EPOLL_INVALID, rte_memory_order_acquire, rte_memory_order_relaxed)) {\n+\t\twhile (rte_atomic_load_explicit(&ev->status,\n+\t\t\t\trte_memory_order_relaxed) != RTE_EPOLL_VALID)\n \t\t\trte_pause();\n \t\tvalid_status = RTE_EPOLL_VALID;\n \t}\n@@ -1402,8 +1402,8 @@ struct rte_intr_source {\n \t\tepfd = rte_intr_tls_epfd();\n \n \tif (op == EPOLL_CTL_ADD) {\n-\t\t__atomic_store_n(&event->status, RTE_EPOLL_VALID,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&event->status, RTE_EPOLL_VALID,\n+\t\t\t\trte_memory_order_relaxed);\n \t\tevent->fd = fd;  /* ignore fd in event */\n \t\tevent->epfd 
= epfd;\n \t\tev.data.ptr = (void *)event;\n@@ -1415,13 +1415,13 @@ struct rte_intr_source {\n \t\t\top, fd, strerror(errno));\n \t\tif (op == EPOLL_CTL_ADD)\n \t\t\t/* rollback status when CTL_ADD fail */\n-\t\t\t__atomic_store_n(&event->status, RTE_EPOLL_INVALID,\n-\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&event->status, RTE_EPOLL_INVALID,\n+\t\t\t\t\trte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \n-\tif (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,\n-\t\t\t__ATOMIC_RELAXED) != RTE_EPOLL_INVALID)\n+\tif (op == EPOLL_CTL_DEL && rte_atomic_load_explicit(&event->status,\n+\t\t\trte_memory_order_relaxed) != RTE_EPOLL_INVALID)\n \t\teal_epoll_data_safe_free(event);\n \n \treturn 0;\n@@ -1450,8 +1450,8 @@ struct rte_intr_source {\n \tcase RTE_INTR_EVENT_ADD:\n \t\tepfd_op = EPOLL_CTL_ADD;\n \t\trev = rte_intr_elist_index_get(intr_handle, efd_idx);\n-\t\tif (__atomic_load_n(&rev->status,\n-\t\t\t\t__ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {\n+\t\tif (rte_atomic_load_explicit(&rev->status,\n+\t\t\t\trte_memory_order_relaxed) != RTE_EPOLL_INVALID) {\n \t\t\tRTE_LOG(INFO, EAL, \"Event already been added.\\n\");\n \t\t\treturn -EEXIST;\n \t\t}\n@@ -1474,8 +1474,8 @@ struct rte_intr_source {\n \tcase RTE_INTR_EVENT_DEL:\n \t\tepfd_op = EPOLL_CTL_DEL;\n \t\trev = rte_intr_elist_index_get(intr_handle, efd_idx);\n-\t\tif (__atomic_load_n(&rev->status,\n-\t\t\t\t__ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {\n+\t\tif (rte_atomic_load_explicit(&rev->status,\n+\t\t\t\trte_memory_order_relaxed) == RTE_EPOLL_INVALID) {\n \t\t\tRTE_LOG(INFO, EAL, \"Event does not exist.\\n\");\n \t\t\treturn -EPERM;\n \t\t}\n@@ -1500,8 +1500,8 @@ struct rte_intr_source {\n \n \tfor (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++) {\n \t\trev = rte_intr_elist_index_get(intr_handle, i);\n-\t\tif (__atomic_load_n(&rev->status,\n-\t\t\t\t__ATOMIC_RELAXED) == RTE_EPOLL_INVALID)\n+\t\tif (rte_atomic_load_explicit(&rev->status,\n+\t\t\t\trte_memory_order_relaxed) == RTE_EPOLL_INVALID)\n \t\t\tcontinue;\n \t\tif (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {\n \t\t\t/* force free if the entry valid */\ndiff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h\nindex 7382412..645c713 100644\n--- a/lib/eal/ppc/include/rte_atomic.h\n+++ b/lib/eal/ppc/include/rte_atomic.h\n@@ -48,7 +48,7 @@\n static inline int\n rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)\n {\n-\treturn __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,\n+\treturn rte_atomic_compare_exchange_strong_explicit(dst, &exp, src, rte_memory_order_acquire,\n \t\trte_memory_order_acquire) ? 1 : 0;\n }\n \n@@ -90,7 +90,7 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)\n static inline int\n rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)\n {\n-\treturn __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,\n+\treturn rte_atomic_compare_exchange_strong_explicit(dst, &exp, src, rte_memory_order_acquire,\n \t\trte_memory_order_acquire) ? 1 : 0;\n }\n \n@@ -132,7 +132,7 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)\n static inline int\n rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)\n {\n-\treturn __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,\n+\treturn rte_atomic_compare_exchange_strong_explicit(dst, &exp, src, rte_memory_order_acquire,\n \t\trte_memory_order_acquire) ? 
1 : 0;\n }\n \ndiff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c\nindex acf6484..145ac4b 100644\n--- a/lib/eal/windows/rte_thread.c\n+++ b/lib/eal/windows/rte_thread.c\n@@ -9,6 +9,7 @@\n #include <rte_eal.h>\n #include <rte_common.h>\n #include <rte_errno.h>\n+#include <rte_stdatomic.h>\n #include <rte_thread.h>\n \n #include \"eal_windows.h\"\n@@ -19,7 +20,7 @@ struct eal_tls_key {\n \n struct thread_routine_ctx {\n \trte_thread_func thread_func;\n-\tbool thread_init_failed;\n+\tRTE_ATOMIC(bool) thread_init_failed;\n \tvoid *routine_args;\n };\n \n@@ -168,7 +169,8 @@ struct thread_routine_ctx {\n thread_func_wrapper(void *arg)\n {\n \tstruct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;\n-\tconst bool thread_exit = __atomic_load_n(&ctx.thread_init_failed, __ATOMIC_ACQUIRE);\n+\tconst bool thread_exit = rte_atomic_load_explicit(\n+\t\t&ctx.thread_init_failed, rte_memory_order_acquire);\n \n \tfree(arg);\n \n@@ -237,7 +239,7 @@ struct thread_routine_ctx {\n \t}\n \n resume_thread:\n-\t__atomic_store_n(&ctx->thread_init_failed, thread_exit, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&ctx->thread_init_failed, thread_exit, rte_memory_order_release);\n \n \tif (ResumeThread(thread_handle) == (DWORD)-1) {\n \t\tret = thread_log_last_error(\"ResumeThread()\");\n",
    "prefixes": [
        "v2",
        "03/19"
    ]
}
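
The "content" field above summarizes the change as a mechanical conversion from the GCC __atomic_xxx builtins to the rte_atomic_xxx API, and the "diff" field shows it applied across lib/eal. As a hedged illustration of the per-call-site rewrite (a hypothetical counter, not code taken from the patch):

/* Sketch of the conversion described above: the variable gains the
 * RTE_ATOMIC() qualifier and each __atomic_* builtin becomes the
 * matching rte_atomic_*_explicit call taking an rte_memory_order_*
 * argument. The nb_events counter is invented for illustration.
 */
#include <stdint.h>

#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) nb_events; /* was: static uint32_t nb_events; */

static inline void
event_record(void)
{
	/* was: __atomic_fetch_add(&nb_events, 1, __ATOMIC_RELAXED); */
	rte_atomic_fetch_add_explicit(&nb_events, 1, rte_memory_order_relaxed);
}

static inline uint32_t
event_count(void)
{
	/* was: __atomic_load_n(&nb_events, __ATOMIC_ACQUIRE); */
	return rte_atomic_load_explicit(&nb_events, rte_memory_order_acquire);
}

When DPDK is built with C11 stdatomic support enabled, RTE_ATOMIC() is expected to resolve to _Atomic and the rte_atomic_*_explicit calls to their C11 counterparts, falling back to the GCC builtins otherwise; that choice appears to be why the series title calls the API "optional".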