get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.

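As a quick illustration of the read-only workflow described above, here is a minimal client-side sketch that fetches this patch over the REST API and prints a few of the fields shown in the example response below. It assumes only the Python standard library and the usual Django REST Framework convention that "?format=json" selects the JSON renderer instead of the browsable HTML view; the host and patch id are taken from the response itself.

import json
import urllib.request

# Fetch one patch object from the Patchwork REST API (read-only GET).
url = "http://patches.dpdk.org/api/patches/138917/?format=json"

with urllib.request.urlopen(url) as resp:
    patch = json.load(resp)

# A few of the fields documented in the example response below.
print(patch["name"])   # e.g. "[v3,41/45] app/test: use rte stdatomic API"
print(patch["state"])  # e.g. "superseded"
print(patch["mbox"])   # URL of the raw mbox, suitable for `git am`
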
GET /api/patches/138917/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 138917,
    "url": "http://patches.dpdk.org/api/patches/138917/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-42-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1711579078-10624-42-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1711579078-10624-42-git-send-email-roretzla@linux.microsoft.com",
    "date": "2024-03-27T22:37:54",
    "name": "[v3,41/45] app/test: use rte stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "d3fd2a0fa3de0e512e3e1c3afe3902d65f40e636",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-42-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 31633,
            "url": "http://patches.dpdk.org/api/series/31633/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31633",
            "date": "2024-03-27T22:37:13",
            "name": "use stdatomic API",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/31633/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/138917/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/138917/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D7CDB43D55;\n\tWed, 27 Mar 2024 23:42:06 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2F9A242E4B;\n\tWed, 27 Mar 2024 23:38:55 +0100 (CET)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 61A5142D27\n for <dev@dpdk.org>; Wed, 27 Mar 2024 23:38:10 +0100 (CET)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id BBEF220E6F38; Wed, 27 Mar 2024 15:38:00 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com BBEF220E6F38",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1711579082;\n bh=yipTH2fzPByvru+YLpwgpOFk3q55g0RhfE+1UUqNVKg=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=AdNeMEYmRt3ROQt16Qg3fbBfREeYvQTxIG664DC8P1WiKL/Zh0Cd6XZYCrr87E96h\n X3+UuM8FEJnwij2BUwe4l/HWk53vBXElRoYOX/bEWcw7UZAdwI7WyvE99fXkdRACNp\n sfEtM50TQeLIMjuOg/NEDwuIAvqvO0p0JWOQzEFM=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>,\n\t=?utf-8?q?Morten_Br=C3=B8rup?= <mb@smartsharesystems.com>,\n Abdullah Sevincer <abdullah.sevincer@intel.com>,\n Ajit Khaparde <ajit.khaparde@broadcom.com>, Alok Prasad <palok@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Anoob Joseph <anoobj@marvell.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Byron Marohn <byron.marohn@intel.com>, Chenbo Xia <chenbox@nvidia.com>,\n Chengwen Feng <fengchengwen@huawei.com>,\n Ciara Loftus <ciara.loftus@intel.com>, Ciara Power <ciara.power@intel.com>,\n Dariusz Sosnowski <dsosnowski@nvidia.com>, David Hunt <david.hunt@intel.com>,\n Devendra Singh Rawat <dsinghrawat@marvell.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Guoyang Zhou <zhouguoyang@huawei.com>, Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jakub Grajciar <jgrajcia@cisco.com>, Jerin Jacob <jerinj@marvell.com>,\n Jeroen de Borst <jeroendb@google.com>, Jian Wang <jianwang@trustnetic.com>,\n Jiawen Wu <jiawenwu@trustnetic.com>, Jie Hai <haijie1@huawei.com>,\n Jingjing Wu <jingjing.wu@intel.com>, Joshua Washington <joshwash@google.com>,\n Joyce Kong <joyce.kong@arm.com>, Junfeng Guo <junfeng.guo@intel.com>,\n Kevin Laatz <kevin.laatz@intel.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Liang Ma <liangma@liangbit.com>, Long Li <longli@microsoft.com>,\n Maciej Czekaj <mczekaj@marvell.com>, Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>, Ori Kam <orika@nvidia.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Peter Mccarthy <peter.mccarthy@intel.com>,\n Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>,\n Reshma Pattan <reshma.pattan@intel.com>, Rosen Xu <rosen.xu@intel.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, Rushil Gupta <rushilg@google.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Somnath Kotur <somnath.kotur@broadcom.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Sunil Uttarwar <sunilprakashrao.uttarwar@amd.com>,\n Tetsuya Mukawa <mtetsuyah@gmail.com>, Vamsi Attunuru <vattunuru@marvell.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>,\n Yipeng Wang <yipeng1.wang@intel.com>, Yisen Zhuang <yisen.zhuang@huawei.com>,\n Yuying Zhang <Yuying.Zhang@intel.com>, Yuying Zhang <yuying.zhang@intel.com>,\n Ziyang Xuan <xuanziyang2@huawei.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 41/45] app/test: use rte stdatomic API",
        "Date": "Wed, 27 Mar 2024 15:37:54 -0700",
        "Message-Id": "<1711579078-10624-42-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>\n <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional rte stdatomic API.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\nAcked-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n app/test/test_bpf.c                    |  46 ++++++++-----\n app/test/test_distributor.c            | 114 ++++++++++++++++-----------------\n app/test/test_distributor_perf.c       |   4 +-\n app/test/test_func_reentrancy.c        |  28 ++++----\n app/test/test_hash_multiwriter.c       |  16 ++---\n app/test/test_hash_readwrite.c         |  74 ++++++++++-----------\n app/test/test_hash_readwrite_lf_perf.c |  88 ++++++++++++-------------\n app/test/test_lcores.c                 |  25 ++++----\n app/test/test_lpm_perf.c               |  14 ++--\n app/test/test_mcslock.c                |  12 ++--\n app/test/test_mempool_perf.c           |   9 +--\n app/test/test_pflock.c                 |  13 ++--\n app/test/test_pmd_perf.c               |  10 +--\n app/test/test_rcu_qsbr_perf.c          | 114 +++++++++++++++++----------------\n app/test/test_ring_perf.c              |  11 ++--\n app/test/test_ring_stress_impl.h       |  10 +--\n app/test/test_rwlock.c                 |   9 +--\n app/test/test_seqlock.c                |   6 +-\n app/test/test_service_cores.c          |  24 +++----\n app/test/test_spinlock.c               |   9 +--\n app/test/test_stack_perf.c             |  12 ++--\n app/test/test_threads.c                |  33 +++++-----\n app/test/test_ticketlock.c             |   9 +--\n app/test/test_timer.c                  |  31 +++++----\n 24 files changed, 378 insertions(+), 343 deletions(-)",
    "diff": "diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c\nindex 53e3a31..2e43442 100644\n--- a/app/test/test_bpf.c\n+++ b/app/test/test_bpf.c\n@@ -39,8 +39,8 @@\n  */\n \n struct dummy_offset {\n-\tuint64_t u64;\n-\tuint32_t u32;\n+\tRTE_ATOMIC(uint64_t) u64;\n+\tRTE_ATOMIC(uint32_t) u32;\n \tuint16_t u16;\n \tuint8_t  u8;\n };\n@@ -1581,32 +1581,46 @@ struct bpf_test {\n \tmemset(&dfe, 0, sizeof(dfe));\n \n \trv = 1;\n-\t__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,\n+\t    rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,\n+\t    rte_memory_order_relaxed);\n \n \trv = -1;\n-\t__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,\n+\t    rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,\n+\t    rte_memory_order_relaxed);\n \n \trv = (int32_t)TEST_FILL_1;\n-\t__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,\n+\t    rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,\n+\t    rte_memory_order_relaxed);\n \n \trv = TEST_MUL_1;\n-\t__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,\n+\t    rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,\n+\t    rte_memory_order_relaxed);\n \n \trv = TEST_MUL_2;\n-\t__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,\n+\t    rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,\n+\t    rte_memory_order_relaxed);\n \n \trv = TEST_JCC_2;\n-\t__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,\n+\t    rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,\n+\t    rte_memory_order_relaxed);\n \n \trv = TEST_JCC_3;\n-\t__atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,\n+\t    rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,\n+\t    rte_memory_order_relaxed);\n \n \treturn cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));\n }\ndiff --git a/app/test/test_distributor.c b/app/test/test_distributor.c\nindex d2037b7..df871e3 100644\n--- a/app/test/test_distributor.c\n+++ b/app/test/test_distributor.c\n@@ -47,14 +47,14 @@ struct worker_params {\n struct worker_params worker_params;\n \n /* statics - all zero-initialized by default */\n-static volatile int quit;      /**< general quit variable for all threads */\n-static volatile int zero_quit; /**< var for when we just want thr0 to quit*/\n-static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/\n-static volatile unsigned worker_idx;\n-static volatile 
unsigned zero_idx;\n+static volatile RTE_ATOMIC(int) quit;      /**< general quit variable for all threads */\n+static volatile RTE_ATOMIC(int) zero_quit; /**< var for when we just want thr0 to quit*/\n+static volatile RTE_ATOMIC(int) zero_sleep; /**< thr0 has quit basic loop and is sleeping*/\n+static volatile RTE_ATOMIC(unsigned int) worker_idx;\n+static volatile RTE_ATOMIC(unsigned int) zero_idx;\n \n struct worker_stats {\n-\tvolatile unsigned handled_packets;\n+\tvolatile RTE_ATOMIC(unsigned int) handled_packets;\n } __rte_cache_aligned;\n struct worker_stats worker_stats[RTE_MAX_LCORE];\n \n@@ -66,8 +66,8 @@ struct worker_stats {\n {\n \tunsigned i, count = 0;\n \tfor (i = 0; i < worker_idx; i++)\n-\t\tcount += __atomic_load_n(&worker_stats[i].handled_packets,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\tcount += rte_atomic_load_explicit(&worker_stats[i].handled_packets,\n+\t\t\t\trte_memory_order_relaxed);\n \treturn count;\n }\n \n@@ -77,8 +77,8 @@ struct worker_stats {\n {\n \tunsigned int i;\n \tfor (i = 0; i < RTE_MAX_LCORE; i++)\n-\t\t__atomic_store_n(&worker_stats[i].handled_packets, 0,\n-\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&worker_stats[i].handled_packets, 0,\n+\t\t\trte_memory_order_relaxed);\n }\n \n /* this is the basic worker function for sanity test\n@@ -91,17 +91,17 @@ struct worker_stats {\n \tstruct worker_params *wp = arg;\n \tstruct rte_distributor *db = wp->dist;\n \tunsigned int num;\n-\tunsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);\n+\tunsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);\n \n \tnum = rte_distributor_get_pkt(db, id, buf, NULL, 0);\n \twhile (!quit) {\n-\t\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\t\trte_memory_order_relaxed);\n \t\tnum = rte_distributor_get_pkt(db, id,\n \t\t\t\tbuf, buf, num);\n \t}\n-\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t__ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\trte_memory_order_relaxed);\n \trte_distributor_return_pkt(db, id, buf, num);\n \treturn 0;\n }\n@@ -162,8 +162,8 @@ struct worker_stats {\n \n \tfor (i = 0; i < rte_lcore_count() - 1; i++)\n \t\tprintf(\"Worker %u handled %u packets\\n\", i,\n-\t\t\t__atomic_load_n(&worker_stats[i].handled_packets,\n-\t\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\trte_atomic_load_explicit(&worker_stats[i].handled_packets,\n+\t\t\t\t\trte_memory_order_relaxed));\n \tprintf(\"Sanity test with all zero hashes done.\\n\");\n \n \t/* pick two flows and check they go correctly */\n@@ -189,9 +189,9 @@ struct worker_stats {\n \n \t\tfor (i = 0; i < rte_lcore_count() - 1; i++)\n \t\t\tprintf(\"Worker %u handled %u packets\\n\", i,\n-\t\t\t\t__atomic_load_n(\n+\t\t\t\trte_atomic_load_explicit(\n \t\t\t\t\t&worker_stats[i].handled_packets,\n-\t\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\t\t\trte_memory_order_relaxed));\n \t\tprintf(\"Sanity test with two hash values done\\n\");\n \t}\n \n@@ -218,8 +218,8 @@ struct worker_stats {\n \n \tfor (i = 0; i < rte_lcore_count() - 1; i++)\n \t\tprintf(\"Worker %u handled %u packets\\n\", i,\n-\t\t\t__atomic_load_n(&worker_stats[i].handled_packets,\n-\t\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\trte_atomic_load_explicit(&worker_stats[i].handled_packets,\n+\t\t\t\t\trte_memory_order_relaxed));\n \tprintf(\"Sanity test with non-zero hashes done\\n\");\n \n \trte_mempool_put_bulk(p, (void *)bufs, 
BURST);\n@@ -311,18 +311,18 @@ struct worker_stats {\n \tstruct rte_distributor *d = wp->dist;\n \tunsigned int i;\n \tunsigned int num;\n-\tunsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);\n+\tunsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);\n \n \tnum = rte_distributor_get_pkt(d, id, buf, NULL, 0);\n \twhile (!quit) {\n-\t\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\t\trte_memory_order_relaxed);\n \t\tfor (i = 0; i < num; i++)\n \t\t\trte_pktmbuf_free(buf[i]);\n \t\tnum = rte_distributor_get_pkt(d, id, buf, NULL, 0);\n \t}\n-\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t__ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\trte_memory_order_relaxed);\n \trte_distributor_return_pkt(d, id, buf, num);\n \treturn 0;\n }\n@@ -381,51 +381,51 @@ struct worker_stats {\n \tunsigned int num;\n \tunsigned int zero_id = 0;\n \tunsigned int zero_unset;\n-\tconst unsigned int id = __atomic_fetch_add(&worker_idx, 1,\n-\t\t\t__ATOMIC_RELAXED);\n+\tconst unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1,\n+\t\t\trte_memory_order_relaxed);\n \n \tnum = rte_distributor_get_pkt(d, id, buf, NULL, 0);\n \n \tif (num > 0) {\n \t\tzero_unset = RTE_MAX_LCORE;\n-\t\t__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,\n-\t\t\tfalse, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);\n+\t\trte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,\n+\t\t\trte_memory_order_acq_rel, rte_memory_order_acquire);\n \t}\n-\tzero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);\n+\tzero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);\n \n \t/* wait for quit single globally, or for worker zero, wait\n \t * for zero_quit */\n \twhile (!quit && !(id == zero_id && zero_quit)) {\n-\t\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\t\trte_memory_order_relaxed);\n \t\tnum = rte_distributor_get_pkt(d, id, buf, NULL, 0);\n \n \t\tif (num > 0) {\n \t\t\tzero_unset = RTE_MAX_LCORE;\n-\t\t\t__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,\n-\t\t\t\tfalse, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);\n+\t\t\trte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,\n+\t\t\t\trte_memory_order_acq_rel, rte_memory_order_acquire);\n \t\t}\n-\t\tzero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);\n+\t\tzero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);\n \t}\n \n-\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t__ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\trte_memory_order_relaxed);\n \tif (id == zero_id) {\n \t\trte_distributor_return_pkt(d, id, NULL, 0);\n \n \t\t/* for worker zero, allow it to restart to pick up last packet\n \t\t * when all workers are shutting down.\n \t\t */\n-\t\t__atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&zero_sleep, 1, rte_memory_order_release);\n \t\twhile (zero_quit)\n \t\t\tusleep(100);\n-\t\t__atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&zero_sleep, 0, rte_memory_order_release);\n \n \t\tnum = rte_distributor_get_pkt(d, id, buf, NULL, 0);\n \n \t\twhile (!quit) 
{\n-\t\t\t__atomic_fetch_add(&worker_stats[id].handled_packets,\n-\t\t\t\t\tnum, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets,\n+\t\t\t\t\tnum, rte_memory_order_relaxed);\n \t\t\tnum = rte_distributor_get_pkt(d, id, buf, NULL, 0);\n \t\t}\n \t}\n@@ -491,17 +491,17 @@ struct worker_stats {\n \n \t/* flush the distributor */\n \trte_distributor_flush(d);\n-\twhile (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))\n+\twhile (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))\n \t\trte_distributor_flush(d);\n \n \tzero_quit = 0;\n-\twhile (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))\n+\twhile (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))\n \t\trte_delay_us(100);\n \n \tfor (i = 0; i < rte_lcore_count() - 1; i++)\n \t\tprintf(\"Worker %u handled %u packets\\n\", i,\n-\t\t\t__atomic_load_n(&worker_stats[i].handled_packets,\n-\t\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\trte_atomic_load_explicit(&worker_stats[i].handled_packets,\n+\t\t\t\t\trte_memory_order_relaxed));\n \n \tif (total_packet_count() != BURST * 2) {\n \t\tprintf(\"Line %d: Error, not all packets flushed. \"\n@@ -560,18 +560,18 @@ struct worker_stats {\n \t/* flush the distributor */\n \trte_distributor_flush(d);\n \n-\twhile (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))\n+\twhile (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))\n \t\trte_distributor_flush(d);\n \n \tzero_quit = 0;\n \n-\twhile (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))\n+\twhile (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))\n \t\trte_delay_us(100);\n \n \tfor (i = 0; i < rte_lcore_count() - 1; i++)\n \t\tprintf(\"Worker %u handled %u packets\\n\", i,\n-\t\t\t__atomic_load_n(&worker_stats[i].handled_packets,\n-\t\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\trte_atomic_load_explicit(&worker_stats[i].handled_packets,\n+\t\t\t\t\trte_memory_order_relaxed));\n \n \tif (total_packet_count() != BURST) {\n \t\tprintf(\"Line %d: Error, not all packets flushed. \"\n@@ -596,18 +596,18 @@ struct worker_stats {\n \tstruct worker_params *wp = arg;\n \tstruct rte_distributor *db = wp->dist;\n \tunsigned int num, i;\n-\tunsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);\n+\tunsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);\n \tnum = rte_distributor_get_pkt(db, id, buf, NULL, 0);\n \twhile (!quit) {\n-\t\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\t\trte_memory_order_relaxed);\n \t\tfor (i = 0; i < num; i++)\n \t\t\t*seq_field(buf[i]) += id + 1;\n \t\tnum = rte_distributor_get_pkt(db, id,\n \t\t\t\tbuf, buf, num);\n \t}\n-\t__atomic_fetch_add(&worker_stats[id].handled_packets, num,\n-\t\t\t__ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,\n+\t\t\trte_memory_order_relaxed);\n \trte_distributor_return_pkt(db, id, buf, num);\n \treturn 0;\n }\n@@ -679,8 +679,8 @@ struct worker_stats {\n \n \tfor (i = 0; i < rte_lcore_count() - 1; i++)\n \t\tprintf(\"Worker %u handled %u packets\\n\", i,\n-\t\t\t__atomic_load_n(&worker_stats[i].handled_packets,\n-\t\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\trte_atomic_load_explicit(&worker_stats[i].handled_packets,\n+\t\t\t\t\trte_memory_order_relaxed));\n \n \t/* Sort returned packets by sent order (sequence numbers). 
*/\n \tfor (i = 0; i < buf_count; i++) {\ndiff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c\nindex ca86845..ba3cf26 100644\n--- a/app/test/test_distributor_perf.c\n+++ b/app/test/test_distributor_perf.c\n@@ -31,7 +31,7 @@\n \n /* static vars - zero initialized by default */\n static volatile int quit;\n-static volatile unsigned worker_idx;\n+static volatile RTE_ATOMIC(unsigned int) worker_idx;\n \n struct worker_stats {\n \tvolatile unsigned handled_packets;\n@@ -121,7 +121,7 @@ struct worker_stats {\n \tstruct rte_distributor *d = arg;\n \tunsigned int num = 0;\n \tint i;\n-\tunsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);\n+\tunsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);\n \tstruct rte_mbuf *buf[8] __rte_cache_aligned;\n \n \tfor (i = 0; i < 8; i++)\ndiff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c\nindex 9296de2..bae39af 100644\n--- a/app/test/test_func_reentrancy.c\n+++ b/app/test/test_func_reentrancy.c\n@@ -53,12 +53,13 @@\n \n #define MAX_LCORES\t(rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))\n \n-static uint32_t obj_count;\n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) obj_count;\n+static RTE_ATOMIC(uint32_t) synchro;\n \n #define WAIT_SYNCHRO_FOR_WORKERS()   do { \\\n \tif (lcore_self != rte_get_main_lcore())                  \\\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \\\n+\t\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \\\n+\t\t    rte_memory_order_relaxed); \\\n } while(0)\n \n /*\n@@ -71,7 +72,8 @@\n \n \tWAIT_SYNCHRO_FOR_WORKERS();\n \n-\t__atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silent the check in the caller */\n+\t/* silent the check in the caller */\n+\trte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);\n \tif (rte_eal_init(0, NULL) != -1)\n \t\treturn -1;\n \n@@ -113,7 +115,7 @@\n \tfor (i = 0; i < MAX_ITER_ONCE; i++) {\n \t\trp = rte_ring_create(\"fr_test_once\", 4096, SOCKET_ID_ANY, 0);\n \t\tif (rp != NULL)\n-\t\t\t__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);\n \t}\n \n \t/* create/lookup new ring several times */\n@@ -178,7 +180,7 @@\n \t\t\t\t\tmy_obj_init, NULL,\n \t\t\t\t\tSOCKET_ID_ANY, 0);\n \t\tif (mp != NULL)\n-\t\t\t__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);\n \t}\n \n \t/* create/lookup new ring several times */\n@@ -244,7 +246,7 @@\n \tfor (i = 0; i < MAX_ITER_ONCE; i++) {\n \t\thandle = rte_hash_create(&hash_params);\n \t\tif (handle != NULL)\n-\t\t\t__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);\n \t}\n \n \t/* create multiple times simultaneously */\n@@ -311,7 +313,7 @@\n \tfor (i = 0; i < MAX_ITER_ONCE; i++) {\n \t\thandle = rte_fbk_hash_create(&fbk_params);\n \t\tif (handle != NULL)\n-\t\t\t__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);\n \t}\n \n \t/* create multiple fbk tables simultaneously */\n@@ -376,7 +378,7 @@\n \tfor (i = 0; i < MAX_ITER_ONCE; i++) {\n \t\tlpm = rte_lpm_create(\"fr_test_once\",  SOCKET_ID_ANY, &config);\n \t\tif (lpm != NULL)\n-\t\t\t__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);\n \t}\n \n \t/* 
create multiple fbk tables simultaneously */\n@@ -437,8 +439,8 @@ struct test_case test_cases[] = {\n \tif (pt_case->func == NULL)\n \t\treturn -1;\n \n-\t__atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \n \tcores = RTE_MIN(rte_lcore_count(), MAX_LCORES);\n \tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n@@ -448,7 +450,7 @@ struct test_case test_cases[] = {\n \t\trte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);\n \t}\n \n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \n \tif (pt_case->func(pt_case->arg) < 0)\n \t\tret = -1;\n@@ -463,7 +465,7 @@ struct test_case test_cases[] = {\n \t\t\tpt_case->clean(lcore_id);\n \t}\n \n-\tcount = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);\n+\tcount = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);\n \tif (count != 1) {\n \t\tprintf(\"%s: common object allocated %d times (should be 1)\\n\",\n \t\t\tpt_case->name, count);\ndiff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c\nindex ed9dd41..33d3147 100644\n--- a/app/test/test_hash_multiwriter.c\n+++ b/app/test/test_hash_multiwriter.c\n@@ -43,8 +43,8 @@ struct {\n const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;\n uint32_t rounded_nb_total_tsx_insertion;\n \n-static uint64_t gcycles;\n-static uint64_t ginsertions;\n+static RTE_ATOMIC(uint64_t) gcycles;\n+static RTE_ATOMIC(uint64_t) ginsertions;\n \n static int use_htm;\n \n@@ -84,8 +84,8 @@ struct {\n \t}\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);\n \n \tfor (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)\n \t\ttbl_multiwriter_test_params.keys[i]\n@@ -166,8 +166,8 @@ struct {\n \n \ttbl_multiwriter_test_params.found = found;\n \n-\t__atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);\n \n \t/* Get list of enabled cores */\n \ti = 0;\n@@ -233,8 +233,8 @@ struct {\n \tprintf(\"No key corrupted during multiwriter insertion.\\n\");\n \n \tunsigned long long int cycles_per_insertion =\n-\t\t__atomic_load_n(&gcycles, __ATOMIC_RELAXED)/\n-\t\t__atomic_load_n(&ginsertions, __ATOMIC_RELAXED);\n+\t\trte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed)/\n+\t\trte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);\n \n \tprintf(\" cycles per insertion: %llu\\n\", cycles_per_insertion);\n \ndiff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c\nindex 4997a01..1867376 100644\n--- a/app/test/test_hash_readwrite.c\n+++ b/app/test/test_hash_readwrite.c\n@@ -45,14 +45,14 @@ struct {\n \tstruct rte_hash *h;\n } tbl_rw_test_param;\n \n-static uint64_t gcycles;\n-static uint64_t ginsertions;\n+static RTE_ATOMIC(uint64_t) gcycles;\n+static RTE_ATOMIC(uint64_t) ginsertions;\n \n-static uint64_t gread_cycles;\n-static uint64_t gwrite_cycles;\n+static RTE_ATOMIC(uint64_t) gread_cycles;\n+static 
RTE_ATOMIC(uint64_t) gwrite_cycles;\n \n-static uint64_t greads;\n-static uint64_t gwrites;\n+static RTE_ATOMIC(uint64_t) greads;\n+static RTE_ATOMIC(uint64_t) gwrites;\n \n static int\n test_hash_readwrite_worker(__rte_unused void *arg)\n@@ -110,8 +110,8 @@ struct {\n \t}\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);\n \n \tfor (; i < offset + tbl_rw_test_param.num_insert; i++)\n \t\ttbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;\n@@ -209,8 +209,8 @@ struct {\n \tint worker_cnt = rte_lcore_count() - 1;\n \tuint32_t tot_insert = 0;\n \n-\t__atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);\n \n \tif (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)\n \t\tgoto err;\n@@ -269,8 +269,8 @@ struct {\n \tprintf(\"No key corrupted during read-write test.\\n\");\n \n \tunsigned long long int cycles_per_insertion =\n-\t\t__atomic_load_n(&gcycles, __ATOMIC_RELAXED) /\n-\t\t__atomic_load_n(&ginsertions, __ATOMIC_RELAXED);\n+\t\trte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed) /\n+\t\trte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);\n \n \tprintf(\"cycles per insertion and lookup: %llu\\n\", cycles_per_insertion);\n \n@@ -310,8 +310,8 @@ struct {\n \t}\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&greads, i, rte_memory_order_relaxed);\n \treturn 0;\n }\n \n@@ -344,9 +344,9 @@ struct {\n \t}\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,\n-\t\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&gwrites, tbl_rw_test_param.num_insert,\n+\t\t\t\t\t\t\trte_memory_order_relaxed);\n \treturn 0;\n }\n \n@@ -369,11 +369,11 @@ struct {\n \n \tuint64_t start = 0, end = 0;\n \n-\t__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n \n-\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);\n \n \tif (init_params(0, use_htm, 0, use_jhash) != 0)\n \t\tgoto err;\n@@ -430,10 +430,10 @@ struct {\n \t\tif (tot_worker_lcore < core_cnt[n] * 2)\n \t\t\tgoto finish;\n \n-\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n-\t\t__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n-\t\t__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&greads, 0, 
rte_memory_order_relaxed);\n+\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n+\t\trte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);\n+\t\trte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);\n \n \t\trte_hash_reset(tbl_rw_test_param.h);\n \n@@ -475,8 +475,8 @@ struct {\n \n \t\tif (reader_faster) {\n \t\t\tunsigned long long int cycles_per_insertion =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /\n-\t\t\t\t__atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /\n+\t\t\t\trte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\tperf_results->read_only[n] = cycles_per_insertion;\n \t\t\tprintf(\"Reader only: cycles per lookup: %llu\\n\",\n \t\t\t\t\t\t\tcycles_per_insertion);\n@@ -484,17 +484,17 @@ struct {\n \n \t\telse {\n \t\t\tunsigned long long int cycles_per_insertion =\n-\t\t\t\t__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /\n-\t\t\t\t__atomic_load_n(&gwrites, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /\n+\t\t\t\trte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);\n \t\t\tperf_results->write_only[n] = cycles_per_insertion;\n \t\t\tprintf(\"Writer only: cycles per writes: %llu\\n\",\n \t\t\t\t\t\t\tcycles_per_insertion);\n \t\t}\n \n-\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n-\t\t__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n-\t\t__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n+\t\trte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);\n+\t\trte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);\n \n \t\trte_hash_reset(tbl_rw_test_param.h);\n \n@@ -569,8 +569,8 @@ struct {\n \n \t\tif (reader_faster) {\n \t\t\tunsigned long long int cycles_per_insertion =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /\n-\t\t\t\t__atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /\n+\t\t\t\trte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\tperf_results->read_write_r[n] = cycles_per_insertion;\n \t\t\tprintf(\"Read-write cycles per lookup: %llu\\n\",\n \t\t\t\t\t\t\tcycles_per_insertion);\n@@ -578,8 +578,8 @@ struct {\n \n \t\telse {\n \t\t\tunsigned long long int cycles_per_insertion =\n-\t\t\t\t__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /\n-\t\t\t\t__atomic_load_n(&gwrites, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /\n+\t\t\t\trte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);\n \t\t\tperf_results->read_write_w[n] = cycles_per_insertion;\n \t\t\tprintf(\"Read-write cycles per writes: %llu\\n\",\n \t\t\t\t\t\t\tcycles_per_insertion);\ndiff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c\nindex 5d18850..4523985 100644\n--- a/app/test/test_hash_readwrite_lf_perf.c\n+++ b/app/test/test_hash_readwrite_lf_perf.c\n@@ -86,10 +86,10 @@ struct rwc_perf {\n \tstruct rte_hash *h;\n } tbl_rwc_test_param;\n \n-static uint64_t gread_cycles;\n-static uint64_t greads;\n-static uint64_t gwrite_cycles;\n-static uint64_t gwrites;\n+static RTE_ATOMIC(uint64_t) gread_cycles;\n+static RTE_ATOMIC(uint64_t) greads;\n+static 
RTE_ATOMIC(uint64_t) gwrite_cycles;\n+static RTE_ATOMIC(uint64_t) gwrites;\n \n static volatile uint8_t writer_done;\n \n@@ -651,8 +651,8 @@ struct rwc_perf {\n \t} while (!writer_done);\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&greads, read_cnt*loop_cnt, rte_memory_order_relaxed);\n \treturn 0;\n }\n \n@@ -724,8 +724,8 @@ struct rwc_perf {\n \n \t\t\tprintf(\"\\nNumber of readers: %u\\n\", rwc_core_cnt[n]);\n \n-\t\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n \n \t\t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\t\twriter_done = 0;\n@@ -742,8 +742,8 @@ struct rwc_perf {\n \t\t\t\t\tgoto err;\n \n \t\t\tunsigned long long cycles_per_lookup =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)\n-\t\t\t\t/ __atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)\n+\t\t\t\t/ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\trwc_perf_results->w_no_ks_r_hit[m][n]\n \t\t\t\t\t\t= cycles_per_lookup;\n \t\t\tprintf(\"Cycles per lookup: %llu\\n\", cycles_per_lookup);\n@@ -791,8 +791,8 @@ struct rwc_perf {\n \n \t\t\tprintf(\"\\nNumber of readers: %u\\n\", rwc_core_cnt[n]);\n \n-\t\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n \n \t\t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\t\twriter_done = 0;\n@@ -811,8 +811,8 @@ struct rwc_perf {\n \t\t\t\t\tgoto err;\n \n \t\t\tunsigned long long cycles_per_lookup =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)\n-\t\t\t\t/ __atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)\n+\t\t\t\t/ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\trwc_perf_results->w_no_ks_r_miss[m][n]\n \t\t\t\t\t\t= cycles_per_lookup;\n \t\t\tprintf(\"Cycles per lookup: %llu\\n\", cycles_per_lookup);\n@@ -861,8 +861,8 @@ struct rwc_perf {\n \n \t\t\tprintf(\"\\nNumber of readers: %u\\n\", rwc_core_cnt[n]);\n \n-\t\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n \n \t\t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\t\twriter_done = 0;\n@@ -884,8 +884,8 @@ struct rwc_perf {\n \t\t\t\t\tgoto err;\n \n \t\t\tunsigned long long cycles_per_lookup =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)\n-\t\t\t\t/ __atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)\n+\t\t\t\t/ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\trwc_perf_results->w_ks_r_hit_nsp[m][n]\n \t\t\t\t\t\t= cycles_per_lookup;\n \t\t\tprintf(\"Cycles per lookup: %llu\\n\", cycles_per_lookup);\n@@ -935,8 +935,8 @@ struct rwc_perf {\n \n 
\t\t\tprintf(\"\\nNumber of readers: %u\\n\", rwc_core_cnt[n]);\n \n-\t\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n \n \t\t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\t\twriter_done = 0;\n@@ -958,8 +958,8 @@ struct rwc_perf {\n \t\t\t\t\tgoto err;\n \n \t\t\tunsigned long long cycles_per_lookup =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)\n-\t\t\t\t/ __atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)\n+\t\t\t\t/ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\trwc_perf_results->w_ks_r_hit_sp[m][n]\n \t\t\t\t\t\t= cycles_per_lookup;\n \t\t\tprintf(\"Cycles per lookup: %llu\\n\", cycles_per_lookup);\n@@ -1007,8 +1007,8 @@ struct rwc_perf {\n \n \t\t\tprintf(\"\\nNumber of readers: %u\\n\", rwc_core_cnt[n]);\n \n-\t\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n \n \t\t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\t\twriter_done = 0;\n@@ -1030,8 +1030,8 @@ struct rwc_perf {\n \t\t\t\t\tgoto err;\n \n \t\t\tunsigned long long cycles_per_lookup =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)\n-\t\t\t\t/ __atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)\n+\t\t\t\t/ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\trwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;\n \t\t\tprintf(\"Cycles per lookup: %llu\\n\", cycles_per_lookup);\n \t\t}\n@@ -1087,9 +1087,9 @@ struct rwc_perf {\n \t\t\t\tprintf(\"\\nNumber of readers: %u\\n\",\n \t\t\t\t       rwc_core_cnt[n]);\n \n-\t\t\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t\t\t__atomic_store_n(&gread_cycles, 0,\n-\t\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\t\t\trte_atomic_store_explicit(&gread_cycles, 0,\n+\t\t\t\t\t\t rte_memory_order_relaxed);\n \n \t\t\t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\t\t\twriter_done = 0;\n@@ -1127,10 +1127,10 @@ struct rwc_perf {\n \t\t\t\t\t\tgoto err;\n \n \t\t\t\tunsigned long long cycles_per_lookup =\n-\t\t\t\t\t__atomic_load_n(&gread_cycles,\n-\t\t\t\t\t\t\t__ATOMIC_RELAXED) /\n-\t\t\t\t\t__atomic_load_n(&greads,\n-\t\t\t\t\t\t\t  __ATOMIC_RELAXED);\n+\t\t\t\t\trte_atomic_load_explicit(&gread_cycles,\n+\t\t\t\t\t\t\trte_memory_order_relaxed) /\n+\t\t\t\t\trte_atomic_load_explicit(&greads,\n+\t\t\t\t\t\t\t  rte_memory_order_relaxed);\n \t\t\t\trwc_perf_results->multi_rw[m][k][n]\n \t\t\t\t\t= cycles_per_lookup;\n \t\t\t\tprintf(\"Cycles per lookup: %llu\\n\",\n@@ -1178,8 +1178,8 @@ struct rwc_perf {\n \n \t\t\tprintf(\"\\nNumber of readers: %u\\n\", rwc_core_cnt[n]);\n \n-\t\t\t__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);\n-\t\t\t__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);\n+\t\t\trte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);\n \n \t\t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\t\twrite_type = WRITE_NO_KEY_SHIFT;\n@@ -1210,8 +1210,8 @@ struct rwc_perf {\n \t\t\t\t\tgoto err;\n \n 
\t\t\tunsigned long long cycles_per_lookup =\n-\t\t\t\t__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)\n-\t\t\t\t/ __atomic_load_n(&greads, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)\n+\t\t\t\t/ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);\n \t\t\trwc_perf_results->w_ks_r_hit_extbkt[m][n]\n \t\t\t\t\t\t= cycles_per_lookup;\n \t\t\tprintf(\"Cycles per lookup: %llu\\n\", cycles_per_lookup);\n@@ -1280,9 +1280,9 @@ struct rwc_perf {\n \t\t\t\ttbl_rwc_test_param.keys_no_ks + i);\n \t}\n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,\n-\t\t\t   __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&gwrites, tbl_rwc_test_param.single_insert,\n+\t\t\t   rte_memory_order_relaxed);\n \treturn 0;\n }\n \n@@ -1328,8 +1328,8 @@ struct rwc_perf {\n \t\t\t\trwc_core_cnt[n];\n \t\tprintf(\"\\nNumber of writers: %u\\n\", rwc_core_cnt[n]);\n \n-\t\t__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n-\t\t__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);\n+\t\trte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);\n \n \t\trte_hash_reset(tbl_rwc_test_param.h);\n \t\trte_rcu_qsbr_init(rv, RTE_MAX_LCORE);\n@@ -1364,8 +1364,8 @@ struct rwc_perf {\n \t\trte_eal_mp_wait_lcore();\n \n \t\tunsigned long long cycles_per_write_operation =\n-\t\t\t__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /\n-\t\t\t__atomic_load_n(&gwrites, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /\n+\t\t\trte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);\n \t\trwc_perf_results->writer_add_del[n]\n \t\t\t\t\t= cycles_per_write_operation;\n \t\tprintf(\"Cycles per write operation: %llu\\n\",\ndiff --git a/app/test/test_lcores.c b/app/test/test_lcores.c\nindex 3434a0d..bd5c0dd 100644\n--- a/app/test/test_lcores.c\n+++ b/app/test/test_lcores.c\n@@ -10,6 +10,7 @@\n #include <rte_errno.h>\n #include <rte_lcore.h>\n #include <rte_thread.h>\n+#include <rte_stdatomic.h>\n \n #include \"test.h\"\n \n@@ -25,7 +26,7 @@ struct thread_context {\n \tenum { Thread_INIT, Thread_ERROR, Thread_DONE } state;\n \tbool lcore_id_any;\n \trte_thread_t id;\n-\tunsigned int *registered_count;\n+\tRTE_ATOMIC(unsigned int) *registered_count;\n };\n \n static uint32_t thread_loop(void *arg)\n@@ -49,10 +50,10 @@ static uint32_t thread_loop(void *arg)\n \t\tt->state = Thread_ERROR;\n \t}\n \t/* Report register happened to the control thread. */\n-\t__atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);\n \n \t/* Wait for release from the control thread. 
*/\n-\twhile (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)\n+\twhile (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)\n \t\tsched_yield();\n \trte_thread_unregister();\n \tlcore_id = rte_lcore_id();\n@@ -73,7 +74,7 @@ static uint32_t thread_loop(void *arg)\n {\n \tstruct thread_context thread_contexts[RTE_MAX_LCORE];\n \tunsigned int non_eal_threads_count;\n-\tunsigned int registered_count;\n+\tRTE_ATOMIC(unsigned int) registered_count;\n \tstruct thread_context *t;\n \tunsigned int i;\n \tint ret;\n@@ -93,7 +94,7 @@ static uint32_t thread_loop(void *arg)\n \t}\n \tprintf(\"non-EAL threads count: %u\\n\", non_eal_threads_count);\n \t/* Wait all non-EAL threads to register. */\n-\twhile (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=\n+\twhile (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=\n \t\t\tnon_eal_threads_count)\n \t\tsched_yield();\n \n@@ -109,14 +110,14 @@ static uint32_t thread_loop(void *arg)\n \tif (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {\n \t\tnon_eal_threads_count++;\n \t\tprintf(\"non-EAL threads count: %u\\n\", non_eal_threads_count);\n-\t\twhile (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=\n+\t\twhile (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=\n \t\t\t\tnon_eal_threads_count)\n \t\t\tsched_yield();\n \t}\n \n skip_lcore_any:\n \t/* Release all threads, and check their states. */\n-\t__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);\n \tret = 0;\n \tfor (i = 0; i < non_eal_threads_count; i++) {\n \t\tt = &thread_contexts[i];\n@@ -225,7 +226,7 @@ struct limit_lcore_context {\n \tstruct thread_context thread_contexts[2];\n \tunsigned int non_eal_threads_count = 0;\n \tstruct limit_lcore_context l[2] = {};\n-\tunsigned int registered_count = 0;\n+\tRTE_ATOMIC(unsigned int) registered_count = 0;\n \tstruct thread_context *t;\n \tvoid *handle[2] = {};\n \tunsigned int i;\n@@ -275,7 +276,7 @@ struct limit_lcore_context {\n \tif (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)\n \t\tgoto cleanup_threads;\n \tnon_eal_threads_count++;\n-\twhile (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=\n+\twhile (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=\n \t\t\tnon_eal_threads_count)\n \t\tsched_yield();\n \tif (l[0].init != eal_threads_count + 1 ||\n@@ -298,7 +299,7 @@ struct limit_lcore_context {\n \tif (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)\n \t\tgoto cleanup_threads;\n \tnon_eal_threads_count++;\n-\twhile (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=\n+\twhile (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=\n \t\t\tnon_eal_threads_count)\n \t\tsched_yield();\n \tif (l[0].init != eal_threads_count + 2 ||\n@@ -315,7 +316,7 @@ struct limit_lcore_context {\n \t}\n \trte_lcore_dump(stdout);\n \t/* Release all threads, and check their states. 
*/\n-\t__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);\n \tret = 0;\n \tfor (i = 0; i < non_eal_threads_count; i++) {\n \t\tt = &thread_contexts[i];\n@@ -337,7 +338,7 @@ struct limit_lcore_context {\n \n cleanup_threads:\n \t/* Release all threads */\n-\t__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);\n \tfor (i = 0; i < non_eal_threads_count; i++) {\n \t\tt = &thread_contexts[i];\n \t\trte_thread_join(t->id, NULL);\ndiff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c\nindex 82daf9e..bc4bdde 100644\n--- a/app/test/test_lpm_perf.c\n+++ b/app/test/test_lpm_perf.c\n@@ -22,8 +22,8 @@\n struct rte_lpm *lpm;\n static struct rte_rcu_qsbr *rv;\n static volatile uint8_t writer_done;\n-static volatile uint32_t thr_id;\n-static uint64_t gwrite_cycles;\n+static volatile RTE_ATOMIC(uint32_t) thr_id;\n+static RTE_ATOMIC(uint64_t) gwrite_cycles;\n static uint32_t num_writers;\n \n /* LPM APIs are not thread safe, use spinlock */\n@@ -362,7 +362,7 @@ static void generate_large_route_rule_table(void)\n {\n \tuint32_t tmp_thr_id;\n \n-\ttmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);\n+\ttmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);\n \tif (tmp_thr_id >= RTE_MAX_LCORE)\n \t\tprintf(\"Invalid thread id %u\\n\", tmp_thr_id);\n \n@@ -470,7 +470,7 @@ static void generate_large_route_rule_table(void)\n \n \ttotal_cycles = rte_rdtsc_precise() - begin;\n \n-\t__atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&gwrite_cycles, total_cycles, rte_memory_order_relaxed);\n \n \treturn 0;\n \n@@ -540,9 +540,9 @@ static void generate_large_route_rule_table(void)\n \t\t\treader_f = test_lpm_reader;\n \n \t\twriter_done = 0;\n-\t\t__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);\n \n-\t\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\t\trte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);\n \n \t\t/* Launch reader threads */\n \t\tfor (i = j; i < num_cores; i++)\n@@ -563,7 +563,7 @@ static void generate_large_route_rule_table(void)\n \t\tprintf(\"Total LPM Adds: %d\\n\", TOTAL_WRITES);\n \t\tprintf(\"Total LPM Deletes: %d\\n\", TOTAL_WRITES);\n \t\tprintf(\"Average LPM Add/Del: %\"PRIu64\" cycles\\n\",\n-\t\t\t__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)\n+\t\t\trte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed)\n \t\t\t/ TOTAL_WRITES);\n \n \t\twriter_done = 1;\ndiff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c\nindex 46ff13c..8fcbc11 100644\n--- a/app/test/test_mcslock.c\n+++ b/app/test/test_mcslock.c\n@@ -42,7 +42,7 @@\n \n static unsigned int count;\n \n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) synchro;\n \n static int\n test_mcslock_per_core(__rte_unused void *arg)\n@@ -75,7 +75,7 @@\n \trte_mcslock_t ml_perf_me;\n \n \t/* wait synchro */\n-\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);\n \n \tbegin = rte_get_timer_cycles();\n \twhile (lcount < MAX_LOOP) {\n@@ -100,14 +100,14 @@\n \tconst unsigned int lcore = rte_lcore_id();\n \n \tprintf(\"\\nTest with no lock on single core...\\n\");\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, 
rte_memory_order_relaxed);\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n \t\t\tlcore, time_count[lcore]);\n \tmemset(time_count, 0, sizeof(time_count));\n \n \tprintf(\"\\nTest with lock on single core...\\n\");\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tlock = 1;\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n@@ -116,11 +116,11 @@\n \n \tprintf(\"\\nTest with lock on %u cores...\\n\", (rte_lcore_count()));\n \n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \trte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c\nindex 96de347..35f0597 100644\n--- a/app/test/test_mempool_perf.c\n+++ b/app/test/test_mempool_perf.c\n@@ -88,7 +88,7 @@\n static int use_external_cache;\n static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;\n \n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) synchro;\n \n /* number of objects in one bulk operation (get or put) */\n static unsigned n_get_bulk;\n@@ -188,7 +188,8 @@ struct mempool_test_stats {\n \n \t/* wait synchro for workers */\n \tif (lcore_id != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,\n+\t\t    rte_memory_order_relaxed);\n \n \tstart_cycles = rte_get_timer_cycles();\n \n@@ -233,7 +234,7 @@ struct mempool_test_stats {\n \tint ret;\n \tunsigned cores_save = cores;\n \n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \n \t/* reset stats */\n \tmemset(stats, 0, sizeof(stats));\n@@ -258,7 +259,7 @@ struct mempool_test_stats {\n \t}\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \n \tret = per_lcore_mempool_test(mp);\n \ndiff --git a/app/test/test_pflock.c b/app/test/test_pflock.c\nindex 5f77b15..d989a68 100644\n--- a/app/test/test_pflock.c\n+++ b/app/test/test_pflock.c\n@@ -31,7 +31,7 @@\n \n static rte_pflock_t sl;\n static rte_pflock_t sl_tab[RTE_MAX_LCORE];\n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) synchro;\n \n static int\n test_pflock_per_core(__rte_unused void *arg)\n@@ -69,7 +69,8 @@\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,\n+\t\t    rte_memory_order_relaxed);\n \n \tbegin = rte_rdtsc_precise();\n \twhile (lcount < MAX_LOOP) {\n@@ -99,7 +100,7 @@\n \tconst unsigned int lcore = rte_lcore_id();\n \n \tprintf(\"\\nTest with no lock on single core...\\n\");\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n \t\t\tlcore, time_count[lcore]);\n@@ -107,7 +108,7 @@\n \n \tprintf(\"\\nTest with phase-fair lock on single core...\\n\");\n \tlock = 1;\n-\t__atomic_store_n(&synchro, 
1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n \t\t\tlcore, time_count[lcore]);\n@@ -116,12 +117,12 @@\n \tprintf(\"\\nPhase-fair test on %u cores...\\n\", rte_lcore_count());\n \n \t/* clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \tif (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)\n \t\treturn -1;\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c\nindex f6d97f2..46ae80d 100644\n--- a/app/test/test_pmd_perf.c\n+++ b/app/test/test_pmd_perf.c\n@@ -537,7 +537,7 @@ enum {\n \treturn 0;\n }\n \n-static uint64_t start;\n+static RTE_ATOMIC(uint64_t) start;\n \n static inline int\n poll_burst(void *args)\n@@ -575,7 +575,7 @@ enum {\n \t\tnum[portid] = pkt_per_port;\n \t}\n \n-\trte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_64((uint64_t *)(uintptr_t)&start, 1, rte_memory_order_acquire);\n \n \tcur_tsc = rte_rdtsc();\n \twhile (total) {\n@@ -629,9 +629,9 @@ enum {\n \n \t/* only when polling first */\n \tif (flags == SC_BURST_POLL_FIRST)\n-\t\t__atomic_store_n(&start, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&start, 1, rte_memory_order_relaxed);\n \telse\n-\t\t__atomic_store_n(&start, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&start, 0, rte_memory_order_relaxed);\n \n \t/* start polling thread\n \t * if in POLL_FIRST mode, poll once launched;\n@@ -655,7 +655,7 @@ enum {\n \n \t/* only when polling second  */\n \tif (flags == SC_BURST_XMIT_FIRST)\n-\t\t__atomic_store_n(&start, 1, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&start, 1, rte_memory_order_release);\n \n \t/* wait for polling finished */\n \tdiff_tsc = rte_eal_wait_lcore(lcore);\ndiff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c\nindex ce88a73..d1bf5c5 100644\n--- a/app/test/test_rcu_qsbr_perf.c\n+++ b/app/test/test_rcu_qsbr_perf.c\n@@ -25,13 +25,15 @@\n static uint32_t *hash_data[TOTAL_ENTRY];\n static volatile uint8_t writer_done;\n static volatile uint8_t all_registered;\n-static volatile uint32_t thr_id;\n+static volatile RTE_ATOMIC(uint32_t) thr_id;\n \n static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];\n static struct rte_hash *h;\n static char hash_name[8];\n-static uint64_t updates, checks;\n-static uint64_t update_cycles, check_cycles;\n+static RTE_ATOMIC(uint64_t) updates;\n+static RTE_ATOMIC(uint64_t) checks;\n+static RTE_ATOMIC(uint64_t) update_cycles;\n+static RTE_ATOMIC(uint64_t) check_cycles;\n \n /* Scale down results to 1000 operations to support lower\n  * granularity clocks.\n@@ -44,7 +46,7 @@\n {\n \tuint32_t tmp_thr_id;\n \n-\ttmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);\n+\ttmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);\n \tif (tmp_thr_id >= RTE_MAX_LCORE)\n \t\tprintf(\"Invalid thread id %u\\n\", tmp_thr_id);\n \n@@ -81,8 +83,8 @@\n \t}\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&update_cycles, cycles, 
rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);\n \n \t/* Make the thread offline */\n \trte_rcu_qsbr_thread_offline(t[0], thread_id);\n@@ -113,8 +115,8 @@\n \t} while (loop_cnt < 20000000);\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&checks, loop_cnt, rte_memory_order_relaxed);\n \treturn 0;\n }\n \n@@ -130,15 +132,15 @@\n \n \twriter_done = 0;\n \n-\t__atomic_store_n(&updates, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&checks, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);\n \n \tprintf(\"\\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\\n\",\n \t\tnum_cores - 1);\n \n-\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\trte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);\n \n \tif (all_registered == 1)\n \t\ttmp_num_cores = num_cores - 1;\n@@ -168,15 +170,16 @@\n \trte_eal_mp_wait_lcore();\n \n \tprintf(\"Total quiescent state updates = %\"PRIi64\"\\n\",\n-\t\t__atomic_load_n(&updates, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&updates, rte_memory_order_relaxed));\n \tprintf(\"Cycles per %d quiescent state updates: %\"PRIi64\"\\n\",\n \t\tRCU_SCALE_DOWN,\n-\t\t__atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /\n-\t\t(__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));\n-\tprintf(\"Total RCU checks = %\"PRIi64\"\\n\", __atomic_load_n(&checks, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /\n+\t\t(rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));\n+\tprintf(\"Total RCU checks = %\"PRIi64\"\\n\", rte_atomic_load_explicit(&checks,\n+\t    rte_memory_order_relaxed));\n \tprintf(\"Cycles per %d checks: %\"PRIi64\"\\n\", RCU_SCALE_DOWN,\n-\t\t__atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /\n-\t\t(__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));\n+\t\trte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /\n+\t\t(rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));\n \n \trte_free(t[0]);\n \n@@ -193,10 +196,10 @@\n \tsize_t sz;\n \tunsigned int i, tmp_num_cores;\n \n-\t__atomic_store_n(&updates, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);\n \n-\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\trte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);\n \n \tprintf(\"\\nPerf Test: %d Readers\\n\", num_cores);\n \n@@ -220,11 +223,11 @@\n \trte_eal_mp_wait_lcore();\n \n \tprintf(\"Total quiescent state updates = %\"PRIi64\"\\n\",\n-\t\t__atomic_load_n(&updates, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&updates, rte_memory_order_relaxed));\n \tprintf(\"Cycles per %d quiescent state updates: %\"PRIi64\"\\n\",\n 
\t\tRCU_SCALE_DOWN,\n-\t\t__atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /\n-\t\t(__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));\n+\t\trte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /\n+\t\t(rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));\n \n \trte_free(t[0]);\n \n@@ -241,10 +244,10 @@\n \tsize_t sz;\n \tunsigned int i;\n \n-\t__atomic_store_n(&checks, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);\n \n-\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\trte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);\n \n \tprintf(\"\\nPerf test: %d Writers ('wait' in qsbr_check == false)\\n\",\n \t\tnum_cores);\n@@ -266,10 +269,11 @@\n \t/* Wait until all readers have exited */\n \trte_eal_mp_wait_lcore();\n \n-\tprintf(\"Total RCU checks = %\"PRIi64\"\\n\", __atomic_load_n(&checks, __ATOMIC_RELAXED));\n+\tprintf(\"Total RCU checks = %\"PRIi64\"\\n\", rte_atomic_load_explicit(&checks,\n+\t    rte_memory_order_relaxed));\n \tprintf(\"Cycles per %d checks: %\"PRIi64\"\\n\", RCU_SCALE_DOWN,\n-\t\t__atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /\n-\t\t(__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));\n+\t\trte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /\n+\t\t(rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));\n \n \trte_free(t[0]);\n \n@@ -317,8 +321,8 @@\n \t} while (!writer_done);\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);\n \n \trte_rcu_qsbr_thread_unregister(temp, thread_id);\n \n@@ -389,12 +393,12 @@ static struct rte_hash *init_hash(void)\n \n \twriter_done = 0;\n \n-\t__atomic_store_n(&updates, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&checks, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);\n \n-\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\trte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);\n \n \tprintf(\"\\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\\n\", num_cores);\n \n@@ -453,8 +457,8 @@ static struct rte_hash *init_hash(void)\n \t}\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);\n \n \twriter_done = 1;\n \n@@ -467,12 +471,12 @@ static struct rte_hash *init_hash(void)\n \n \tprintf(\"Following numbers include calls to rte_hash functions\\n\");\n \tprintf(\"Cycles per 1 quiescent state update(online/update/offline): %\"PRIi64\"\\n\",\n-\t\t__atomic_load_n(&update_cycles, 
__ATOMIC_RELAXED) /\n-\t\t__atomic_load_n(&updates, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /\n+\t\trte_atomic_load_explicit(&updates, rte_memory_order_relaxed));\n \n \tprintf(\"Cycles per 1 check(start, check): %\"PRIi64\"\\n\\n\",\n-\t\t__atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /\n-\t\t__atomic_load_n(&checks, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /\n+\t\trte_atomic_load_explicit(&checks, rte_memory_order_relaxed));\n \n \trte_free(t[0]);\n \n@@ -511,7 +515,7 @@ static struct rte_hash *init_hash(void)\n \n \tprintf(\"Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\\n\", num_cores);\n \n-\t__atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&thr_id, 0, rte_memory_order_relaxed);\n \n \tif (all_registered == 1)\n \t\ttmp_num_cores = num_cores;\n@@ -570,8 +574,8 @@ static struct rte_hash *init_hash(void)\n \t}\n \n \tcycles = rte_rdtsc_precise() - begin;\n-\t__atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);\n \n \twriter_done = 1;\n \t/* Wait and check return value from reader threads */\n@@ -583,12 +587,12 @@ static struct rte_hash *init_hash(void)\n \n \tprintf(\"Following numbers include calls to rte_hash functions\\n\");\n \tprintf(\"Cycles per 1 quiescent state update(online/update/offline): %\"PRIi64\"\\n\",\n-\t\t__atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /\n-\t\t__atomic_load_n(&updates, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /\n+\t\trte_atomic_load_explicit(&updates, rte_memory_order_relaxed));\n \n \tprintf(\"Cycles per 1 check(start, check): %\"PRIi64\"\\n\\n\",\n-\t\t__atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /\n-\t\t__atomic_load_n(&checks, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /\n+\t\trte_atomic_load_explicit(&checks, rte_memory_order_relaxed));\n \n \trte_free(t[0]);\n \n@@ -622,10 +626,10 @@ static struct rte_hash *init_hash(void)\n \t\treturn TEST_SKIPPED;\n \t}\n \n-\t__atomic_store_n(&updates, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&checks, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);\n \n \tnum_cores = 0;\n \tRTE_LCORE_FOREACH_WORKER(core_id) {\ndiff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c\nindex d7c5a4c..6d7a0a8 100644\n--- a/app/test/test_ring_perf.c\n+++ b/app/test/test_ring_perf.c\n@@ -186,7 +186,7 @@ struct thread_params {\n \tvoid *burst = NULL;\n \n #ifdef RTE_USE_C11_MEM_MODEL\n-\tif (__atomic_fetch_add(&lcore_count, 1, __ATOMIC_RELAXED) + 1 != 2)\n+\tif (rte_atomic_fetch_add_explicit(&lcore_count, 1, rte_memory_order_relaxed) + 1 != 2)\n #else\n \tif (__sync_add_and_fetch(&lcore_count, 1) != 2)\n #endif\n@@ -320,7 +320,7 @@ struct thread_params {\n \treturn 0;\n }\n \n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) synchro;\n static 
uint64_t queue_count[RTE_MAX_LCORE];\n \n #define TIME_MS 100\n@@ -342,7 +342,8 @@ struct thread_params {\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,\n+\t\t    rte_memory_order_relaxed);\n \n \tbegin = rte_get_timer_cycles();\n \twhile (time_diff < hz * TIME_MS / 1000) {\n@@ -397,12 +398,12 @@ struct thread_params {\n \t\tparam.r = r;\n \n \t\t/* clear synchro and start workers */\n-\t\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \t\tif (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)\n \t\t\treturn -1;\n \n \t\t/* start synchro and launch test on main */\n-\t\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \t\tlcore_f(&param);\n \n \t\trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h\nindex 2dec897..e6b23c0 100644\n--- a/app/test/test_ring_stress_impl.h\n+++ b/app/test/test_ring_stress_impl.h\n@@ -24,7 +24,7 @@ enum {\n \tWRK_CMD_RUN,\n };\n \n-static uint32_t wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;\n+static RTE_ATOMIC(uint32_t) wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;\n \n /* test run-time in seconds */\n static const uint32_t run_time = 60;\n@@ -203,7 +203,7 @@ struct ring_elem {\n \t * really releasing any data through 'wrk_cmd' to\n \t * the worker.\n \t */\n-\twhile (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)\n+\twhile (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) != WRK_CMD_RUN)\n \t\trte_pause();\n \n \tcl = rte_rdtsc_precise();\n@@ -246,7 +246,7 @@ struct ring_elem {\n \n \t\tlcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);\n \n-\t} while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);\n+\t} while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) == WRK_CMD_RUN);\n \n \tcl = rte_rdtsc_precise() - cl;\n \tif (prcs == 0)\n@@ -360,12 +360,12 @@ struct ring_elem {\n \t}\n \n \t/* signal worker to start test */\n-\t__atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&wrk_cmd, WRK_CMD_RUN, rte_memory_order_release);\n \n \trte_delay_us(run_time * US_PER_S);\n \n \t/* signal worker to start test */\n-\t__atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&wrk_cmd, WRK_CMD_STOP, rte_memory_order_release);\n \n \t/* wait for workers and collect stats. 
*/\n \tmc = rte_lcore_id();\ndiff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c\nindex 5079895..f67fc35 100644\n--- a/app/test/test_rwlock.c\n+++ b/app/test/test_rwlock.c\n@@ -35,7 +35,7 @@\n \n static rte_rwlock_t sl;\n static rte_rwlock_t sl_tab[RTE_MAX_LCORE];\n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) synchro;\n \n enum {\n \tLC_TYPE_RDLOCK,\n@@ -101,7 +101,8 @@ struct try_rwlock_lcore {\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,\n+\t\t    rte_memory_order_relaxed);\n \n \tbegin = rte_rdtsc_precise();\n \twhile (lcount < MAX_LOOP) {\n@@ -134,12 +135,12 @@ struct try_rwlock_lcore {\n \tprintf(\"\\nRwlock Perf Test on %u cores...\\n\", rte_lcore_count());\n \n \t/* clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \tif (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)\n \t\treturn -1;\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tload_loop_fn(NULL);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c\nindex 873bd60..7455bac 100644\n--- a/app/test/test_seqlock.c\n+++ b/app/test/test_seqlock.c\n@@ -22,7 +22,7 @@ struct data {\n \n struct reader {\n \tstruct data *data;\n-\tuint8_t stop;\n+\tRTE_ATOMIC(uint8_t) stop;\n };\n \n #define WRITER_RUNTIME 2.0 /* s */\n@@ -79,7 +79,7 @@ struct reader {\n \tstruct reader *r = arg;\n \tint rc = TEST_SUCCESS;\n \n-\twhile (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&\n+\twhile (rte_atomic_load_explicit(&r->stop, rte_memory_order_relaxed) == 0 &&\n \t\t\trc == TEST_SUCCESS) {\n \t\tstruct data *data = r->data;\n \t\tbool interrupted;\n@@ -115,7 +115,7 @@ struct reader {\n static void\n reader_stop(struct reader *reader)\n {\n-\t__atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&reader->stop, 1, rte_memory_order_relaxed);\n }\n \n #define NUM_WRITERS 2 /* main lcore + one worker */\ndiff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c\nindex c12d52d..010ab82 100644\n--- a/app/test/test_service_cores.c\n+++ b/app/test/test_service_cores.c\n@@ -59,15 +59,15 @@ static int32_t dummy_mt_unsafe_cb(void *args)\n \t * test, because two threads are concurrently in a non-MT safe callback.\n \t */\n \tuint32_t *test_params = args;\n-\tuint32_t *lock = &test_params[0];\n+\tRTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];\n \tuint32_t *pass_test = &test_params[1];\n \tuint32_t exp = 0;\n-\tint lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,\n-\t\t\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED);\n+\tint lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,\n+\t\t\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed);\n \tif (lock_taken) {\n \t\t/* delay with the lock held */\n \t\trte_delay_ms(250);\n-\t\t__atomic_store_n(lock, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);\n \t} else {\n \t\t/* 2nd thread will fail to take lock, so clear pass flag */\n \t\t*pass_test = 0;\n@@ -86,15 +86,15 @@ static int32_t dummy_mt_safe_cb(void *args)\n \t *    that 2 threads are running the callback at the same time: MT safe\n \t */\n \tuint32_t 
*test_params = args;\n-\tuint32_t *lock = &test_params[0];\n+\tRTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];\n \tuint32_t *pass_test = &test_params[1];\n \tuint32_t exp = 0;\n-\tint lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,\n-\t\t\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED);\n+\tint lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,\n+\t\t\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed);\n \tif (lock_taken) {\n \t\t/* delay with the lock held */\n \t\trte_delay_ms(250);\n-\t\t__atomic_store_n(lock, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);\n \t} else {\n \t\t/* 2nd thread will fail to take lock, so set pass flag */\n \t\t*pass_test = 1;\n@@ -748,15 +748,15 @@ static int32_t dummy_mt_safe_cb(void *args)\n \n \t/* retrieve done flag and lock to add/sub */\n \tuint32_t *done = &params[0];\n-\tuint32_t *lock = &params[1];\n+\tRTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&params[1];\n \n \twhile (!*done) {\n-\t\t__atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(lock, 1, rte_memory_order_relaxed);\n \t\trte_delay_us(500);\n-\t\tif (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)\n+\t\tif (rte_atomic_load_explicit(lock, rte_memory_order_relaxed) > 1)\n \t\t\t/* pass: second core has simultaneously incremented */\n \t\t\t*done = 1;\n-\t\t__atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(lock, 1, rte_memory_order_relaxed);\n \t}\n \n \treturn 0;\ndiff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c\nindex 9a481f2..a29405a 100644\n--- a/app/test/test_spinlock.c\n+++ b/app/test/test_spinlock.c\n@@ -48,7 +48,7 @@\n static rte_spinlock_recursive_t slr;\n static unsigned count = 0;\n \n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) synchro;\n \n static int\n test_spinlock_per_core(__rte_unused void *arg)\n@@ -110,7 +110,8 @@\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,\n+\t\t    rte_memory_order_relaxed);\n \n \tbegin = rte_get_timer_cycles();\n \twhile (lcount < MAX_LOOP) {\n@@ -149,11 +150,11 @@\n \tprintf(\"\\nTest with lock on %u cores...\\n\", rte_lcore_count());\n \n \t/* Clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \trte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c\nindex c5e1caa..3f17a26 100644\n--- a/app/test/test_stack_perf.c\n+++ b/app/test/test_stack_perf.c\n@@ -23,7 +23,7 @@\n  */\n static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};\n \n-static uint32_t lcore_barrier;\n+static RTE_ATOMIC(uint32_t) lcore_barrier;\n \n struct lcore_pair {\n \tunsigned int c1;\n@@ -143,8 +143,8 @@ struct thread_args {\n \ts = args->s;\n \tsize = args->sz;\n \n-\t__atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);\n-\trte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_sub_explicit(&lcore_barrier, 1, rte_memory_order_relaxed);\n+\trte_wait_until_equal_32((uint32_t 
*)(uintptr_t)&lcore_barrier, 0, rte_memory_order_relaxed);\n \n \tuint64_t start = rte_rdtsc();\n \n@@ -173,7 +173,7 @@ struct thread_args {\n \tunsigned int i;\n \n \tfor (i = 0; i < RTE_DIM(bulk_sizes); i++) {\n-\t\t__atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&lcore_barrier, 2, rte_memory_order_relaxed);\n \n \t\targs[0].sz = args[1].sz = bulk_sizes[i];\n \t\targs[0].s = args[1].s = s;\n@@ -206,7 +206,7 @@ struct thread_args {\n \t\tint cnt = 0;\n \t\tdouble avg;\n \n-\t\t__atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&lcore_barrier, n, rte_memory_order_relaxed);\n \n \t\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n \t\t\tif (++cnt >= n)\n@@ -300,7 +300,7 @@ struct thread_args {\n \tstruct lcore_pair cores;\n \tstruct rte_stack *s;\n \n-\t__atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&lcore_barrier, 0, rte_memory_order_relaxed);\n \n \ts = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);\n \tif (s == NULL) {\ndiff --git a/app/test/test_threads.c b/app/test/test_threads.c\nindex 4ac3f26..6d6881a 100644\n--- a/app/test/test_threads.c\n+++ b/app/test/test_threads.c\n@@ -6,12 +6,13 @@\n \n #include <rte_thread.h>\n #include <rte_debug.h>\n+#include <rte_stdatomic.h>\n \n #include \"test.h\"\n \n RTE_LOG_REGISTER(threads_logtype_test, test.threads, INFO);\n \n-static uint32_t thread_id_ready;\n+static RTE_ATOMIC(uint32_t) thread_id_ready;\n \n static uint32_t\n thread_main(void *arg)\n@@ -19,9 +20,9 @@\n \tif (arg != NULL)\n \t\t*(rte_thread_t *)arg = rte_thread_self();\n \n-\t__atomic_store_n(&thread_id_ready, 1, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&thread_id_ready, 1, rte_memory_order_release);\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 1)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 1)\n \t\t;\n \n \treturn 0;\n@@ -37,13 +38,13 @@\n \tRTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, &thread_main_id) == 0,\n \t\t\"Failed to create thread.\");\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)\n \t\t;\n \n \tRTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,\n \t\t\"Unexpected thread id.\");\n \n-\t__atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);\n \n \tRTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,\n \t\t\"Failed to join thread.\");\n@@ -61,13 +62,13 @@\n \tRTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main,\n \t\t&thread_main_id) == 0, \"Failed to create thread.\");\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)\n \t\t;\n \n \tRTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,\n \t\t\"Unexpected thread id.\");\n \n-\t__atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);\n \n \tRTE_TEST_ASSERT(rte_thread_detach(thread_id) == 0,\n \t\t\"Failed to detach thread.\");\n@@ -85,7 +86,7 @@\n \tRTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,\n \t\t\"Failed to create thread\");\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, 
rte_memory_order_acquire) == 0)\n \t\t;\n \n \tpriority = RTE_THREAD_PRIORITY_NORMAL;\n@@ -121,7 +122,7 @@\n \tRTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,\n \t\t\"Priority set mismatches priority get\");\n \n-\t__atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);\n \n \treturn 0;\n }\n@@ -137,7 +138,7 @@\n \tRTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,\n \t\t\"Failed to create thread\");\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)\n \t\t;\n \n \tRTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset0) == 0,\n@@ -190,7 +191,7 @@\n \tRTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,\n \t\t\"Failed to create attributes affinity thread.\");\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)\n \t\t;\n \n \tRTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset1) == 0,\n@@ -198,7 +199,7 @@\n \tRTE_TEST_ASSERT(memcmp(&cpuset0, &cpuset1, sizeof(rte_cpuset_t)) == 0,\n \t\t\"Failed to apply affinity attributes\");\n \n-\t__atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);\n \n \treturn 0;\n }\n@@ -219,7 +220,7 @@\n \tRTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,\n \t\t\"Failed to create attributes priority thread.\");\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)\n \t\t;\n \n \tRTE_TEST_ASSERT(rte_thread_get_priority(thread_id, &priority) == 0,\n@@ -227,7 +228,7 @@\n \tRTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,\n \t\t\"Failed to apply priority attributes\");\n \n-\t__atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);\n \n \treturn 0;\n }\n@@ -243,13 +244,13 @@\n \t\tthread_main, &thread_main_id) == 0,\n \t\t\"Failed to create thread.\");\n \n-\twhile (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)\n+\twhile (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)\n \t\t;\n \n \tRTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,\n \t\t\"Unexpected thread id.\");\n \n-\t__atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);\n \n \tRTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,\n \t\t\"Failed to join thread.\");\ndiff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c\nindex 1fbbedb..9b6b584 100644\n--- a/app/test/test_ticketlock.c\n+++ b/app/test/test_ticketlock.c\n@@ -48,7 +48,7 @@\n static rte_ticketlock_recursive_t tlr;\n static unsigned int count;\n \n-static uint32_t synchro;\n+static RTE_ATOMIC(uint32_t) synchro;\n \n static int\n test_ticketlock_per_core(__rte_unused void *arg)\n@@ -111,7 +111,8 @@\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,\n+\t\t    rte_memory_order_relaxed);\n \n \tbegin = rte_rdtsc_precise();\n \twhile (lcore_count[lcore] < MAX_LOOP) {\n@@ -153,11 +154,11 @@\n 
\tprintf(\"\\nTest with lock on %u cores...\\n\", rte_lcore_count());\n \n \t/* Clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);\n \trte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_timer.c b/app/test/test_timer.c\nindex cac8fc0..dc15a80 100644\n--- a/app/test/test_timer.c\n+++ b/app/test/test_timer.c\n@@ -202,7 +202,7 @@ struct mytimerinfo {\n \n /* Need to synchronize worker lcores through multiple steps. */\n enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };\n-static uint16_t lcore_state[RTE_MAX_LCORE];\n+static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];\n \n static void\n main_init_workers(void)\n@@ -210,7 +210,8 @@ struct mytimerinfo {\n \tunsigned i;\n \n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\t__atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,\n+\t\t    rte_memory_order_relaxed);\n \t}\n }\n \n@@ -220,10 +221,12 @@ struct mytimerinfo {\n \tunsigned i;\n \n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\t__atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,\n+\t\t    rte_memory_order_release);\n \t}\n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\trte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);\n+\t\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,\n+\t\t    rte_memory_order_acquire);\n \t}\n }\n \n@@ -233,7 +236,8 @@ struct mytimerinfo {\n \tunsigned i;\n \n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\trte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);\n+\t\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,\n+\t\t    rte_memory_order_acquire);\n \t}\n }\n \n@@ -242,8 +246,10 @@ struct mytimerinfo {\n {\n \tunsigned lcore_id = rte_lcore_id();\n \n-\trte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);\n-\t__atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,\n+\t    rte_memory_order_acquire);\n+\trte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,\n+\t    rte_memory_order_release);\n }\n \n static void\n@@ -251,7 +257,8 @@ struct mytimerinfo {\n {\n \tunsigned lcore_id = rte_lcore_id();\n \n-\t__atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,\n+\t    rte_memory_order_release);\n }\n \n \n@@ -277,12 +284,12 @@ struct mytimerinfo {\n \tunsigned int lcore_id = rte_lcore_id();\n \tunsigned int main_lcore = rte_get_main_lcore();\n \tint32_t my_collisions = 0;\n-\tstatic uint32_t collisions;\n+\tstatic RTE_ATOMIC(uint32_t) collisions;\n \n \tif (lcore_id == main_lcore) {\n \t\tcb_count = 0;\n \t\ttest_failed = 0;\n-\t\t__atomic_store_n(&collisions, 0, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);\n \t\ttimers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);\n \t\tif (timers == NULL) {\n \t\t\tprintf(\"Test 
Failed\\n\");\n@@ -310,7 +317,7 @@ struct mytimerinfo {\n \t\t\tmy_collisions++;\n \t}\n \tif (my_collisions != 0)\n-\t\t__atomic_fetch_add(&collisions, my_collisions, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);\n \n \t/* wait long enough for timers to expire */\n \trte_delay_ms(100);\n@@ -324,7 +331,7 @@ struct mytimerinfo {\n \n \t/* now check that we get the right number of callbacks */\n \tif (lcore_id == main_lcore) {\n-\t\tmy_collisions = __atomic_load_n(&collisions, __ATOMIC_RELAXED);\n+\t\tmy_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);\n \t\tif (my_collisions != 0)\n \t\t\tprintf(\"- %d timer reset collisions (OK)\\n\", my_collisions);\n \t\trte_timer_manage();\n",
    "prefixes": [
        "v3",
        "41/45"
    ]
}
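
Illustrative note (not part of the patch record above): every hunk in this patch applies the same mechanical conversion from GCC builtin atomics to the rte stdatomic wrappers, and the standalone sketch below condenses that pattern. It assumes a DPDK build environment providing <rte_stdatomic.h> and <rte_pause.h>; the names synchro, gcycles, worker_side() and main_side() are invented for illustration only. The cast through uintptr_t mirrors what the converted tests do where rte_wait_until_equal_32() still expects a plain uint32_t pointer rather than an RTE_ATOMIC-qualified one.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_pause.h>      /* rte_wait_until_equal_32() */
#include <rte_stdatomic.h>  /* RTE_ATOMIC(), rte_atomic_*_explicit(), rte_memory_order_* */

/* before the series: plain uint32_t/uint64_t updated via __atomic_store_n()
 * and __atomic_fetch_add(); after: RTE_ATOMIC()-annotated types */
static RTE_ATOMIC(uint32_t) synchro;   /* hypothetical start flag */
static RTE_ATOMIC(uint64_t) gcycles;   /* hypothetical cycle accumulator */

static void
worker_side(void)
{
	/* rte_wait_until_equal_32() takes a plain pointer, hence the cast
	 * through uintptr_t used throughout the converted tests */
	rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
	    rte_memory_order_relaxed);

	/* replaces __atomic_fetch_add(..., __ATOMIC_RELAXED) */
	rte_atomic_fetch_add_explicit(&gcycles, 123, rte_memory_order_relaxed);
}

static void
main_side(void)
{
	/* replaces __atomic_store_n(..., __ATOMIC_RELEASE) */
	rte_atomic_store_explicit(&synchro, 1, rte_memory_order_release);
}

int
main(void)
{
	main_side();    /* set the start flag ... */
	worker_side();  /* ... then run the "worker" inline so the wait returns */

	/* replaces __atomic_load_n(..., __ATOMIC_RELAXED) */
	printf("accumulated cycles: %" PRIu64 "\n",
	    rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed));
	return 0;
}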