get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch; the full representation is replaced.
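
A patch can be read anonymously with GET, while PUT and PATCH require an authenticated account with maintainer rights on the project. A minimal sketch using Python's requests library follows; the token value is a placeholder, and the set of writable fields ("state", "archived") is an assumption based on the fields visible in the response below.

import requests

BASE = "http://patches.dpdk.org/api"

# Read a single patch; GET needs no authentication.
patch = requests.get(f"{BASE}/patches/133330/").json()
print(patch["name"], "->", patch["state"])

# Partially update the same patch with PATCH. "REPLACE-ME" is a
# placeholder API token, not a real credential; Patchwork uses the
# standard DRF "Token <key>" Authorization scheme.
resp = requests.patch(
    f"{BASE}/patches/133330/",
    headers={"Authorization": "Token REPLACE-ME"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()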

GET /api/patches/133330/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133330,
    "url": "http://patches.dpdk.org/api/patches/133330/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1698280314-25861-10-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1698280314-25861-10-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1698280314-25861-10-git-send-email-roretzla@linux.microsoft.com",
    "date": "2023-10-26T00:31:44",
    "name": "[v3,09/19] rcu: use rte optional stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "795593a18f2686a9fe05f8607a99b964c78d88d6",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1698280314-25861-10-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 29989,
            "url": "http://patches.dpdk.org/api/series/29989/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29989",
            "date": "2023-10-26T00:31:35",
            "name": "use rte optional stdatomic API",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/29989/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/133330/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/133330/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 292DA43200;\n\tThu, 26 Oct 2023 02:33:10 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8225A42DF2;\n\tThu, 26 Oct 2023 02:32:14 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 6D2B5402A3\n for <dev@dpdk.org>; Thu, 26 Oct 2023 02:32:01 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id C871A20B74C9; Wed, 25 Oct 2023 17:31:59 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com C871A20B74C9",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1698280319;\n bh=Z4KCGSN1d59MjvvIivtztxl1mfyplh2DbZzeIEXzYBU=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=GfSbMdc9t2F6IgE/V1aE//+hO53H9CEsDwIc9JZSaa8fPFZML7m3GtD7tpOMg23gc\n QZ+WUoCKhtPB78cw+dh8bmkMd+xrqoP8EKpGtZOcxXzukL+2aoN6vYlBC3JY3vrtgl\n DCNMZLOTMmC9AQN8w03qTDfPJKtj36VEiEwKmJAw=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "Akhil Goyal <gakhil@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Chenbo Xia <chenbo.xia@intel.com>, Ciara Power <ciara.power@intel.com>,\n David Christensen <drc@linux.vnet.ibm.com>,\n David Hunt <david.hunt@intel.com>,\n Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,\n Dmitry Malloy <dmitrym@microsoft.com>,\n Elena Agostini <eagostini@nvidia.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Fan Zhang <fanzhang.oss@gmail.com>, Ferruh Yigit <ferruh.yigit@amd.com>,\n Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jerin Jacob <jerinj@marvell.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>,\n Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,\n Pallavi Kadam <pallavi.kadam@intel.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Reshma Pattan <reshma.pattan@intel.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Shijith Thotton <sthotton@marvell.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Yipeng Wang <yipeng1.wang@intel.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 09/19] rcu: use rte optional stdatomic API",
        "Date": "Wed, 25 Oct 2023 17:31:44 -0700",
        "Message-Id": "<1698280314-25861-10-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1698280314-25861-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>\n <1698280314-25861-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional stdatomic API\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\n---\n lib/rcu/rte_rcu_qsbr.c | 48 +++++++++++++++++------------------\n lib/rcu/rte_rcu_qsbr.h | 68 +++++++++++++++++++++++++-------------------------\n 2 files changed, 58 insertions(+), 58 deletions(-)",
    "diff": "diff --git a/lib/rcu/rte_rcu_qsbr.c b/lib/rcu/rte_rcu_qsbr.c\nindex 17be93e..4dc7714 100644\n--- a/lib/rcu/rte_rcu_qsbr.c\n+++ b/lib/rcu/rte_rcu_qsbr.c\n@@ -102,21 +102,21 @@\n \t * go out of sync. Hence, additional checks are required.\n \t */\n \t/* Check if the thread is already registered */\n-\told_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n-\t\t\t\t\t__ATOMIC_RELAXED);\n+\told_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n+\t\t\t\t\trte_memory_order_relaxed);\n \tif (old_bmap & 1UL << id)\n \t\treturn 0;\n \n \tdo {\n \t\tnew_bmap = old_bmap | (1UL << id);\n-\t\tsuccess = __atomic_compare_exchange(\n+\t\tsuccess = rte_atomic_compare_exchange_strong_explicit(\n \t\t\t\t\t__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n-\t\t\t\t\t&old_bmap, &new_bmap, 0,\n-\t\t\t\t\t__ATOMIC_RELEASE, __ATOMIC_RELAXED);\n+\t\t\t\t\t&old_bmap, new_bmap,\n+\t\t\t\t\trte_memory_order_release, rte_memory_order_relaxed);\n \n \t\tif (success)\n-\t\t\t__atomic_fetch_add(&v->num_threads,\n-\t\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&v->num_threads,\n+\t\t\t\t\t\t1, rte_memory_order_relaxed);\n \t\telse if (old_bmap & (1UL << id))\n \t\t\t/* Someone else registered this thread.\n \t\t\t * Counter should not be incremented.\n@@ -154,8 +154,8 @@\n \t * go out of sync. Hence, additional checks are required.\n \t */\n \t/* Check if the thread is already unregistered */\n-\told_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n-\t\t\t\t\t__ATOMIC_RELAXED);\n+\told_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n+\t\t\t\t\trte_memory_order_relaxed);\n \tif (!(old_bmap & (1UL << id)))\n \t\treturn 0;\n \n@@ -165,14 +165,14 @@\n \t\t * completed before removal of the thread from the list of\n \t\t * reporting threads.\n \t\t */\n-\t\tsuccess = __atomic_compare_exchange(\n+\t\tsuccess = rte_atomic_compare_exchange_strong_explicit(\n \t\t\t\t\t__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n-\t\t\t\t\t&old_bmap, &new_bmap, 0,\n-\t\t\t\t\t__ATOMIC_RELEASE, __ATOMIC_RELAXED);\n+\t\t\t\t\t&old_bmap, new_bmap,\n+\t\t\t\t\trte_memory_order_release, rte_memory_order_relaxed);\n \n \t\tif (success)\n-\t\t\t__atomic_fetch_sub(&v->num_threads,\n-\t\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_sub_explicit(&v->num_threads,\n+\t\t\t\t\t\t1, rte_memory_order_relaxed);\n \t\telse if (!(old_bmap & (1UL << id)))\n \t\t\t/* Someone else unregistered this thread.\n \t\t\t * Counter should not be incremented.\n@@ -227,8 +227,8 @@\n \n \tfprintf(f, \"  Registered thread IDs = \");\n \tfor (i = 0; i < v->num_elems; i++) {\n-\t\tbmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tbmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n+\t\t\t\t\trte_memory_order_acquire);\n \t\tid = i << __RTE_QSBR_THRID_INDEX_SHIFT;\n \t\twhile (bmap) {\n \t\t\tt = __builtin_ctzl(bmap);\n@@ -241,26 +241,26 @@\n \tfprintf(f, \"\\n\");\n \n \tfprintf(f, \"  Token = %\" PRIu64 \"\\n\",\n-\t\t\t__atomic_load_n(&v->token, __ATOMIC_ACQUIRE));\n+\t\t\trte_atomic_load_explicit(&v->token, rte_memory_order_acquire));\n \n \tfprintf(f, \"  Least Acknowledged Token = %\" PRIu64 \"\\n\",\n-\t\t\t__atomic_load_n(&v->acked_token, __ATOMIC_ACQUIRE));\n+\t\t\trte_atomic_load_explicit(&v->acked_token, rte_memory_order_acquire));\n \n \tfprintf(f, \"Quiescent State Counts for readers:\\n\");\n \tfor (i = 0; i < v->num_elems; i++) {\n-\t\tbmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tbmap 
= rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),\n+\t\t\t\t\trte_memory_order_acquire);\n \t\tid = i << __RTE_QSBR_THRID_INDEX_SHIFT;\n \t\twhile (bmap) {\n \t\t\tt = __builtin_ctzl(bmap);\n \t\t\tfprintf(f, \"thread ID = %u, count = %\" PRIu64 \", lock count = %u\\n\",\n \t\t\t\tid + t,\n-\t\t\t\t__atomic_load_n(\n+\t\t\t\trte_atomic_load_explicit(\n \t\t\t\t\t&v->qsbr_cnt[id + t].cnt,\n-\t\t\t\t\t__ATOMIC_RELAXED),\n-\t\t\t\t__atomic_load_n(\n+\t\t\t\t\trte_memory_order_relaxed),\n+\t\t\t\trte_atomic_load_explicit(\n \t\t\t\t\t&v->qsbr_cnt[id + t].lock_cnt,\n-\t\t\t\t\t__ATOMIC_RELAXED));\n+\t\t\t\t\trte_memory_order_relaxed));\n \t\t\tbmap &= ~(1UL << t);\n \t\t}\n \t}\ndiff --git a/lib/rcu/rte_rcu_qsbr.h b/lib/rcu/rte_rcu_qsbr.h\nindex 87e1b55..9f4aed2 100644\n--- a/lib/rcu/rte_rcu_qsbr.h\n+++ b/lib/rcu/rte_rcu_qsbr.h\n@@ -63,11 +63,11 @@\n  * Given thread id needs to be converted to index into the array and\n  * the id within the array element.\n  */\n-#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)\n+#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(RTE_ATOMIC(uint64_t)) * 8)\n #define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \\\n \tRTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \\\n \t\t__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)\n-#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \\\n+#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t __rte_atomic *) \\\n \t((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)\n #define __RTE_QSBR_THRID_INDEX_SHIFT 6\n #define __RTE_QSBR_THRID_MASK 0x3f\n@@ -75,13 +75,13 @@\n \n /* Worker thread counter */\n struct rte_rcu_qsbr_cnt {\n-\tuint64_t cnt;\n+\tRTE_ATOMIC(uint64_t) cnt;\n \t/**< Quiescent state counter. Value 0 indicates the thread is offline\n \t *   64b counter is used to avoid adding more code to address\n \t *   counter overflow. Changing this to 32b would require additional\n \t *   changes to various APIs.\n \t */\n-\tuint32_t lock_cnt;\n+\tRTE_ATOMIC(uint32_t) lock_cnt;\n \t/**< Lock counter. 
Used when RTE_LIBRTE_RCU_DEBUG is enabled */\n } __rte_cache_aligned;\n \n@@ -97,16 +97,16 @@ struct rte_rcu_qsbr_cnt {\n  * 2) Register thread ID array\n  */\n struct rte_rcu_qsbr {\n-\tuint64_t token __rte_cache_aligned;\n+\tRTE_ATOMIC(uint64_t) token __rte_cache_aligned;\n \t/**< Counter to allow for multiple concurrent quiescent state queries */\n-\tuint64_t acked_token;\n+\tRTE_ATOMIC(uint64_t) acked_token;\n \t/**< Least token acked by all the threads in the last call to\n \t *   rte_rcu_qsbr_check API.\n \t */\n \n \tuint32_t num_elems __rte_cache_aligned;\n \t/**< Number of elements in the thread ID array */\n-\tuint32_t num_threads;\n+\tRTE_ATOMIC(uint32_t) num_threads;\n \t/**< Number of threads currently using this QS variable */\n \tuint32_t max_threads;\n \t/**< Maximum number of threads using this QS variable */\n@@ -311,13 +311,13 @@ struct rte_rcu_qsbr_dq_parameters {\n \t * the following will not move down after the load of any shared\n \t * data structure.\n \t */\n-\tt = __atomic_load_n(&v->token, __ATOMIC_RELAXED);\n+\tt = rte_atomic_load_explicit(&v->token, rte_memory_order_relaxed);\n \n-\t/* __atomic_store_n(cnt, __ATOMIC_RELAXED) is used to ensure\n+\t/* rte_atomic_store_explicit(cnt, rte_memory_order_relaxed) is used to ensure\n \t * 'cnt' (64b) is accessed atomically.\n \t */\n-\t__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,\n-\t\tt, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,\n+\t\tt, rte_memory_order_relaxed);\n \n \t/* The subsequent load of the data structure should not\n \t * move above the store. Hence a store-load barrier\n@@ -326,7 +326,7 @@ struct rte_rcu_qsbr_dq_parameters {\n \t * writer might not see that the reader is online, even though\n \t * the reader is referencing the shared data structure.\n \t */\n-\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n }\n \n /**\n@@ -362,8 +362,8 @@ struct rte_rcu_qsbr_dq_parameters {\n \t * data structure can not move after this store.\n \t */\n \n-\t__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,\n-\t\t__RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,\n+\t\t__RTE_QSBR_CNT_THR_OFFLINE, rte_memory_order_release);\n }\n \n /**\n@@ -394,8 +394,8 @@ struct rte_rcu_qsbr_dq_parameters {\n \n #if defined(RTE_LIBRTE_RCU_DEBUG)\n \t/* Increment the lock counter */\n-\t__atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,\n-\t\t\t\t1, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_add_explicit(&v->qsbr_cnt[thread_id].lock_cnt,\n+\t\t\t\t1, rte_memory_order_acquire);\n #endif\n }\n \n@@ -427,8 +427,8 @@ struct rte_rcu_qsbr_dq_parameters {\n \n #if defined(RTE_LIBRTE_RCU_DEBUG)\n \t/* Decrement the lock counter */\n-\t__atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,\n-\t\t\t\t1, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_sub_explicit(&v->qsbr_cnt[thread_id].lock_cnt,\n+\t\t\t\t1, rte_memory_order_release);\n \n \t__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,\n \t\t\t\t\"Lock counter %u. Nested locks?\\n\",\n@@ -461,7 +461,7 @@ struct rte_rcu_qsbr_dq_parameters {\n \t * structure are visible to the workers before the token\n \t * update is visible.\n \t */\n-\tt = __atomic_fetch_add(&v->token, 1, __ATOMIC_RELEASE) + 1;\n+\tt = rte_atomic_fetch_add_explicit(&v->token, 1, rte_memory_order_release) + 1;\n \n \treturn t;\n }\n@@ -493,16 +493,16 @@ struct rte_rcu_qsbr_dq_parameters {\n \t * Later loads of the shared data structure should not move\n \t * above this load. 
Hence, use load-acquire.\n \t */\n-\tt = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);\n+\tt = rte_atomic_load_explicit(&v->token, rte_memory_order_acquire);\n \n \t/* Check if there are updates available from the writer.\n \t * Inform the writer that updates are visible to this reader.\n \t * Prior loads of the shared data structure should not move\n \t * beyond this store. Hence use store-release.\n \t */\n-\tif (t != __atomic_load_n(&v->qsbr_cnt[thread_id].cnt, __ATOMIC_RELAXED))\n-\t\t__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,\n-\t\t\t\t\t t, __ATOMIC_RELEASE);\n+\tif (t != rte_atomic_load_explicit(&v->qsbr_cnt[thread_id].cnt, rte_memory_order_relaxed))\n+\t\trte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,\n+\t\t\t\t\t t, rte_memory_order_release);\n \n \t__RTE_RCU_DP_LOG(DEBUG, \"%s: update: token = %\" PRIu64 \", Thread ID = %d\",\n \t\t__func__, t, thread_id);\n@@ -517,7 +517,7 @@ struct rte_rcu_qsbr_dq_parameters {\n \tuint32_t i, j, id;\n \tuint64_t bmap;\n \tuint64_t c;\n-\tuint64_t *reg_thread_id;\n+\tRTE_ATOMIC(uint64_t) *reg_thread_id;\n \tuint64_t acked_token = __RTE_QSBR_CNT_MAX;\n \n \tfor (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);\n@@ -526,7 +526,7 @@ struct rte_rcu_qsbr_dq_parameters {\n \t\t/* Load the current registered thread bit map before\n \t\t * loading the reader thread quiescent state counters.\n \t\t */\n-\t\tbmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);\n+\t\tbmap = rte_atomic_load_explicit(reg_thread_id, rte_memory_order_acquire);\n \t\tid = i << __RTE_QSBR_THRID_INDEX_SHIFT;\n \n \t\twhile (bmap) {\n@@ -534,9 +534,9 @@ struct rte_rcu_qsbr_dq_parameters {\n \t\t\t__RTE_RCU_DP_LOG(DEBUG,\n \t\t\t\t\"%s: check: token = %\" PRIu64 \", wait = %d, Bit Map = 0x%\" PRIx64 \", Thread ID = %d\",\n \t\t\t\t__func__, t, wait, bmap, id + j);\n-\t\t\tc = __atomic_load_n(\n+\t\t\tc = rte_atomic_load_explicit(\n \t\t\t\t\t&v->qsbr_cnt[id + j].cnt,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\t\t\trte_memory_order_acquire);\n \t\t\t__RTE_RCU_DP_LOG(DEBUG,\n \t\t\t\t\"%s: status: token = %\" PRIu64 \", wait = %d, Thread QS cnt = %\" PRIu64 \", Thread ID = %d\",\n \t\t\t\t__func__, t, wait, c, id+j);\n@@ -554,8 +554,8 @@ struct rte_rcu_qsbr_dq_parameters {\n \t\t\t\t/* This thread might have unregistered.\n \t\t\t\t * Re-read the bitmap.\n \t\t\t\t */\n-\t\t\t\tbmap = __atomic_load_n(reg_thread_id,\n-\t\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\t\tbmap = rte_atomic_load_explicit(reg_thread_id,\n+\t\t\t\t\t\trte_memory_order_acquire);\n \n \t\t\t\tcontinue;\n \t\t\t}\n@@ -576,8 +576,8 @@ struct rte_rcu_qsbr_dq_parameters {\n \t * no need to update this very accurately using compare-and-swap.\n \t */\n \tif (acked_token != __RTE_QSBR_CNT_MAX)\n-\t\t__atomic_store_n(&v->acked_token, acked_token,\n-\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&v->acked_token, acked_token,\n+\t\t\trte_memory_order_relaxed);\n \n \treturn 1;\n }\n@@ -598,7 +598,7 @@ struct rte_rcu_qsbr_dq_parameters {\n \t\t\t\"%s: check: token = %\" PRIu64 \", wait = %d, Thread ID = %d\",\n \t\t\t__func__, t, wait, i);\n \t\twhile (1) {\n-\t\t\tc = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);\n+\t\t\tc = rte_atomic_load_explicit(&cnt->cnt, rte_memory_order_acquire);\n \t\t\t__RTE_RCU_DP_LOG(DEBUG,\n \t\t\t\t\"%s: status: token = %\" PRIu64 \", wait = %d, Thread QS cnt = %\" PRIu64 \", Thread ID = %d\",\n \t\t\t\t__func__, t, wait, c, i);\n@@ -628,8 +628,8 @@ struct rte_rcu_qsbr_dq_parameters {\n \t * no need to update this very accurately using compare-and-swap.\n \t */\n \tif 
(acked_token != __RTE_QSBR_CNT_MAX)\n-\t\t__atomic_store_n(&v->acked_token, acked_token,\n-\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&v->acked_token, acked_token,\n+\t\t\trte_memory_order_relaxed);\n \n \treturn 1;\n }\n",
    "prefixes": [
        "v3",
        "09/19"
    ]
}
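
The "mbox" and "diff" fields above make the patch easy to consume from scripts. A minimal sketch, again with Python's requests library and assuming only the fields present in this response (the output file names are arbitrary):

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/133330/").json()

# The "diff" field holds the raw unified diff as a single string.
with open("rcu-stdatomic.diff", "w") as f:
    f.write(patch["diff"])

# The "mbox" URL serves the full message (headers, commit message,
# and diff), in a form suitable for git am.
mbox = requests.get(patch["mbox"])
with open("rcu-stdatomic.mbox", "wb") as f:
    f.write(mbox.content)

The saved mbox can then be applied to a DPDK tree with "git am rcu-stdatomic.mbox".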