get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch.

GET /api/patches/97199/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97199,
    "url": "https://patches.dpdk.org/api/patches/97199/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210823054952.15001-5-joyce.kong@arm.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210823054952.15001-5-joyce.kong@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210823054952.15001-5-joyce.kong@arm.com",
    "date": "2021-08-23T05:49:48",
    "name": "[v2,4/8] examples/performance-thread: use compiler atomics for sync",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d37d9fc05ae0504a1cf33138a9d76a906947ac9f",
    "submitter": {
        "id": 970,
        "url": "https://patches.dpdk.org/api/people/970/?format=api",
        "name": "Joyce Kong",
        "email": "joyce.kong@arm.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210823054952.15001-5-joyce.kong@arm.com/mbox/",
    "series": [
        {
            "id": 18395,
            "url": "https://patches.dpdk.org/api/series/18395/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=18395",
            "date": "2021-08-23T05:49:44",
            "name": "use compiler atomic builtins for examples",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/18395/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/97199/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/97199/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6015EA0C56;\n\tMon, 23 Aug 2021 07:50:40 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2C36141159;\n\tMon, 23 Aug 2021 07:50:27 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by mails.dpdk.org (Postfix) with ESMTP id 4E8A141158\n for <dev@dpdk.org>; Mon, 23 Aug 2021 07:50:26 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id CCF531FB;\n Sun, 22 Aug 2021 22:50:25 -0700 (PDT)",
            "from net-arm-n1sdp.shanghai.arm.com (net-arm-n1sdp.shanghai.arm.com\n [10.169.208.222])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 7A6083F5A1;\n Sun, 22 Aug 2021 22:50:23 -0700 (PDT)"
        ],
        "From": "Joyce Kong <joyce.kong@arm.com>",
        "To": "John McNamara <john.mcnamara@intel.com>",
        "Cc": "dev@dpdk.org, thomas@monjalon.net, david.marchand@redhat.com,\n honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com, nd@arm.com",
        "Date": "Mon, 23 Aug 2021 00:49:48 -0500",
        "Message-Id": "<20210823054952.15001-5-joyce.kong@arm.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210823054952.15001-1-joyce.kong@arm.com>",
        "References": "<20210823054952.15001-1-joyce.kong@arm.com>",
        "Subject": "[dpdk-dev] [PATCH v2 4/8] examples/performance-thread: use compiler\n atomics for sync",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Convert rte_atomic usages to compiler atomic built-ins\nfor thread sync.\n\nSigned-off-by: Joyce Kong <joyce.kong@arm.com>\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\n---\n examples/performance-thread/common/lthread.c  | 10 +++---\n .../performance-thread/common/lthread_diag.h  | 10 +++---\n .../performance-thread/common/lthread_int.h   |  1 -\n .../performance-thread/common/lthread_mutex.c | 26 +++++++-------\n .../performance-thread/common/lthread_mutex.h |  2 +-\n .../performance-thread/common/lthread_sched.c | 34 ++++++++-----------\n .../performance-thread/common/lthread_tls.c   |  5 +--\n .../performance-thread/l3fwd-thread/main.c    | 22 +++++-------\n 8 files changed, 53 insertions(+), 57 deletions(-)",
    "diff": "diff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c\nindex 3f1f48db43..98123f34f8 100644\n--- a/examples/performance-thread/common/lthread.c\n+++ b/examples/performance-thread/common/lthread.c\n@@ -357,9 +357,10 @@ void lthread_exit(void *ptr)\n \t *  - if exit before join then we suspend and resume on join\n \t *  - if join before exit then we resume the joining thread\n \t */\n+\tuint64_t join_initial = LT_JOIN_INITIAL;\n \tif ((lt->join == LT_JOIN_INITIAL)\n-\t    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,\n-\t\t\t\t   LT_JOIN_EXITING)) {\n+\t    && __atomic_compare_exchange_n(&lt->join, &join_initial,\n+\t\tLT_JOIN_EXITING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n \n \t\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 1, 0);\n \t\t_suspend();\n@@ -415,9 +416,10 @@ int lthread_join(struct lthread *lt, void **ptr)\n \t *  - if join before exit we suspend and will resume when exit is called\n \t *  - if exit before join we resume the exiting thread\n \t */\n+\tuint64_t join_initial = LT_JOIN_INITIAL;\n \tif ((lt->join == LT_JOIN_INITIAL)\n-\t    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,\n-\t\t\t\t   LT_JOIN_THREAD_SET)) {\n+\t    && __atomic_compare_exchange_n(&lt->join, &join_initial,\n+\t\tLT_JOIN_THREAD_SET, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n \n \t\tDIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 1);\n \t\t_suspend();\ndiff --git a/examples/performance-thread/common/lthread_diag.h b/examples/performance-thread/common/lthread_diag.h\nindex e876dda6da..7ee89eef38 100644\n--- a/examples/performance-thread/common/lthread_diag.h\n+++ b/examples/performance-thread/common/lthread_diag.h\n@@ -78,11 +78,11 @@ extern uint64_t diag_mask;\n \t}\t\t\t\t\t\t\t\t\\\n } while (0)\n \n-#define DIAG_COUNT_DEFINE(x) rte_atomic64_t count_##x\n-#define DIAG_COUNT_INIT(o, x) rte_atomic64_init(&((o)->count_##x))\n-#define DIAG_COUNT_INC(o, x) rte_atomic64_inc(&((o)->count_##x))\n-#define 
DIAG_COUNT_DEC(o, x) rte_atomic64_dec(&((o)->count_##x))\n-#define DIAG_COUNT(o, x) rte_atomic64_read(&((o)->count_##x))\n+#define DIAG_COUNT_DEFINE(x) uint64_t count_##x\n+#define DIAG_COUNT_INIT(o, x) __atomic_store_n(&((o)->count_##x), 0, __ATOMIC_RELAXED)\n+#define DIAG_COUNT_INC(o, x) __atomic_fetch_add(&((o)->count_##x), 1, __ATOMIC_RELAXED)\n+#define DIAG_COUNT_DEC(o, x) __atomic_fetch_sub(&((o)->count_##x), 1, __ATOMIC_RELAXED)\n+#define DIAG_COUNT(o, x) __atomic_load_n(&((o)->count_##x), __ATOMIC_RELAXED)\n \n #define DIAG_USED\n \ndiff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h\nindex a352f13b75..d010126f16 100644\n--- a/examples/performance-thread/common/lthread_int.h\n+++ b/examples/performance-thread/common/lthread_int.h\n@@ -21,7 +21,6 @@ extern \"C\" {\n #include <rte_cycles.h>\n #include <rte_per_lcore.h>\n #include <rte_timer.h>\n-#include <rte_atomic_64.h>\n #include <rte_spinlock.h>\n #include <ctx.h>\n \ndiff --git a/examples/performance-thread/common/lthread_mutex.c b/examples/performance-thread/common/lthread_mutex.c\nindex 01da6cad4f..061fc5c19a 100644\n--- a/examples/performance-thread/common/lthread_mutex.c\n+++ b/examples/performance-thread/common/lthread_mutex.c\n@@ -60,7 +60,7 @@ lthread_mutex_init(char *name, struct lthread_mutex **mutex,\n \tm->root_sched = THIS_SCHED;\n \tm->owner = NULL;\n \n-\trte_atomic64_init(&m->count);\n+\t__atomic_store_n(&m->count, 0, __ATOMIC_RELAXED);\n \n \tDIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);\n \t/* success */\n@@ -115,10 +115,11 @@ int lthread_mutex_lock(struct lthread_mutex *m)\n \t}\n \n \tfor (;;) {\n-\t\trte_atomic64_inc(&m->count);\n+\t\t__atomic_fetch_add(&m->count, 1, __ATOMIC_RELAXED);\n \t\tdo {\n-\t\t\tif (rte_atomic64_cmpset\n-\t\t\t    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {\n+\t\t\tuint64_t lt_init = 0;\n+\t\t\tif (__atomic_compare_exchange_n((uint64_t *) &m->owner, &lt_init,\n+\t\t\t\t(uint64_t) lt, 0, 
__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n \t\t\t\t/* happy days, we got the lock */\n \t\t\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);\n \t\t\t\treturn 0;\n@@ -126,7 +127,7 @@ int lthread_mutex_lock(struct lthread_mutex *m)\n \t\t\t/* spin due to race with unlock when\n \t\t\t* nothing was blocked\n \t\t\t*/\n-\t\t} while ((rte_atomic64_read(&m->count) == 1) &&\n+\t\t} while ((__atomic_load_n(&m->count, __ATOMIC_RELAXED) == 1) &&\n \t\t\t\t(m->owner == NULL));\n \n \t\t/* queue the current thread in the blocked queue\n@@ -160,16 +161,17 @@ int lthread_mutex_trylock(struct lthread_mutex *m)\n \t\treturn POSIX_ERRNO(EDEADLK);\n \t}\n \n-\trte_atomic64_inc(&m->count);\n-\tif (rte_atomic64_cmpset\n-\t    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {\n+\t__atomic_fetch_add(&m->count, 1, __ATOMIC_RELAXED);\n+\tuint64_t lt_init = 0;\n+\tif (__atomic_compare_exchange_n((uint64_t *) &m->owner, &lt_init,\n+\t\t(uint64_t) lt, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n \t\t/* got the lock */\n \t\tDIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);\n \t\treturn 0;\n \t}\n \n \t/* failed so return busy */\n-\trte_atomic64_dec(&m->count);\n+\t__atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);\n \tDIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));\n \treturn POSIX_ERRNO(EBUSY);\n }\n@@ -193,13 +195,13 @@ int lthread_mutex_unlock(struct lthread_mutex *m)\n \t\treturn POSIX_ERRNO(EPERM);\n \t}\n \n-\trte_atomic64_dec(&m->count);\n+\t__atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);\n \t/* if there are blocked threads then make one ready */\n-\twhile (rte_atomic64_read(&m->count) > 0) {\n+\twhile (__atomic_load_n(&m->count, __ATOMIC_RELAXED) > 0) {\n \t\tunblocked = _lthread_queue_remove(m->blocked);\n \n \t\tif (unblocked != NULL) {\n-\t\t\trte_atomic64_dec(&m->count);\n+\t\t\t__atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);\n \t\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);\n \t\t\tRTE_ASSERT(unblocked->sched != NULL);\n 
\t\t\t_ready_queue_insert((struct lthread_sched *)\ndiff --git a/examples/performance-thread/common/lthread_mutex.h b/examples/performance-thread/common/lthread_mutex.h\nindex cd866f87b8..730092bdf8 100644\n--- a/examples/performance-thread/common/lthread_mutex.h\n+++ b/examples/performance-thread/common/lthread_mutex.h\n@@ -17,7 +17,7 @@ extern \"C\" {\n \n struct lthread_mutex {\n \tstruct lthread *owner;\n-\trte_atomic64_t\tcount;\n+\tuint64_t count;\n \tstruct lthread_queue *blocked __rte_cache_aligned;\n \tstruct lthread_sched *root_sched;\n \tchar\t\t\tname[MAX_MUTEX_NAME_SIZE];\ndiff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c\nindex 38ca0c45cb..3784b010c2 100644\n--- a/examples/performance-thread/common/lthread_sched.c\n+++ b/examples/performance-thread/common/lthread_sched.c\n@@ -22,8 +22,6 @@\n \n #include <rte_prefetch.h>\n #include <rte_per_lcore.h>\n-#include <rte_atomic.h>\n-#include <rte_atomic_64.h>\n #include <rte_log.h>\n #include <rte_common.h>\n #include <rte_branch_prediction.h>\n@@ -47,8 +45,8 @@\n  * When a scheduler shuts down it is assumed that the application is terminating\n  */\n \n-static rte_atomic16_t num_schedulers;\n-static rte_atomic16_t active_schedulers;\n+static uint16_t num_schedulers;\n+static uint16_t active_schedulers;\n \n /* one scheduler per lcore */\n RTE_DEFINE_PER_LCORE(struct lthread_sched *, this_sched) = NULL;\n@@ -64,10 +62,8 @@ uint64_t diag_mask;\n RTE_INIT(lthread_sched_ctor)\n {\n \tmemset(schedcore, 0, sizeof(schedcore));\n-\trte_atomic16_init(&num_schedulers);\n-\trte_atomic16_set(&num_schedulers, 1);\n-\trte_atomic16_init(&active_schedulers);\n-\trte_atomic16_set(&active_schedulers, 0);\n+\t__atomic_store_n(&num_schedulers, 1, __ATOMIC_RELAXED);\n+\t__atomic_store_n(&active_schedulers, 0, __ATOMIC_RELAXED);\n \tdiag_cb = NULL;\n }\n \n@@ -260,8 +256,8 @@ struct lthread_sched *_lthread_sched_create(size_t stack_size)\n  */\n int 
lthread_num_schedulers_set(int num)\n {\n-\trte_atomic16_set(&num_schedulers, num);\n-\treturn (int)rte_atomic16_read(&num_schedulers);\n+\t__atomic_store_n(&num_schedulers, num, __ATOMIC_RELAXED);\n+\treturn (int)__atomic_load_n(&num_schedulers, __ATOMIC_RELAXED);\n }\n \n /*\n@@ -269,7 +265,7 @@ int lthread_num_schedulers_set(int num)\n  */\n int lthread_active_schedulers(void)\n {\n-\treturn (int)rte_atomic16_read(&active_schedulers);\n+\treturn (int)__atomic_load_n(&active_schedulers, __ATOMIC_RELAXED);\n }\n \n \n@@ -299,8 +295,8 @@ void lthread_scheduler_shutdown_all(void)\n \t * for the possibility of a pthread wrapper on lthread_yield(),\n \t * something that is not possible unless the scheduler is running.\n \t */\n-\twhile (rte_atomic16_read(&active_schedulers) <\n-\t       rte_atomic16_read(&num_schedulers))\n+\twhile (__atomic_load_n(&active_schedulers, __ATOMIC_RELAXED) <\n+\t       __atomic_load_n(&num_schedulers, __ATOMIC_RELAXED))\n \t\tsched_yield();\n \n \tfor (i = 0; i < LTHREAD_MAX_LCORES; i++) {\n@@ -415,15 +411,15 @@ static inline int _lthread_sched_isdone(struct lthread_sched *sched)\n  */\n static inline void _lthread_schedulers_sync_start(void)\n {\n-\trte_atomic16_inc(&active_schedulers);\n+\t__atomic_fetch_add(&active_schedulers, 1, __ATOMIC_RELAXED);\n \n \t/* wait for lthread schedulers\n \t * Note we use sched_yield() rather than pthread_yield() to allow\n \t * for the possibility of a pthread wrapper on lthread_yield(),\n \t * something that is not possible unless the scheduler is running.\n \t */\n-\twhile (rte_atomic16_read(&active_schedulers) <\n-\t       rte_atomic16_read(&num_schedulers))\n+\twhile (__atomic_load_n(&active_schedulers, __ATOMIC_RELAXED) <\n+\t       __atomic_load_n(&num_schedulers, __ATOMIC_RELAXED))\n \t\tsched_yield();\n \n }\n@@ -433,15 +429,15 @@ static inline void _lthread_schedulers_sync_start(void)\n  */\n static inline void _lthread_schedulers_sync_stop(void)\n 
{\n-\trte_atomic16_dec(&active_schedulers);\n-\trte_atomic16_dec(&num_schedulers);\n+\t__atomic_fetch_sub(&active_schedulers, 1, __ATOMIC_RELAXED);\n+\t__atomic_fetch_sub(&num_schedulers, 1, __ATOMIC_RELAXED);\n \n \t/* wait for schedulers\n \t * Note we use sched_yield() rather than pthread_yield() to allow\n \t * for the possibility of a pthread wrapper on lthread_yield(),\n \t * something that is not possible unless the scheduler is running.\n \t */\n-\twhile (rte_atomic16_read(&active_schedulers) > 0)\n+\twhile (__atomic_load_n(&active_schedulers, __ATOMIC_RELAXED) > 0)\n \t\tsched_yield();\n \n }\ndiff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c\nindex 07de6cafab..4ab2e3558b 100644\n--- a/examples/performance-thread/common/lthread_tls.c\n+++ b/examples/performance-thread/common/lthread_tls.c\n@@ -18,7 +18,6 @@\n #include <rte_malloc.h>\n #include <rte_log.h>\n #include <rte_ring.h>\n-#include <rte_atomic_64.h>\n \n #include \"lthread_tls.h\"\n #include \"lthread_queue.h\"\n@@ -52,8 +51,10 @@ void _lthread_key_pool_init(void)\n \n \tbzero(key_table, sizeof(key_table));\n \n+\tuint64_t pool_init = 0;\n \t/* only one lcore should do this */\n-\tif (rte_atomic64_cmpset(&key_pool_init, 0, 1)) {\n+\tif (__atomic_compare_exchange_n(&key_pool_init, &pool_init, 1, 0,\n+\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n \n \t\tsnprintf(name,\n \t\t\tMAX_LTHREAD_NAME_SIZE,\ndiff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c\nindex 2f593abf26..ba9ad034e6 100644\n--- a/examples/performance-thread/l3fwd-thread/main.c\n+++ b/examples/performance-thread/l3fwd-thread/main.c\n@@ -26,7 +26,6 @@\n #include <rte_memcpy.h>\n #include <rte_eal.h>\n #include <rte_launch.h>\n-#include <rte_atomic.h>\n #include <rte_cycles.h>\n #include <rte_prefetch.h>\n #include <rte_lcore.h>\n@@ -570,8 +569,8 @@ RTE_DEFINE_PER_LCORE(struct lcore_conf *, lcore_conf);\n  */\n static 
int lthreads_on = 1; /**< Use lthreads for processing*/\n \n-rte_atomic16_t rx_counter;  /**< Number of spawned rx threads */\n-rte_atomic16_t tx_counter;  /**< Number of spawned tx threads */\n+uint16_t rx_counter;  /**< Number of spawned rx threads */\n+uint16_t tx_counter;  /**< Number of spawned tx threads */\n \n struct thread_conf {\n \tuint16_t lcore_id;      /**< Initial lcore for rx thread */\n@@ -1910,11 +1909,8 @@ cpu_load_collector(__rte_unused void *arg) {\n \tprintf(\"Waiting for %d rx threads and %d tx threads\\n\", n_rx_thread,\n \t\t\tn_tx_thread);\n \n-\twhile (rte_atomic16_read(&rx_counter) < n_rx_thread)\n-\t\trte_pause();\n-\n-\twhile (rte_atomic16_read(&tx_counter) < n_tx_thread)\n-\t\trte_pause();\n+\trte_wait_until_equal_16(&rx_counter, n_rx_thread, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tx_counter, n_tx_thread, __ATOMIC_RELAXED);\n \n \tfor (i = 0; i < n_rx_thread; i++) {\n \n@@ -2036,7 +2032,7 @@ lthread_tx_per_ring(void *dummy)\n \tRTE_LOG(INFO, L3FWD, \"entering main tx loop on lcore %u\\n\", rte_lcore_id());\n \n \tnb_rx = 0;\n-\trte_atomic16_inc(&tx_counter);\n+\t__atomic_fetch_add(&tx_counter, 1, __ATOMIC_RELAXED);\n \twhile (1) {\n \n \t\t/*\n@@ -2161,7 +2157,7 @@ lthread_rx(void *dummy)\n \tworker_id = 0;\n \n \trx_conf->conf.cpu_id = sched_getcpu();\n-\trte_atomic16_inc(&rx_counter);\n+\t__atomic_fetch_add(&rx_counter, 1, __ATOMIC_RELAXED);\n \twhile (1) {\n \n \t\t/*\n@@ -2243,7 +2239,7 @@ lthread_spawner(__rte_unused void *arg)\n \t * scheduler as this lthread, yielding is required to let them to run and\n \t * prevent deadlock here.\n \t */\n-\twhile (rte_atomic16_read(&rx_counter) < n_rx_thread)\n+\twhile (__atomic_load_n(&rx_counter, __ATOMIC_RELAXED) < n_rx_thread)\n \t\tlthread_sleep(100000);\n \n \t/*\n@@ -2323,7 +2319,7 @@ pthread_tx(void *dummy)\n \tRTE_LOG(INFO, L3FWD, \"Entering main Tx loop on lcore %u\\n\", rte_lcore_id());\n \n \ttx_conf->conf.cpu_id = 
sched_getcpu();\n-\trte_atomic16_inc(&tx_counter);\n+\t__atomic_fetch_add(&tx_counter, 1, __ATOMIC_RELAXED);\n \twhile (1) {\n \n \t\tcur_tsc = rte_rdtsc();\n@@ -2406,7 +2402,7 @@ pthread_rx(void *dummy)\n \n \tworker_id = 0;\n \trx_conf->conf.cpu_id = sched_getcpu();\n-\trte_atomic16_inc(&rx_counter);\n+\t__atomic_fetch_add(&rx_counter, 1, __ATOMIC_RELAXED);\n \twhile (1) {\n \n \t\t/*\n",
    "prefixes": [
        "v2",
        "4/8"
    ]
}