get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/138921/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 138921,
    "url": "http://patches.dpdk.org/api/patches/138921/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-46-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1711579078-10624-46-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1711579078-10624-46-git-send-email-roretzla@linux.microsoft.com",
    "date": "2024-03-27T22:37:58",
    "name": "[v3,45/45] app/test-bbdev: use rte stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "80e9fb9014072718a3ba647f6e6518ffcf31a8cf",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-46-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 31633,
            "url": "http://patches.dpdk.org/api/series/31633/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31633",
            "date": "2024-03-27T22:37:13",
            "name": "use stdatomic API",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/31633/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/138921/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/138921/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E541543D55;\n\tWed, 27 Mar 2024 23:42:29 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0142742E59;\n\tWed, 27 Mar 2024 23:39:00 +0100 (CET)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 673EF42D72\n for <dev@dpdk.org>; Wed, 27 Mar 2024 23:38:11 +0100 (CET)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id 0CCDC20E6F46; Wed, 27 Mar 2024 15:38:01 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com 0CCDC20E6F46",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1711579083;\n bh=LjpyQ6k0AdpyCbOceNfmAPXA+35KxWtIn8Q+b/Lwn/M=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=ewrx5vq5nJLv2aPG7pCRScqCWt9FJhEhpFSRHJANVYx8bZpVULjUrya7dGsALJamO\n 8dgJClZ4LmyuKhPCQaFyqDU0ghbKyz8qqiUUvIW7P/LBOvCQ1UczGc32T0krAASbKg\n nUod9doKR94p/sRmiIPK2dca7xC3RiofjMCgWK6U=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>,\n\t=?utf-8?q?Morten_Br=C3=B8rup?= <mb@smartsharesystems.com>,\n Abdullah Sevincer <abdullah.sevincer@intel.com>,\n Ajit Khaparde <ajit.khaparde@broadcom.com>, Alok Prasad <palok@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Anoob Joseph <anoobj@marvell.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Byron Marohn <byron.marohn@intel.com>, Chenbo Xia <chenbox@nvidia.com>,\n Chengwen Feng <fengchengwen@huawei.com>,\n Ciara Loftus <ciara.loftus@intel.com>, Ciara Power <ciara.power@intel.com>,\n Dariusz Sosnowski <dsosnowski@nvidia.com>, David Hunt <david.hunt@intel.com>,\n Devendra Singh Rawat <dsinghrawat@marvell.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Guoyang Zhou <zhouguoyang@huawei.com>, Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jakub Grajciar <jgrajcia@cisco.com>, Jerin Jacob <jerinj@marvell.com>,\n Jeroen de Borst <jeroendb@google.com>, Jian Wang <jianwang@trustnetic.com>,\n Jiawen Wu <jiawenwu@trustnetic.com>, Jie Hai <haijie1@huawei.com>,\n Jingjing Wu <jingjing.wu@intel.com>, Joshua Washington <joshwash@google.com>,\n Joyce Kong <joyce.kong@arm.com>, Junfeng Guo <junfeng.guo@intel.com>,\n Kevin Laatz <kevin.laatz@intel.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Liang Ma <liangma@liangbit.com>, Long Li <longli@microsoft.com>,\n Maciej Czekaj <mczekaj@marvell.com>, Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>, Ori Kam <orika@nvidia.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Peter Mccarthy <peter.mccarthy@intel.com>,\n Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>,\n Reshma Pattan <reshma.pattan@intel.com>, Rosen Xu <rosen.xu@intel.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, 
Rushil Gupta <rushilg@google.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Somnath Kotur <somnath.kotur@broadcom.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Sunil Uttarwar <sunilprakashrao.uttarwar@amd.com>,\n Tetsuya Mukawa <mtetsuyah@gmail.com>, Vamsi Attunuru <vattunuru@marvell.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>,\n Yipeng Wang <yipeng1.wang@intel.com>, Yisen Zhuang <yisen.zhuang@huawei.com>,\n Yuying Zhang <Yuying.Zhang@intel.com>, Yuying Zhang <yuying.zhang@intel.com>,\n Ziyang Xuan <xuanziyang2@huawei.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 45/45] app/test-bbdev: use rte stdatomic API",
        "Date": "Wed, 27 Mar 2024 15:37:58 -0700",
        "Message-Id": "<1711579078-10624-46-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>\n <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional rte stdatomic API.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\nAcked-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------\n 1 file changed, 110 insertions(+), 73 deletions(-)",
    "diff": "diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c\nindex dcce00a..9694ed3 100644\n--- a/app/test-bbdev/test_bbdev_perf.c\n+++ b/app/test-bbdev/test_bbdev_perf.c\n@@ -144,7 +144,7 @@ struct test_op_params {\n \tuint16_t num_to_process;\n \tuint16_t num_lcores;\n \tint vector_mask;\n-\tuint16_t sync;\n+\tRTE_ATOMIC(uint16_t) sync;\n \tstruct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];\n };\n \n@@ -159,9 +159,9 @@ struct thread_params {\n \tuint8_t iter_count;\n \tdouble iter_average;\n \tdouble bler;\n-\tuint16_t nb_dequeued;\n-\tint16_t processing_status;\n-\tuint16_t burst_sz;\n+\tRTE_ATOMIC(uint16_t) nb_dequeued;\n+\tRTE_ATOMIC(int16_t) processing_status;\n+\tRTE_ATOMIC(uint16_t) burst_sz;\n \tstruct test_op_params *op_params;\n \tstruct rte_bbdev_dec_op *dec_ops[MAX_BURST];\n \tstruct rte_bbdev_enc_op *enc_ops[MAX_BURST];\n@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,\n \t}\n \n \tif (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {\n-\t\t__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,\n+\t\t    rte_memory_order_relaxed);\n \t\tprintf(\n \t\t\t\"Dequeue interrupt handler called for incorrect event!\\n\");\n \t\treturn;\n \t}\n \n-\tburst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);\n+\tburst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);\n \tnum_ops = tp->op_params->num_to_process;\n \n \tif (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)\n \t\tdeq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,\n \t\t\t\t&tp->dec_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\trte_atomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t    rte_memory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)\n \t\tdeq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,\n 
\t\t\t\t&tp->dec_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\trte_atomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t    rte_memory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)\n \t\tdeq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,\n \t\t\t\t&tp->enc_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\trte_atomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t    rte_memory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse if (test_vector.op_type == RTE_BBDEV_OP_FFT)\n \t\tdeq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,\n \t\t\t\t&tp->fft_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\trte_atomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t    rte_memory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)\n \t\tdeq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,\n \t\t\t\t&tp->mldts_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\trte_atomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t    rte_memory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse /*RTE_BBDEV_OP_TURBO_ENC*/\n \t\tdeq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,\n \t\t\t\t&tp->enc_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\trte_atomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t    rte_memory_order_relaxed)],\n \t\t\t\tburst_sz);\n \n \tif (deq < burst_sz) {\n \t\tprintf(\n \t\t\t\"After receiving the interrupt all operations should be dequeued. 
Expected: %u, got: %u\\n\",\n \t\t\tburst_sz, deq);\n-\t\t__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,\n+\t\t    rte_memory_order_relaxed);\n \t\treturn;\n \t}\n \n-\tif (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {\n-\t\t__atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);\n+\tif (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {\n+\t\trte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);\n \t\treturn;\n \t}\n \n@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tif (ret) {\n \t\tprintf(\"Buffers validation failed\\n\");\n-\t\t__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,\n+\t\t    rte_memory_order_relaxed);\n \t}\n \n \tswitch (test_vector.op_type) {\n@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\tbreak;\n \tdefault:\n \t\tprintf(\"Unknown op type: %d\\n\", test_vector.op_type);\n-\t\t__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,\n+\t\t    rte_memory_order_relaxed);\n \t\treturn;\n \t}\n \n@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,\n \ttp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /\n \t\t\t((double)total_time / (double)rte_get_tsc_hz());\n \n-\t__atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);\n }\n \n static int\n@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\t__atomic_store_n(&tp->processing_status, 0, 
__ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\t\tnum_to_process);\n@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t * the number of operations is not a multiple of\n \t\t\t * burst size.\n \t\t\t */\n-\t\t\t__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->burst_sz, num_to_enq,\n+\t\t\t    rte_memory_order_relaxed);\n \n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,\n+\t\t\t    rte_memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t 
*)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\t\tnum_to_process);\n@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t * the number of operations is not a multiple of\n \t\t\t * burst size.\n \t\t\t */\n-\t\t\t__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->burst_sz, num_to_enq,\n+\t\t\t    rte_memory_order_relaxed);\n \n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,\n+\t\t\t    rte_memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\tnum_to_process);\n@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t * the number of operations is not a multiple of\n \t\t\t * burst size.\n \t\t\t 
*/\n-\t\t\t__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->burst_sz, num_to_enq,\n+\t\t\t    rte_memory_order_relaxed);\n \n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,\n+\t\t\t    rte_memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\tnum_to_process);\n@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t * the number of operations is not a multiple of\n \t\t\t * burst size.\n \t\t\t */\n-\t\t\t__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->burst_sz, num_to_enq,\n+\t\t\t    rte_memory_order_relaxed);\n \n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16((uint16_t 
*)(uintptr_t)&tp->nb_dequeued, enqueued,\n+\t\t\t    rte_memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\tnum_to_process);\n@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t * the number of operations is not a multiple of\n \t\t\t * burst size.\n \t\t\t */\n-\t\t\t__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->burst_sz, num_to_enq,\n+\t\t\t    rte_memory_order_relaxed);\n \n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,\n+\t\t\t    rte_memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device 
*ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_to_process);\n@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t * the number of operations is not a multiple of\n \t\t\t * burst size.\n \t\t\t */\n-\t\t\t__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->burst_sz, num_to_enq,\n+\t\t\t    rte_memory_order_relaxed);\n \n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,\n+\t\t\t    rte_memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = 
rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, 
SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,\n \t\t\tnum_ops);\n@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,\n \t\t\tnum_ops);\n@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,\n+\t    rte_memory_order_relaxed);\n \n \tret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,\n \telse\n \t\treturn TEST_SKIPPED;\n \n-\t__atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);\n \n \t/* Main core is set at first entry */\n \tt_params[0].dev_id = ad->dev_id;\n@@ -4921,7 +4957,7 @@ 
typedef int (test_case_function)(struct active_device *ad,\n \t\t\t\t&t_params[used_cores++], lcore_id);\n \t}\n \n-\t__atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);\n \tret = bler_function(&t_params[0]);\n \n \t/* Main core is always used */\n@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\tthroughput_function = throughput_pmd_lcore_enc;\n \t}\n \n-\t__atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);\n \n \t/* Main core is set at first entry */\n \tt_params[0].dev_id = ad->dev_id;\n@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t\t&t_params[used_cores++], lcore_id);\n \t}\n \n-\t__atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);\n \tret = throughput_function(&t_params[0]);\n \n \t/* Main core is always used */\n@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,\n \t * Wait for main lcore operations.\n \t */\n \ttp = &t_params[0];\n-\twhile ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <\n+\twhile ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <\n \t\top_params->num_to_process) &&\n-\t\t(__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=\n+\t\t(rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=\n \t\tTEST_FAILED))\n \t\trte_pause();\n \n \ttp->ops_per_sec /= TEST_REPETITIONS;\n \ttp->mbps /= TEST_REPETITIONS;\n-\tret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);\n+\tret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);\n \n \t/* Wait for worker lcores operations */\n \tfor (used_cores = 1; used_cores < num_lcores; used_cores++) {\n 
\t\ttp = &t_params[used_cores];\n \n-\t\twhile ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <\n+\t\twhile ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <\n \t\t\top_params->num_to_process) &&\n-\t\t\t(__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=\n-\t\t\tTEST_FAILED))\n+\t\t\t(rte_atomic_load_explicit(&tp->processing_status,\n+\t\t\t    rte_memory_order_relaxed) != TEST_FAILED))\n \t\t\trte_pause();\n \n \t\ttp->ops_per_sec /= TEST_REPETITIONS;\n \t\ttp->mbps /= TEST_REPETITIONS;\n-\t\tret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);\n+\t\tret |= (int)rte_atomic_load_explicit(&tp->processing_status,\n+\t\t    rte_memory_order_relaxed);\n \t}\n \n \t/* Print throughput if test passed */\n",
    "prefixes": [
        "v3",
        "45/45"
    ]
}