get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
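
For illustration only, a write against this endpoint would look like the sketch below. This is a hypothetical request, not taken from the page: the token value is a placeholder, updates require an authenticated maintainer account for the project, and only patch metadata fields (for example "state", "delegate", "archived") are expected to be writable.

PATCH /api/patches/138878/ HTTP/1.1
Host: patches.dpdk.org
Authorization: Token <your-api-token>
Content-Type: application/json

{"state": "accepted"}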

GET /api/patches/138878/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 138878,
    "url": "http://patches.dpdk.org/api/patches/138878/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-2-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1711579078-10624-2-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1711579078-10624-2-git-send-email-roretzla@linux.microsoft.com",
    "date": "2024-03-27T22:37:14",
    "name": "[v3,01/45] net/mlx5: use rte stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "18e88e4524c8e1167fadaceb572c144f358e3a4b",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-2-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 31633,
            "url": "http://patches.dpdk.org/api/series/31633/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31633",
            "date": "2024-03-27T22:37:13",
            "name": "use stdatomic API",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/31633/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/138878/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/138878/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3128243D55;\n\tWed, 27 Mar 2024 23:38:14 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E02AD41151;\n\tWed, 27 Mar 2024 23:38:04 +0100 (CET)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id B190A402C0\n for <dev@dpdk.org>; Wed, 27 Mar 2024 23:38:00 +0100 (CET)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id D24AA20E67D1; Wed, 27 Mar 2024 15:37:59 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com D24AA20E67D1",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1711579079;\n bh=8yqVQA0CCw7BFhPZ1yvZP2KIVbhzK/iTuKcOllRvLgI=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=CfUUy27vhnqskDv0Suxr4gIQLeq36gPPlU97FLgbSCCo/40cDwRE7i8DqeEpdVEe4\n 6V1TFo/UR5CH90zv02/6BckHvx+tYX/XDG8/4ZXYY/a1J0HMSFG21OPWuTOPzsxw+I\n KKRpZYLdNoshBPRyTu0PoMgUGraWUGnuzSun8oso=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>,\n\t=?utf-8?q?Morten_Br=C3=B8rup?= <mb@smartsharesystems.com>,\n Abdullah Sevincer <abdullah.sevincer@intel.com>,\n Ajit Khaparde <ajit.khaparde@broadcom.com>, Alok Prasad <palok@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Anoob Joseph <anoobj@marvell.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Byron Marohn <byron.marohn@intel.com>, Chenbo Xia <chenbox@nvidia.com>,\n Chengwen Feng <fengchengwen@huawei.com>,\n Ciara Loftus <ciara.loftus@intel.com>, Ciara Power <ciara.power@intel.com>,\n Dariusz Sosnowski <dsosnowski@nvidia.com>, David Hunt <david.hunt@intel.com>,\n Devendra Singh Rawat <dsinghrawat@marvell.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Guoyang Zhou <zhouguoyang@huawei.com>, Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jakub Grajciar <jgrajcia@cisco.com>, Jerin Jacob <jerinj@marvell.com>,\n Jeroen de Borst <jeroendb@google.com>, Jian Wang <jianwang@trustnetic.com>,\n Jiawen Wu <jiawenwu@trustnetic.com>, Jie Hai <haijie1@huawei.com>,\n Jingjing Wu <jingjing.wu@intel.com>, Joshua Washington <joshwash@google.com>,\n Joyce Kong <joyce.kong@arm.com>, Junfeng Guo <junfeng.guo@intel.com>,\n Kevin Laatz <kevin.laatz@intel.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Liang Ma <liangma@liangbit.com>, Long Li <longli@microsoft.com>,\n Maciej Czekaj <mczekaj@marvell.com>, Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>, Ori Kam <orika@nvidia.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Peter Mccarthy <peter.mccarthy@intel.com>,\n Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>,\n Reshma Pattan <reshma.pattan@intel.com>, Rosen Xu <rosen.xu@intel.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, Rushil Gupta <rushilg@google.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Somnath Kotur <somnath.kotur@broadcom.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Sunil Uttarwar <sunilprakashrao.uttarwar@amd.com>,\n Tetsuya Mukawa <mtetsuyah@gmail.com>, Vamsi Attunuru <vattunuru@marvell.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>,\n Yipeng Wang <yipeng1.wang@intel.com>, Yisen Zhuang <yisen.zhuang@huawei.com>,\n Yuying Zhang <Yuying.Zhang@intel.com>, Yuying Zhang <yuying.zhang@intel.com>,\n Ziyang Xuan <xuanziyang2@huawei.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 01/45] net/mlx5: use rte stdatomic API",
        "Date": "Wed, 27 Mar 2024 15:37:14 -0700",
        "Message-Id": "<1711579078-10624-2-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>\n <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional rte stdatomic API.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\nAcked-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n drivers/net/mlx5/linux/mlx5_ethdev_os.c |   6 +-\n drivers/net/mlx5/linux/mlx5_verbs.c     |   9 ++-\n drivers/net/mlx5/mlx5.c                 |   9 ++-\n drivers/net/mlx5/mlx5.h                 |  66 ++++++++---------\n drivers/net/mlx5/mlx5_flow.c            |  37 +++++-----\n drivers/net/mlx5/mlx5_flow.h            |   8 +-\n drivers/net/mlx5/mlx5_flow_aso.c        |  43 ++++++-----\n drivers/net/mlx5/mlx5_flow_dv.c         | 126 ++++++++++++++++----------------\n drivers/net/mlx5/mlx5_flow_flex.c       |  14 ++--\n drivers/net/mlx5/mlx5_flow_hw.c         |  61 +++++++++-------\n drivers/net/mlx5/mlx5_flow_meter.c      |  30 ++++----\n drivers/net/mlx5/mlx5_flow_quota.c      |  32 ++++----\n drivers/net/mlx5/mlx5_hws_cnt.c         |  71 +++++++++---------\n drivers/net/mlx5/mlx5_hws_cnt.h         |  10 +--\n drivers/net/mlx5/mlx5_rx.h              |  14 ++--\n drivers/net/mlx5/mlx5_rxq.c             |  30 ++++----\n drivers/net/mlx5/mlx5_trigger.c         |   2 +-\n drivers/net/mlx5/mlx5_tx.h              |  18 ++---\n drivers/net/mlx5/mlx5_txpp.c            |  84 ++++++++++-----------\n drivers/net/mlx5/mlx5_txq.c             |  12 +--\n drivers/net/mlx5/mlx5_utils.c           |  10 +--\n drivers/net/mlx5/mlx5_utils.h           |   4 +-\n 22 files changed, 351 insertions(+), 345 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c\nindex 40ea9d2..70bba6c 100644\n--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c\n@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)\n \t\treturn -ENOTSUP;\n \t}\n \t/* Check there is no concurrent mapping in other thread. */\n-\tif (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,\n-\t\t\t\t\t base, false,\n-\t\t\t\t\t __ATOMIC_RELAXED, __ATOMIC_RELAXED))\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,\n+\t\t\t\t\t base,\n+\t\t\t\t\t rte_memory_order_relaxed, rte_memory_order_relaxed))\n \t\trte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));\n \treturn 0;\n }\ndiff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c\nindex b54f3cc..63da8f4 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.c\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.c\n@@ -1117,7 +1117,7 @@\n \t\treturn 0;\n \t}\n \t/* Only need to check refcnt, 0 after \"sh\" is allocated. */\n-\tif (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {\n+\tif (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {\n \t\tMLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);\n \t\tpriv->lb_used = 1;\n \t\treturn 0;\n@@ -1163,7 +1163,7 @@\n \t\tclaim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));\n \t\tsh->self_lb.ibv_cq = NULL;\n \t}\n-\t__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);\n \treturn -rte_errno;\n #else\n \tRTE_SET_USED(dev);\n@@ -1186,8 +1186,9 @@\n \n \tif (!priv->lb_used)\n \t\treturn;\n-\tMLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));\n-\tif (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {\n+\tMLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));\n+\tif (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,\n+\t    rte_memory_order_relaxed) - 1)) {\n \t\tif (sh->self_lb.qp) {\n \t\t\tclaim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));\n \t\t\tsh->self_lb.qp = NULL;\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex d1a6382..2ff94db 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -855,8 +855,8 @@\n \t\tct_pool = mng->pools[idx];\n \t\tfor (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {\n \t\t\tct = &ct_pool->actions[i];\n-\t\t\tval = __atomic_fetch_sub(&ct->refcnt, 1,\n-\t\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\tval = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,\n+\t\t\t\t\t\t rte_memory_order_relaxed);\n \t\t\tMLX5_ASSERT(val == 1);\n \t\t\tif (val > 1)\n \t\t\t\tcnt++;\n@@ -1082,7 +1082,8 @@\n \t\tDRV_LOG(ERR, \"Dynamic flex parser is not supported on HWS\");\n \t\treturn -ENOTSUP;\n \t}\n-\tif (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)\n+\tif (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,\n+\t    rte_memory_order_relaxed) + 1 > 1)\n \t\treturn 0;\n \tpriv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,\n \t\t\tsizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);\n@@ -1173,7 +1174,7 @@\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;\n \n-\tif (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)\n+\tif (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, 
rte_memory_order_relaxed) - 1)\n \t\treturn;\n \tmlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);\n \tmlx5_free(fp->flex.devx_fp);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 0091a24..77c84b8 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -378,7 +378,7 @@ struct mlx5_drop {\n struct mlx5_lb_ctx {\n \tstruct ibv_qp *qp; /* QP object. */\n \tvoid *ibv_cq; /* Completion queue. */\n-\tuint16_t refcnt; /* Reference count for representors. */\n+\tRTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */\n };\n \n /* HW steering queue job descriptor type. */\n@@ -481,10 +481,10 @@ enum mlx5_counter_type {\n \n /* Counter age parameter. */\n struct mlx5_age_param {\n-\tuint16_t state; /**< Age state (atomically accessed). */\n+\tRTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */\n \tuint16_t port_id; /**< Port id of the counter. */\n \tuint32_t timeout:24; /**< Aging timeout in seconds. */\n-\tuint32_t sec_since_last_hit;\n+\tRTE_ATOMIC(uint32_t) sec_since_last_hit;\n \t/**< Time in seconds since last hit (atomically accessed). */\n \tvoid *context; /**< Flow counter age context. */\n };\n@@ -497,7 +497,7 @@ struct flow_counter_stats {\n /* Shared counters information for counters. */\n struct mlx5_flow_counter_shared {\n \tunion {\n-\t\tuint32_t refcnt; /* Only for shared action management. */\n+\t\tRTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */\n \t\tuint32_t id; /* User counter ID for legacy sharing. */\n \t};\n };\n@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {\n \n /* Counter global management structure. */\n struct mlx5_flow_counter_mng {\n-\tvolatile uint16_t n_valid; /* Number of valid pools. */\n+\tvolatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */\n \tuint16_t last_pool_idx; /* Last used pool index */\n \tint min_id; /* The minimum counter ID in the pools. */\n \tint max_id; /* The maximum counter ID in the pools. */\n@@ -654,7 +654,7 @@ struct mlx5_aso_sq {\n struct mlx5_aso_age_action {\n \tLIST_ENTRY(mlx5_aso_age_action) next;\n \tvoid *dr_action;\n-\tuint32_t refcnt;\n+\tRTE_ATOMIC(uint32_t) refcnt;\n \t/* Following fields relevant only when action is active. */\n \tuint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */\n \tstruct mlx5_age_param age_params;\n@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {\n \trte_be16_t option_class; /* geneve tlv opt class.*/\n \tuint8_t option_type; /* geneve tlv opt type.*/\n \tuint8_t length; /* geneve tlv opt length. */\n-\tuint32_t refcnt; /* geneve tlv object reference counter */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */\n };\n \n \n@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {\n \tuint16_t group;\n \t/* The group. */\n \trte_spinlock_t sl;\n-\tuint32_t ref_cnt;\n+\tRTE_ATOMIC(uint32_t) ref_cnt;\n \t/* Use count. */\n \tstruct rte_flow_pattern_template *hws_item_templ;\n \t/* Hardware steering item templates. */\n@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {\n \t\tstruct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;\n \t\t/**< srtcm_rfc2697 struct. */\n \t};\n-\tuint32_t ref_cnt; /**< Use count. */\n+\tRTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */\n \tuint32_t g_support:1; /**< If G color will be generated. */\n \tuint32_t y_support:1; /**< If Y color will be generated. */\n \tuint32_t initialized:1; /**< Initialized. 
*/\n@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {\n \tenum mlx5_aso_mtr_type type;\n \tstruct mlx5_flow_meter_info fm;\n \t/**< Pointer to the next aso flow meter structure. */\n-\tuint8_t state; /**< ASO flow meter state. */\n+\tRTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */\n \tuint32_t offset;\n \tenum rte_color init_color;\n };\n@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {\n \t/* Default policy table. */\n \tuint32_t def_policy_id;\n \t/* Default policy id. */\n-\tuint32_t def_policy_ref_cnt;\n+\tRTE_ATOMIC(uint32_t) def_policy_ref_cnt;\n \t/** def_policy meter use count. */\n \tstruct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];\n \t/* Meter drop table. */\n@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {\n \n /* Tx packet pacing internal timestamp. */\n struct mlx5_txpp_ts {\n-\tuint64_t ci_ts;\n-\tuint64_t ts;\n+\tRTE_ATOMIC(uint64_t) ci_ts;\n+\tRTE_ATOMIC(uint64_t) ts;\n };\n \n /* Tx packet pacing structure. */\n@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {\n \tstruct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */\n \tuint32_t sync_lost:1; /* ci/timestamp synchronization lost. */\n \t/* Statistics counters. */\n-\tuint64_t err_miss_int; /* Missed service interrupt. */\n-\tuint64_t err_rearm_queue; /* Rearm Queue errors. */\n-\tuint64_t err_clock_queue; /* Clock Queue errors. */\n-\tuint64_t err_ts_past; /* Timestamp in the past. */\n-\tuint64_t err_ts_future; /* Timestamp in the distant future. */\n-\tuint64_t err_ts_order; /* Timestamp not in ascending order. */\n+\tRTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */\n+\tRTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */\n+\tRTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */\n+\tRTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */\n+\tRTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */\n+\tRTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */\n };\n \n /* Sample ID information of eCPRI flex parser structure. */\n@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {\n \tvoid *dr_action_orig;\n \t/* General action object for reply dir. */\n \tvoid *dr_action_rply;\n-\tuint32_t refcnt; /* Action used count in device flows. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */\n \tuint32_t offset; /* Offset of ASO CT in DevX objects bulk. */\n \tuint16_t peer; /* The only peer port index could also use this CT. */\n-\tenum mlx5_aso_ct_state state; /* ASO CT state. */\n+\tRTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */\n \tbool is_original; /* The direction of the DR action to be used. */\n };\n \n /* CT action object state update. */\n #define MLX5_ASO_CT_UPDATE_STATE(c, s) \\\n-\t__atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)\n+\trte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)\n \n #ifdef PEDANTIC\n #pragma GCC diagnostic ignored \"-Wpedantic\"\n@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {\n /* Port flex item context. */\n struct mlx5_flex_item {\n \tstruct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */\n-\tuint32_t refcnt; /* Atomically accessed refcnt by flows. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */\n \tenum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */\n \tuint32_t mapnum; /* Number of pattern translation entries. 
*/\n \tstruct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];\n@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {\n #define MLX5_SRV6_SAMPLE_NUM 5\n /* Mlx5 internal flex parser profile structure. */\n struct mlx5_internal_flex_parser_profile {\n-\tuint32_t refcnt;\n+\tRTE_ATOMIC(uint32_t) refcnt;\n \tstruct mlx5_flex_item flex; /* Hold map info for modify field. */\n };\n \n@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \tstruct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];\n #endif\n-\tstruct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */\n-\tstruct mlx5_hlist *modify_cmds;\n-\tstruct mlx5_hlist *tag_table;\n+\tRTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */\n+\tRTE_ATOMIC(struct mlx5_hlist *) modify_cmds;\n+\tRTE_ATOMIC(struct mlx5_hlist *) tag_table;\n \tstruct mlx5_list *port_id_action_list; /* Port ID action list. */\n \tstruct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */\n \tstruct mlx5_list *sample_action_list; /* List of sample actions. */\n@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {\n \t/* SW steering counters management structure. */\n \tvoid *default_miss_action; /* Default miss action. */\n \tstruct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];\n-\tstruct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];\n+\tRTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];\n \t/* Shared interrupt handler section. */\n \tstruct rte_intr_handle *intr_handle; /* Interrupt handler for device. */\n \tstruct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */\n@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {\n  * Caution, secondary process may rebuild the struct during port start.\n  */\n struct mlx5_proc_priv {\n-\tvoid *hca_bar;\n+\tRTE_ATOMIC(void *) hca_bar;\n \t/* Mapped HCA PCI BAR area. */\n \tsize_t uar_table_sz;\n \t/* Size of UAR register table. */\n@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {\n /* Indirection table. */\n struct mlx5_ind_table_obj {\n \tLIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */\n-\tuint32_t refcnt; /* Reference counter. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */\n \tunion {\n \t\tvoid *ind_table; /**< Indirection table. */\n \t\tstruct mlx5_devx_obj *rqt; /* DevX RQT object. */\n@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {\n };\n \n struct mlx5_quota {\n-\tuint8_t state; /* object state */\n+\tRTE_ATOMIC(uint8_t) state; /* object state */\n \tuint8_t mode;  /* metering mode */\n \t/**\n \t * Keep track of application update types.\n@@ -1955,7 +1955,7 @@ struct mlx5_priv {\n \tuint32_t flex_item_map; /* Map of allocated flex item elements. */\n \tuint32_t nb_queue; /* HW steering queue number. */\n \tstruct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */\n-\tuint32_t hws_mark_refcnt; /* HWS mark action reference counter. */\n+\tRTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */\n \tstruct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */\n \tstruct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n@@ -2007,7 +2007,7 @@ struct mlx5_priv {\n \n #endif\n \tstruct rte_eth_dev *shared_host; /* Host device for HW steering. */\n-\tuint16_t shared_refcnt; /* HW steering host reference counter. 
*/\n+\tRTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */\n };\n \n #define PORT_ID(priv) ((priv)->dev_data->port_id)\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex f31fdfb..1954975 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {\n \t\t\tshared_rss = mlx5_ipool_get\n \t\t\t\t(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n \t\t\t\t\t\t\t\t\t   idx);\n-\t\t\t__atomic_fetch_add(&shared_rss->refcnt, 1,\n-\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,\n+\t\t\t\t\t   rte_memory_order_relaxed);\n \t\t\treturn idx;\n \t\tdefault:\n \t\t\tbreak;\n@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *\n \tif (tunnel) {\n \t\tflow->tunnel = 1;\n \t\tflow->tunnel_id = tunnel->tunnel_id;\n-\t\t__atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);\n \t\tmlx5_free(default_miss_ctx.queue);\n \t}\n \tmlx5_flow_pop_thread_workspace();\n@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *\n \tflow_mreg_del_copy_action(dev, flow);\n \tflow_drv_destroy(dev, flow);\n \tif (rss_desc->shared_rss)\n-\t\t__atomic_fetch_sub(&((struct mlx5_shared_action_rss *)\n+\t\trte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)\n \t\t\tmlx5_ipool_get\n \t\t\t(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n-\t\t\trss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);\n+\t\t\trss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);\n \tmlx5_ipool_free(priv->flows[type], idx);\n \trte_errno = ret; /* Restore rte_errno. */\n \tret = rte_errno;\n@@ -7976,7 +7976,8 @@ struct rte_flow *\n \n \t\ttunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);\n \t\tRTE_VERIFY(tunnel);\n-\t\tif (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))\n+\t\tif (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,\n+\t\t    rte_memory_order_relaxed) - 1))\n \t\t\tmlx5_flow_tunnel_free(dev, tunnel);\n \t}\n \tflow_mreg_del_copy_action(dev, flow);\n@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*\n {\n \tuint32_t pools_n, us;\n \n-\tpools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);\n+\tpools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);\n \tus = MLX5_POOL_QUERY_FREQ_US / pools_n;\n \tDRV_LOG(DEBUG, \"Set alarm for %u pools each %u us\", pools_n, us);\n \tif (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {\n@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*\n \tfor (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {\n \t\tcnt = MLX5_POOL_GET_CNT(pool, i);\n \t\tage_param = MLX5_CNT_TO_AGE(cnt);\n-\t\tif (__atomic_load_n(&age_param->state,\n-\t\t\t\t    __ATOMIC_RELAXED) != AGE_CANDIDATE)\n+\t\tif (rte_atomic_load_explicit(&age_param->state,\n+\t\t\t\t    rte_memory_order_relaxed) != AGE_CANDIDATE)\n \t\t\tcontinue;\n \t\tif (cur->data[i].hits != prev->data[i].hits) {\n-\t\t\t__atomic_store_n(&age_param->sec_since_last_hit, 0,\n-\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,\n+\t\t\t\t\t rte_memory_order_relaxed);\n \t\t\tcontinue;\n \t\t}\n-\t\tif (__atomic_fetch_add(&age_param->sec_since_last_hit,\n+\t\tif (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,\n \t\t\t\t       time_delta,\n-\t\t\t\t       __ATOMIC_RELAXED) + time_delta <= age_param->timeout)\n+\t\t\t\t       rte_memory_order_relaxed) + time_delta <= age_param->timeout)\n 
\t\t\tcontinue;\n \t\t/**\n \t\t * Hold the lock first, or if between the\n@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*\n \t\tpriv = rte_eth_devices[age_param->port_id].data->dev_private;\n \t\tage_info = GET_PORT_AGE_INFO(priv);\n \t\trte_spinlock_lock(&age_info->aged_sl);\n-\t\tif (__atomic_compare_exchange_n(&age_param->state, &expected,\n-\t\t\t\t\t\tAGE_TMOUT, false,\n-\t\t\t\t\t\t__ATOMIC_RELAXED,\n-\t\t\t\t\t\t__ATOMIC_RELAXED)) {\n+\t\tif (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,\n+\t\t\t\t\t\tAGE_TMOUT,\n+\t\t\t\t\t\trte_memory_order_relaxed,\n+\t\t\t\t\t\trte_memory_order_relaxed)) {\n \t\t\tTAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);\n \t\t\tMLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);\n \t\t}\n@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {\n {\n \tstruct tunnel_db_element_release_ctx *ctx = x;\n \tctx->ret = 0;\n-\tif (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))\n+\tif (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))\n \t\tmlx5_flow_tunnel_free(dev, tunnel);\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 0065727..943f759 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {\n \tLIST_ENTRY(mlx5_flow_tunnel) chain;\n \tstruct rte_flow_tunnel app_tunnel;\t/** app tunnel copy */\n \tuint32_t tunnel_id;\t\t\t/** unique tunnel ID */\n-\tuint32_t refctn;\n+\tRTE_ATOMIC(uint32_t) refctn;\n \tstruct rte_flow_action action;\n \tstruct rte_flow_item item;\n \tstruct mlx5_hlist *groups;\t\t/** tunnel groups */\n@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {\n \tstruct mlx5dr_match_template *mt; /* mlx5 match template. */\n \tuint64_t item_flags; /* Item layer flags. */\n \tuint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */\n-\tuint32_t refcnt;  /* Reference counter. */\n+\tRTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */\n \t/*\n \t * If true, then rule pattern should be prepended with\n \t * represented_port pattern item.\n@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {\n \tuint16_t reformat_off; /* Offset of DR reformat action. */\n \tuint16_t mhdr_off; /* Offset of DR modify header action. */\n \tuint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */\n-\tuint32_t refcnt; /* Reference counter. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */\n \tuint8_t flex_item; /* flex item index. */\n };\n \n@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {\n /* Shared RSS action structure */\n struct mlx5_shared_action_rss {\n \tILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */\n-\tuint32_t refcnt; /**< Atomically accessed refcnt. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */\n \tstruct rte_flow_action_rss origin; /**< Original rte RSS action. */\n \tuint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. 
*/\n \tstruct mlx5_ind_table_obj *ind_tbl;\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex ab9eb21..a94b228 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -619,7 +619,7 @@\n \t\t\tuint8_t *u8addr;\n \t\t\tuint8_t hit;\n \n-\t\t\tif (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=\n+\t\t\tif (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=\n \t\t\t\t\t    AGE_CANDIDATE)\n \t\t\t\tcontinue;\n \t\t\tbyte = 63 - (j / 8);\n@@ -627,13 +627,13 @@\n \t\t\tu8addr = (uint8_t *)addr;\n \t\t\thit = (u8addr[byte] >> offset) & 0x1;\n \t\t\tif (hit) {\n-\t\t\t\t__atomic_store_n(&ap->sec_since_last_hit, 0,\n-\t\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_store_explicit(&ap->sec_since_last_hit, 0,\n+\t\t\t\t\t\t rte_memory_order_relaxed);\n \t\t\t} else {\n \t\t\t\tstruct mlx5_priv *priv;\n \n-\t\t\t\t__atomic_fetch_add(&ap->sec_since_last_hit,\n-\t\t\t\t\t\t   diff, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,\n+\t\t\t\t\t\t   diff, rte_memory_order_relaxed);\n \t\t\t\t/* If timeout passed add to aged-out list. */\n \t\t\t\tif (ap->sec_since_last_hit <= ap->timeout)\n \t\t\t\t\tcontinue;\n@@ -641,12 +641,11 @@\n \t\t\t\trte_eth_devices[ap->port_id].data->dev_private;\n \t\t\t\tage_info = GET_PORT_AGE_INFO(priv);\n \t\t\t\trte_spinlock_lock(&age_info->aged_sl);\n-\t\t\t\tif (__atomic_compare_exchange_n(&ap->state,\n+\t\t\t\tif (rte_atomic_compare_exchange_strong_explicit(&ap->state,\n \t\t\t\t\t\t\t\t&expected,\n \t\t\t\t\t\t\t\tAGE_TMOUT,\n-\t\t\t\t\t\t\t\tfalse,\n-\t\t\t\t\t\t\t       __ATOMIC_RELAXED,\n-\t\t\t\t\t\t\t    __ATOMIC_RELAXED)) {\n+\t\t\t\t\t\t\t       rte_memory_order_relaxed,\n+\t\t\t\t\t\t\t    rte_memory_order_relaxed)) {\n \t\t\t\t\tLIST_INSERT_HEAD(&age_info->aged_aso,\n \t\t\t\t\t\t\t act, next);\n \t\t\t\t\tMLX5_AGE_SET(age_info,\n@@ -946,10 +945,10 @@\n \t\tfor (i = 0; i < n; ++i) {\n \t\t\taso_mtr = sq->elts[(sq->tail + i) & mask].mtr;\n \t\t\tMLX5_ASSERT(aso_mtr);\n-\t\t\tverdict = __atomic_compare_exchange_n(&aso_mtr->state,\n+\t\t\tverdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,\n \t\t\t\t\t\t    &exp_state, ASO_METER_READY,\n-\t\t\t\t\t\t    false, __ATOMIC_RELAXED,\n-\t\t\t\t\t\t    __ATOMIC_RELAXED);\n+\t\t\t\t\t\t    rte_memory_order_relaxed,\n+\t\t\t\t\t\t    rte_memory_order_relaxed);\n \t\t\tMLX5_ASSERT(verdict);\n \t\t}\n \t\tsq->tail += n;\n@@ -1005,10 +1004,10 @@\n \t\t\tmtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,\n \t\t\t\t\t     MLX5_INDIRECT_ACTION_IDX_GET(job->action));\n \t\t\tMLX5_ASSERT(mtr);\n-\t\t\tverdict = __atomic_compare_exchange_n(&mtr->state,\n+\t\t\tverdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,\n \t\t\t\t\t\t    &exp_state, ASO_METER_READY,\n-\t\t\t\t\t\t    false, __ATOMIC_RELAXED,\n-\t\t\t\t\t\t    __ATOMIC_RELAXED);\n+\t\t\t\t\t\t    rte_memory_order_relaxed,\n+\t\t\t\t\t\t    rte_memory_order_relaxed);\n \t\t\tMLX5_ASSERT(verdict);\n \t\t\tflow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));\n \t\t}\n@@ -1103,7 +1102,7 @@\n \tstruct mlx5_aso_sq *sq;\n \tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n \tuint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;\n-\tuint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);\n+\tuint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);\n \tpoll_cq_t poll_mtr_cq =\n \t\tis_tmpl_api ? 
mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;\n \n@@ -1112,7 +1111,7 @@\n \tsq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);\n \tdo {\n \t\tpoll_mtr_cq(priv, sq);\n-\t\tif (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==\n+\t\tif (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==\n \t\t\t\t\t    ASO_METER_READY)\n \t\t\treturn 0;\n \t\t/* Waiting for CQE ready. */\n@@ -1411,7 +1410,7 @@\n \tuint16_t wqe_idx;\n \tstruct mlx5_aso_ct_pool *pool;\n \tenum mlx5_aso_ct_state state =\n-\t\t\t\t__atomic_load_n(&ct->state, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);\n \n \tif (state == ASO_CONNTRACK_FREE) {\n \t\tDRV_LOG(ERR, \"Fail: No context to query\");\n@@ -1620,12 +1619,12 @@\n \t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n \telse\n \t\tsq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);\n-\tif (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==\n+\tif (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==\n \t    ASO_CONNTRACK_READY)\n \t\treturn 0;\n \tdo {\n \t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n-\t\tif (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==\n+\t\tif (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==\n \t\t    ASO_CONNTRACK_READY)\n \t\t\treturn 0;\n \t\t/* Waiting for CQE ready, consider should block or sleep. */\n@@ -1791,7 +1790,7 @@\n \tbool need_lock = !!(queue == MLX5_HW_INV_QUEUE);\n \tuint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;\n \tenum mlx5_aso_ct_state state =\n-\t\t\t\t__atomic_load_n(&ct->state, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);\n \n \tif (sh->config.dv_flow_en == 2)\n \t\tsq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);\n@@ -1807,7 +1806,7 @@\n \t}\n \tdo {\n \t\tmlx5_aso_ct_completion_handle(sh, sq, need_lock);\n-\t\tstate = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);\n+\t\tstate = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);\n \t\tif (state == ASO_CONNTRACK_READY ||\n \t\t    state == ASO_CONNTRACK_QUERY)\n \t\t\treturn 0;\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex d434c67..f9c56af 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {\n }\n \n static inline struct mlx5_hlist *\n-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,\n+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,\n \t\t     const char *name, uint32_t size, bool direct_key,\n \t\t     bool lcores_share, void *ctx,\n \t\t     mlx5_list_create_cb cb_create,\n@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {\n \tstruct mlx5_hlist *expected = NULL;\n \tchar s[MLX5_NAME_SIZE];\n \n-\thl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);\n+\thl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);\n \tif (likely(hl))\n \t\treturn hl;\n \tsnprintf(s, sizeof(s), \"%s_%s\", sh->ibdev_name, name);\n@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {\n \t\t\t\t   \"cannot allocate resource memory\");\n \t\treturn NULL;\n \t}\n-\tif (!__atomic_compare_exchange_n(phl, &expected, hl, false,\n-\t\t\t\t\t __ATOMIC_SEQ_CST,\n-\t\t\t\t\t __ATOMIC_SEQ_CST)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,\n+\t\t\t\t\t rte_memory_order_seq_cst,\n+\t\t\t\t\t rte_memory_order_seq_cst)) {\n \t\tmlx5_hlist_destroy(hl);\n-\t\thl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);\n+\t\thl = 
rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);\n \t}\n \treturn hl;\n }\n@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *\n static struct mlx5_indexed_pool *\n flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)\n {\n-\tstruct mlx5_indexed_pool *ipool = __atomic_load_n\n-\t\t\t\t     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);\n+\tstruct mlx5_indexed_pool *ipool = rte_atomic_load_explicit\n+\t\t\t\t     (&sh->mdh_ipools[index], rte_memory_order_seq_cst);\n \n \tif (!ipool) {\n \t\tstruct mlx5_indexed_pool *expected = NULL;\n@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *\n \t\tipool = mlx5_ipool_create(&cfg);\n \t\tif (!ipool)\n \t\t\treturn NULL;\n-\t\tif (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],\n-\t\t\t\t\t\t &expected, ipool, false,\n-\t\t\t\t\t\t __ATOMIC_SEQ_CST,\n-\t\t\t\t\t\t __ATOMIC_SEQ_CST)) {\n+\t\tif (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],\n+\t\t\t\t\t\t &expected, ipool,\n+\t\t\t\t\t\t rte_memory_order_seq_cst,\n+\t\t\t\t\t\t rte_memory_order_seq_cst)) {\n \t\t\tmlx5_ipool_destroy(ipool);\n-\t\t\tipool = __atomic_load_n(&sh->mdh_ipools[index],\n-\t\t\t\t\t\t__ATOMIC_SEQ_CST);\n+\t\t\tipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],\n+\t\t\t\t\t\trte_memory_order_seq_cst);\n \t\t}\n \t}\n \treturn ipool;\n@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *\n \n \tage_info = GET_PORT_AGE_INFO(priv);\n \tage_param = flow_dv_counter_idx_get_age(dev, counter);\n-\tif (!__atomic_compare_exchange_n(&age_param->state, &expected,\n-\t\t\t\t\t AGE_FREE, false, __ATOMIC_RELAXED,\n-\t\t\t\t\t __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,\n+\t\t\t\t\t AGE_FREE, rte_memory_order_relaxed,\n+\t\t\t\t\t rte_memory_order_relaxed)) {\n \t\t/**\n \t\t * We need the lock even it is age timeout,\n \t\t * since counter may still in process.\n@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *\n \t\trte_spinlock_lock(&age_info->aged_sl);\n \t\tTAILQ_REMOVE(&age_info->aged_counters, cnt, next);\n \t\trte_spinlock_unlock(&age_info->aged_sl);\n-\t\t__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);\n \t}\n }\n \n@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *\n \t\t * indirect action API, shared info is 1 before the reduction,\n \t\t * so this condition is failed and function doesn't return here.\n \t\t */\n-\t\tif (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,\n-\t\t\t\t       __ATOMIC_RELAXED) - 1)\n+\t\tif (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,\n+\t\t\t\t       rte_memory_order_relaxed) - 1)\n \t\t\treturn;\n \t}\n \tcnt->pool = pool;\n@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *\n \t\t\tgeneve_opt_v->option_type &&\n \t\t\tgeneve_opt_resource->length ==\n \t\t\tgeneve_opt_v->option_len) {\n-\t\t\t__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,\n-\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,\n+\t\t\t\t\t   rte_memory_order_relaxed);\n \t\t} else {\n \t\t\tret = rte_flow_error_set(error, ENOMEM,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *\n \t\tgeneve_opt_resource->option_class = geneve_opt_v->option_class;\n \t\tgeneve_opt_resource->option_type = geneve_opt_v->option_type;\n \t\tgeneve_opt_resource->length = geneve_opt_v->option_len;\n-\t\t__atomic_store_n(&geneve_opt_resource->refcnt, 
1,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,\n+\t\t\t\trte_memory_order_relaxed);\n \t}\n exit:\n \trte_spinlock_unlock(&sh->geneve_tlv_opt_sl);\n@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *\n \t\t(void *)(uintptr_t)(dev_flow->flow_idx);\n \tage_param->timeout = age->timeout;\n \tage_param->port_id = dev->data->port_id;\n-\t__atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);\n \treturn counter;\n }\n \n@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *\n \tuint16_t expected = AGE_CANDIDATE;\n \n \tage_info = GET_PORT_AGE_INFO(priv);\n-\tif (!__atomic_compare_exchange_n(&age_param->state, &expected,\n-\t\t\t\t\t AGE_FREE, false, __ATOMIC_RELAXED,\n-\t\t\t\t\t __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,\n+\t\t\t\t\t AGE_FREE, rte_memory_order_relaxed,\n+\t\t\t\t\t rte_memory_order_relaxed)) {\n \t\t/**\n \t\t * We need the lock even it is age timeout,\n \t\t * since age action may still in process.\n@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *\n \t\trte_spinlock_lock(&age_info->aged_sl);\n \t\tLIST_REMOVE(age, next);\n \t\trte_spinlock_unlock(&age_info->aged_sl);\n-\t\t__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);\n \t}\n }\n \n@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;\n \tstruct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);\n-\tuint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;\n+\tuint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;\n \n \tif (!ret) {\n \t\tflow_dv_aso_age_remove_from_age(dev, age);\n@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *\n \t\t\treturn 0; /* 0 is an error. */\n \t\t}\n \t}\n-\t__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);\n \treturn pool->index | ((age_free->offset + 1) << 16);\n }\n \n@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *\n \taso_age->age_params.context = context;\n \taso_age->age_params.timeout = timeout;\n \taso_age->age_params.port_id = dev->data->port_id;\n-\t__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,\n-\t\t\t __ATOMIC_RELAXED);\n-\t__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,\n-\t\t\t __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,\n+\t\t\t rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,\n+\t\t\t rte_memory_order_relaxed);\n }\n \n static void\n@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *\n \tuint32_t ret;\n \tstruct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);\n \tenum mlx5_aso_ct_state state =\n-\t\t\t__atomic_load_n(&ct->state, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);\n \n \t/* Cannot release when CT is in the ASO SQ. 
*/\n \tif (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)\n \t\treturn -1;\n-\tret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;\n+\tret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;\n \tif (!ret) {\n \t\tif (ct->dr_action_orig) {\n #ifdef HAVE_MLX5_DR_ACTION_ASO_CT\n@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *\n \tpool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);\n \tct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);\n \t/* 0: inactive, 1: created, 2+: used by flows. */\n-\t__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);\n \treg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);\n \tif (!ct->dr_action_orig) {\n #ifdef HAVE_MLX5_DR_ACTION_ASO_CT\n@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *\n \t\t\tage_act = flow_aso_age_get_by_idx(dev, owner_idx);\n \t\t\tif (flow->age == 0) {\n \t\t\t\tflow->age = owner_idx;\n-\t\t\t\t__atomic_fetch_add(&age_act->refcnt, 1,\n-\t\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_fetch_add_explicit(&age_act->refcnt, 1,\n+\t\t\t\t\t\t   rte_memory_order_relaxed);\n \t\t\t}\n \t\t\tage_act_pos = actions_n++;\n \t\t\taction_flags |= MLX5_FLOW_ACTION_AGE;\n@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *\n \t\t\t} else {\n \t\t\t\tif (flow->counter == 0) {\n \t\t\t\t\tflow->counter = owner_idx;\n-\t\t\t\t\t__atomic_fetch_add\n+\t\t\t\t\trte_atomic_fetch_add_explicit\n \t\t\t\t\t\t(&cnt_act->shared_info.refcnt,\n-\t\t\t\t\t\t 1, __ATOMIC_RELAXED);\n+\t\t\t\t\t\t 1, rte_memory_order_relaxed);\n \t\t\t\t}\n \t\t\t\t/* Save information first, will apply later. */\n \t\t\t\taction_flags |= MLX5_FLOW_ACTION_COUNT;\n@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *\n \t\t\t\tflow->indirect_type =\n \t\t\t\t\t\tMLX5_INDIRECT_ACTION_TYPE_CT;\n \t\t\t\tflow->ct = owner_idx;\n-\t\t\t\t__atomic_fetch_add(&ct->refcnt, 1,\n-\t\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_fetch_add_explicit(&ct->refcnt, 1,\n+\t\t\t\t\t\t   rte_memory_order_relaxed);\n \t\t\t}\n \t\t\tactions_n++;\n \t\t\taction_flags |= MLX5_FLOW_ACTION_CT;\n@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *\n \n \tshared_rss = mlx5_ipool_get\n \t\t\t(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);\n-\t__atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);\n }\n \n void\n@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *\n \t\t\t\tsh->geneve_tlv_option_resource;\n \trte_spinlock_lock(&sh->geneve_tlv_opt_sl);\n \tif (geneve_opt_resource) {\n-\t\tif (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,\n-\t\t\t\t\t __ATOMIC_RELAXED) - 1)) {\n+\t\tif (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,\n+\t\t\t\t\t rte_memory_order_relaxed) - 1)) {\n \t\t\tclaim_zero(mlx5_devx_cmd_destroy\n \t\t\t\t\t(geneve_opt_resource->obj));\n \t\t\tmlx5_free(sh->geneve_tlv_option_resource);\n@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *\n \t/* Update queue with indirect table queue memoyr. 
*/\n \torigin->queue = shared_rss->ind_tbl->queues;\n \trte_spinlock_init(&shared_rss->action_rss_sl);\n-\t__atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);\n \trte_spinlock_lock(&priv->shared_act_sl);\n \tILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n \t\t     &priv->rss_shared_actions, idx, shared_rss, next);\n@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"invalid shared action\");\n-\tif (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,\n-\t\t\t\t\t 0, 0, __ATOMIC_ACQUIRE,\n-\t\t\t\t\t __ATOMIC_RELAXED))\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,\n+\t\t\t\t\t 0, rte_memory_order_acquire,\n+\t\t\t\t\t rte_memory_order_relaxed))\n \t\treturn rte_flow_error_set(error, EBUSY,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t  NULL,\n@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *\n \t\treturn __flow_dv_action_rss_release(dev, idx, error);\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\tcnt = flow_dv_counter_get_by_idx(dev, idx, NULL);\n-\t\tif (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,\n-\t\t\t\t\t\t &no_flow_refcnt, 1, false,\n-\t\t\t\t\t\t __ATOMIC_ACQUIRE,\n-\t\t\t\t\t\t __ATOMIC_RELAXED))\n+\t\tif (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,\n+\t\t\t\t\t\t &no_flow_refcnt, 1,\n+\t\t\t\t\t\t rte_memory_order_acquire,\n+\t\t\t\t\t\t rte_memory_order_relaxed))\n \t\t\treturn rte_flow_error_set(error, EBUSY,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t\t  NULL,\n@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *\n \tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n \t\tage_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;\n \t\tresp = data;\n-\t\tresp->aged = __atomic_load_n(&age_param->state,\n-\t\t\t\t\t      __ATOMIC_RELAXED) == AGE_TMOUT ?\n+\t\tresp->aged = rte_atomic_load_explicit(&age_param->state,\n+\t\t\t\t\t      rte_memory_order_relaxed) == AGE_TMOUT ?\n \t\t\t\t\t\t\t\t\t  1 : 0;\n \t\tresp->sec_since_last_hit_valid = !resp->aged;\n \t\tif (resp->sec_since_last_hit_valid)\n-\t\t\tresp->sec_since_last_hit = __atomic_load_n\n-\t\t\t     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);\n+\t\t\tresp->sec_since_last_hit = rte_atomic_load_explicit\n+\t\t\t     (&age_param->sec_since_last_hit, rte_memory_order_relaxed);\n \t\treturn 0;\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\treturn flow_dv_query_count(dev, idx, data, error);\n@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  NULL, \"age data not available\");\n \t}\n-\tresp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==\n+\tresp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==\n \t\t\t\t     AGE_TMOUT ? 
1 : 0;\n \tresp->sec_since_last_hit_valid = !resp->aged;\n \tif (resp->sec_since_last_hit_valid)\n-\t\tresp->sec_since_last_hit = __atomic_load_n\n-\t\t\t     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);\n+\t\tresp->sec_since_last_hit = rte_atomic_load_explicit\n+\t\t\t     (&age_param->sec_since_last_hit, rte_memory_order_relaxed);\n \treturn 0;\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c\nindex 4ae03a2..8a02247 100644\n--- a/drivers/net/mlx5/mlx5_flow_flex.c\n+++ b/drivers/net/mlx5/mlx5_flow_flex.c\n@@ -86,7 +86,7 @@\n \t\t\tMLX5_ASSERT(!item->refcnt);\n \t\t\tMLX5_ASSERT(!item->devx_fp);\n \t\t\titem->devx_fp = NULL;\n-\t\t\t__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);\n+\t\t\trte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);\n \t\t\tpriv->flex_item_map |= 1u << idx;\n \t\t}\n \t}\n@@ -107,7 +107,7 @@\n \t\tMLX5_ASSERT(!item->refcnt);\n \t\tMLX5_ASSERT(!item->devx_fp);\n \t\titem->devx_fp = NULL;\n-\t\t__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);\n \t\tpriv->flex_item_map &= ~(1u << idx);\n \t\trte_spinlock_unlock(&priv->flex_item_sl);\n \t}\n@@ -379,7 +379,7 @@\n \t\treturn ret;\n \t}\n \tif (acquire)\n-\t\t__atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);\n+\t\trte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);\n \treturn ret;\n }\n \n@@ -414,7 +414,7 @@\n \t\trte_errno = -EINVAL;\n \t\treturn -EINVAL;\n \t}\n-\t__atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);\n \treturn 0;\n }\n \n@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *\n \t}\n \tflex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);\n \t/* Mark initialized flex item valid. 
*/\n-\t__atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);\n \treturn (struct rte_flow_item_flex_handle *)flex;\n \n error:\n@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, NULL,\n \t\t\t\t\t  \"invalid flex item handle value\");\n \t}\n-\tif (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,\n-\t\t\t\t\t __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,\n+\t\t\t\t\t rte_memory_order_acquire, rte_memory_order_relaxed)) {\n \t\trte_spinlock_unlock(&priv->flex_item_sl);\n \t\treturn rte_flow_error_set(error, EBUSY,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, NULL,\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 9ebbe66..8891f3c 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,\n \t}\n \n \tif (acts->mark)\n-\t\tif (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))\n+\t\tif (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,\n+\t\t    rte_memory_order_relaxed) - 1))\n \t\t\tflow_hw_rxq_flag_set(dev, false);\n \n \tif (acts->jump) {\n@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n \t\t\t\tgoto err;\n \t\t\tacts->rule_acts[dr_pos].action =\n \t\t\t\tpriv->hw_tag[!!attr->group];\n-\t\t\t__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,\n+\t\t\t    rte_memory_order_relaxed);\n \t\t\tflow_hw_rxq_flag_set(dev, true);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:\n@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n \tuint8_t i;\n \n \tfor (i = 0; i < nb_action_templates; i++) {\n-\t\tuint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,\n-\t\t\t\t\t\t     __ATOMIC_RELAXED);\n+\t\tuint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,\n+\t\t\t\t\t\t     rte_memory_order_relaxed) + 1;\n \n \t\tif (refcnt <= 1) {\n \t\t\trte_flow_error_set(error, EINVAL,\n@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n at_error:\n \twhile (i--) {\n \t\t__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);\n-\t\t__atomic_sub_fetch(&action_templates[i]->refcnt,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n \t}\n \treturn rte_errno;\n }\n@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n \t\t}\n \t\tif (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)\n \t\t\tmatcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;\n-\t\tret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,\n-\t\t\t\t\t __ATOMIC_RELAXED) + 1;\n+\t\tret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,\n+\t\t\t\t\t rte_memory_order_relaxed) + 1;\n \t\tif (ret <= 1) {\n \t\t\trte_errno = EINVAL;\n \t\t\tgoto it_error;\n@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n at_error:\n \tfor (i = 0; i < nb_action_templates; i++) {\n \t\t__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);\n-\t\t__atomic_fetch_sub(&action_templates[i]->refcnt,\n-\t\t\t\t   1, 
__ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n \t}\n \ti = nb_item_templates;\n it_error:\n \twhile (i--)\n-\t\t__atomic_fetch_sub(&item_templates[i]->refcnt,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n error:\n \terr = rte_errno;\n \tif (tbl) {\n@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)\n \t}\n \tLIST_REMOVE(table, next);\n \tfor (i = 0; i < table->nb_item_templates; i++)\n-\t\t__atomic_fetch_sub(&table->its[i]->refcnt,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n \tfor (i = 0; i < table->nb_action_templates; i++) {\n \t\t__flow_hw_action_template_destroy(dev, &table->ats[i].acts);\n-\t\t__atomic_fetch_sub(&table->ats[i].action_template->refcnt,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n \t}\n \tflow_hw_destroy_table_multi_pattern_ctx(table);\n \tif (table->matcher_info[0].matcher)\n@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {\n \tif (!at->tmpl)\n \t\tgoto error;\n \tat->action_flags = action_flags;\n-\t__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);\n \tLIST_INSERT_HEAD(&priv->flow_hw_at, at, next);\n \treturn at;\n error:\n@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {\n \tuint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |\n \t\t\tMLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;\n \n-\tif (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {\n+\tif (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {\n \t\tDRV_LOG(WARNING, \"Action template %p is still in use.\",\n \t\t\t(void *)template);\n \t\treturn rte_flow_error_set(error, EBUSY,\n@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {\n \t\t\tbreak;\n \t\t}\n \t}\n-\t__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);\n \trte_errno = pattern_template_validate(dev, &it, 1);\n \tif (rte_errno)\n \t\tgoto error;\n@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n-\tif (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {\n+\tif (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {\n \t\tDRV_LOG(WARNING, \"Item template %p is still in use.\",\n \t\t\t(void *)template);\n \t\treturn rte_flow_error_set(error, EBUSY,\n@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *\n \t\t}\n \t\tdr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;\n \t\tpriv->shared_host = host_dev;\n-\t\t__atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,\n+\t\t    rte_memory_order_relaxed);\n \t}\n \tdr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);\n \t/* rte_errno has been updated by HWS layer. 
*/\n@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *\n \tif (priv->shared_host) {\n \t\tstruct mlx5_priv *host_priv = priv->shared_host->data->dev_private;\n \n-\t\t__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,\n+\t\t    rte_memory_order_relaxed);\n \t\tpriv->shared_host = NULL;\n \t}\n \tif (priv->hw_q) {\n@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *\n \tpriv->hw_q = NULL;\n \tif (priv->shared_host) {\n \t\tstruct mlx5_priv *host_priv = priv->shared_host->data->dev_private;\n-\t\t__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,\n+\t\t    rte_memory_order_relaxed);\n \t\tpriv->shared_host = NULL;\n \t}\n \tmlx5_free(priv->hw_attr);\n@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *\n \t\t\t\tNULL,\n \t\t\t\t\"Invalid CT destruction index\");\n \t}\n-\t__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,\n-\t\t\t\t __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,\n+\t\t\t\t rte_memory_order_relaxed);\n \tmlx5_ipool_free(pool->cts, idx);\n \treturn 0;\n }\n@@ -11572,7 +11577,7 @@ struct mlx5_hw_q_job *\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  NULL, \"age data not available\");\n-\tswitch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {\n+\tswitch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {\n \tcase HWS_AGE_AGED_OUT_REPORTED:\n \tcase HWS_AGE_AGED_OUT_NOT_REPORTED:\n \t\tresp->aged = 1;\n@@ -11592,8 +11597,8 @@ struct mlx5_hw_q_job *\n \t}\n \tresp->sec_since_last_hit_valid = !resp->aged;\n \tif (resp->sec_since_last_hit_valid)\n-\t\tresp->sec_since_last_hit = __atomic_load_n\n-\t\t\t\t (&param->sec_since_last_hit, __ATOMIC_RELAXED);\n+\t\tresp->sec_since_last_hit = rte_atomic_load_explicit\n+\t\t\t\t (&param->sec_since_last_hit, rte_memory_order_relaxed);\n \treturn 0;\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c\nindex ca361f7..da3289b 100644\n--- a/drivers/net/mlx5/mlx5_flow_meter.c\n+++ b/drivers/net/mlx5/mlx5_flow_meter.c\n@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *\n \t\t\tNULL, \"Meter profile id not valid.\");\n \t/* Meter policy must exist. 
*/\n \tif (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {\n-\t\t__atomic_fetch_add\n+\t\trte_atomic_fetch_add_explicit\n \t\t\t(&priv->sh->mtrmng->def_policy_ref_cnt,\n-\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\t1, rte_memory_order_relaxed);\n \t\tdomain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;\n \t\tif (!priv->sh->config.dv_esw_en)\n \t\t\tdomain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;\n@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *\n \tfm->is_enable = params->meter_enable;\n \tfm->shared = !!shared;\n \tfm->color_aware = !!params->use_prev_mtr_color;\n-\t__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);\n \tif (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {\n \t\tfm->def_policy = 1;\n \t\tfm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);\n@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *\n \t}\n \tfm->active_state = params->meter_enable;\n \tif (mtr_policy)\n-\t\t__atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);\n \treturn 0;\n error:\n \tmlx5_flow_destroy_mtr_tbls(dev, fm);\n@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *\n \t\t\t\t\t  NULL, \"Failed to create devx meter.\");\n \t}\n \tfm->active_state = params->meter_enable;\n-\t__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);\n-\t__atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);\n+\trte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);\n \treturn 0;\n }\n #endif\n@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *\n \tif (fmp == NULL)\n \t\treturn -1;\n \t/* Update dependencies. */\n-\t__atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);\n \tfm->profile = NULL;\n \t/* Remove from list. */\n \tif (!priv->sh->meter_aso_en) {\n@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *\n \t}\n \tmlx5_flow_destroy_mtr_tbls(dev, fm);\n \tif (fm->def_policy)\n-\t\t__atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,\n-\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,\n+\t\t\t\t1, rte_memory_order_relaxed);\n \tif (priv->sh->meter_aso_en) {\n \t\tif (!fm->def_policy) {\n \t\t\tmtr_policy = mlx5_flow_meter_policy_find(dev,\n \t\t\t\t\t\tfm->policy_id, NULL);\n \t\t\tif (mtr_policy)\n-\t\t\t\t__atomic_fetch_sub(&mtr_policy->ref_cnt,\n-\t\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,\n+\t\t\t\t\t\t1, rte_memory_order_relaxed);\n \t\t\tfm->policy_id = 0;\n \t\t}\n \t\tfm->def_policy = 0;\n@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *\n \t\t\t\t\t  RTE_MTR_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  NULL, \"Meter object is being used.\");\n \t/* Destroy the meter profile. */\n-\t__atomic_fetch_sub(&fm->profile->ref_cnt,\n-\t\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,\n+\t\t\t\t\t\t1, rte_memory_order_relaxed);\n \t/* Destroy the meter policy. 
*/\n \tpolicy = mlx5_flow_meter_policy_find(dev,\n \t\t\tfm->policy_id, NULL);\n-\t__atomic_fetch_sub(&policy->ref_cnt,\n-\t\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_sub_explicit(&policy->ref_cnt,\n+\t\t\t\t\t\t1, rte_memory_order_relaxed);\n \tmemset(fm, 0, sizeof(struct mlx5_flow_meter_info));\n \treturn 0;\n }\ndiff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c\nindex 14a2a8b..6ad0e8a 100644\n--- a/drivers/net/mlx5/mlx5_flow_quota.c\n+++ b/drivers/net/mlx5/mlx5_flow_quota.c\n@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,\n \t\tstruct mlx5_quota *quota_obj =\n \t\t\tsq->elts[(sq->tail + i) & mask].quota_obj;\n \n-\t\t__atomic_compare_exchange_n(&quota_obj->state, &state,\n-\t\t\t\t\t    MLX5_QUOTA_STATE_READY, false,\n-\t\t\t\t\t    __ATOMIC_RELAXED, __ATOMIC_RELAXED);\n+\t\trte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,\n+\t\t\t\t\t    MLX5_QUOTA_STATE_READY,\n+\t\t\t\t\t    rte_memory_order_relaxed, rte_memory_order_relaxed);\n \t}\n }\n \n@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,\n \t\trte_spinlock_lock(&sq->sqsl);\n \t\tmlx5_quota_cmd_completion_handle(sq);\n \t\trte_spinlock_unlock(&sq->sqsl);\n-\t\tif (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==\n+\t\tif (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==\n \t\t    MLX5_QUOTA_STATE_READY)\n \t\t\treturn 0;\n \t} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);\n@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,\n mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)\n {\n \tuint8_t state = MLX5_QUOTA_STATE_READY;\n-\tbool verdict = __atomic_compare_exchange_n\n-\t\t(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,\n-\t\t __ATOMIC_RELAXED, __ATOMIC_RELAXED);\n+\tbool verdict = rte_atomic_compare_exchange_strong_explicit\n+\t\t(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,\n+\t\t rte_memory_order_relaxed, rte_memory_order_relaxed);\n \n \tif (!verdict)\n \t\treturn rte_flow_error_set(error, EBUSY,\n@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,\n \tret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,\n \t\t\t\t async_job ? async_job : &sync_job, push, NULL);\n \tif (ret) {\n-\t\t__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,\n-\t\t\t\t __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,\n+\t\t\t\t rte_memory_order_relaxed);\n \t\treturn rte_flow_error_set(error, EAGAIN,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL, \"try again\");\n \t}\n@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,\n \t\t\t\t async_job ? 
async_job : &sync_job, push,\n \t\t\t\t (void *)(uintptr_t)update->conf);\n \tif (ret) {\n-\t\t__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,\n-\t\t\t\t __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,\n+\t\t\t\t rte_memory_order_relaxed);\n \t\treturn rte_flow_error_set(error, EAGAIN,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL, \"try again\");\n \t}\n@@ -593,9 +593,9 @@ struct rte_flow_action_handle *\n \t\t\t\t   NULL, \"quota: failed to allocate quota object\");\n \t\treturn NULL;\n \t}\n-\tverdict = __atomic_compare_exchange_n\n-\t\t(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,\n-\t\t __ATOMIC_RELAXED, __ATOMIC_RELAXED);\n+\tverdict = rte_atomic_compare_exchange_strong_explicit\n+\t\t(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,\n+\t\t rte_memory_order_relaxed, rte_memory_order_relaxed);\n \tif (!verdict) {\n \t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t   NULL, \"quota: new quota object has invalid state\");\n@@ -616,8 +616,8 @@ struct rte_flow_action_handle *\n \t\t\t\t (void *)(uintptr_t)conf);\n \tif (ret) {\n \t\tmlx5_ipool_free(qctx->quota_ipool, id);\n-\t\t__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,\n-\t\t\t\t __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,\n+\t\t\t\t rte_memory_order_relaxed);\n \t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t   NULL, \"quota: WR failure\");\n \t\treturn 0;\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c\nindex c31f2f3..1b625e0 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.c\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.c\n@@ -149,7 +149,7 @@\n \t\t}\n \t\tif (param->timeout == 0)\n \t\t\tcontinue;\n-\t\tswitch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {\n+\t\tswitch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {\n \t\tcase HWS_AGE_AGED_OUT_NOT_REPORTED:\n \t\tcase HWS_AGE_AGED_OUT_REPORTED:\n \t\t\t/* Already aged-out, no action is needed. 
*/\n@@ -171,8 +171,8 @@\n \t\thits = rte_be_to_cpu_64(stats[i].hits);\n \t\tif (param->nb_cnts == 1) {\n \t\t\tif (hits != param->accumulator_last_hits) {\n-\t\t\t\t__atomic_store_n(&param->sec_since_last_hit, 0,\n-\t\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_store_explicit(&param->sec_since_last_hit, 0,\n+\t\t\t\t\t\t rte_memory_order_relaxed);\n \t\t\t\tparam->accumulator_last_hits = hits;\n \t\t\t\tcontinue;\n \t\t\t}\n@@ -184,8 +184,8 @@\n \t\t\tparam->accumulator_cnt = 0;\n \t\t\tif (param->accumulator_last_hits !=\n \t\t\t\t\t\tparam->accumulator_hits) {\n-\t\t\t\t__atomic_store_n(&param->sec_since_last_hit,\n-\t\t\t\t\t\t 0, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_store_explicit(&param->sec_since_last_hit,\n+\t\t\t\t\t\t 0, rte_memory_order_relaxed);\n \t\t\t\tparam->accumulator_last_hits =\n \t\t\t\t\t\t\tparam->accumulator_hits;\n \t\t\t\tparam->accumulator_hits = 0;\n@@ -193,9 +193,9 @@\n \t\t\t}\n \t\t\tparam->accumulator_hits = 0;\n \t\t}\n-\t\tif (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,\n-\t\t\t\t       __ATOMIC_RELAXED) + time_delta <=\n-\t\t   __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))\n+\t\tif (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,\n+\t\t\t\t       rte_memory_order_relaxed) + time_delta <=\n+\t\t   rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))\n \t\t\tcontinue;\n \t\t/* Prepare the relevant ring for this AGE parameter */\n \t\tif (priv->hws_strict_queue)\n@@ -203,10 +203,10 @@\n \t\telse\n \t\t\tr = age_info->hw_age.aged_list;\n \t\t/* Changing the state atomically and insert it into the ring. */\n-\t\tif (__atomic_compare_exchange_n(&param->state, &expected1,\n+\t\tif (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,\n \t\t\t\t\t\tHWS_AGE_AGED_OUT_NOT_REPORTED,\n-\t\t\t\t\t\tfalse, __ATOMIC_RELAXED,\n-\t\t\t\t\t\t__ATOMIC_RELAXED)) {\n+\t\t\t\t\t\trte_memory_order_relaxed,\n+\t\t\t\t\t\trte_memory_order_relaxed)) {\n \t\t\tint ret = rte_ring_enqueue_burst_elem(r, &age_idx,\n \t\t\t\t\t\t\t      sizeof(uint32_t),\n \t\t\t\t\t\t\t      1, NULL);\n@@ -221,11 +221,10 @@\n \t\t\t */\n \t\t\texpected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;\n \t\t\tif (ret == 0 &&\n-\t\t\t    !__atomic_compare_exchange_n(&param->state,\n+\t\t\t    !rte_atomic_compare_exchange_strong_explicit(&param->state,\n \t\t\t\t\t\t\t &expected2, expected1,\n-\t\t\t\t\t\t\t false,\n-\t\t\t\t\t\t\t __ATOMIC_RELAXED,\n-\t\t\t\t\t\t\t __ATOMIC_RELAXED) &&\n+\t\t\t\t\t\t\t rte_memory_order_relaxed,\n+\t\t\t\t\t\t\t rte_memory_order_relaxed) &&\n \t\t\t    expected2 == HWS_AGE_FREE)\n \t\t\t\tmlx5_hws_age_param_free(priv,\n \t\t\t\t\t\t\tparam->own_cnt_index,\n@@ -235,10 +234,10 @@\n \t\t\tif (!priv->hws_strict_queue)\n \t\t\t\tMLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);\n \t\t} else {\n-\t\t\t__atomic_compare_exchange_n(&param->state, &expected2,\n+\t\t\trte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,\n \t\t\t\t\t\t  HWS_AGE_AGED_OUT_NOT_REPORTED,\n-\t\t\t\t\t\t  false, __ATOMIC_RELAXED,\n-\t\t\t\t\t\t  __ATOMIC_RELAXED);\n+\t\t\t\t\t\t  rte_memory_order_relaxed,\n+\t\t\t\t\t\t  rte_memory_order_relaxed);\n \t\t}\n \t}\n \t/* The event is irrelevant in strict queue mode. 
*/\n@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"invalid AGE parameter index\");\n-\tswitch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,\n-\t\t\t\t    __ATOMIC_RELAXED)) {\n+\tswitch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,\n+\t\t\t\t    rte_memory_order_relaxed)) {\n \tcase HWS_AGE_CANDIDATE:\n \tcase HWS_AGE_AGED_OUT_REPORTED:\n \t\tmlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);\n@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *\n \t\t\t\t   \"cannot allocate AGE parameter\");\n \t\treturn 0;\n \t}\n-\tMLX5_ASSERT(__atomic_load_n(&param->state,\n-\t\t\t\t    __ATOMIC_RELAXED) == HWS_AGE_FREE);\n+\tMLX5_ASSERT(rte_atomic_load_explicit(&param->state,\n+\t\t\t\t    rte_memory_order_relaxed) == HWS_AGE_FREE);\n \tif (shared) {\n \t\tparam->nb_cnts = 0;\n \t\tparam->accumulator_hits = 0;\n@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"invalid AGE parameter index\");\n \tif (update_ade->timeout_valid) {\n-\t\tuint32_t old_timeout = __atomic_exchange_n(&param->timeout,\n+\t\tuint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,\n \t\t\t\t\t\t\t   update_ade->timeout,\n-\t\t\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\t\t\t\t\t   rte_memory_order_relaxed);\n \n \t\tif (old_timeout == 0)\n \t\t\tsec_since_last_hit_reset = true;\n@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *\n \t\tstate_update = true;\n \t}\n \tif (sec_since_last_hit_reset)\n-\t\t__atomic_store_n(&param->sec_since_last_hit, 0,\n-\t\t\t\t __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&param->sec_since_last_hit, 0,\n+\t\t\t\t rte_memory_order_relaxed);\n \tif (state_update) {\n \t\tuint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;\n \n@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *\n \t\t *  - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING\n \t\t *  - AGED_OUT_REPORTED -> CANDIDATE\n \t\t */\n-\t\tif (!__atomic_compare_exchange_n(&param->state, &expected,\n+\t\tif (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,\n \t\t\t\t\t\t HWS_AGE_CANDIDATE_INSIDE_RING,\n-\t\t\t\t\t\t false, __ATOMIC_RELAXED,\n-\t\t\t\t\t\t __ATOMIC_RELAXED) &&\n+\t\t\t\t\t\t rte_memory_order_relaxed,\n+\t\t\t\t\t\t rte_memory_order_relaxed) &&\n \t\t    expected == HWS_AGE_AGED_OUT_REPORTED)\n-\t\t\t__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,\n-\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\trte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,\n+\t\t\t\t\t rte_memory_order_relaxed);\n \t}\n \treturn 0;\n }\n@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *\n \tuint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;\n \n \tMLX5_ASSERT(param != NULL);\n-\tif (__atomic_compare_exchange_n(&param->state, &expected,\n-\t\t\t\t\tHWS_AGE_AGED_OUT_REPORTED, false,\n-\t\t\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED))\n+\tif (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,\n+\t\t\t\t\tHWS_AGE_AGED_OUT_REPORTED,\n+\t\t\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed))\n \t\treturn param->context;\n \tswitch (expected) {\n \tcase HWS_AGE_FREE:\n@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *\n \t\tmlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);\n \t\tbreak;\n \tcase HWS_AGE_CANDIDATE_INSIDE_RING:\n-\t\t__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,\n-\t\t\t\t __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,\n+\t\t\t\t 
rte_memory_order_relaxed);\n \t\tbreak;\n \tcase HWS_AGE_CANDIDATE:\n \t\t/*\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h\nindex e005960..481442f 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.h\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.h\n@@ -101,7 +101,7 @@ struct mlx5_hws_cnt_pool {\n \tLIST_ENTRY(mlx5_hws_cnt_pool) next;\n \tstruct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;\n \tstruct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;\n-\tuint32_t query_gen __rte_cache_aligned;\n+\tRTE_ATOMIC(uint32_t) query_gen __rte_cache_aligned;\n \tstruct mlx5_hws_cnt *pool;\n \tstruct mlx5_hws_cnt_raw_data_mng *raw_mng;\n \tstruct rte_ring *reuse_list;\n@@ -134,10 +134,10 @@ enum {\n \n /* HWS counter age parameter. */\n struct mlx5_hws_age_param {\n-\tuint32_t timeout; /* Aging timeout in seconds (atomically accessed). */\n-\tuint32_t sec_since_last_hit;\n+\tRTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */\n+\tRTE_ATOMIC(uint32_t) sec_since_last_hit;\n \t/* Time in seconds since last hit (atomically accessed). */\n-\tuint16_t state; /* AGE state (atomically accessed). */\n+\tRTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */\n \tuint64_t accumulator_last_hits;\n \t/* Last total value of hits for comparing. */\n \tuint64_t accumulator_hits;\n@@ -426,7 +426,7 @@ struct mlx5_hws_age_param {\n \tiidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);\n \thpool->pool[iidx].in_used = false;\n \thpool->pool[iidx].query_gen_when_free =\n-\t\t__atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);\n+\t\trte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);\n \tif (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)\n \t\tqcache = hpool->cache->qcache[*queue];\n \tif (unlikely(qcache == NULL)) {\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 2fce908..c627113 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {\n /* RX queue private data. */\n struct mlx5_rxq_priv {\n \tuint16_t idx; /* Queue index. */\n-\tuint32_t refcnt; /* Reference counter. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */\n \tstruct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */\n \tLIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {\n /* External RX queue descriptor. */\n struct mlx5_external_rxq {\n \tuint32_t hw_id; /* Queue index in the Hardware. */\n-\tuint32_t refcnt; /* Reference counter. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */\n };\n \n /* mlx5_rxq.c */\n@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,\n \tstruct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];\n \tvoid *addr;\n \n-\tif (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {\n+\tif (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {\n \t\tMLX5_ASSERT(rep != NULL);\n \t\t/* Replace MPRQ buf. */\n \t\t(*rxq->mprq_bufs)[rq_idx] = rep;\n@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,\n \t\tvoid *buf_addr;\n \n \t\t/* Increment the refcnt of the whole chunk. 
*/\n-\t\t__atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);\n-\t\tMLX5_ASSERT(__atomic_load_n(&buf->refcnt,\n-\t\t\t    __ATOMIC_RELAXED) <= strd_n + 1);\n+\t\trte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);\n+\t\tMLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,\n+\t\t\t    rte_memory_order_relaxed) <= strd_n + 1);\n \t\tbuf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);\n \t\t/*\n \t\t * MLX5 device doesn't use iova but it is necessary in a\n@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,\n \tif (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)\n \t\treturn false;\n \trxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];\n-\treturn !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);\n+\treturn !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);\n }\n \n #define LWM_COOKIE_RXQID_OFFSET 0\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex dd51687..f67aaa6 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -416,7 +416,7 @@\n \t\trte_errno = EINVAL;\n \t\treturn -rte_errno;\n \t}\n-\treturn (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);\n+\treturn (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);\n }\n \n /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */\n@@ -1319,7 +1319,7 @@\n \n \tmemset(_m, 0, sizeof(*buf));\n \tbuf->mp = mp;\n-\t__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);\n \tfor (j = 0; j != strd_n; ++j) {\n \t\tshinfo = &buf->shinfos[j];\n \t\tshinfo->free_cb = mlx5_mprq_buf_free_cb;\n@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *\n \tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n \n \tif (rxq != NULL)\n-\t\t__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);\n \treturn rxq;\n }\n \n@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *\n \n \tif (rxq == NULL)\n \t\treturn 0;\n-\treturn __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;\n+\treturn rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;\n }\n \n /**\n@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *\n {\n \tstruct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);\n \n-\t__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);\n \treturn rxq;\n }\n \n@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *\n {\n \tstruct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);\n \n-\treturn __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;\n+\treturn rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;\n }\n \n /**\n@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *\n \t\t    (memcmp(ind_tbl->queues, queues,\n \t\t\t    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))\n \t\t     == 0)) {\n-\t\t\t__atomic_fetch_add(&ind_tbl->refcnt, 1,\n-\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,\n+\t\t\t\t\t   rte_memory_order_relaxed);\n \t\t\tbreak;\n \t\t}\n \t}\n@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *\n \tunsigned int ret;\n \n \trte_rwlock_write_lock(&priv->ind_tbls_lock);\n-\tret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;\n+\tret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;\n \tif (!ret)\n 
\t\tLIST_REMOVE(ind_tbl, next);\n \trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *\n \t\t}\n \t\treturn ret;\n \t}\n-\t__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);\n \treturn 0;\n }\n \n@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *\n {\n \tuint32_t refcnt;\n \n-\trefcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);\n+\trefcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);\n \tif (refcnt <= 1)\n \t\treturn 0;\n \t/*\n@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *\n \text_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);\n \tif (ext_rxq == NULL)\n \t\treturn -rte_errno;\n-\tif (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,\n-\t\t\t\t\t __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,\n+\t\t\t\t\t rte_memory_order_relaxed, rte_memory_order_relaxed)) {\n \t\tif (ext_rxq->hw_id != hw_idx) {\n \t\t\tDRV_LOG(ERR, \"Port %u external RxQ index %u \"\n \t\t\t\t\"is already mapped to HW index (requesting is \"\n@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *\n \t\trte_errno = EINVAL;\n \t\treturn -rte_errno;\n \t}\n-\tif (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,\n-\t\t\t\t\t __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,\n+\t\t\t\t\t rte_memory_order_relaxed, rte_memory_order_relaxed)) {\n \t\tDRV_LOG(ERR, \"Port %u external RxQ index %u doesn't exist.\",\n \t\t\tport_id, dpdk_idx);\n \t\trte_errno = EINVAL;\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex f8d6728..c241a1d 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -1441,7 +1441,7 @@\n \trte_delay_us_sleep(1000 * priv->rxqs_n);\n \tDRV_LOG(DEBUG, \"port %u stopping device\", dev->data->port_id);\n \tif (priv->sh->config.dv_flow_en == 2) {\n-\t\tif (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))\n+\t\tif (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))\n \t\t\tflow_hw_rxq_flag_set(dev, false);\n \t} else {\n \t\tmlx5_flow_stop_default(dev);\ndiff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h\nindex b1e8ea1..0e44df5 100644\n--- a/drivers/net/mlx5/mlx5_tx.h\n+++ b/drivers/net/mlx5/mlx5_tx.h\n@@ -179,7 +179,7 @@ struct mlx5_txq_data {\n __extension__\n struct mlx5_txq_ctrl {\n \tLIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */\n-\tuint32_t refcnt; /* Reference counter. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n \tbool is_hairpin; /* Whether TxQ type is Hairpin. */\n \tunsigned int max_inline_data; /* Max inline data. 
*/\n@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \t\t * the service thread, data should be re-read.\n \t\t */\n \t\trte_compiler_barrier();\n-\t\tci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);\n-\t\tts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);\n+\t\tci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);\n+\t\tts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);\n \t\trte_compiler_barrier();\n \t\tif (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))\n \t\t\tbreak;\n@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \tmts -= ts;\n \tif (unlikely(mts >= UINT64_MAX / 2)) {\n \t\t/* We have negative integer, mts is in the past. */\n-\t\t__atomic_fetch_add(&sh->txpp.err_ts_past,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \ttick = sh->txpp.tick;\n@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \tmts = (mts + tick - 1) / tick;\n \tif (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {\n \t\t/* We have mts is too distant future. */\n-\t\t__atomic_fetch_add(&sh->txpp.err_ts_future,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n \t\treturn -1;\n \t}\n \tmts <<= 64 - MLX5_CQ_INDEX_WIDTH;\n@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \t\t/* Convert the timestamp into completion to wait. */\n \t\tts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);\n \t\tif (txq->ts_last && ts < txq->ts_last)\n-\t\t\t__atomic_fetch_add(&txq->sh->txpp.err_ts_order,\n-\t\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,\n+\t\t\t\t\t   1, rte_memory_order_relaxed);\n \t\ttxq->ts_last = ts;\n \t\twqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);\n \t\tsh = txq->sh;\ndiff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c\nindex 5a5df2d..4e26fa2 100644\n--- a/drivers/net/mlx5/mlx5_txpp.c\n+++ b/drivers/net/mlx5/mlx5_txpp.c\n@@ -538,12 +538,12 @@\n \t\tuint64_t *ps;\n \n \t\trte_compiler_barrier();\n-\t\ttm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);\n-\t\top = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);\n+\t\ttm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);\n+\t\top = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);\n \t\trte_compiler_barrier();\n-\t\tif (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))\n+\t\tif (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))\n \t\t\tcontinue;\n-\t\tif (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))\n+\t\tif (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))\n \t\t\tcontinue;\n \t\tps = (uint64_t *)ts;\n \t\tps[0] = tm;\n@@ -561,8 +561,8 @@\n \tci = ci << (64 - MLX5_CQ_INDEX_WIDTH);\n \tci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;\n \trte_compiler_barrier();\n-\t__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);\n \trte_wmb();\n }\n \n@@ -590,8 +590,8 @@\n \t\t\t */\n \t\t\tDRV_LOG(DEBUG,\n \t\t\t\t\"Clock Queue error sync lost (%X).\", 
opcode);\n-\t\t\t\t__atomic_fetch_add(&sh->txpp.err_clock_queue,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,\n+\t\t\t\t   1, rte_memory_order_relaxed);\n \t\t\tsh->txpp.sync_lost = 1;\n \t\t}\n \t\treturn;\n@@ -633,10 +633,10 @@\n \tif (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)\n \t\treturn;\n \tMLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);\n-\t__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,\n-\t\t\t sh->txpp.ts.ts, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,\n-\t\t\t sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,\n+\t\t\t sh->txpp.ts.ts, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,\n+\t\t\t sh->txpp.ts.ci_ts, rte_memory_order_relaxed);\n \tif (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)\n \t\tsh->txpp.ts_p = 0;\n \tif (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)\n@@ -677,8 +677,8 @@\n \t\t/* Check whether we have missed interrupts. */\n \t\tif (cq_ci - wq->cq_ci != 1) {\n \t\t\tDRV_LOG(DEBUG, \"Rearm Queue missed interrupt.\");\n-\t\t\t__atomic_fetch_add(&sh->txpp.err_miss_int,\n-\t\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,\n+\t\t\t\t\t   1, rte_memory_order_relaxed);\n \t\t\t/* Check sync lost on wqe index. */\n \t\t\tif (cq_ci - wq->cq_ci >=\n \t\t\t\t(((1UL << MLX5_WQ_INDEX_WIDTH) /\n@@ -693,8 +693,8 @@\n \t\t/* Fire new requests to Rearm Queue. */\n \t\tif (error) {\n \t\t\tDRV_LOG(DEBUG, \"Rearm Queue error sync lost.\");\n-\t\t\t__atomic_fetch_add(&sh->txpp.err_rearm_queue,\n-\t\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,\n+\t\t\t\t\t   1, rte_memory_order_relaxed);\n \t\t\tsh->txpp.sync_lost = 1;\n \t\t}\n \t}\n@@ -987,8 +987,8 @@\n \t\tmlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);\n \t\tif (to.cts.op_own >> 4) {\n \t\t\tDRV_LOG(DEBUG, \"Clock Queue error sync lost.\");\n-\t\t\t__atomic_fetch_add(&sh->txpp.err_clock_queue,\n-\t\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,\n+\t\t\t\t\t   1, rte_memory_order_relaxed);\n \t\t\tsh->txpp.sync_lost = 1;\n \t\t\treturn -EIO;\n \t\t}\n@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n \n-\t__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);\n \treturn 0;\n }\n \n@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,\n \tdo {\n \t\tuint64_t ts, ci;\n \n-\t\tts 
= __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);\n-\t\tci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);\n+\t\tts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);\n+\t\tci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);\n \t\trte_compiler_barrier();\n \t\tif ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)\n \t\t\tcontinue;\n-\t\tif (__atomic_load_n(&txpp->tsa[idx].ts,\n-\t\t\t\t    __ATOMIC_RELAXED) != ts)\n+\t\tif (rte_atomic_load_explicit(&txpp->tsa[idx].ts,\n+\t\t\t\t    rte_memory_order_relaxed) != ts)\n \t\t\tcontinue;\n-\t\tif (__atomic_load_n(&txpp->tsa[idx].ci_ts,\n-\t\t\t\t    __ATOMIC_RELAXED) != ci)\n+\t\tif (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,\n+\t\t\t\t    rte_memory_order_relaxed) != ci)\n \t\t\tcontinue;\n \t\ttsa->ts = ts;\n \t\ttsa->ci_ts = ci;\n@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,\n \t\tfor (i = 0; i < n_txpp; ++i)\n \t\t\tstats[n_used + i].id = n_used + i;\n \t\tstats[n_used + 0].value =\n-\t\t\t\t__atomic_load_n(&sh->txpp.err_miss_int,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&sh->txpp.err_miss_int,\n+\t\t\t\t\t\trte_memory_order_relaxed);\n \t\tstats[n_used + 1].value =\n-\t\t\t\t__atomic_load_n(&sh->txpp.err_rearm_queue,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&sh->txpp.err_rearm_queue,\n+\t\t\t\t\t\trte_memory_order_relaxed);\n \t\tstats[n_used + 2].value =\n-\t\t\t\t__atomic_load_n(&sh->txpp.err_clock_queue,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&sh->txpp.err_clock_queue,\n+\t\t\t\t\t\trte_memory_order_relaxed);\n \t\tstats[n_used + 3].value =\n-\t\t\t\t__atomic_load_n(&sh->txpp.err_ts_past,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&sh->txpp.err_ts_past,\n+\t\t\t\t\t\trte_memory_order_relaxed);\n \t\tstats[n_used + 4].value =\n-\t\t\t\t__atomic_load_n(&sh->txpp.err_ts_future,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&sh->txpp.err_ts_future,\n+\t\t\t\t\t\trte_memory_order_relaxed);\n \t\tstats[n_used + 5].value =\n-\t\t\t\t__atomic_load_n(&sh->txpp.err_ts_order,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\trte_atomic_load_explicit(&sh->txpp.err_ts_order,\n+\t\t\t\t\t\trte_memory_order_relaxed);\n \t\tstats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);\n \t\tstats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);\n \t\tstats[n_used + 8].value = sh->txpp.sync_lost;\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex 14f55e8..da4236f 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\t__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);\n \ttmpl->is_hairpin = false;\n \tLIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);\n \treturn tmpl;\n@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *\n \ttmpl->txq.idx = idx;\n \ttmpl->hairpin_conf = *hairpin_conf;\n \ttmpl->is_hairpin = true;\n-\t__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);\n \tLIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);\n \treturn tmpl;\n }\n@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *\n \n \tif (txq_data) {\n \t\tctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);\n-\t\t__atomic_fetch_add(&ctrl->refcnt, 1, 
__ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);\n \t}\n \treturn ctrl;\n }\n@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *\n \tif (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)\n \t\treturn 0;\n \ttxq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);\n-\tif (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)\n+\tif (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)\n \t\treturn 1;\n \tif (txq_ctrl->obj) {\n \t\tpriv->obj_ops.txq_obj_release(txq_ctrl->obj);\n@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *\n \t\ttxq_free_elts(txq_ctrl);\n \t\tdev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;\n \t}\n-\tif (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {\n+\tif (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {\n \t\tif (!txq_ctrl->is_hairpin)\n \t\t\tmlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);\n \t\tLIST_REMOVE(txq_ctrl, next);\n@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *\n \tif (!(*priv->txqs)[idx])\n \t\treturn -1;\n \ttxq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);\n-\treturn (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);\n+\treturn (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);\n }\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c\nindex e28db2e..fc03cc0 100644\n--- a/drivers/net/mlx5/mlx5_utils.c\n+++ b/drivers/net/mlx5/mlx5_utils.c\n@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *\n \tstruct mlx5_indexed_cache *gc, *lc, *olc = NULL;\n \n \tlc = pool->cache[cidx]->lc;\n-\tgc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);\n+\tgc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);\n \tif (gc && lc != gc) {\n \t\tmlx5_ipool_lock(pool);\n \t\tif (lc && !(--lc->ref_cnt))\n@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *\n \t\tpool->cache[cidx]->len = fetch_size - 1;\n \t\treturn pool->cache[cidx]->idx[pool->cache[cidx]->len];\n \t}\n-\ttrunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,\n-\t\t\t __ATOMIC_ACQUIRE) : 0;\n+\ttrunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,\n+\t\t\t rte_memory_order_acquire) : 0;\n \ttrunk_n = lc ? lc->n_trunk : 0;\n \tcur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);\n \t/* Check if index reach maximum. */\n@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *\n \t\tlc = p;\n \t\tlc->ref_cnt = 1;\n \t\tpool->cache[cidx]->lc = lc;\n-\t\t__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);\n \t}\n \t/* Add trunk to trunks array. */\n \tlc->trunks[trunk_idx] = trunk;\n-\t__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);\n \t/* Enqueue half of the index to global. */\n \tts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;\n \tfetch_size = trunk->free >> 1;\ndiff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h\nindex f3c0d76..3146092 100644\n--- a/drivers/net/mlx5/mlx5_utils.h\n+++ b/drivers/net/mlx5/mlx5_utils.h\n@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {\n \n struct mlx5_indexed_cache {\n \tstruct mlx5_indexed_trunk **trunks;\n-\tvolatile uint32_t n_trunk_valid; /* Trunks allocated. */\n+\tvolatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */\n \tuint32_t n_trunk; /* Trunk pointer array size. 
*/\n \tuint32_t ref_cnt;\n \tuint32_t len;\n@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {\n \t\t\tuint32_t free_list; /* Index to first free trunk. */\n \t\t};\n \t\tstruct {\n-\t\t\tstruct mlx5_indexed_cache *gc;\n+\t\t\tRTE_ATOMIC(struct mlx5_indexed_cache *) gc;\n \t\t\t/* Global cache. */\n \t\t\tstruct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];\n \t\t\t/* Local cache. */\n",
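Reviewer note (not part of the patch): the hunks above are a mechanical substitution of the GCC `__atomic_*` builtins with DPDK's `rte_atomic_*_explicit()` wrappers and `rte_memory_order_*` constants, plus `RTE_ATOMIC()` type specifiers on every field the atomics touch. Two spots are not one-to-one and are worth checking when reviewing: `__atomic_add_fetch()`/`__atomic_sub_fetch()` return the *new* value and have no `rte_` counterpart, so they become fetch-and-op with the operand re-applied to the result; and the four-argument `__atomic_compare_exchange_n()` maps to the strong flavour with the boolean `weak` argument dropped. A minimal sketch of the whole pattern follows; the `struct obj`/`obj_*` names are hypothetical, only the `rte_stdatomic.h` API is taken from the patch:

```c
/*
 * Minimal sketch of the conversion pattern this series applies; not
 * code from the patch itself. Struct and function names are made up.
 */
#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

struct obj {
	/* Atomically accessed fields now carry the RTE_ATOMIC() specifier. */
	RTE_ATOMIC(uint32_t) refcnt;
};

/* __atomic_fetch_add(&refcnt, 1, __ATOMIC_RELAXED) maps one-to-one. */
static inline void
obj_ref(struct obj *o)
{
	rte_atomic_fetch_add_explicit(&o->refcnt, 1, rte_memory_order_relaxed);
}

/*
 * __atomic_sub_fetch() returns the new value and has no rte_
 * equivalent, so it is rewritten as fetch_sub with the operand
 * re-applied to the returned (old) value.
 */
static inline uint32_t
obj_unref(struct obj *o)
{
	return rte_atomic_fetch_sub_explicit(&o->refcnt, 1,
					     rte_memory_order_relaxed) - 1;
}

/*
 * __atomic_compare_exchange_n(ptr, &exp, desired, weak, succ, fail)
 * maps to the strong flavour; the boolean "weak" argument disappears.
 */
static inline bool
obj_try_retire(struct obj *o)
{
	uint32_t expected = 1;

	return rte_atomic_compare_exchange_strong_explicit(&o->refcnt,
	    &expected, 0, rte_memory_order_acquire, rte_memory_order_relaxed);
}
```

When DPDK is built with the `enable_stdatomic` option these wrappers should expand to C11 `atomic_*_explicit()` operations, and otherwise they fall back to the same GCC builtins, so the rewrite is intended to be behavior-preserving either way.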
    "prefixes": [
        "v3",
        "01/45"
    ]
}
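One idiom in this patch goes beyond plain reference counting: the `mlx5_tx.h` and `mlx5_txpp.c` hunks read paired 64-bit words with relaxed loads bracketed by `rte_compiler_barrier()` and retry until the pair is consistent, since a single 128-bit atomic read is not assumed. Below is a hedged, generic sketch of the re-read-and-compare variant of that idiom; the `struct ts_snapshot`/`ts_snapshot_read()` names are hypothetical, not from the patch:

```c
/*
 * Sketch of the torn-read retry idiom visible in the mlx5_tx.h and
 * mlx5_txpp.c hunks; illustrative only, names are made up.
 */
#include <stdint.h>

#include <rte_atomic.h>		/* rte_compiler_barrier() */
#include <rte_stdatomic.h>

struct ts_snapshot {
	RTE_ATOMIC(uint64_t) ts;	/* timestamp word */
	RTE_ATOMIC(uint64_t) ci_ts;	/* index + timestamp word */
};

/* Keep re-reading until both words come from the same writer update. */
static inline void
ts_snapshot_read(const struct ts_snapshot *s, uint64_t *ts, uint64_t *ci)
{
	for (;;) {
		rte_compiler_barrier();
		*ts = rte_atomic_load_explicit(&s->ts,
					       rte_memory_order_relaxed);
		*ci = rte_atomic_load_explicit(&s->ci_ts,
					       rte_memory_order_relaxed);
		rte_compiler_barrier();
		/* Retry if either word changed while we were reading. */
		if (*ts != rte_atomic_load_explicit(&s->ts,
						    rte_memory_order_relaxed))
			continue;
		if (*ci != rte_atomic_load_explicit(&s->ci_ts,
						    rte_memory_order_relaxed))
			continue;
		break;
	}
}
```

Relaxed ordering appears sufficient here because, per the comments in the surrounding mlx5 code, a single service thread writes both words and readers only need a mutually consistent pair; the compiler barriers keep the loads from being reordered or fused away.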