get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (full update). A write example follows the response below.
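
A minimal read sketch with Python's requests library; the endpoint URL and the fields printed come from the JSON response shown below, while the choice of requests is an assumption of this example and not part of the API.

    import requests

    # Read-only fetch of this patch; the public endpoint needs no authentication.
    url = "http://patches.dpdk.org/api/patches/138904/"
    resp = requests.get(url, headers={"Accept": "application/json"})
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # "[v3,28/45] common/mlx5: use rte stdatomic API"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # raw mbox URL, suitable for `git am`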

GET /api/patches/138904/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 138904,
    "url": "http://patches.dpdk.org/api/patches/138904/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-29-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1711579078-10624-29-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1711579078-10624-29-git-send-email-roretzla@linux.microsoft.com",
    "date": "2024-03-27T22:37:41",
    "name": "[v3,28/45] common/mlx5: use rte stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "08f4d18de29298049a120e8e0b66965efc1a57cb",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-29-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 31633,
            "url": "http://patches.dpdk.org/api/series/31633/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31633",
            "date": "2024-03-27T22:37:13",
            "name": "use stdatomic API",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/31633/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/138904/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/138904/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BA70C43D55;\n\tWed, 27 Mar 2024 23:40:54 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D79CA42E17;\n\tWed, 27 Mar 2024 23:38:40 +0100 (CET)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 1FF59427D8\n for <dev@dpdk.org>; Wed, 27 Mar 2024 23:38:07 +0100 (CET)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id C9D0C20E6F0D; Wed, 27 Mar 2024 15:38:00 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com C9D0C20E6F0D",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1711579081;\n bh=3dyxhHOmEQHirswakVoXTSWcemtp6+kDU5U9YJr2dno=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=jtasGIsAIKPM97nzprnEo5EYQn/XR63tA6J5BcYDyWXbLN1efmmBXVxVk1usRwca3\n MjGPRLHyYoL5AnEQo7t0I013XX/4nYptLMNfE6FZy9inmbueOFTM+De2+R5ExEAqmG\n EaNrIdxwcNMThuI+TUX/Xmw0nWNsFAfpNo6lIs8s=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>,\n\t=?utf-8?q?Morten_Br=C3=B8rup?= <mb@smartsharesystems.com>,\n Abdullah Sevincer <abdullah.sevincer@intel.com>,\n Ajit Khaparde <ajit.khaparde@broadcom.com>, Alok Prasad <palok@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Anoob Joseph <anoobj@marvell.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Byron Marohn <byron.marohn@intel.com>, Chenbo Xia <chenbox@nvidia.com>,\n Chengwen Feng <fengchengwen@huawei.com>,\n Ciara Loftus <ciara.loftus@intel.com>, Ciara Power <ciara.power@intel.com>,\n Dariusz Sosnowski <dsosnowski@nvidia.com>, David Hunt <david.hunt@intel.com>,\n Devendra Singh Rawat <dsinghrawat@marvell.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Guoyang Zhou <zhouguoyang@huawei.com>, Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jakub Grajciar <jgrajcia@cisco.com>, Jerin Jacob <jerinj@marvell.com>,\n Jeroen de Borst <jeroendb@google.com>, Jian Wang <jianwang@trustnetic.com>,\n Jiawen Wu <jiawenwu@trustnetic.com>, Jie Hai <haijie1@huawei.com>,\n Jingjing Wu <jingjing.wu@intel.com>, Joshua Washington <joshwash@google.com>,\n Joyce Kong <joyce.kong@arm.com>, Junfeng Guo <junfeng.guo@intel.com>,\n Kevin Laatz <kevin.laatz@intel.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Liang Ma <liangma@liangbit.com>, Long Li <longli@microsoft.com>,\n Maciej Czekaj <mczekaj@marvell.com>, Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>, Ori Kam <orika@nvidia.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Peter Mccarthy <peter.mccarthy@intel.com>,\n Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>,\n Reshma Pattan <reshma.pattan@intel.com>, Rosen Xu <rosen.xu@intel.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, Rushil Gupta <rushilg@google.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Somnath Kotur <somnath.kotur@broadcom.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Sunil Uttarwar <sunilprakashrao.uttarwar@amd.com>,\n Tetsuya Mukawa <mtetsuyah@gmail.com>, Vamsi Attunuru <vattunuru@marvell.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>,\n Yipeng Wang <yipeng1.wang@intel.com>, Yisen Zhuang <yisen.zhuang@huawei.com>,\n Yuying Zhang <Yuying.Zhang@intel.com>, Yuying Zhang <yuying.zhang@intel.com>,\n Ziyang Xuan <xuanziyang2@huawei.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 28/45] common/mlx5: use rte stdatomic API",
        "Date": "Wed, 27 Mar 2024 15:37:41 -0700",
        "Message-Id": "<1711579078-10624-29-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>\n <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional rte stdatomic API.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\nAcked-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n drivers/common/mlx5/linux/mlx5_nl.c     |  5 +--\n drivers/common/mlx5/mlx5_common.h       |  2 +-\n drivers/common/mlx5/mlx5_common_mr.c    | 16 ++++-----\n drivers/common/mlx5/mlx5_common_mr.h    |  2 +-\n drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------\n drivers/common/mlx5/mlx5_common_utils.h |  6 ++--\n drivers/common/mlx5/mlx5_malloc.c       | 58 ++++++++++++++++-----------------\n 7 files changed, 61 insertions(+), 60 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c\nindex 28a1f56..bf6dd19 100644\n--- a/drivers/common/mlx5/linux/mlx5_nl.c\n+++ b/drivers/common/mlx5/linux/mlx5_nl.c\n@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {\n \tuint16_t state; /**< IB device port state (out). */\n };\n \n-uint32_t atomic_sn;\n+RTE_ATOMIC(uint32_t) atomic_sn;\n \n /* Generate Netlink sequence number. */\n-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)\n+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \\\n+\trte_memory_order_relaxed) + 1)\n \n /**\n  * Opens a Netlink socket.\ndiff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h\nindex 9c80277..14c70ed 100644\n--- a/drivers/common/mlx5/mlx5_common.h\n+++ b/drivers/common/mlx5/mlx5_common.h\n@@ -195,7 +195,7 @@ enum mlx5_cqe_status {\n \t/* Prevent speculative reading of other fields in CQE until\n \t * CQE is valid.\n \t */\n-\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\trte_atomic_thread_fence(rte_memory_order_acquire);\n \n \tif (unlikely(op_code == MLX5_CQE_RESP_ERR ||\n \t\t     op_code == MLX5_CQE_REQ_ERR))\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c\nindex 85ec10d..50922ad 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.c\n+++ b/drivers/common/mlx5/mlx5_common_mr.c\n@@ -35,7 +35,7 @@ struct mlx5_range {\n /** Memory region for a mempool. */\n struct mlx5_mempool_mr {\n \tstruct mlx5_pmd_mr pmd_mr;\n-\tuint32_t refcnt; /**< Number of mempools sharing this MR. */\n+\tRTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */\n };\n \n /* Mempool registration. */\n@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {\n {\n \tstruct mlx5_mprq_buf *buf = opaque;\n \n-\tif (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {\n+\tif (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {\n \t\trte_mempool_put(buf->mp, buf);\n-\t} else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,\n-\t\t\t\t\t       __ATOMIC_RELAXED) - 1 == 0)) {\n-\t\t__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);\n+\t} else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,\n+\t\t\t\t\t       rte_memory_order_relaxed) - 1 == 0)) {\n+\t\trte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);\n \t\trte_mempool_put(buf->mp, buf);\n \t}\n }\n@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {\n \tunsigned int i;\n \n \tfor (i = 0; i < mpr->mrs_n; i++)\n-\t\t__atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);\n }\n \n /**\n@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {\n \tbool ret = false;\n \n \tfor (i = 0; i < mpr->mrs_n; i++)\n-\t\tret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,\n-\t\t\t\t\t  __ATOMIC_RELAXED) - 1 == 0;\n+\t\tret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,\n+\t\t\t\t\t  rte_memory_order_relaxed) - 1 == 0;\n \treturn ret;\n }\n \ndiff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h\nindex 8789d40..5bdf48a 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.h\n+++ b/drivers/common/mlx5/mlx5_common_mr.h\n@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {\n /* Multi-Packet RQ buffer header. */\n struct mlx5_mprq_buf {\n \tstruct rte_mempool *mp;\n-\tuint16_t refcnt; /* Atomically accessed refcnt. */\n+\tRTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. 
*/\n \tstruct rte_mbuf_ext_shared_info shinfos[];\n \t/*\n \t * Shared information per stride.\ndiff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c\nindex e69d068..4b95d35 100644\n--- a/drivers/common/mlx5/mlx5_common_utils.c\n+++ b/drivers/common/mlx5/mlx5_common_utils.c\n@@ -81,14 +81,14 @@ struct mlx5_list *\n \twhile (entry != NULL) {\n \t\tif (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {\n \t\t\tif (reuse) {\n-\t\t\t\tret = __atomic_fetch_add(&entry->ref_cnt, 1,\n-\t\t\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\t\tret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,\n+\t\t\t\t\t\t\t rte_memory_order_relaxed);\n \t\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p ref: %u.\",\n \t\t\t\t\tl_const->name, (void *)entry,\n \t\t\t\t\tentry->ref_cnt);\n \t\t\t} else if (lcore_index < MLX5_LIST_GLOBAL) {\n-\t\t\t\tret = __atomic_load_n(&entry->ref_cnt,\n-\t\t\t\t\t\t      __ATOMIC_RELAXED);\n+\t\t\t\tret = rte_atomic_load_explicit(&entry->ref_cnt,\n+\t\t\t\t\t\t      rte_memory_order_relaxed);\n \t\t\t}\n \t\t\tif (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))\n \t\t\t\treturn entry;\n@@ -151,13 +151,13 @@ struct mlx5_list_entry *\n {\n \tstruct mlx5_list_cache *c = l_inconst->cache[lcore_index];\n \tstruct mlx5_list_entry *entry = LIST_FIRST(&c->h);\n-\tuint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,\n-\t\t\t\t\t       __ATOMIC_RELAXED);\n+\tuint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,\n+\t\t\t\t\t       rte_memory_order_relaxed);\n \n \twhile (inv_cnt != 0 && entry != NULL) {\n \t\tstruct mlx5_list_entry *nentry = LIST_NEXT(entry, next);\n \n-\t\tif (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {\n+\t\tif (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {\n \t\t\tLIST_REMOVE(entry, next);\n \t\t\tif (l_const->lcores_share)\n \t\t\t\tl_const->cb_clone_free(l_const->ctx, entry);\n@@ -217,7 +217,7 @@ struct mlx5_list_entry *\n \t\tentry->lcore_idx = (uint32_t)lcore_index;\n \t\tLIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,\n \t\t\t\t entry, next);\n-\t\t__atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);\n \t\tDRV_LOG(DEBUG, \"MLX5 list %s c%d entry %p new: %u.\",\n \t\t\tl_const->name, lcore_index,\n \t\t\t(void *)entry, entry->ref_cnt);\n@@ -254,7 +254,7 @@ struct mlx5_list_entry *\n \tl_inconst->gen_cnt++;\n \trte_rwlock_write_unlock(&l_inconst->lock);\n \tLIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);\n-\t__atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);\n+\trte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);\n \tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\", l_const->name,\n \t\t(void *)entry, entry->ref_cnt);\n \treturn local_entry;\n@@ -285,7 +285,7 @@ struct mlx5_list_entry *\n {\n \tstruct mlx5_list_entry *gentry = entry->gentry;\n \n-\tif (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)\n+\tif (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)\n \t\treturn 1;\n \tif (entry->lcore_idx == (uint32_t)lcore_idx) {\n \t\tLIST_REMOVE(entry, next);\n@@ -294,23 +294,23 @@ struct mlx5_list_entry *\n \t\telse\n \t\t\tl_const->cb_remove(l_const->ctx, entry);\n \t} else {\n-\t\t__atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,\n-\t\t\t\t   1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,\n+\t\t\t\t   1, 
rte_memory_order_relaxed);\n \t}\n \tif (!l_const->lcores_share) {\n-\t\t__atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);\n \t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p removed.\",\n \t\t\tl_const->name, (void *)entry);\n \t\treturn 0;\n \t}\n-\tif (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)\n+\tif (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)\n \t\treturn 1;\n \trte_rwlock_write_lock(&l_inconst->lock);\n \tif (likely(gentry->ref_cnt == 0)) {\n \t\tLIST_REMOVE(gentry, next);\n \t\trte_rwlock_write_unlock(&l_inconst->lock);\n \t\tl_const->cb_remove(l_const->ctx, gentry);\n-\t\t__atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);\n \t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p removed.\",\n \t\t\tl_const->name, (void *)gentry);\n \t\treturn 0;\n@@ -377,7 +377,7 @@ struct mlx5_list_entry *\n mlx5_list_get_entry_num(struct mlx5_list *list)\n {\n \tMLX5_ASSERT(list);\n-\treturn __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);\n+\treturn rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);\n }\n \n /********************* Hash List **********************/\ndiff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h\nindex ae15119..cb4d104 100644\n--- a/drivers/common/mlx5/mlx5_common_utils.h\n+++ b/drivers/common/mlx5/mlx5_common_utils.h\n@@ -29,7 +29,7 @@\n  */\n struct mlx5_list_entry {\n \tLIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */\n-\tuint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */\n+\tRTE_ATOMIC(uint32_t) ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */\n \tuint32_t lcore_idx;\n \tunion {\n \t\tstruct mlx5_list_entry *gentry;\n@@ -39,7 +39,7 @@ struct mlx5_list_entry {\n \n struct mlx5_list_cache {\n \tLIST_HEAD(mlx5_list_head, mlx5_list_entry) h;\n-\tuint32_t inv_cnt; /* Invalid entries counter. */\n+\tRTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */\n } __rte_cache_aligned;\n \n /**\n@@ -111,7 +111,7 @@ struct mlx5_list_const {\n struct mlx5_list_inconst {\n \trte_rwlock_t lock; /* read/write lock. */\n \tvolatile uint32_t gen_cnt; /* List modification may update it. */\n-\tvolatile uint32_t count; /* number of entries in list. */\n+\tvolatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */\n \tstruct mlx5_list_cache *cache[MLX5_LIST_MAX];\n \t/* Lcore cache, last index is the global cache. */\n };\ndiff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c\nindex c58c41d..ef6dabe 100644\n--- a/drivers/common/mlx5/mlx5_malloc.c\n+++ b/drivers/common/mlx5/mlx5_malloc.c\n@@ -16,7 +16,7 @@ struct mlx5_sys_mem {\n \tuint32_t init:1; /* Memory allocator initialized. */\n \tuint32_t enable:1; /* System memory select. */\n \tuint32_t reserve:30; /* Reserve. */\n-\tstruct rte_memseg_list *last_msl;\n+\tRTE_ATOMIC(struct rte_memseg_list *) last_msl;\n \t/* last allocated rte memory memseg list. 
*/\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n \tuint64_t malloc_sys;\n@@ -93,14 +93,14 @@ struct mlx5_sys_mem {\n \t * different with the cached msl.\n \t */\n \tif (addr && !mlx5_mem_check_msl(addr,\n-\t    (struct rte_memseg_list *)__atomic_load_n\n-\t    (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {\n-\t\t__atomic_store_n(&mlx5_sys_mem.last_msl,\n+\t    (struct rte_memseg_list *)rte_atomic_load_explicit\n+\t    (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {\n+\t\trte_atomic_store_explicit(&mlx5_sys_mem.last_msl,\n \t\t\trte_mem_virt2memseg_list(addr),\n-\t\t\t__ATOMIC_RELAXED);\n+\t\t\trte_memory_order_relaxed);\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n-\t\t__atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,\n-\t\t\t\t   __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,\n+\t\t\t\t   rte_memory_order_relaxed);\n #endif\n \t}\n }\n@@ -122,11 +122,11 @@ struct mlx5_sys_mem {\n \t * to check if the memory belongs to rte memory.\n \t */\n \tif (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)\n-\t    __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {\n+\t    rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {\n \t\tif (!rte_mem_virt2memseg_list(addr))\n \t\t\treturn false;\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n-\t\t__atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);\n #endif\n \t}\n \treturn true;\n@@ -185,8 +185,8 @@ struct mlx5_sys_mem {\n \t\tmlx5_mem_update_msl(addr);\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n \t\tif (addr)\n-\t\t\t__atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,\n-\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,\n+\t\t\t\t\t   rte_memory_order_relaxed);\n #endif\n \t\treturn addr;\n \t}\n@@ -199,8 +199,8 @@ struct mlx5_sys_mem {\n \t\taddr = malloc(size);\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n \tif (addr)\n-\t\t__atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,\n-\t\t\t\t   __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,\n+\t\t\t\t   rte_memory_order_relaxed);\n #endif\n \treturn addr;\n }\n@@ -233,8 +233,8 @@ struct mlx5_sys_mem {\n \t\tmlx5_mem_update_msl(new_addr);\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n \t\tif (new_addr)\n-\t\t\t__atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,\n-\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,\n+\t\t\t\t\t   rte_memory_order_relaxed);\n #endif\n \t\treturn new_addr;\n \t}\n@@ -246,8 +246,8 @@ struct mlx5_sys_mem {\n \tnew_addr = realloc(addr, size);\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n \tif (new_addr)\n-\t\t__atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,\n-\t\t\t\t   __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,\n+\t\t\t\t   rte_memory_order_relaxed);\n #endif\n \treturn new_addr;\n }\n@@ -259,14 +259,14 @@ struct mlx5_sys_mem {\n \t\treturn;\n \tif (!mlx5_mem_is_rte(addr)) {\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n-\t\t__atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,\n-\t\t\t\t   __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,\n+\t\t\t\t   rte_memory_order_relaxed);\n #endif\n \t\tmlx5_os_free(addr);\n \t} else {\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n-\t\t__atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,\n-\t\t\t\t   __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,\n+\t\t\t\t   rte_memory_order_relaxed);\n #endif\n \t\trte_free(addr);\n \t}\n@@ -280,14 +280,14 @@ struct mlx5_sys_mem {\n 
\t\t\" free:%\"PRIi64\"\\nRTE memory malloc:%\"PRIi64\",\"\n \t\t\" realloc:%\"PRIi64\", free:%\"PRIi64\"\\nMSL miss:%\"PRIi64\",\"\n \t\t\" update:%\"PRIi64\"\",\n-\t\t__atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),\n-\t\t__atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),\n-\t\t__atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),\n-\t\t__atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),\n-\t\t__atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),\n-\t\t__atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),\n-\t\t__atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),\n-\t\t__atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),\n+\t\trte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));\n #endif\n }\n \n",
    "prefixes": [
        "v3",
        "28/45"
    ]
}
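
The PATCH and PUT verbs advertised in the Allow header above modify the patch and require maintainer credentials. A hedged sketch of a partial update with Python requests, assuming a per-user API token issued by the patchwork instance; the token value and the chosen field values are placeholders, not taken from this response.

    import requests

    url = "http://patches.dpdk.org/api/patches/138904/"
    # Placeholder token; write access is authenticated, e.g. with an API token.
    headers = {"Authorization": "Token 0123456789abcdef"}

    # PATCH changes only the fields supplied; PUT would resubmit every writable field.
    resp = requests.patch(url, json={"state": "accepted", "archived": True}, headers=headers)
    resp.raise_for_status()
    print(resp.json()["state"])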