get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/132816/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132816,
    "url": "http://patches.dpdk.org/api/patches/132816/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1697574677-16578-14-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1697574677-16578-14-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1697574677-16578-14-git-send-email-roretzla@linux.microsoft.com",
    "date": "2023-10-17T20:31:11",
    "name": "[v2,13/19] vhost: use rte optional stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d7e26f2f52dd9da8e08b56a921aacde101b84c2b",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1697574677-16578-14-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 29892,
            "url": "http://patches.dpdk.org/api/series/29892/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29892",
            "date": "2023-10-17T20:30:58",
            "name": "use rte optional stdatomic API",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/29892/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/132816/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/132816/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6B99C43190;\n\tTue, 17 Oct 2023 22:32:56 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 7060F42E57;\n\tTue, 17 Oct 2023 22:31:39 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 817FC402DD\n for <dev@dpdk.org>; Tue, 17 Oct 2023 22:31:20 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id 35CE220B74CD; Tue, 17 Oct 2023 13:31:18 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com 35CE220B74CD",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1697574679;\n bh=kQWKjluXmL1Ut6QiK2YkU0t25uV10RyUjIyAgWBMAmI=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=KLR3o2B6b4xj6HLEUMcLfAIKjrYFhb3HAwDBg9CK0SfkJDFGOnUPCVXXOq3vNAbCA\n zu7oSERqzsH/pJePtrjP/62e7s3A/6Fih1NFTC7rbzg7fwP9VnRA8sbvAvMjRvKknc\n E6E40rW1WQtxOhRhCf4oa7U0vBE/hC8m9nsWPMQM=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "Akhil Goyal <gakhil@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Chenbo Xia <chenbo.xia@intel.com>, Ciara Power <ciara.power@intel.com>,\n David Christensen <drc@linux.vnet.ibm.com>,\n David Hunt <david.hunt@intel.com>,\n Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,\n Dmitry Malloy <dmitrym@microsoft.com>,\n Elena Agostini <eagostini@nvidia.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Fan Zhang <fanzhang.oss@gmail.com>, Ferruh Yigit <ferruh.yigit@amd.com>,\n Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jerin Jacob <jerinj@marvell.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>,\n Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,\n Pallavi Kadam <pallavi.kadam@intel.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Reshma Pattan <reshma.pattan@intel.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Shijith Thotton <sthotton@marvell.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Yipeng Wang <yipeng1.wang@intel.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v2 13/19] vhost: use rte optional stdatomic API",
        "Date": "Tue, 17 Oct 2023 13:31:11 -0700",
        "Message-Id": "<1697574677-16578-14-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1697574677-16578-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>\n <1697574677-16578-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional stdatomic API\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\n---\n lib/vhost/vdpa.c            |  3 ++-\n lib/vhost/vhost.c           | 42 ++++++++++++++++----------------\n lib/vhost/vhost.h           | 39 ++++++++++++++++--------------\n lib/vhost/vhost_user.c      |  6 ++---\n lib/vhost/virtio_net.c      | 58 +++++++++++++++++++++++++--------------------\n lib/vhost/virtio_net_ctrl.c |  6 +++--\n 6 files changed, 84 insertions(+), 70 deletions(-)",
    "diff": "diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c\nindex 6284ea2..219eef8 100644\n--- a/lib/vhost/vdpa.c\n+++ b/lib/vhost/vdpa.c\n@@ -235,7 +235,8 @@ struct rte_vdpa_device *\n \t}\n \n \t/* used idx is the synchronization point for the split vring */\n-\t__atomic_store_n(&vq->used->idx, idx_m, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit((unsigned short __rte_atomic *)&vq->used->idx,\n+\t\tidx_m, rte_memory_order_release);\n \n \tif (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))\n \t\tvring_used_event(s_vring) = idx_m;\ndiff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c\nindex 7fde412..bdcf85b 100644\n--- a/lib/vhost/vhost.c\n+++ b/lib/vhost/vhost.c\n@@ -128,12 +128,13 @@ struct vhost_vq_stats_name_off {\n {\n #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)\n \t/*\n-\t * __sync_ built-ins are deprecated, but __atomic_ ones\n+\t * __sync_ built-ins are deprecated, but rte_atomic_ ones\n \t * are sub-optimized in older GCC versions.\n \t */\n \t__sync_fetch_and_or_1(addr, (1U << nr));\n #else\n-\t__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);\n+\trte_atomic_fetch_or_explicit((volatile uint8_t __rte_atomic *)addr, (1U << nr),\n+\t\trte_memory_order_relaxed);\n #endif\n }\n \n@@ -155,7 +156,7 @@ struct vhost_vq_stats_name_off {\n \t\treturn;\n \n \t/* To make sure guest memory updates are committed before logging */\n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \n \tpage = addr / VHOST_LOG_PAGE;\n \twhile (page * VHOST_LOG_PAGE < addr + len) {\n@@ -197,7 +198,7 @@ struct vhost_vq_stats_name_off {\n \tif (unlikely(!vq->log_cache))\n \t\treturn;\n \n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \n \tlog_base = (unsigned long *)(uintptr_t)dev->log_base;\n \n@@ -206,17 +207,18 @@ struct vhost_vq_stats_name_off {\n \n #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)\n \t\t/*\n-\t\t * '__sync' builtins are deprecated, but '__atomic' ones\n+\t\t * '__sync' builtins are deprecated, but 'rte_atomic' ones\n \t\t * are sub-optimized in older GCC versions.\n \t\t */\n \t\t__sync_fetch_and_or(log_base + elem->offset, elem->val);\n #else\n-\t\t__atomic_fetch_or(log_base + elem->offset, elem->val,\n-\t\t\t\t__ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_or_explicit(\n+\t\t\t(unsigned long __rte_atomic *)(log_base + elem->offset),\n+\t\t\telem->val, rte_memory_order_relaxed);\n #endif\n \t}\n \n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \n \tvq->log_cache_nb_elem = 0;\n }\n@@ -231,7 +233,7 @@ struct vhost_vq_stats_name_off {\n \n \tif (unlikely(!vq->log_cache)) {\n \t\t/* No logging cache allocated, write dirty log map directly */\n-\t\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\t\trte_atomic_thread_fence(rte_memory_order_release);\n \t\tvhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);\n \n \t\treturn;\n@@ -251,7 +253,7 @@ struct vhost_vq_stats_name_off {\n \t\t * No more room for a new log cache entry,\n \t\t * so write the dirty log map directly.\n \t\t */\n-\t\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\t\trte_atomic_thread_fence(rte_memory_order_release);\n \t\tvhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);\n \n \t\treturn;\n@@ -1184,11 +1186,11 @@ struct vhost_vq_stats_name_off {\n \tif (unlikely(idx >= vq->size))\n \t\treturn -1;\n \n-\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \n 
\tvq->inflight_split->desc[idx].inflight = 0;\n \n-\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \n \tvq->inflight_split->used_idx = last_used_idx;\n \treturn 0;\n@@ -1227,11 +1229,11 @@ struct vhost_vq_stats_name_off {\n \tif (unlikely(head >= vq->size))\n \t\treturn -1;\n \n-\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \n \tinflight_info->desc[head].inflight = 0;\n \n-\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \n \tinflight_info->old_free_head = inflight_info->free_head;\n \tinflight_info->old_used_idx = inflight_info->used_idx;\n@@ -1454,7 +1456,7 @@ struct vhost_vq_stats_name_off {\n \t\t\tvq->avail_wrap_counter << 15;\n \t}\n \n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \n \tvq->device_event->flags = flags;\n \treturn 0;\n@@ -1519,16 +1521,16 @@ struct vhost_vq_stats_name_off {\n \n \trte_rwlock_read_lock(&vq->access_lock);\n \n-\t__atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&vq->irq_pending, false, rte_memory_order_release);\n \n \tif (dev->backend_ops->inject_irq(dev, vq)) {\n \t\tif (dev->flags & VIRTIO_DEV_STATS_ENABLED)\n-\t\t\t__atomic_fetch_add(&vq->stats.guest_notifications_error,\n-\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&vq->stats.guest_notifications_error,\n+\t\t\t\t\t1, rte_memory_order_relaxed);\n \t} else {\n \t\tif (dev->flags & VIRTIO_DEV_STATS_ENABLED)\n-\t\t\t__atomic_fetch_add(&vq->stats.guest_notifications,\n-\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&vq->stats.guest_notifications,\n+\t\t\t\t\t1, rte_memory_order_relaxed);\n \t\tif (dev->notify_ops->guest_notified)\n \t\t\tdev->notify_ops->guest_notified(dev->vid);\n \t}\ndiff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h\nindex 5fc9035..f8624fb 100644\n--- a/lib/vhost/vhost.h\n+++ b/lib/vhost/vhost.h\n@@ -158,9 +158,9 @@ struct virtqueue_stats {\n \tuint64_t inflight_completed;\n \tuint64_t guest_notifications_suppressed;\n \t/* Counters below are atomic, and should be incremented as such. 
*/\n-\tuint64_t guest_notifications;\n-\tuint64_t guest_notifications_offloaded;\n-\tuint64_t guest_notifications_error;\n+\tRTE_ATOMIC(uint64_t) guest_notifications;\n+\tRTE_ATOMIC(uint64_t) guest_notifications_offloaded;\n+\tRTE_ATOMIC(uint64_t) guest_notifications_error;\n };\n \n /**\n@@ -348,7 +348,7 @@ struct vhost_virtqueue {\n \tstruct vhost_vring_addr ring_addrs;\n \tstruct virtqueue_stats\tstats;\n \n-\tbool irq_pending;\n+\tRTE_ATOMIC(bool) irq_pending;\n } __rte_cache_aligned;\n \n /* Virtio device status as per Virtio specification */\n@@ -486,7 +486,7 @@ struct virtio_net {\n \tuint32_t\t\tflags;\n \tuint16_t\t\tvhost_hlen;\n \t/* to tell if we need broadcast rarp packet */\n-\tint16_t\t\t\tbroadcast_rarp;\n+\tRTE_ATOMIC(int16_t)\tbroadcast_rarp;\n \tuint32_t\t\tnr_vring;\n \tint\t\t\tasync_copy;\n \n@@ -557,7 +557,8 @@ struct virtio_net {\n static inline bool\n desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)\n {\n-\tuint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);\n+\tuint16_t flags = rte_atomic_load_explicit((unsigned short __rte_atomic *)&desc->flags,\n+\t\trte_memory_order_acquire);\n \n \treturn wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&\n \t\twrap_counter != !!(flags & VRING_DESC_F_USED);\n@@ -914,17 +915,19 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tbool expected = false;\n \n \tif (dev->notify_ops->guest_notify) {\n-\t\tif (__atomic_compare_exchange_n(&vq->irq_pending, &expected, true, 0,\n-\t\t\t\t  __ATOMIC_RELEASE, __ATOMIC_RELAXED)) {\n+\t\tif (rte_atomic_compare_exchange_strong_explicit(&vq->irq_pending, &expected, true,\n+\t\t\t\t  rte_memory_order_release, rte_memory_order_relaxed)) {\n \t\t\tif (dev->notify_ops->guest_notify(dev->vid, vq->index)) {\n \t\t\t\tif (dev->flags & VIRTIO_DEV_STATS_ENABLED)\n-\t\t\t\t\t__atomic_fetch_add(&vq->stats.guest_notifications_offloaded,\n-\t\t\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\t\t\trte_atomic_fetch_add_explicit(\n+\t\t\t\t\t\t&vq->stats.guest_notifications_offloaded,\n+\t\t\t\t\t\t1, rte_memory_order_relaxed);\n \t\t\t\treturn;\n \t\t\t}\n \n \t\t\t/* Offloading failed, fallback to direct IRQ injection */\n-\t\t\t__atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE);\n+\t\t\trte_atomic_store_explicit(&vq->irq_pending, false,\n+\t\t\t\trte_memory_order_release);\n \t\t} else {\n \t\t\tvq->stats.guest_notifications_suppressed++;\n \t\t\treturn;\n@@ -933,14 +936,14 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \n \tif (dev->backend_ops->inject_irq(dev, vq)) {\n \t\tif (dev->flags & VIRTIO_DEV_STATS_ENABLED)\n-\t\t\t__atomic_fetch_add(&vq->stats.guest_notifications_error,\n-\t\t\t\t1, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_add_explicit(&vq->stats.guest_notifications_error,\n+\t\t\t\t1, rte_memory_order_relaxed);\n \t\treturn;\n \t}\n \n \tif (dev->flags & VIRTIO_DEV_STATS_ENABLED)\n-\t\t__atomic_fetch_add(&vq->stats.guest_notifications,\n-\t\t\t1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&vq->stats.guest_notifications,\n+\t\t\t1, rte_memory_order_relaxed);\n \tif (dev->notify_ops->guest_notified)\n \t\tdev->notify_ops->guest_notified(dev->vid);\n }\n@@ -949,7 +952,7 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,\n vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)\n {\n \t/* Flush used->idx update before we read avail->flags. 
*/\n-\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \n \t/* Don't kick guest if we don't reach index specified by guest. */\n \tif (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {\n@@ -981,7 +984,7 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \tbool signalled_used_valid, kick = false;\n \n \t/* Flush used desc update. */\n-\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \n \tif (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {\n \t\tif (vq->driver_event->flags !=\n@@ -1007,7 +1010,7 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,\n \t\tgoto kick;\n \t}\n \n-\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\trte_atomic_thread_fence(rte_memory_order_acquire);\n \n \toff_wrap = vq->driver_event->off_wrap;\n \toff = off_wrap & ~(1 << 15);\ndiff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c\nindex 901a80b..e363121 100644\n--- a/lib/vhost/vhost_user.c\n+++ b/lib/vhost/vhost_user.c\n@@ -1914,7 +1914,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev,\n \n \tif (inflight_split->used_idx != used->idx) {\n \t\tinflight_split->desc[last_io].inflight = 0;\n-\t\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\t\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \t\tinflight_split->used_idx = used->idx;\n \t}\n \n@@ -2418,10 +2418,10 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev,\n \t * Set the flag to inject a RARP broadcast packet at\n \t * rte_vhost_dequeue_burst().\n \t *\n-\t * __ATOMIC_RELEASE ordering is for making sure the mac is\n+\t * rte_memory_order_release ordering is for making sure the mac is\n \t * copied before the flag is set.\n \t */\n-\t__atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&dev->broadcast_rarp, 1, rte_memory_order_release);\n \tvdpa_dev = dev->vdpa_dev;\n \tif (vdpa_dev && vdpa_dev->ops->migration_done)\n \t\tvdpa_dev->ops->migration_done(dev->vid);\ndiff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex 759a78e..8af20f1 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -298,8 +298,8 @@\n \n \tvhost_log_cache_sync(dev, vq);\n \n-\t__atomic_fetch_add(&vq->used->idx, vq->shadow_used_idx,\n-\t\t\t   __ATOMIC_RELEASE);\n+\trte_atomic_fetch_add_explicit((unsigned short __rte_atomic *)&vq->used->idx,\n+\t\tvq->shadow_used_idx, rte_memory_order_release);\n \tvq->shadow_used_idx = 0;\n \tvhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),\n \t\tsizeof(vq->used->idx));\n@@ -335,7 +335,7 @@\n \t}\n \n \t/* The ordering for storing desc flags needs to be enforced. 
*/\n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \n \tfor (i = 0; i < vq->shadow_used_idx; i++) {\n \t\tuint16_t flags;\n@@ -387,8 +387,9 @@\n \n \tvq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;\n \t/* desc flags is the synchronization point for virtio packed vring */\n-\t__atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,\n-\t\t\t used_elem->flags, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(\n+\t\t(unsigned short __rte_atomic *)&vq->desc_packed[vq->shadow_last_used_idx].flags,\n+\t\tused_elem->flags, rte_memory_order_release);\n \n \tvhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *\n \t\t\t\t   sizeof(struct vring_packed_desc),\n@@ -418,7 +419,7 @@\n \t\tdesc_base[i].len = lens[i];\n \t}\n \n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \n \tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {\n \t\tdesc_base[i].flags = flags;\n@@ -515,7 +516,7 @@\n \t\tvq->desc_packed[vq->last_used_idx + i].len = 0;\n \t}\n \n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \tvhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)\n \t\tvq->desc_packed[vq->last_used_idx + i].flags = flags;\n \n@@ -1415,7 +1416,8 @@\n \t * The ordering between avail index and\n \t * desc reads needs to be enforced.\n \t */\n-\tavail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);\n+\tavail_head = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,\n+\t\trte_memory_order_acquire);\n \n \trte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);\n \n@@ -1806,7 +1808,8 @@\n \t/*\n \t * The ordering between avail index and desc reads need to be enforced.\n \t */\n-\tavail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);\n+\tavail_head = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,\n+\t\trte_memory_order_acquire);\n \n \trte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);\n \n@@ -2222,7 +2225,7 @@\n \t}\n \n \t/* The ordering for storing desc flags needs to be enforced. 
*/\n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n \n \tfrom = async->last_buffer_idx_packed;\n \n@@ -2311,7 +2314,9 @@\n \t\t\tvhost_vring_call_packed(dev, vq);\n \t\t} else {\n \t\t\twrite_back_completed_descs_split(vq, n_descs);\n-\t\t\t__atomic_fetch_add(&vq->used->idx, n_descs, __ATOMIC_RELEASE);\n+\t\t\trte_atomic_fetch_add_explicit(\n+\t\t\t\t(unsigned short __rte_atomic *)&vq->used->idx,\n+\t\t\t\tn_descs, rte_memory_order_release);\n \t\t\tvhost_vring_call_split(dev, vq);\n \t\t}\n \t} else {\n@@ -3085,8 +3090,8 @@\n \t * The ordering between avail index and\n \t * desc reads needs to be enforced.\n \t */\n-\tavail_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -\n-\t\t\tvq->last_avail_idx;\n+\tavail_entries = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,\n+\t\trte_memory_order_acquire) - vq->last_avail_idx;\n \tif (avail_entries == 0)\n \t\treturn 0;\n \n@@ -3224,7 +3229,7 @@\n \t\t\treturn -1;\n \t}\n \n-\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\trte_atomic_thread_fence(rte_memory_order_acquire);\n \n \tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)\n \t\tlens[i] = descs[avail_idx + i].len;\n@@ -3297,7 +3302,7 @@\n \t\t\treturn -1;\n \t}\n \n-\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\trte_atomic_thread_fence(rte_memory_order_acquire);\n \n \tvhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)\n \t\tlens[i] = descs[avail_idx + i].len;\n@@ -3590,7 +3595,7 @@\n \t *\n \t * broadcast_rarp shares a cacheline in the virtio_net structure\n \t * with some fields that are accessed during enqueue and\n-\t * __atomic_compare_exchange_n causes a write if performed compare\n+\t * rte_atomic_compare_exchange_strong_explicit causes a write if performed compare\n \t * and exchange. 
This could result in false sharing between enqueue\n \t * and dequeue.\n \t *\n@@ -3598,9 +3603,9 @@\n \t * and only performing compare and exchange if the read indicates it\n \t * is likely to be set.\n \t */\n-\tif (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&\n-\t\t\t__atomic_compare_exchange_n(&dev->broadcast_rarp,\n-\t\t\t&success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {\n+\tif (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&\n+\t\t\trte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,\n+\t\t\t&success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {\n \n \t\trarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);\n \t\tif (rarp_mbuf == NULL) {\n@@ -3683,7 +3688,8 @@\n \t\tvhost_vring_call_packed(dev, vq);\n \t} else {\n \t\twrite_back_completed_descs_split(vq, nr_cpl_pkts);\n-\t\t__atomic_fetch_add(&vq->used->idx, nr_cpl_pkts, __ATOMIC_RELEASE);\n+\t\trte_atomic_fetch_add_explicit((unsigned short __rte_atomic *)&vq->used->idx,\n+\t\t\tnr_cpl_pkts, rte_memory_order_release);\n \t\tvhost_vring_call_split(dev, vq);\n \t}\n \tvq->async->pkts_inflight_n -= nr_cpl_pkts;\n@@ -3714,8 +3720,8 @@\n \t * The ordering between avail index and\n \t * desc reads needs to be enforced.\n \t */\n-\tavail_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -\n-\t\t\tvq->last_avail_idx;\n+\tavail_entries = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,\n+\t\trte_memory_order_acquire) - vq->last_avail_idx;\n \tif (avail_entries == 0)\n \t\tgoto out;\n \n@@ -4204,7 +4210,7 @@\n \t *\n \t * broadcast_rarp shares a cacheline in the virtio_net structure\n \t * with some fields that are accessed during enqueue and\n-\t * __atomic_compare_exchange_n causes a write if performed compare\n+\t * rte_atomic_compare_exchange_strong_explicit causes a write if performed compare\n \t * and exchange. 
This could result in false sharing between enqueue\n \t * and dequeue.\n \t *\n@@ -4212,9 +4218,9 @@\n \t * and only performing compare and exchange if the read indicates it\n \t * is likely to be set.\n \t */\n-\tif (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&\n-\t\t\t__atomic_compare_exchange_n(&dev->broadcast_rarp,\n-\t\t\t&success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {\n+\tif (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&\n+\t\t\trte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,\n+\t\t\t&success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {\n \n \t\trarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);\n \t\tif (rarp_mbuf == NULL) {\ndiff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c\nindex 6b583a0..c4847f8 100644\n--- a/lib/vhost/virtio_net_ctrl.c\n+++ b/lib/vhost/virtio_net_ctrl.c\n@@ -33,7 +33,8 @@ struct virtio_net_ctrl_elem {\n \tuint8_t *ctrl_req;\n \tstruct vring_desc *descs;\n \n-\tavail_idx = __atomic_load_n(&cvq->avail->idx, __ATOMIC_ACQUIRE);\n+\tavail_idx = rte_atomic_load_explicit((unsigned short __rte_atomic *)&cvq->avail->idx,\n+\t\trte_memory_order_acquire);\n \tif (avail_idx == cvq->last_avail_idx) {\n \t\tVHOST_LOG_CONFIG(dev->ifname, DEBUG, \"Control queue empty\\n\");\n \t\treturn 0;\n@@ -236,7 +237,8 @@ struct virtio_net_ctrl_elem {\n \tif (cvq->last_used_idx >= cvq->size)\n \t\tcvq->last_used_idx -= cvq->size;\n \n-\t__atomic_store_n(&cvq->used->idx, cvq->last_used_idx, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit((unsigned short __rte_atomic *)&cvq->used->idx,\n+\t\tcvq->last_used_idx, rte_memory_order_release);\n \n \tvhost_vring_call_split(dev, dev->cvq);\n \n",
    "prefixes": [
        "v2",
        "13/19"
    ]
}
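
The GET endpoint above can be driven by any HTTP client. What follows is a minimal sketch, not part of the Patchwork output: it assumes only the Python standard library, reuses the patch URL and the "mbox" field shown in the response above, and the ?format=json parameter and the local file name are illustrative choices.

import json
import urllib.request

# Patch detail endpoint from the response above; ?format=json asks the
# REST framework for a plain JSON rendering.
PATCH_URL = "http://patches.dpdk.org/api/patches/132816/?format=json"

# Fetch the patch metadata and decode the JSON body.
with urllib.request.urlopen(PATCH_URL) as resp:
    patch = json.load(resp)

print(patch["name"], "-", patch["state"])

# The "mbox" field points at the raw patch; save it locally so it can be
# applied with git. The file name here is arbitrary.
with urllib.request.urlopen(patch["mbox"]) as resp:
    data = resp.read()

with open("patch-132816.mbox", "wb") as f:
    f.write(data)

Saving the mbox this way makes it straightforward to apply the change locally, for example with git am patch-132816.mbox.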