get:
Show a patch.

patch:
Update a patch (partial update).

put:
Update a patch (full update).
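
Both update operations typically require an authenticated account with maintainer rights on the project. A request along the following lines would, for example, change the patch state; this is a sketch only: the token is a placeholder, and the exact set of writable fields and accepted state names depends on the Patchwork instance.

PATCH /api/patches/138892/
Authorization: Token <api-token>
Content-Type: application/json

{"state": "accepted"}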

GET /api/patches/138892/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 138892,
    "url": "http://patches.dpdk.org/api/patches/138892/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-17-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1711579078-10624-17-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1711579078-10624-17-git-send-email-roretzla@linux.microsoft.com",
    "date": "2024-03-27T22:37:29",
    "name": "[v3,16/45] net/virtio: use rte stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "a0a0d35f51e3fa480941e2541caa3015ebfbcef4",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-17-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 31633,
            "url": "http://patches.dpdk.org/api/series/31633/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31633",
            "date": "2024-03-27T22:37:13",
            "name": "use stdatomic API",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/31633/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/138892/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/138892/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 819D743D55;\n\tWed, 27 Mar 2024 23:39:44 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id BA7A242DD7;\n\tWed, 27 Mar 2024 23:38:26 +0100 (CET)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id D183F41140\n for <dev@dpdk.org>; Wed, 27 Mar 2024 23:38:03 +0100 (CET)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id EAA7A20E6AEA; Wed, 27 Mar 2024 15:38:00 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com EAA7A20E6AEA",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1711579080;\n bh=yr5jW6sAut4Onozi6vnRBA4DVQzwWJJClXZlhYXYW/Y=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=RPIga6ZQg1+bQ2cu1ZHIwaaYZs/w4obJdESVlLRMt1mdV4jhPCKCf1D4NBEfDzRZN\n jrqDMdCHX7JAGPTL+HRSlBEo03hNAOjv3LihKjl1LwsF3/PWS0b/IqlGuNY3ahUPfT\n hmKt2wJaSKhfshSnruyThC8Eh2bC8IKRwaKH8DwI=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>,\n\t=?utf-8?q?Morten_Br=C3=B8rup?= <mb@smartsharesystems.com>,\n Abdullah Sevincer <abdullah.sevincer@intel.com>,\n Ajit Khaparde <ajit.khaparde@broadcom.com>, Alok Prasad <palok@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Anoob Joseph <anoobj@marvell.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Byron Marohn <byron.marohn@intel.com>, Chenbo Xia <chenbox@nvidia.com>,\n Chengwen Feng <fengchengwen@huawei.com>,\n Ciara Loftus <ciara.loftus@intel.com>, Ciara Power <ciara.power@intel.com>,\n Dariusz Sosnowski <dsosnowski@nvidia.com>, David Hunt <david.hunt@intel.com>,\n Devendra Singh Rawat <dsinghrawat@marvell.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Guoyang Zhou <zhouguoyang@huawei.com>, Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jakub Grajciar <jgrajcia@cisco.com>, Jerin Jacob <jerinj@marvell.com>,\n Jeroen de Borst <jeroendb@google.com>, Jian Wang <jianwang@trustnetic.com>,\n Jiawen Wu <jiawenwu@trustnetic.com>, Jie Hai <haijie1@huawei.com>,\n Jingjing Wu <jingjing.wu@intel.com>, Joshua Washington <joshwash@google.com>,\n Joyce Kong <joyce.kong@arm.com>, Junfeng Guo <junfeng.guo@intel.com>,\n Kevin Laatz <kevin.laatz@intel.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Liang Ma <liangma@liangbit.com>, Long Li <longli@microsoft.com>,\n Maciej Czekaj <mczekaj@marvell.com>, Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>, Ori Kam <orika@nvidia.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Peter Mccarthy <peter.mccarthy@intel.com>,\n Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>,\n Reshma Pattan <reshma.pattan@intel.com>, Rosen Xu <rosen.xu@intel.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, Rushil Gupta <rushilg@google.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Somnath Kotur <somnath.kotur@broadcom.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Sunil Uttarwar <sunilprakashrao.uttarwar@amd.com>,\n Tetsuya Mukawa <mtetsuyah@gmail.com>, Vamsi Attunuru <vattunuru@marvell.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>,\n Yipeng Wang <yipeng1.wang@intel.com>, Yisen Zhuang <yisen.zhuang@huawei.com>,\n Yuying Zhang <Yuying.Zhang@intel.com>, Yuying Zhang <yuying.zhang@intel.com>,\n Ziyang Xuan <xuanziyang2@huawei.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 16/45] net/virtio: use rte stdatomic API",
        "Date": "Wed, 27 Mar 2024 15:37:29 -0700",
        "Message-Id": "<1711579078-10624-17-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>\n <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional rte stdatomic API.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\nAcked-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n drivers/net/virtio/virtio_ring.h                 |  4 +--\n drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----\n drivers/net/virtio/virtqueue.h                   | 32 ++++++++++++------------\n 3 files changed, 24 insertions(+), 24 deletions(-)",
    "diff": "diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h\nindex e848c0b..2a25751 100644\n--- a/drivers/net/virtio/virtio_ring.h\n+++ b/drivers/net/virtio/virtio_ring.h\n@@ -59,7 +59,7 @@ struct vring_used_elem {\n \n struct vring_used {\n \tuint16_t flags;\n-\tuint16_t idx;\n+\tRTE_ATOMIC(uint16_t) idx;\n \tstruct vring_used_elem ring[];\n };\n \n@@ -70,7 +70,7 @@ struct vring_packed_desc {\n \tuint64_t addr;\n \tuint32_t len;\n \tuint16_t id;\n-\tuint16_t flags;\n+\tRTE_ATOMIC(uint16_t) flags;\n };\n \n #define RING_EVENT_FLAGS_ENABLE 0x0\ndiff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c\nindex 4fdfe70..24e2b2c 100644\n--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c\n+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c\n@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)\n static inline int\n desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)\n {\n-\tuint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);\n+\tuint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);\n \n \treturn wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&\n \t\twrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);\n@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)\n \t\tif (vq->used_wrap_counter)\n \t\t\tflags |= VRING_PACKED_DESC_F_AVAIL_USED;\n \n-\t\t__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,\n-\t\t\t\t __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,\n+\t\t\t\t rte_memory_order_release);\n \n \t\tvq->used_idx += n_descs;\n \t\tif (vq->used_idx >= dev->queue_size) {\n@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)\n \tstruct vring *vring = &dev->vrings.split[queue_idx];\n \n \t/* Consume avail ring, using used ring idx as first one */\n-\twhile (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)\n+\twhile (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)\n \t       != vring->avail->idx) {\n-\t\tavail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)\n+\t\tavail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)\n \t\t\t    & (vring->num - 1);\n \t\tdesc_idx = vring->avail->ring[avail_idx];\n \n@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)\n \t\tuep->id = desc_idx;\n \t\tuep->len = n_descs;\n \n-\t\t__atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);\n \t}\n }\n \ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex 5d0c039..b7bbdde 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -37,7 +37,7 @@\n virtio_mb(uint8_t weak_barriers)\n {\n \tif (weak_barriers)\n-\t\trte_atomic_thread_fence(__ATOMIC_SEQ_CST);\n+\t\trte_atomic_thread_fence(rte_memory_order_seq_cst);\n \telse\n \t\trte_mb();\n }\n@@ -46,7 +46,7 @@\n virtio_rmb(uint8_t weak_barriers)\n {\n \tif (weak_barriers)\n-\t\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\t\trte_atomic_thread_fence(rte_memory_order_acquire);\n \telse\n \t\trte_io_rmb();\n }\n@@ -55,7 +55,7 @@\n virtio_wmb(uint8_t weak_barriers)\n {\n \tif (weak_barriers)\n-\t\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\t\trte_atomic_thread_fence(rte_memory_order_release);\n \telse\n \t\trte_io_wmb();\n }\n@@ -67,12 +67,12 @@\n \tuint16_t 
flags;\n \n \tif (weak_barriers) {\n-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports\n+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports\n  * a better perf(~1.5%), which comes from the saved branch by the compiler.\n  * The if and else branch are identical  on the platforms except Arm.\n  */\n #ifdef RTE_ARCH_ARM\n-\t\tflags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);\n+\t\tflags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);\n #else\n \t\tflags = dp->flags;\n \t\trte_io_rmb();\n@@ -90,12 +90,12 @@\n \t\t\t      uint16_t flags, uint8_t weak_barriers)\n {\n \tif (weak_barriers) {\n-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports\n+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports\n  * a better perf(~1.5%), which comes from the saved branch by the compiler.\n  * The if and else branch are identical on the platforms except Arm.\n  */\n #ifdef RTE_ARCH_ARM\n-\t\t__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);\n #else\n \t\trte_io_wmb();\n \t\tdp->flags = flags;\n@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,\n \n \tif (vq->hw->weak_barriers) {\n \t/**\n-\t * x86 prefers to using rte_smp_rmb over __atomic_load_n as it\n+\t * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it\n \t * reports a slightly better perf, which comes from the saved\n \t * branch by the compiler.\n \t * The if and else branches are identical with the smp and io\n@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,\n \t\tidx = vq->vq_split.ring.used->idx;\n \t\trte_smp_rmb();\n #else\n-\t\tidx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,\n-\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tidx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,\n+\t\t\t\trte_memory_order_acquire);\n #endif\n \t} else {\n \t\tidx = vq->vq_split.ring.used->idx;\n@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,\n vq_update_avail_idx(struct virtqueue *vq)\n {\n \tif (vq->hw->weak_barriers) {\n-\t/* x86 prefers to using rte_smp_wmb over __atomic_store_n as\n+\t/* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as\n \t * it reports a slightly better perf, which comes from the\n \t * saved branch by the compiler.\n \t * The if and else branches are identical with the smp and\n@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,\n \t\trte_smp_wmb();\n \t\tvq->vq_split.ring.avail->idx = vq->vq_avail_idx;\n #else\n-\t\t__atomic_store_n(&vq->vq_split.ring.avail->idx,\n-\t\t\t\t vq->vq_avail_idx, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,\n+\t\t\t\t vq->vq_avail_idx, rte_memory_order_release);\n #endif\n \t} else {\n \t\trte_io_wmb();\n@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,\n #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP\n #define VIRTQUEUE_DUMP(vq) do { \\\n \tuint16_t used_idx, nused; \\\n-\tused_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \\\n-\t\t\t\t   __ATOMIC_RELAXED); \\\n+\tused_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \\\n+\t\t\t\t   rte_memory_order_relaxed); \\\n \tnused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \\\n \tif (virtio_with_packed_queue((vq)->hw)) { \\\n \t\tPMD_INIT_LOG(DEBUG, \\\n@@ -546,7 +546,7 @@ void 
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,\n \t  \" avail.flags=0x%x; used.flags=0x%x\", \\\n \t  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \\\n \t  (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \\\n-\t  __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \\\n+\t  rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \\\n \t  (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \\\n } while (0)\n #else\n",
    "prefixes": [
        "v3",
        "16/45"
    ]
}
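
For context, the diff above replaces direct __atomic_* builtin calls with the rte_atomic_* wrappers from <rte_stdatomic.h>. The following is a minimal sketch of that pattern, not part of the patch itself: the example_ring type and helper functions are hypothetical, and a DPDK build that provides <rte_stdatomic.h> is assumed.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Hypothetical ring structure: the index shared between producer and
 * consumer is marked RTE_ATOMIC() so it can be used with the
 * rte_atomic_*_explicit() accessors. */
struct example_ring {
    uint16_t flags;            /* not accessed concurrently */
    RTE_ATOMIC(uint16_t) idx;  /* written by producer, read by consumer */
};

/* Before the conversion: __atomic_load_n(&r->idx, __ATOMIC_ACQUIRE) */
static inline uint16_t
example_load_idx(struct example_ring *r)
{
    return rte_atomic_load_explicit(&r->idx, rte_memory_order_acquire);
}

/* Before the conversion: __atomic_fetch_add(&r->idx, 1, __ATOMIC_RELAXED) */
static inline void
example_publish_one(struct example_ring *r)
{
    rte_atomic_fetch_add_explicit(&r->idx, 1, rte_memory_order_relaxed);
}

Depending on a build-time option, these wrappers map either to the compiler builtins or to C11 stdatomic operations, which is why the commit message describes the rte stdatomic API as optional.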