get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

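For example, the patch record shown in the response below can be fetched with a plain GET against this endpoint. A minimal sketch using Python with the third-party requests library (the library choice, and the assumption that the API returns JSON to a non-browser client, are mine and not part of this listing):

    import requests

    # Fetch patch 138899; the API is assumed to return JSON to a plain
    # HTTP client, with the same fields as the response shown below.
    resp = requests.get("http://patches.dpdk.org/api/patches/138899/")
    resp.raise_for_status()
    patch = resp.json()

    # e.g. "[v3,23/45] event/opdl: use rte stdatomic API" and "superseded"
    print(patch["name"], patch["state"])
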
GET /api/patches/138899/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 138899,
    "url": "http://patches.dpdk.org/api/patches/138899/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-24-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1711579078-10624-24-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1711579078-10624-24-git-send-email-roretzla@linux.microsoft.com",
    "date": "2024-03-27T22:37:36",
    "name": "[v3,23/45] event/opdl: use rte stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "98def70de68093bddc05213ebda35368039844ac",
    "submitter": {
        "id": 2077,
        "url": "http://patches.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1711579078-10624-24-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 31633,
            "url": "http://patches.dpdk.org/api/series/31633/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31633",
            "date": "2024-03-27T22:37:13",
            "name": "use stdatomic API",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/31633/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/138899/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/138899/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C84EF43D55;\n\tWed, 27 Mar 2024 23:40:23 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E2B4242DFD;\n\tWed, 27 Mar 2024 23:38:34 +0100 (CET)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id AAF534161A\n for <dev@dpdk.org>; Wed, 27 Mar 2024 23:38:05 +0100 (CET)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id 705A120E6AFE; Wed, 27 Mar 2024 15:38:00 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com 705A120E6AFE",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1711579081;\n bh=5J87jBEK97Q0K3S7fHZx8Q+coAj4Gma722CJzo5PJD8=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=pa2Rm9mSykDhKeDSXKpcjCdsR/Z9pP0FgXVcwLFH5VK4tiEPIXgizCfg7eBhZqYnt\n 1Q5qnvOc/4y66jsuCqoWaCLSYq0OrRZjeLHjeSRiklPYJOUjP83AR0e+2kuFP2EuQM\n phqMhkYnEMuoGKC124lY5H+z844zNMZ5VFoz6dLw=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>,\n\t=?utf-8?q?Morten_Br=C3=B8rup?= <mb@smartsharesystems.com>,\n Abdullah Sevincer <abdullah.sevincer@intel.com>,\n Ajit Khaparde <ajit.khaparde@broadcom.com>, Alok Prasad <palok@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Anoob Joseph <anoobj@marvell.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Byron Marohn <byron.marohn@intel.com>, Chenbo Xia <chenbox@nvidia.com>,\n Chengwen Feng <fengchengwen@huawei.com>,\n Ciara Loftus <ciara.loftus@intel.com>, Ciara Power <ciara.power@intel.com>,\n Dariusz Sosnowski <dsosnowski@nvidia.com>, David Hunt <david.hunt@intel.com>,\n Devendra Singh Rawat <dsinghrawat@marvell.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Guoyang Zhou <zhouguoyang@huawei.com>, Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jakub Grajciar <jgrajcia@cisco.com>, Jerin Jacob <jerinj@marvell.com>,\n Jeroen de Borst <jeroendb@google.com>, Jian Wang <jianwang@trustnetic.com>,\n Jiawen Wu <jiawenwu@trustnetic.com>, Jie Hai <haijie1@huawei.com>,\n Jingjing Wu <jingjing.wu@intel.com>, Joshua Washington <joshwash@google.com>,\n Joyce Kong <joyce.kong@arm.com>, Junfeng Guo <junfeng.guo@intel.com>,\n Kevin Laatz <kevin.laatz@intel.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Liang Ma <liangma@liangbit.com>, Long Li <longli@microsoft.com>,\n Maciej Czekaj <mczekaj@marvell.com>, Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>, Ori Kam <orika@nvidia.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Peter Mccarthy <peter.mccarthy@intel.com>,\n Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>,\n Reshma Pattan <reshma.pattan@intel.com>, Rosen Xu <rosen.xu@intel.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, Rushil Gupta <rushilg@google.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Somnath Kotur <somnath.kotur@broadcom.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Sunil Uttarwar <sunilprakashrao.uttarwar@amd.com>,\n Tetsuya Mukawa <mtetsuyah@gmail.com>, Vamsi Attunuru <vattunuru@marvell.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>,\n Yipeng Wang <yipeng1.wang@intel.com>, Yisen Zhuang <yisen.zhuang@huawei.com>,\n Yuying Zhang <Yuying.Zhang@intel.com>, Yuying Zhang <yuying.zhang@intel.com>,\n Ziyang Xuan <xuanziyang2@huawei.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 23/45] event/opdl: use rte stdatomic API",
        "Date": "Wed, 27 Mar 2024 15:37:36 -0700",
        "Message-Id": "<1711579078-10624-24-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>\n <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional rte stdatomic API.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\nAcked-by: Stephen Hemminger <stephen@networkplumber.org>\n---\n drivers/event/opdl/opdl_ring.c | 80 +++++++++++++++++++++---------------------\n 1 file changed, 40 insertions(+), 40 deletions(-)",
    "diff": "diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c\nindex da5ea02..a86bfb8 100644\n--- a/drivers/event/opdl/opdl_ring.c\n+++ b/drivers/event/opdl/opdl_ring.c\n@@ -47,12 +47,12 @@ struct shared_state {\n \t/* Last known minimum sequence number of dependencies, used for multi\n \t * thread operation\n \t */\n-\tuint32_t available_seq;\n+\tRTE_ATOMIC(uint32_t) available_seq;\n \tchar _pad1[RTE_CACHE_LINE_SIZE * 3];\n-\tuint32_t head;  /* Head sequence number (for multi thread operation) */\n+\tRTE_ATOMIC(uint32_t) head;  /* Head sequence number (for multi thread operation) */\n \tchar _pad2[RTE_CACHE_LINE_SIZE * 3];\n \tstruct opdl_stage *stage;  /* back pointer */\n-\tuint32_t tail;  /* Tail sequence number */\n+\tRTE_ATOMIC(uint32_t) tail;  /* Tail sequence number */\n \tchar _pad3[RTE_CACHE_LINE_SIZE * 2];\n } __rte_cache_aligned;\n \n@@ -150,10 +150,10 @@ struct opdl_ring {\n available(const struct opdl_stage *s)\n {\n \tif (s->threadsafe == true) {\n-\t\tuint32_t n = __atomic_load_n(&s->shared.available_seq,\n-\t\t\t\t__ATOMIC_ACQUIRE) -\n-\t\t\t\t__atomic_load_n(&s->shared.head,\n-\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tuint32_t n = rte_atomic_load_explicit(&s->shared.available_seq,\n+\t\t\t\trte_memory_order_acquire) -\n+\t\t\t\trte_atomic_load_explicit(&s->shared.head,\n+\t\t\t\trte_memory_order_acquire);\n \n \t\t/* Return 0 if available_seq needs to be updated */\n \t\treturn (n <= s->num_slots) ? n : 0;\n@@ -169,7 +169,7 @@ struct opdl_ring {\n {\n \tuint32_t i;\n \tuint32_t this_tail = s->shared.tail;\n-\tuint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);\n+\tuint32_t min_seq = rte_atomic_load_explicit(&s->deps[0]->tail, rte_memory_order_acquire);\n \t/* Input stage sequence numbers are greater than the sequence numbers of\n \t * its dependencies so an offset of t->num_slots is needed when\n \t * calculating available slots and also the condition which is used to\n@@ -180,16 +180,16 @@ struct opdl_ring {\n \tif (is_input_stage(s)) {\n \t\twrap = s->num_slots;\n \t\tfor (i = 1; i < s->num_deps; i++) {\n-\t\t\tuint32_t seq = __atomic_load_n(&s->deps[i]->tail,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\tuint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,\n+\t\t\t\t\trte_memory_order_acquire);\n \t\t\tif ((this_tail - seq) > (this_tail - min_seq))\n \t\t\t\tmin_seq = seq;\n \t\t}\n \t} else {\n \t\twrap = 0;\n \t\tfor (i = 1; i < s->num_deps; i++) {\n-\t\t\tuint32_t seq = __atomic_load_n(&s->deps[i]->tail,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\tuint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,\n+\t\t\t\t\trte_memory_order_acquire);\n \t\t\tif ((seq - this_tail) < (min_seq - this_tail))\n \t\t\t\tmin_seq = seq;\n \t\t}\n@@ -198,8 +198,8 @@ struct opdl_ring {\n \tif (s->threadsafe == false)\n \t\ts->available_seq = min_seq + wrap;\n \telse\n-\t\t__atomic_store_n(&s->shared.available_seq, min_seq + wrap,\n-\t\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&s->shared.available_seq, min_seq + wrap,\n+\t\t\t\trte_memory_order_release);\n }\n \n /* Wait until the number of available slots reaches number requested */\n@@ -299,7 +299,7 @@ struct opdl_ring {\n \tcopy_entries_in(t, head, entries, num_entries);\n \n \ts->head += num_entries;\n-\t__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);\n \n \treturn num_entries;\n }\n@@ -382,18 +382,18 @@ struct opdl_ring {\n \t\t/* There should be no race condition here. 
If shared.tail\n \t\t * matches, no other core can update it until this one does.\n \t\t */\n-\t\tif (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==\n+\t\tif (rte_atomic_load_explicit(&s->shared.tail, rte_memory_order_acquire) ==\n \t\t\t\ttail) {\n \t\t\tif (num_entries >= (head - tail)) {\n \t\t\t\tclaim_mgr_remove(disclaims);\n-\t\t\t\t__atomic_store_n(&s->shared.tail, head,\n-\t\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\trte_atomic_store_explicit(&s->shared.tail, head,\n+\t\t\t\t\t\trte_memory_order_release);\n \t\t\t\tnum_entries -= (head - tail);\n \t\t\t} else {\n \t\t\t\tclaim_mgr_move_tail(disclaims, num_entries);\n-\t\t\t\t__atomic_store_n(&s->shared.tail,\n+\t\t\t\trte_atomic_store_explicit(&s->shared.tail,\n \t\t\t\t\t\tnum_entries + tail,\n-\t\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\t\trte_memory_order_release);\n \t\t\t\tnum_entries = 0;\n \t\t\t}\n \t\t} else if (block == false)\n@@ -421,7 +421,7 @@ struct opdl_ring {\n \topdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,\n \t\t\tfalse);\n \n-\t*old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);\n+\t*old_head = rte_atomic_load_explicit(&s->shared.head, rte_memory_order_acquire);\n \twhile (true) {\n \t\tbool success;\n \t\t/* If called by opdl_ring_input(), claim does not need to be\n@@ -441,11 +441,10 @@ struct opdl_ring {\n \t\tif (*num_entries == 0)\n \t\t\treturn;\n \n-\t\tsuccess = __atomic_compare_exchange_n(&s->shared.head, old_head,\n+\t\tsuccess = rte_atomic_compare_exchange_weak_explicit(&s->shared.head, old_head,\n \t\t\t\t*old_head + *num_entries,\n-\t\t\t\ttrue,  /* may fail spuriously */\n-\t\t\t\t__ATOMIC_RELEASE,  /* memory order on success */\n-\t\t\t\t__ATOMIC_ACQUIRE);  /* memory order on fail */\n+\t\t\t\trte_memory_order_release,  /* memory order on success */\n+\t\t\t\trte_memory_order_acquire);  /* memory order on fail */\n \t\tif (likely(success))\n \t\t\tbreak;\n \t\trte_pause();\n@@ -473,10 +472,11 @@ struct opdl_ring {\n \t/* If another thread started inputting before this one, but hasn't\n \t * finished, we need to wait for it to complete to update the tail.\n \t */\n-\trte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->shared.tail, old_head,\n+\t    rte_memory_order_acquire);\n \n-\t__atomic_store_n(&s->shared.tail, old_head + num_entries,\n-\t\t\t__ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&s->shared.tail, old_head + num_entries,\n+\t\t\trte_memory_order_release);\n \n \treturn num_entries;\n }\n@@ -526,8 +526,8 @@ struct opdl_ring {\n \t\tfor (j = 0; j < num_entries; j++) {\n \t\t\tev = (struct rte_event *)get_slot(t, s->head+j);\n \n-\t\t\tevent  = __atomic_load_n(&(ev->event),\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\tevent  = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev->event,\n+\t\t\t\t\trte_memory_order_acquire);\n \n \t\t\topa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);\n \t\t\tflow_id  = OPDL_FLOWID_MASK & event;\n@@ -628,8 +628,8 @@ struct opdl_ring {\n \t\t\t\tnum_entries, s->head - old_tail);\n \t\tnum_entries = s->head - old_tail;\n \t}\n-\t__atomic_store_n(&s->shared.tail, num_entries + old_tail,\n-\t\t\t__ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&s->shared.tail, num_entries + old_tail,\n+\t\t\trte_memory_order_release);\n }\n \n uint32_t\n@@ -658,7 +658,7 @@ struct opdl_ring {\n \tcopy_entries_in(t, head, entries, num_entries);\n \n \ts->head += num_entries;\n-\t__atomic_store_n(&s->shared.tail, s->head, 
__ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);\n \n \treturn num_entries;\n \n@@ -677,7 +677,7 @@ struct opdl_ring {\n \tcopy_entries_out(t, head, entries, num_entries);\n \n \ts->head += num_entries;\n-\t__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);\n \n \treturn num_entries;\n }\n@@ -756,7 +756,7 @@ struct opdl_ring {\n \t\treturn 0;\n \t}\n \tif (s->threadsafe == false) {\n-\t\t__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);\n+\t\trte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);\n \t\ts->seq += s->num_claimed;\n \t\ts->shadow_head = s->head;\n \t\ts->num_claimed = 0;\n@@ -1009,8 +1009,8 @@ struct opdl_ring *\n \t\t\tev_orig = (struct rte_event *)\n \t\t\t\tget_slot(t, s->shadow_head+i);\n \n-\t\t\tevent  = __atomic_load_n(&(ev_orig->event),\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\tevent  = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev_orig->event,\n+\t\t\t\t\trte_memory_order_acquire);\n \n \t\t\topa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);\n \t\t\tflow_id  = OPDL_FLOWID_MASK & event;\n@@ -1027,9 +1027,9 @@ struct opdl_ring *\n \n \t\t\t\tif ((event & OPDL_EVENT_MASK) !=\n \t\t\t\t\t\tev_temp) {\n-\t\t\t\t\t__atomic_store_n(&(ev_orig->event),\n-\t\t\t\t\t\t\tev_update,\n-\t\t\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\trte_atomic_store_explicit(\n+\t\t\t\t\t\t(uint64_t __rte_atomic *)&ev_orig->event,\n+\t\t\t\t\t\tev_update, rte_memory_order_release);\n \t\t\t\t\tev_updated = true;\n \t\t\t\t}\n \t\t\t\tif (ev_orig->u64 != ev->u64) {\n",
    "prefixes": [
        "v3",
        "23/45"
    ]
}
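
The PATCH and PUT methods listed at the top (and advertised in the Allow header) update this same resource. A hedged sketch of a partial update using the same Python requests library; the token is a placeholder, and whether a field such as "state" is writable depends on authentication and maintainer permissions, which this listing does not show:

    import requests

    # Hypothetical partial update of patch 138899. Write access is assumed
    # to require a Patchwork API token ("Authorization: Token ..."); the
    # value below is a placeholder, not a real credential.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/138899/",
        headers={"Authorization": "Token <your-api-token>"},
        json={"state": "superseded"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])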