From patchwork Mon Oct 16 23:09:01 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Tyler Retzlaff
X-Patchwork-Id: 132677
X-Patchwork-Delegate: david.marchand@redhat.com
From: Tyler Retzlaff
To: dev@dpdk.org
Cc: Akhil Goyal, Anatoly Burakov, Andrew Rybchenko, Bruce Richardson,
 Chenbo Xia, Ciara Power, David Christensen, David Hunt, Dmitry Kozlyuk,
 Dmitry Malloy, Elena Agostini, Erik Gabriel Carrillo, Fan Zhang,
 Ferruh Yigit, Harman Kalra, Harry van Haaren, Honnappa Nagarahalli,
 Jerin Jacob, Konstantin Ananyev, Matan Azrad, Maxime Coquelin,
 Narcisa Ana Maria Vasile, Nicolas Chautru, Olivier Matz, Ori Kam,
 Pallavi Kadam, Pavan Nikhilesh, Reshma Pattan, Sameh Gobriel,
 Shijith Thotton, Sivaprasad Tummala, Stephen Hemminger, Suanming Mou,
 Sunil Kumar Kori, Thomas Monjalon, Viacheslav Ovsiienko,
 Vladimir Medvedkin, Yipeng Wang, Tyler Retzlaff
Subject: [PATCH 17/21] distributor: use rte optional stdatomic API
Date: Mon, 16 Oct 2023 16:09:01 -0700
Message-Id: <1697497745-20664-18-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
References: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
List-Id: DPDK patches and discussions

Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional stdatomic API

Signed-off-by: Tyler Retzlaff
---
 lib/distributor/distributor_private.h |  4 +--
 lib/distributor/rte_distributor.c     | 54 +++++++++++++++++------------------
 2 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h
index 2f29343..dfeb9b5 100644
--- a/lib/distributor/distributor_private.h
+++ b/lib/distributor/distributor_private.h
@@ -113,12 +113,12 @@ enum rte_distributor_match_function {
  * There is a separate cacheline for returns in the burst API.
  */
 struct rte_distributor_buffer {
-	volatile int64_t bufptr64[RTE_DIST_BURST_SIZE]
+	volatile RTE_ATOMIC(int64_t) bufptr64[RTE_DIST_BURST_SIZE]
 		__rte_cache_aligned; /* <= outgoing to worker */
 
 	int64_t pad1 __rte_cache_aligned;    /* <= one cache line  */
 
-	volatile int64_t retptr64[RTE_DIST_BURST_SIZE]
+	volatile RTE_ATOMIC(int64_t) retptr64[RTE_DIST_BURST_SIZE]
 		__rte_cache_aligned; /* <= incoming from worker */
 
 	int64_t pad2 __rte_cache_aligned;    /* <= one cache line  */
diff --git a/lib/distributor/rte_distributor.c b/lib/distributor/rte_distributor.c
index 5ca80dd..2ecb95c 100644
--- a/lib/distributor/rte_distributor.c
+++ b/lib/distributor/rte_distributor.c
@@ -38,7 +38,7 @@
 	struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
 	unsigned int i;
 
-	volatile int64_t *retptr64;
+	volatile RTE_ATOMIC(int64_t) *retptr64;
 
 	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
 		rte_distributor_request_pkt_single(d->d_single,
@@ -50,7 +50,7 @@
 	/* Spin while handshake bits are set (scheduler clears it).
 	 * Sync with worker on GET_BUF flag.
 	 */
-	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
+	while (unlikely(rte_atomic_load_explicit(retptr64, rte_memory_order_acquire)
 			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
 		rte_pause();
 		uint64_t t = rte_rdtsc()+100;
@@ -78,8 +78,8 @@
 	 * line is ready for processing
 	 * Sync with distributor to release retptrs
 	 */
-	__atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
-			__ATOMIC_RELEASE);
+	rte_atomic_store_explicit(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
+			rte_memory_order_release);
 }
 
 int
@@ -102,7 +102,7 @@
 	 * RETURN_BUF is set when distributor must retrieve in-flight packets
 	 * Sync with distributor to acquire bufptrs
 	 */
-	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
+	if (rte_atomic_load_explicit(&(buf->bufptr64[0]), rte_memory_order_acquire)
 		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))
 		return -1;
 
@@ -120,8 +120,8 @@
 	 * on the next cacheline while we're working.
 	 * Sync with distributor on GET_BUF flag. Release bufptrs.
 	 */
-	__atomic_store_n(&(buf->bufptr64[0]),
-		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&(buf->bufptr64[0]),
+		buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, rte_memory_order_release);
 
 	return count;
 }
@@ -177,7 +177,7 @@
 	/* Spin while handshake bits are set (scheduler clears it).
 	 * Sync with worker on GET_BUF flag.
 	 */
-	while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED)
+	while (unlikely(rte_atomic_load_explicit(&(buf->retptr64[0]), rte_memory_order_relaxed)
 			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
 		rte_pause();
 		uint64_t t = rte_rdtsc()+100;
@@ -187,7 +187,7 @@
 	}
 
 	/* Sync with distributor to acquire retptrs */
-	__atomic_thread_fence(__ATOMIC_ACQUIRE);
+	__atomic_thread_fence(rte_memory_order_acquire);
 	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
 		/* Switch off the return bit first */
 		buf->retptr64[i] = 0;
@@ -200,15 +200,15 @@
 	 * we won't read any mbufs from there even if GET_BUF is set.
 	 * This allows distributor to retrieve in-flight already sent packets.
 	 */
-	__atomic_fetch_or(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF,
-			__ATOMIC_ACQ_REL);
+	rte_atomic_fetch_or_explicit(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF,
+			rte_memory_order_acq_rel);
 
 	/* set the RETURN_BUF on retptr64 even if we got no returns.
 	 * Sync with distributor on RETURN_BUF flag. Release retptrs.
 	 * Notify distributor that we don't request more packets any more.
 	 */
-	__atomic_store_n(&(buf->retptr64[0]),
-		buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&(buf->retptr64[0]),
+		buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, rte_memory_order_release);
 
 	return 0;
 }
@@ -297,7 +297,7 @@
 	 * to worker which does not require new packets.
 	 * They must be retrieved and assigned to another worker.
 	 */
-	if (!(__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
+	if (!(rte_atomic_load_explicit(&(buf->bufptr64[0]), rte_memory_order_acquire)
 		& RTE_DISTRIB_GET_BUF))
 		for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
 			if (buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)
@@ -310,8 +310,8 @@
 	 * with new packets if worker will make a new request.
 	 * - clear RETURN_BUF to unlock reads on worker side.
 	 */
-	__atomic_store_n(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF,
-		__ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF,
+		rte_memory_order_release);
 
 	/* Collect backlog packets from worker */
 	for (i = 0; i < d->backlog[wkr].count; i++)
@@ -348,7 +348,7 @@
 	unsigned int i;
 
 	/* Sync on GET_BUF flag. Acquire retptrs. */
-	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
+	if (rte_atomic_load_explicit(&(buf->retptr64[0]), rte_memory_order_acquire)
 		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) {
 		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
 			if (buf->retptr64[i] & RTE_DISTRIB_VALID_BUF) {
@@ -379,7 +379,7 @@
 		/* Clear for the worker to populate with more returns.
 		 * Sync with distributor on GET_BUF flag. Release retptrs.
 		 */
-		__atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
+		rte_atomic_store_explicit(&(buf->retptr64[0]), 0, rte_memory_order_release);
 	}
 	return count;
 }
@@ -404,7 +404,7 @@
 		return 0;
 
 	/* Sync with worker on GET_BUF flag */
-	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
+	while (!(rte_atomic_load_explicit(&(d->bufs[wkr].bufptr64[0]), rte_memory_order_acquire)
 		& RTE_DISTRIB_GET_BUF)) {
 		handle_returns(d, wkr);
 		if (unlikely(!d->active[wkr]))
@@ -430,8 +430,8 @@
 	/* Clear the GET bit.
 	 * Sync with worker on GET_BUF flag. Release bufptrs.
 	 */
-	__atomic_store_n(&(buf->bufptr64[0]),
-		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&(buf->bufptr64[0]),
+		buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, rte_memory_order_release);
 
 	return buf->count;
 }
@@ -463,8 +463,8 @@
 	/* Flush out all non-full cache-lines to workers. */
 	for (wid = 0 ; wid < d->num_workers; wid++) {
 		/* Sync with worker on GET_BUF flag. */
-		if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
-			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
+		if (rte_atomic_load_explicit(&(d->bufs[wid].bufptr64[0]),
+			rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF) {
 			d->bufs[wid].count = 0;
 			release(d, wid);
 			handle_returns(d, wid);
@@ -598,8 +598,8 @@
 	/* Flush out all non-full cache-lines to workers. */
 	for (wid = 0 ; wid < d->num_workers; wid++)
 		/* Sync with worker on GET_BUF flag. */
-		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
-			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
+		if ((rte_atomic_load_explicit(&(d->bufs[wid].bufptr64[0]),
+			rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) {
 			d->bufs[wid].count = 0;
 			release(d, wid);
 		}
@@ -700,8 +700,8 @@
 	/* throw away returns, so workers can exit */
 	for (wkr = 0; wkr < d->num_workers; wkr++)
 		/* Sync with worker. Release retptrs. */
-		__atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
-				__ATOMIC_RELEASE);
+		rte_atomic_store_explicit(&(d->bufs[wkr].retptr64[0]), 0,
+				rte_memory_order_release);
 
 	d->returns.start = d->returns.count = 0;
 }
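--

For reference, below is a minimal standalone sketch (not part of the patch) of the
acquire/release handshake pattern every hunk above converts, written against the
optional stdatomic wrappers used in the diff (RTE_ATOMIC(), rte_atomic_load_explicit(),
rte_atomic_store_explicit(), rte_memory_order_*). The header name rte_stdatomic.h and
all EXAMPLE_* identifiers are assumptions for illustration only; the real code operates
on the RTE_DISTRIB_* flags and the per-worker bufptr64[]/retptr64[] buffers shown above.

#include <stdint.h>
#include <rte_stdatomic.h>	/* assumed header providing the RTE_ATOMIC wrappers */

/* Hypothetical flag, standing in for RTE_DISTRIB_GET_BUF in the real code. */
#define EXAMPLE_GET_BUF ((int64_t)1)

/* Flag word shared between a worker and the distributor, typed with the
 * optional-stdatomic qualifier just like bufptr64[]/retptr64[] above.
 */
static volatile RTE_ATOMIC(int64_t) example_flags;

/* Worker side: set the handshake bit with release ordering so data written
 * before this store is visible to whoever later observes the bit.
 */
static void
example_worker_publish(void)
{
	rte_atomic_store_explicit(&example_flags,
		example_flags | EXAMPLE_GET_BUF, rte_memory_order_release);
}

/* Distributor side: acquire-load the flag word before reading the data the
 * worker published behind it.
 */
static int
example_flag_is_set(void)
{
	return (rte_atomic_load_explicit(&example_flags,
		rte_memory_order_acquire) & EXAMPLE_GET_BUF) != 0;
}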