From patchwork Fri Aug 11 17:32:14 2023
X-Patchwork-Submitter: Tyler Retzlaff
X-Patchwork-Id: 130184
X-Patchwork-Delegate: thomas@monjalon.net
From: Tyler Retzlaff
To: dev@dpdk.org
Cc: techboard@dpdk.org, Bruce Richardson, Honnappa Nagarahalli,
 Ruifeng Wang, Jerin Jacob, Sunil Kumar Kori, Mattias Rönnblom,
 Joyce Kong, David Christensen, Konstantin Ananyev, David Hunt,
 Thomas Monjalon, David Marchand, Tyler Retzlaff
Subject: [PATCH v2 4/6] distributor: adapt for EAL optional atomics API changes
Date: Fri, 11 Aug 2023 10:32:14 -0700
Message-Id: <1691775136-6460-5-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1691775136-6460-1-git-send-email-roretzla@linux.microsoft.com>
References: <1691717521-1025-1-git-send-email-roretzla@linux.microsoft.com>
 <1691775136-6460-1-git-send-email-roretzla@linux.microsoft.com>

Adapt distributor for the EAL optional atomics API changes: replace the
GCC __atomic_load_n()/__atomic_store_n() builtins and the __ATOMIC_*
memory-order constants with rte_atomic_load_explicit()/
rte_atomic_store_explicit() and the rte_memory_order_* values, and mark
the bufptr64 field they operate on with the __rte_atomic qualifier.

Signed-off-by: Tyler Retzlaff
Reviewed-by: Morten Brørup
---
 lib/distributor/distributor_private.h    |  2 +-
 lib/distributor/rte_distributor_single.c | 44 ++++++++++++++++----------------
 2 files changed, 23 insertions(+), 23 deletions(-)
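Note for reviewers: the conversion below is mechanical. As a minimal,
self-contained sketch of the before/after usage (the names here are
illustrative only, not taken from the distributor sources, and it assumes
the rte_stdatomic.h header introduced earlier in this series):

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* The field carries the __rte_atomic qualifier so that the
     * rte_atomic_*_explicit() wrappers can map onto C11 stdatomic when
     * that option is enabled, or onto the compiler builtins otherwise.
     */
    static volatile int64_t __rte_atomic bufptr64;

    static void
    publish(int64_t req)
    {
        /* was: __atomic_store_n(&bufptr64, req, __ATOMIC_RELEASE); */
        rte_atomic_store_explicit(&bufptr64, req, rte_memory_order_release);
    }

    static int64_t
    consume(void)
    {
        /* was: __atomic_load_n(&bufptr64, __ATOMIC_ACQUIRE); */
        return rte_atomic_load_explicit(&bufptr64, rte_memory_order_acquire);
    }

As I understand the EAL series, __rte_atomic expands to the C11 _Atomic
qualifier when the stdatomic option is enabled and to nothing otherwise,
so annotating bufptr64 is the only declaration change needed here.
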
diff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h
index 7101f63..ffbdae5 100644
--- a/lib/distributor/distributor_private.h
+++ b/lib/distributor/distributor_private.h
@@ -52,7 +52,7 @@
  * Only 64-bits of the memory is actually used though.
  */
 union rte_distributor_buffer_single {
-	volatile int64_t bufptr64;
+	volatile int64_t __rte_atomic bufptr64;
 	char pad[RTE_CACHE_LINE_SIZE*3];
 } __rte_cache_aligned;
 
diff --git a/lib/distributor/rte_distributor_single.c b/lib/distributor/rte_distributor_single.c
index 2c77ac4..ad43c13 100644
--- a/lib/distributor/rte_distributor_single.c
+++ b/lib/distributor/rte_distributor_single.c
@@ -32,10 +32,10 @@
 	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
 			| RTE_DISTRIB_GET_BUF;
 	RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,
-		==, 0, __ATOMIC_RELAXED);
+		==, 0, rte_memory_order_relaxed);
 
 	/* Sync with distributor on GET_BUF flag. */
-	__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release);
 }
 
 struct rte_mbuf *
@@ -44,7 +44,7 @@ struct rte_mbuf *
 {
 	union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
 	/* Sync with distributor. Acquire bufptr64. */
-	if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
+	if (rte_atomic_load_explicit(&buf->bufptr64, rte_memory_order_acquire)
 		& RTE_DISTRIB_GET_BUF)
 		return NULL;
 
@@ -72,10 +72,10 @@ struct rte_mbuf *
 	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
 			| RTE_DISTRIB_RETURN_BUF;
 	RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,
-		==, 0, __ATOMIC_RELAXED);
+		==, 0, rte_memory_order_relaxed);
 
 	/* Sync with distributor on RETURN_BUF flag. */
-	__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release);
 	return 0;
 }
 
@@ -119,7 +119,7 @@ struct rte_mbuf *
 	d->in_flight_tags[wkr] = 0;
 	d->in_flight_bitmask &= ~(1UL << wkr);
 	/* Sync with worker. Release bufptr64. */
-	__atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, 0, rte_memory_order_release);
 	if (unlikely(d->backlog[wkr].count != 0)) {
 		/* On return of a packet, we need to move the
 		 * queued packets for this core elsewhere.
@@ -165,21 +165,21 @@ struct rte_mbuf *
 	for (wkr = 0; wkr < d->num_workers; wkr++) {
 		uintptr_t oldbuf = 0;
 		/* Sync with worker. Acquire bufptr64. */
-		const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
-					__ATOMIC_ACQUIRE);
+		const int64_t data = rte_atomic_load_explicit(&d->bufs[wkr].bufptr64,
+					rte_memory_order_acquire);
 
 		if (data & RTE_DISTRIB_GET_BUF) {
 			flushed++;
 			if (d->backlog[wkr].count)
 				/* Sync with worker. Release bufptr64. */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 					backlog_pop(&d->backlog[wkr]),
-					__ATOMIC_RELEASE);
+					rte_memory_order_release);
 			else {
 				/* Sync with worker on GET_BUF flag. */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 					RTE_DISTRIB_GET_BUF,
-					__ATOMIC_RELEASE);
+					rte_memory_order_release);
 				d->in_flight_tags[wkr] = 0;
 				d->in_flight_bitmask &= ~(1UL << wkr);
 			}
@@ -217,8 +217,8 @@ struct rte_mbuf *
 	while (next_idx < num_mbufs || next_mb != NULL) {
 		uintptr_t oldbuf = 0;
 		/* Sync with worker. Acquire bufptr64. */
-		int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
-				__ATOMIC_ACQUIRE);
+		int64_t data = rte_atomic_load_explicit(&(d->bufs[wkr].bufptr64),
+				rte_memory_order_acquire);
 
 		if (!next_mb) {
 			next_mb = mbufs[next_idx++];
@@ -264,15 +264,15 @@ struct rte_mbuf *
 
 			if (d->backlog[wkr].count)
 				/* Sync with worker. Release bufptr64.
 				 */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 						backlog_pop(&d->backlog[wkr]),
-						__ATOMIC_RELEASE);
+						rte_memory_order_release);
 			else {
 				/* Sync with worker. Release bufptr64. */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 						next_value,
-						__ATOMIC_RELEASE);
+						rte_memory_order_release);
 				d->in_flight_tags[wkr] = new_tag;
 				d->in_flight_bitmask |= (1UL << wkr);
 				next_mb = NULL;
@@ -294,8 +294,8 @@ struct rte_mbuf *
 	for (wkr = 0; wkr < d->num_workers; wkr++)
 		if (d->backlog[wkr].count &&
 				/* Sync with worker. Acquire bufptr64. */
-				(__atomic_load_n(&(d->bufs[wkr].bufptr64),
-				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
+				(rte_atomic_load_explicit(&d->bufs[wkr].bufptr64,
+				rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) {
 
 			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
 					RTE_DISTRIB_FLAG_BITS;
@@ -303,9 +303,9 @@ struct rte_mbuf *
 			store_return(oldbuf, d, &ret_start, &ret_count);
 
 			/* Sync with worker. Release bufptr64. */
-			__atomic_store_n(&(d->bufs[wkr].bufptr64),
+			rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 					backlog_pop(&d->backlog[wkr]),
-					__ATOMIC_RELEASE);
+					rte_memory_order_release);
 		}
 
 	d->returns.start = ret_start;