[v3,7/7] net/ring: replace rte atomics with GCC builtin atomics

Message ID 1679612036-30773-8-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Series: replace rte atomics with GCC builtin atomics

Checks

Context | Check | Description
ci/checkpatch | success | coding style OK
ci/loongarch-compilation | success | Compilation OK
ci/loongarch-unit-testing | success | Unit Testing PASS
ci/Intel-compilation | success | Compilation OK
ci/intel-Testing | success | Testing PASS
ci/intel-Functional | success | Functional PASS
ci/github-robot: build | success | github build: passed
ci/iol-broadcom-Performance | success | Performance Testing PASS
ci/iol-mellanox-Performance | success | Performance Testing PASS
ci/iol-broadcom-Functional | success | Functional Testing PASS
ci/iol-intel-Performance | success | Performance Testing PASS
ci/iol-intel-Functional | success | Functional Testing PASS
ci/iol-abi-testing | success | Testing PASS
ci/iol-aarch64-unit-testing | success | Testing PASS
ci/iol-unit-testing | success | Testing PASS

Commit Message

Tyler Retzlaff March 23, 2023, 10:53 p.m. UTC
  Replace the use of rte_atomic.h types and functions; instead, use the
GCC-supplied C++11 memory model builtins.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/net/ring/rte_eth_ring.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
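
For context, a minimal sketch of the conversion pattern this series applies
(illustrative only, not taken from the patch): the legacy rte_atomic64_t
counter and its helpers become a plain uint64_t manipulated through the GCC
__atomic builtins.

	#include <stdint.h>

	static uint64_t pkts;              /* was: rte_atomic64_t pkts; */

	static void
	count(uint64_t n)
	{
		/* was: rte_atomic64_add(&pkts, n); */
		__atomic_fetch_add(&pkts, n, __ATOMIC_SEQ_CST);
	}

	static uint64_t
	count_read(void)
	{
		/* was: pkts.cnt; the builtin makes the atomic load explicit */
		return __atomic_load_n(&pkts, __ATOMIC_SEQ_CST);
	}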
  

Comments

David Marchand May 24, 2023, 8:12 p.m. UTC | #1
Hello Bruce,

For you again, review please.


On Thu, Mar 23, 2023 at 11:54 PM Tyler Retzlaff
<roretzla@linux.microsoft.com> wrote:
>
> Replace the use of rte_atomic.h types and functions; instead, use the
> GCC-supplied C++11 memory model builtins.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
>  drivers/net/ring/rte_eth_ring.c | 26 ++++++++++++++++----------
>  1 file changed, 16 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
> index e8bc9b6..43eb627 100644
> --- a/drivers/net/ring/rte_eth_ring.c
> +++ b/drivers/net/ring/rte_eth_ring.c
> @@ -44,8 +44,8 @@ enum dev_action {
>
>  struct ring_queue {
>         struct rte_ring *rng;
> -       rte_atomic64_t rx_pkts;
> -       rte_atomic64_t tx_pkts;
> +       uint64_t rx_pkts;
> +       uint64_t tx_pkts;
>  };
>
>  struct pmd_internals {
> @@ -80,9 +80,10 @@ struct pmd_internals {
>         const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
>                         ptrs, nb_bufs, NULL);
>         if (r->rng->flags & RING_F_SC_DEQ)
> -               r->rx_pkts.cnt += nb_rx;
> +               r->rx_pkts += nb_rx;
>         else
> -               rte_atomic64_add(&(r->rx_pkts), nb_rx);
> +               /* NOTE: review for potential ordering optimization */
> +               __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_SEQ_CST);
>         return nb_rx;
>  }
>
> @@ -94,9 +95,10 @@ struct pmd_internals {
>         const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
>                         ptrs, nb_bufs, NULL);
>         if (r->rng->flags & RING_F_SP_ENQ)
> -               r->tx_pkts.cnt += nb_tx;
> +               r->tx_pkts += nb_tx;
>         else
> -               rte_atomic64_add(&(r->tx_pkts), nb_tx);
> +               /* NOTE: review for potential ordering optimization */
> +               __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_SEQ_CST);
>         return nb_tx;
>  }
>
> @@ -184,13 +186,15 @@ struct pmd_internals {
>
>         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
>                         i < dev->data->nb_rx_queues; i++) {
> -               stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
> +               /* NOTE: review for atomic access */
> +               stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
>                 rx_total += stats->q_ipackets[i];
>         }
>
>         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
>                         i < dev->data->nb_tx_queues; i++) {
> -               stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
> +               /* NOTE: review for atomic access */
> +               stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
>                 tx_total += stats->q_opackets[i];
>         }
>
> @@ -207,9 +211,11 @@ struct pmd_internals {
>         struct pmd_internals *internal = dev->data->dev_private;
>
>         for (i = 0; i < dev->data->nb_rx_queues; i++)
> -               internal->rx_ring_queues[i].rx_pkts.cnt = 0;
> +               /* NOTE: review for atomic access */
> +               internal->rx_ring_queues[i].rx_pkts = 0;
>         for (i = 0; i < dev->data->nb_tx_queues; i++)
> -               internal->tx_ring_queues[i].tx_pkts.cnt = 0;
> +               /* NOTE: review for atomic access */
> +               internal->tx_ring_queues[i].tx_pkts = 0;
>
>         return 0;
>  }
> --
> 1.8.3.1
>
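
An aside on the "review for potential ordering optimization" NOTEs above:
these counters are only ever incremented and later read back as statistics,
so the sequentially consistent ordering is likely stronger than required. A
relaxed read-modify-write would be the obvious follow-up; sketched here with
the patch's own variables, not something this revision changes:

	/* same atomicity as __ATOMIC_SEQ_CST, but no ordering constraint
	 * on surrounding loads/stores; enough for a pure packet counter */
	__atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);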
  
Bruce Richardson May 25, 2023, 8:44 a.m. UTC | #2
On Wed, May 24, 2023 at 10:12:44PM +0200, David Marchand wrote:
> Hello Bruce,
> 
> For you again, review please.
> 
> 
> On Thu, Mar 23, 2023 at 11:54 PM Tyler Retzlaff
> <roretzla@linux.microsoft.com> wrote:
> >
> > Replace the use of rte_atomic.h types and functions; instead, use the
> > GCC-supplied C++11 memory model builtins.
> >
> > Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > ---

Acked-by: Bruce Richardson <bruce.richardson@intel.com>
  

Patch

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index e8bc9b6..43eb627 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
 
 struct ring_queue {
 	struct rte_ring *rng;
-	rte_atomic64_t rx_pkts;
-	rte_atomic64_t tx_pkts;
+	uint64_t rx_pkts;
+	uint64_t tx_pkts;
 };
 
 struct pmd_internals {
@@ -80,9 +80,10 @@ struct pmd_internals {
 	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
 			ptrs, nb_bufs, NULL);
 	if (r->rng->flags & RING_F_SC_DEQ)
-		r->rx_pkts.cnt += nb_rx;
+		r->rx_pkts += nb_rx;
 	else
-		rte_atomic64_add(&(r->rx_pkts), nb_rx);
+		/* NOTE: review for potential ordering optimization */
+		__atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_SEQ_CST);
 	return nb_rx;
 }
 
@@ -94,9 +95,10 @@ struct pmd_internals {
 	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
 			ptrs, nb_bufs, NULL);
 	if (r->rng->flags & RING_F_SP_ENQ)
-		r->tx_pkts.cnt += nb_tx;
+		r->tx_pkts += nb_tx;
 	else
-		rte_atomic64_add(&(r->tx_pkts), nb_tx);
+		/* NOTE: review for potential ordering optimization */
+		__atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_SEQ_CST);
 	return nb_tx;
 }
 
@@ -184,13 +186,15 @@ struct pmd_internals {
 
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
 			i < dev->data->nb_rx_queues; i++) {
-		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
+		/* NOTE: review for atomic access */
+		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
 		rx_total += stats->q_ipackets[i];
 	}
 
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
 			i < dev->data->nb_tx_queues; i++) {
-		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+		/* NOTE: review for atomic access */
+		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
 		tx_total += stats->q_opackets[i];
 	}
 
@@ -207,9 +211,11 @@ struct pmd_internals {
 	struct pmd_internals *internal = dev->data->dev_private;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
+		/* NOTE: review for atomic access */
+		internal->rx_ring_queues[i].rx_pkts = 0;
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+		/* NOTE: review for atomic access */
+		internal->tx_ring_queues[i].tx_pkts = 0;
 
 	return 0;
 }
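
On the "review for atomic access" NOTEs in the stats get/reset paths: a plain
aligned 64-bit load or store is typically atomic on 64-bit targets, but the
builtins would make the intent explicit and keep 32-bit targets correct. A
possible follow-up, sketched with the patch's variables and assuming relaxed
ordering suffices for statistics:

	/* read one queue counter atomically (stats get path) */
	stats->q_ipackets[i] = __atomic_load_n(
		&internal->rx_ring_queues[i].rx_pkts, __ATOMIC_RELAXED);

	/* reset one queue counter atomically (stats reset path) */
	__atomic_store_n(&internal->rx_ring_queues[i].rx_pkts, 0,
		__ATOMIC_RELAXED);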