[v4] net/mlx5: relaxed ordering for multi-packet RQ buffer refcnt

Message ID 1599101590-4856-1-git-send-email-phil.yang@arm.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series [v4] net/mlx5: relaxed ordering for multi-packet RQ buffer refcnt

Checks

Context Check Description
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/travis-robot success Travis build: passed
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Phil Yang Sept. 3, 2020, 2:53 a.m. UTC
  Use C11 atomics with RELAXED ordering instead of the rte_atomic ops
which enforce unnecessary barriers on aarch64.

Signed-off-by: Phil Yang <phil.yang@arm.com>
---
v4:
Remove the unnecessary ACQUIRE barrier in rx burst path. (Honnappa)

v3:
Split from the patchset:
http://patchwork.dpdk.org/cover/68159/

 drivers/net/mlx5/mlx5_rxq.c  |  2 +-
 drivers/net/mlx5/mlx5_rxtx.c | 16 +++++++++-------
 drivers/net/mlx5/mlx5_rxtx.h |  2 +-
 3 files changed, 11 insertions(+), 9 deletions(-)
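
For readers less familiar with the built-ins, here is a minimal standalone
sketch of the pattern the patch adopts (illustration only, with hypothetical
demo_* names; not the driver code itself). A plain uint16_t counter is
updated through the GCC/Clang __atomic built-ins with __ATOMIC_RELAXED:
only atomicity of the counter update is required, not ordering against other
memory accesses, which is what allows dropping the full barriers that the
legacy rte_atomic16_* ops imply on aarch64.

#include <stdint.h>
#include <stdbool.h>

struct demo_buf {
	uint16_t refcnt; /* Atomically accessed refcnt. */
};

/* Initialization: a relaxed store suffices, no other data is published. */
static inline void
demo_buf_init(struct demo_buf *buf)
{
	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
}

/* Take an extra reference to the buffer. */
static inline void
demo_buf_ref(struct demo_buf *buf)
{
	__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
}

/* Drop one reference; returns true when the last one is gone. */
static inline bool
demo_buf_unref(struct demo_buf *buf)
{
	return __atomic_sub_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED) == 0;
}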
  

Comments

Honnappa Nagarahalli Sept. 10, 2020, 1:30 a.m. UTC | #1
<snip>

> 
> Use C11 atomics with RELAXED ordering instead of the rte_atomic ops which
> enforce unnecessary barriers on aarch64.
> 
> Signed-off-by: Phil Yang <phil.yang@arm.com>
Looks good.

Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>

> <snip>
  
Alexander Kozyrev Sept. 10, 2020, 1:36 a.m. UTC | #2
> <snip>
> 
> >
> > Use C11 atomics with RELAXED ordering instead of the rte_atomic ops
> > which enforce unnecessary barriers on aarch64.
> >
> > Signed-off-by: Phil Yang <phil.yang@arm.com>
> Looks good.
> 
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>

Acked-by: Alexander Kozyrev <akozyrev@nvidia.com>

> 
> > <snip>
  
Phil Yang Sept. 29, 2020, 3:22 p.m. UTC | #3
Hi Raslan,

It seems that there are no more comments for this patch.
So shall we proceed further?

Thanks,
Phil Yang

> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Thursday, September 10, 2020 9:37 AM
> To: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>; Phil Yang
> <Phil.Yang@arm.com>; akozyrev@mellanox.com; rasland@mellanox.com;
> dev@dpdk.org
> Cc: Phil Yang <Phil.Yang@arm.com>; matan@mellanox.com; Shahaf Shuler
> <shahafs@mellanox.com>; viacheslavo@mellanox.com; nd <nd@arm.com>;
> nd <nd@arm.com>
> Subject: RE: [PATCH v4] net/mlx5: relaxed ordering for multi-packet RQ
> buffer refcnt
> 
> > <snip>
  
Slava Ovsiienko Sept. 30, 2020, 12:44 p.m. UTC | #4
Looks good to me, and we've checked that there is no performance impact.
Thank you.

Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Phil Yang
> Sent: Tuesday, September 29, 2020 18:23
> To: Raslan Darawsheh <rasland@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
> Cc: nd <nd@arm.com>; Alexander Kozyrev <akozyrev@nvidia.com>;
> Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>; dev@dpdk.org;
> nd <nd@arm.com>
> Subject: Re: [dpdk-dev] [PATCH v4] net/mlx5: relaxed ordering for multi-
> packet RQ buffer refcnt
> 
> Hi Raslan,
> 
> It seems that there are no more comments for this patch.
> So shall we proceed further?
> 
> Thanks,
> Phil Yang
> 
> > <snip>
  
Raslan Darawsheh Sept. 30, 2020, 12:52 p.m. UTC | #5
Sure, I'll take the patch. I was waiting for the performance impact testing, and since Slava has just confirmed it,
we can take it. Thank you for your contribution.

Kindest regards,
Raslan Darawsheh

> -----Original Message-----
> From: Slava Ovsiienko <viacheslavo@nvidia.com>
> Sent: Wednesday, September 30, 2020 3:45 PM
> To: Phil Yang <Phil.Yang@arm.com>; Raslan Darawsheh
> <rasland@nvidia.com>; Matan Azrad <matan@nvidia.com>; Shahaf Shuler
> <shahafs@nvidia.com>
> Cc: nd <nd@arm.com>; Alexander Kozyrev <akozyrev@nvidia.com>;
> Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>; dev@dpdk.org;
> nd <nd@arm.com>
> Subject: RE: [PATCH v4] net/mlx5: relaxed ordering for multi-packet RQ
> buffer refcnt
> 
> Looks good to me, and we've checked that there is no performance impact.
> Thank you.
> 
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 
> > <snip>
  
Raslan Darawsheh Sept. 30, 2020, 1:57 p.m. UTC | #6
Hi,

> -----Original Message-----
> From: Phil Yang <phil.yang@arm.com>
> Sent: Thursday, September 3, 2020 5:53 AM
> To: akozyrev@mellanox.com; rasland@mellanox.com; dev@dpdk.org
> Cc: Honnappa.Nagarahalli@arm.com; Phil.Yang@arm.com;
> matan@mellanox.com; Shahaf Shuler <shahafs@mellanox.com>;
> viacheslavo@mellanox.com; nd@arm.com
> Subject: [PATCH v4] net/mlx5: relaxed ordering for multi-packet RQ buffer
> refcnt
> 
> Use C11 atomics with RELAXED ordering instead of the rte_atomic ops
> which enforce unnecessary barriers on aarch64.
> 
> Signed-off-by: Phil Yang <phil.yang@arm.com>
> <snip>

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
  

Patch

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 79eb8f8..40e0239 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2012,7 +2012,7 @@  mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
 
 	memset(_m, 0, sizeof(*buf));
 	buf->mp = mp;
-	rte_atomic16_set(&buf->refcnt, 1);
+	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
 	for (j = 0; j != strd_n; ++j) {
 		shinfo = &buf->shinfos[j];
 		shinfo->free_cb = mlx5_mprq_buf_free_cb;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 1b71e94..549477b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1626,10 +1626,11 @@  mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
 {
 	struct mlx5_mprq_buf *buf = opaque;
 
-	if (rte_atomic16_read(&buf->refcnt) == 1) {
+	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
 		rte_mempool_put(buf->mp, buf);
-	} else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
-		rte_atomic16_set(&buf->refcnt, 1);
+	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
+					       __ATOMIC_RELAXED) == 0)) {
+		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
 		rte_mempool_put(buf->mp, buf);
 	}
 }
@@ -1709,7 +1710,8 @@  mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 
 		if (consumed_strd == strd_n) {
 			/* Replace WQE only if the buffer is still in use. */
-			if (rte_atomic16_read(&buf->refcnt) > 1) {
+			if (__atomic_load_n(&buf->refcnt,
+					    __ATOMIC_RELAXED) > 1) {
 				mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
 				/* Release the old buffer. */
 				mlx5_mprq_buf_free(buf);
@@ -1821,9 +1823,9 @@  mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			void *buf_addr;
 
 			/* Increment the refcnt of the whole chunk. */
-			rte_atomic16_add_return(&buf->refcnt, 1);
-			MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
-				    strd_n + 1);
+			__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
+			MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
+				    __ATOMIC_RELAXED) <= strd_n + 1);
 			buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
 			/*
 			 * MLX5 device doesn't use iova but it is necessary in a
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c02a007..467f31d 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -68,7 +68,7 @@  struct rxq_zip {
 /* Multi-Packet RQ buffer header. */
 struct mlx5_mprq_buf {
 	struct rte_mempool *mp;
-	rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
+	uint16_t refcnt; /* Atomically accessed refcnt. */
 	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
 	struct rte_mbuf_ext_shared_info shinfos[];
 	/*
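
For reference, the control flow of mlx5_mprq_buf_free_cb() above can be read
as the following standalone sketch, reusing the hypothetical demo_buf struct
from the sketch after the diffstat near the top; demo_pool_put() stands in
for rte_mempool_put(), and the real code additionally wraps the second branch
in the unlikely() hint. The notable detail is that when the last reference
is dropped the counter is re-armed to 1 before the buffer returns to the
pool, so a recycled buffer is immediately ready for reuse. Relaxed ordering
is sufficient here because, once __atomic_sub_fetch() returns 0, no other
thread still holds a reference that could race with the store.

static void demo_pool_put(struct demo_buf *buf); /* returns buf to its pool */

static void
demo_buf_free_cb(struct demo_buf *buf)
{
	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
		/* Fast path: this is the sole remaining reference. */
		demo_pool_put(buf);
	} else if (__atomic_sub_fetch(&buf->refcnt, 1,
				      __ATOMIC_RELAXED) == 0) {
		/* Last reference just dropped: re-arm the count so the
		 * recycled buffer starts at 1, then return it to the pool.
		 */
		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
		demo_pool_put(buf);
	}
}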