[1/2] lib/ethdev: replace full barrier with relaxed barrier
Checks
Commit Message
From: Phil Yang <phil.yang@arm.com>
While registering the callback functions, the full write barrier
can be replaced with a one-way write barrier.
Signed-off-by: Phil Yang <phil.yang@arm.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
lib/librte_ethdev/rte_ethdev.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
Comments
On 10/2/2020 1:07 AM, Honnappa Nagarahalli wrote:
> From: Phil Yang <phil.yang@arm.com>
>
> While registering the callback functions, the full write barrier
> can be replaced with a one-way write barrier.
>
> Signed-off-by: Phil Yang <phil.yang@arm.com>
> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
+Konstantin & Jerin,
Can you please help review this patch?
> ---
> lib/librte_ethdev/rte_ethdev.c | 9 ++++++---
> 1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
> index 7858ad5f1..59a41c07f 100644
> --- a/lib/librte_ethdev/rte_ethdev.c
> +++ b/lib/librte_ethdev/rte_ethdev.c
> @@ -26,7 +26,6 @@
> #include <rte_eal.h>
> #include <rte_per_lcore.h>
> #include <rte_lcore.h>
> -#include <rte_atomic.h>
> #include <rte_branch_prediction.h>
> #include <rte_common.h>
> #include <rte_mempool.h>
> @@ -4527,8 +4526,12 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
> rte_spinlock_lock(&rte_eth_rx_cb_lock);
> /* Add the callbacks at first position */
> cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
> - rte_smp_wmb();
> - rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
> + /* Stores to cb->fn, cb->param and cb->next should complete before
> + * cb is visible to data plane threads.
> + */
> + __atomic_store_n(
> + &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
> + cb, __ATOMIC_RELEASE);
> rte_spinlock_unlock(&rte_eth_rx_cb_lock);
>
> return cb;
>
> From: Phil Yang <phil.yang@arm.com>
>
> While registering the callback functions, the full write barrier
> can be replaced with a one-way write barrier.
>
> Signed-off-by: Phil Yang <phil.yang@arm.com>
> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
> lib/librte_ethdev/rte_ethdev.c | 9 ++++++---
> 1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
> index 7858ad5f1..59a41c07f 100644
> --- a/lib/librte_ethdev/rte_ethdev.c
> +++ b/lib/librte_ethdev/rte_ethdev.c
> @@ -26,7 +26,6 @@
> #include <rte_eal.h>
> #include <rte_per_lcore.h>
> #include <rte_lcore.h>
> -#include <rte_atomic.h>
> #include <rte_branch_prediction.h>
> #include <rte_common.h>
> #include <rte_mempool.h>
> @@ -4527,8 +4526,12 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
> rte_spinlock_lock(&rte_eth_rx_cb_lock);
> /* Add the callbacks at first position */
> cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
> - rte_smp_wmb();
> - rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
> + /* Stores to cb->fn, cb->param and cb->next should complete before
> + * cb is visible to data plane threads.
> + */
> + __atomic_store_n(
> + &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
> + cb, __ATOMIC_RELEASE);
> rte_spinlock_unlock(&rte_eth_rx_cb_lock);
>
> return cb;
> --
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> 2.17.1
@@ -26,7 +26,6 @@
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
@@ -4527,8 +4526,12 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_spinlock_lock(&rte_eth_rx_cb_lock);
/* Add the callbacks at first position */
cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
- rte_smp_wmb();
- rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+ /* Stores to cb->fn, cb->param and cb->next should complete before
+ * cb is visible to data plane threads.
+ */
+ __atomic_store_n(
+ &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
+ cb, __ATOMIC_RELEASE);
rte_spinlock_unlock(&rte_eth_rx_cb_lock);
return cb;