This commit uses a spinlock to protect the shared action list from
concurrent access by multiple threads.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 1 +
drivers/net/mlx5/mlx5.h | 1 +
drivers/net/mlx5/mlx5_flow_dv.c | 5 +++++
3 files changed, 7 insertions(+)
@@ -1534,6 +1534,7 @@
}
priv->mreg_cp_tbl->ctx = eth_dev;
}
+ rte_spinlock_init(&priv->shared_act_sl);
mlx5_flow_counter_mode_config(eth_dev);
return eth_dev;
error:
@@ -912,6 +912,7 @@ struct mlx5_priv {
uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
struct mlx5_mp_id mp_id; /* ID of a multi-process process */
LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
+ rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */
LIST_HEAD(shared_action, rte_flow_shared_action) shared_actions;
/* shared actions */
};
@@ -11189,7 +11189,9 @@ struct mlx5_cache_entry *
if (shared_action) {
__atomic_add_fetch(&shared_action->refcnt, 1,
__ATOMIC_RELAXED);
+ rte_spinlock_lock(&priv->shared_act_sl);
LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
}
return shared_action;
}
@@ -11216,6 +11218,7 @@ struct mlx5_cache_entry *
struct rte_flow_shared_action *action,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
switch (action->type) {
@@ -11230,7 +11233,9 @@ struct mlx5_cache_entry *
}
if (ret)
return ret;
+ rte_spinlock_lock(&priv->shared_act_sl);
LIST_REMOVE(action, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
rte_free(action);
return 0;
}