From: Xueming Li <xuemingl@nvidia.com>
To support multi-thread flow operations, this patch introduces a list
lock for the rte_flow list that manages all the rte_flow handles.
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 1 +
drivers/net/mlx5/mlx5.h | 1 +
drivers/net/mlx5/mlx5_flow.c | 10 ++++++++--
3 files changed, 10 insertions(+), 2 deletions(-)
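
The change follows a common pattern: take a short spinlock critical
section around the list mutation only, and keep the heavy flow
creation/destruction work outside the lock. A minimal self-contained
sketch of that pattern follows, assuming only DPDK's rte_spinlock.h;
the mlx5 driver actually keeps its flows in the internal ILIST/ipool
machinery, so the plain sys/queue.h list and the demo_* names below
are stand-ins for illustration only.

#include <sys/queue.h>
#include <rte_spinlock.h>

struct demo_flow {
        LIST_ENTRY(demo_flow) next;
};

struct demo_priv {
        rte_spinlock_t flow_list_lock; /* Protects the flows list. */
        LIST_HEAD(, demo_flow) flows;
};

static void
demo_priv_init(struct demo_priv *priv)
{
        rte_spinlock_init(&priv->flow_list_lock);
        LIST_INIT(&priv->flows);
}

static void
demo_flow_insert(struct demo_priv *priv, struct demo_flow *flow)
{
        rte_spinlock_lock(&priv->flow_list_lock);
        LIST_INSERT_HEAD(&priv->flows, flow, next);
        rte_spinlock_unlock(&priv->flow_list_lock);
}

static void
demo_flow_remove(struct demo_priv *priv, struct demo_flow *flow)
{
        rte_spinlock_lock(&priv->flow_list_lock);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&priv->flow_list_lock);
}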
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1250,6 +1250,7 @@
MLX5_MAX_MAC_ADDRESSES);
priv->flows = 0;
priv->ctrl_flows = 0;
+ rte_spinlock_init(&priv->flow_list_lock);
TAILQ_INIT(&priv->flow_meters);
TAILQ_INIT(&priv->flow_meter_profiles);
/* Hint libmlx5 to use PMD allocator for data plane resources */
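
rte_spinlock_init() simply puts the lock in the unlocked state, which
is safe here since no other thread can reach the port before probing
completes. For a lock with static storage duration, DPDK also offers a
compile-time initializer; a one-line sketch (the file-scope demo_lock
is hypothetical):

#include <rte_spinlock.h>

/* Equivalent compile-time initialization for a static lock. */
static rte_spinlock_t demo_lock = RTE_SPINLOCK_INITIALIZER;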
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -784,6 +784,7 @@ struct mlx5_priv {
struct mlx5_drop drop_queue; /* Flow drop queues. */
uint32_t flows; /* RTE Flow rules. */
uint32_t ctrl_flows; /* Control flow rules. */
+ rte_spinlock_t flow_list_lock; /* Protect flow list. */
struct mlx5_obj_ops obj_ops; /* HW objects operations. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4479,9 +4479,12 @@ struct mlx5_flow_tunnel_info {
if (ret < 0)
goto error;
}
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
flow, next);
+ rte_spinlock_unlock(&priv->flow_list_lock);
+ }
flow_rxq_flags_set(dev, flow);
/* Nested flow creation index recovery. */
wks->flow_idx = wks->flow_nested_idx;
@@ -4637,9 +4640,12 @@ struct rte_flow *
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
flow_drv_destroy(dev, flow);
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
flow_idx, flow, next);
+ rte_spinlock_unlock(&priv->flow_list_lock);
+ }
flow_mreg_del_copy_action(dev, flow);
if (flow->fdir) {
LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
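
With list insertion and removal serialized, rte_flow rules on the same
port can be created and destroyed from different threads without
corrupting the list; note the lock covers only the ILIST operations,
while flow_drv_destroy() and the Rx queue flag bookkeeping stay outside
the critical section. A caller-side sketch, assuming the rest of this
thread-safety series is in place (the demo_install_drop helper is
hypothetical):

#include <rte_flow.h>

/* Install a catch-all ingress drop rule on port_id. */
static struct rte_flow *
demo_install_drop(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}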