[v3,32/34] net/mlx5: make tunnel hub list thread safe

Message ID 1603801650-442376-33-git-send-email-suanmingm@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers
Series net/mlx5: support multiple-thread flow operations |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Suanming Mou Oct. 27, 2020, 12:27 p.m. UTC
  This commit uses a spinlock to protect the tunnel hub list in
multi-thread scenarios.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 20 +++++++++++++++++---
 drivers/net/mlx5/mlx5_flow.h |  1 +
 2 files changed, 18 insertions(+), 3 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5483f75..87446f7 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -669,10 +669,14 @@  enum mlx5_expansion {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items)
+		if (&tun->item == pmd_items) {
+			LIST_REMOVE(tun, chain);
 			break;
+		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (!tun || num_items != 1)
 		return rte_flow_error_set(err, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -690,10 +694,14 @@  enum mlx5_expansion {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions)
+		if (&tun->action == pmd_actions) {
+			LIST_REMOVE(tun, chain);
 			break;
+		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (!tun || num_actions != 1)
 		return rte_flow_error_set(err, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -5871,8 +5879,12 @@  struct rte_flow *
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
+
+		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
+		LIST_REMOVE(tunnel, chain);
+		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7931,7 +7943,6 @@  struct mlx5_meter_domains_infos *
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
 	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	LIST_REMOVE(tunnel, chain);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
 			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
@@ -8020,6 +8031,7 @@  struct mlx5_meter_domains_infos *
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
 		if (!memcmp(app_tunnel, &tun->app_tunnel,
 			    sizeof(*app_tunnel))) {
@@ -8037,6 +8049,7 @@  struct mlx5_meter_domains_infos *
 			ret = -ENOMEM;
 		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (tun)
 		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
 
@@ -8065,6 +8078,7 @@  int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	if (!thub)
 		return -ENOMEM;
 	LIST_INIT(&thub->tunnels);
+	rte_spinlock_init(&thub->sl);
 	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
 					 0, mlx5_flow_tunnel_grp2tbl_create_cb,
 					 NULL,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 2de8988..c15f5e7 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -946,6 +946,7 @@  struct mlx5_flow_tunnel {
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
+	rte_spinlock_t sl;			/* Tunnel list spinlock. */
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };