[v5,06/34] net/mlx5: make rte flow list thread safe

Message ID 1603875616-272798-7-git-send-email-suanmingm@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series net/mlx5: support multiple-thread flow operations

Checks

Context: ci/checkpatch
Check: success
Description: coding style OK

Commit Message

Suanming Mou Oct. 28, 2020, 8:59 a.m. UTC
  From: Xueming Li <xuemingl@nvidia.com>

To support multi-thread flow operations, this patch introduces a list lock
to protect the rte_flow list that manages all the rte_flow handles.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c |  1 +
 drivers/net/mlx5/mlx5.h          |  1 +
 drivers/net/mlx5/mlx5_flow.c     | 10 ++++++++--
 3 files changed, 10 insertions(+), 2 deletions(-)
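
The core of the change is simple: every insert/remove on the per-port
rte_flow list is now serialized by priv->flow_list_lock. Below is a minimal
standalone sketch of that pattern, not mlx5 code: pthread_spinlock_t stands
in for rte_spinlock_t, a plain singly linked list stands in for the indexed
ILIST, and the names (port_priv, flow_list_insert, flow_list_remove) are
illustrative only.

/*
 * Sketch: a per-port spinlock serializes insert/remove on the flow list
 * so that multiple threads may create/destroy flows concurrently.
 */
#include <pthread.h>
#include <stdio.h>

struct flow {
	unsigned int idx;   /* flow index, as in the ipool-based list */
	struct flow *next;  /* next flow on the per-port list */
};

struct port_priv {
	struct flow *flows;                 /* head of the flow list */
	pthread_spinlock_t flow_list_lock;  /* protects the list links */
};

/* Insert a new flow at the list head under the list lock. */
static void
flow_list_insert(struct port_priv *priv, struct flow *flow)
{
	pthread_spin_lock(&priv->flow_list_lock);
	flow->next = priv->flows;
	priv->flows = flow;
	pthread_spin_unlock(&priv->flow_list_lock);
}

/* Unlink a flow from the list under the list lock. */
static void
flow_list_remove(struct port_priv *priv, struct flow *flow)
{
	struct flow **p;

	pthread_spin_lock(&priv->flow_list_lock);
	for (p = &priv->flows; *p != NULL; p = &(*p)->next) {
		if (*p == flow) {
			*p = flow->next;
			break;
		}
	}
	pthread_spin_unlock(&priv->flow_list_lock);
}

int
main(void)
{
	struct port_priv priv = { .flows = NULL };
	struct flow f1 = { .idx = 1 }, f2 = { .idx = 2 };

	pthread_spin_init(&priv.flow_list_lock, PTHREAD_PROCESS_PRIVATE);
	flow_list_insert(&priv, &f1);
	flow_list_insert(&priv, &f2);
	flow_list_remove(&priv, &f1);
	printf("head idx: %u\n", priv.flows ? priv.flows->idx : 0);
	pthread_spin_destroy(&priv.flow_list_lock);
	return 0;
}

The patch applies exactly this discipline in the create and destroy paths
below, wrapping ILIST_INSERT()/ILIST_REMOVE() in
rte_spinlock_lock()/rte_spinlock_unlock() on priv->flow_list_lock.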
  

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 0b59e74..a579dde 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1358,6 +1358,7 @@ 
 				      MLX5_MAX_MAC_ADDRESSES);
 	priv->flows = 0;
 	priv->ctrl_flows = 0;
+	rte_spinlock_init(&priv->flow_list_lock);
 	TAILQ_INIT(&priv->flow_meters);
 	TAILQ_INIT(&priv->flow_meter_profiles);
 	/* Hint libmlx5 to use PMD allocator for data plane resources */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5bda233..9ab2976 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -856,6 +856,7 @@  struct mlx5_priv {
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
 	uint32_t flows; /* RTE Flow rules. */
 	uint32_t ctrl_flows; /* Control flow rules. */
+	rte_spinlock_t flow_list_lock;
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ed2acd1..cc31801 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5774,9 +5774,12 @@  struct tunnel_default_miss_ctx {
 		if (ret < 0)
 			goto error;
 	}
-	if (list)
+	if (list) {
+		rte_spinlock_lock(&priv->flow_list_lock);
 		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
 			     flow, next);
+		rte_spinlock_unlock(&priv->flow_list_lock);
+	}
 	flow_rxq_flags_set(dev, flow);
 	rte_free(translated_actions);
 	/* Nested flow creation index recovery. */
@@ -5957,9 +5960,12 @@  struct rte_flow *
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
 	flow_drv_destroy(dev, flow);
-	if (list)
+	if (list) {
+		rte_spinlock_lock(&priv->flow_list_lock);
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
 			     flow_idx, flow, next);
+		rte_spinlock_unlock(&priv->flow_list_lock);
+	}
 	flow_mreg_del_copy_action(dev, flow);
 	if (flow->fdir) {
 		LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {