[v4,09/14] net/mlx5: add flow flush function

Message ID 20220224134051.18167-10-suanmingm@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series net/mlx5: add hardware steering

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Suanming Mou Feb. 24, 2022, 1:40 p.m. UTC
  When the port is being stopped, all created flows should be flushed.
This commit adds the flow flush helper function.
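
For context, a minimal sketch of the expected call path (not part of this
patch; only the public ethdev API is used, everything else is an
assumption): stopping the port makes the PMD flush all generic flows,
including flows created through the asynchronous queue-based flow API, so
the application only needs to stop the port.

	/* Sketch only: with dv_flow_en=2, stopping the port ends up in
	 * mlx5_flow_list_flush() -> flow_hw_q_flow_flush(), which drains
	 * the flow queues and destroys every created flow.
	 */
	#include <rte_ethdev.h>

	static void
	port_teardown(uint16_t port_id)
	{
		/* Flows created via the async API need no per-flow destroy
		 * here; the driver flushes them on stop.
		 */
		(void)rte_eth_dev_stop(port_id);
		(void)rte_eth_dev_close(port_id);
	}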

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    |   8 ++
 drivers/net/mlx5/mlx5_flow_hw.c | 129 ++++++++++++++++++++++++++++++++
 2 files changed, 137 insertions(+)
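
The helper added below drains completions by pushing the queue and then
pulling results in bursts until the expected count is reached. The same
pattern, expressed with the public asynchronous flow API for illustration
(a simplified sketch, not part of the patch; error handling and the
empty-poll back-off of the real helper are omitted):

	#include <rte_common.h>
	#include <rte_flow.h>

	/* Sketch: push outstanding jobs on one queue, then pull completions
	 * in bursts until the caller-provided pending count is consumed.
	 */
	static int
	drain_queue(uint16_t port_id, uint32_t queue, uint32_t pending)
	{
		struct rte_flow_op_result res[32];
		int ret;

		rte_flow_push(port_id, queue, NULL);
		while (pending) {
			ret = rte_flow_pull(port_id, queue, res, RTE_DIM(res), NULL);
			if (ret < 0)
				return ret;
			pending -= ret;
		}
		return 0;
	}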
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1702d60fc0..5af71585fc 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7024,6 +7024,14 @@  mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 	uint32_t num_flushed = 0, fidx = 1;
 	struct rte_flow *flow;
 
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (priv->sh->config.dv_flow_en == 2 &&
+	    type == MLX5_FLOW_TYPE_GEN) {
+		flow_hw_q_flow_flush(dev, NULL);
+		return;
+	}
+#endif
+
 	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
 		flow_list_destroy(dev, type, fidx);
 		num_flushed++;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f0cb530524..74f8ee1d6a 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -13,6 +13,12 @@ 
 /* The maximum actions support in the flow. */
 #define MLX5_HW_MAX_ACTS 16
 
+/* Default push burst threshold. */
+#define BURST_THR 32u
+
+/* Default queue to flush the flows. */
+#define MLX5_DEFAULT_FLUSH_QUEUE 0
+
 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
 
 /* DR action flags with different table. */
@@ -391,6 +397,129 @@  flow_hw_push(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Drain the completions of the enqueued flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ *   The queue to pull the flows from.
+ * @param[in] pending_rules
+ *   The number of pending flows.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+__flow_hw_pull_comp(struct rte_eth_dev *dev,
+		    uint32_t queue,
+		    uint32_t pending_rules,
+		    struct rte_flow_error *error)
+{
+	struct rte_flow_op_result comp[BURST_THR];
+	int ret, i, empty_loop = 0;
+
+	flow_hw_push(dev, queue, error);
+	while (pending_rules) {
+		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
+		if (ret < 0)
+			return -1;
+		if (!ret) {
+			rte_delay_us_sleep(20000);
+			if (++empty_loop > 5) {
+				DRV_LOG(WARNING, "No available dequeue, quit.");
+				break;
+			}
+			continue;
+		}
+		for (i = 0; i < ret; i++) {
+			if (comp[i].status == RTE_FLOW_OP_ERROR)
+				DRV_LOG(WARNING, "Flow flush get error CQE.");
+		}
+		if ((uint32_t)ret > pending_rules) {
+			DRV_LOG(WARNING, "Flow flush get extra CQE.");
+			return rte_flow_error_set(error, ERANGE,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					"get extra CQE");
+		}
+		pending_rules -= ret;
+		empty_loop = 0;
+	}
+	return 0;
+}
+
+/**
+ * Flush created flows.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+int
+flow_hw_q_flow_flush(struct rte_eth_dev *dev,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hw_q *hw_q;
+	struct rte_flow_template_table *tbl;
+	struct rte_flow_hw *flow;
+	struct rte_flow_op_attr attr = {
+		.postpone = 0,
+	};
+	uint32_t pending_rules = 0;
+	uint32_t queue;
+	uint32_t fidx;
+
+	/*
+	 * Ensure that all enqueued flow creation/destruction jobs are
+	 * pushed and their completions dequeued, in case the user
+	 * forgot to dequeue them. Otherwise the enqueued created flows
+	 * would be leaked, and the leftover completions would make the
+	 * flush below receive more CQEs than expected and drive
+	 * pending_rules negative.
+	 */
+	for (queue = 0; queue < priv->nb_queue; queue++) {
+		hw_q = &priv->hw_q[queue];
+		if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
+					error))
+			return -1;
+	}
+	/* Flush flows table by table, using MLX5_DEFAULT_FLUSH_QUEUE. */
+	hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
+	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
+		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
+			if (flow_hw_async_flow_destroy(dev,
+						MLX5_DEFAULT_FLUSH_QUEUE,
+						&attr,
+						(struct rte_flow *)flow,
+						NULL,
+						error))
+				return -1;
+			pending_rules++;
+			/* Drain completions once a queue-size batch is enqueued. */
+			if (pending_rules >= hw_q->size) {
+				if (__flow_hw_pull_comp(dev,
+						MLX5_DEFAULT_FLUSH_QUEUE,
+						pending_rules, error))
+					return -1;
+				pending_rules = 0;
+			}
+		}
+	}
+	/* Drain the remaining completions. */
+	if (pending_rules &&
+	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,
+				error))
+		return -1;
+	return 0;
+}
+
 /**
  * Create flow table.
  *