[11/13] net/mlx5: add mark action

Message ID 20220210162926.20436-12-suanmingm@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series net/mlx5: add hardware steering

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Suanming Mou Feb. 10, 2022, 4:29 p.m. UTC
  The mark action is implemented internally by the tag action: when it is
added, the HW attaches a tag to the packet. The mark value can be either
fixed or dynamic, depending on what the action mask indicates.
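
As an illustration only (not part of this patch), the choice between the two
modes is driven by the mask array an application passes to
rte_flow_actions_template_create(): a non-NULL mask conf freezes the mark
value at template creation time, while a NULL mask conf defers it to rule
insertion. The port ID, template attributes and mark values below are
placeholders.

#include <stdint.h>
#include <rte_flow.h>

/* Sketch only: create two action templates, one with a fixed mark value
 * and one where the mark value is supplied per rule at insertion time.
 */
static int
create_mark_templates(uint16_t port_id,
		      struct rte_flow_actions_template **fixed_at,
		      struct rte_flow_actions_template **dynamic_at)
{
	static const struct rte_flow_actions_template_attr attr = {
		.ingress = 1,
	};
	/* Mark value used by the fixed template. */
	static const struct rte_flow_action_mark mark_conf = { .id = 0x1234 };
	/* Fully-set mask conf -> the value above is frozen in the template. */
	static const struct rte_flow_action_mark mark_mask = { .id = UINT32_MAX };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action fixed_masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_mask },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* NULL mask conf -> the value is taken from each rule's actions. */
	const struct rte_flow_action dynamic_masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = NULL },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	*fixed_at = rte_flow_actions_template_create(port_id, &attr,
						     actions, fixed_masks,
						     &error);
	*dynamic_at = rte_flow_actions_template_create(port_id, &attr,
						       actions, dynamic_masks,
						       &error);
	return (*fixed_at && *dynamic_at) ? 0 : -1;
}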

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |  3 ++
 drivers/net/mlx5/mlx5_flow.h    |  1 +
 drivers/net/mlx5/mlx5_flow_hw.c | 87 ++++++++++++++++++++++++++++++---
 3 files changed, 85 insertions(+), 6 deletions(-)
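
A sketch of the Rx side, for illustration only: once the per-queue mark flag
toggled by the flow_hw_rxq_flag_set() helper added below is enabled, the mark
value travels with the received mbuf, where an application can read it as
shown here. The port/queue IDs and the burst size are placeholders.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch only: read the mark value delivered with received packets. */
static void
drain_marked_packets(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb, i;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb; i++) {
		/* The mark value is reported through the FDIR ID field. */
		if (pkts[i]->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
			printf("mark %u\n", pkts[i]->hash.fdir.hi);
		rte_pktmbuf_free(pkts[i]);
	}
}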
  

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 6fb82bf1f3..c78dc3c431 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1529,6 +1529,9 @@  struct mlx5_priv {
 	/* HW steering global drop action. */
 	struct mlx5dr_action *hw_drop[MLX5_HW_ACTION_FLAG_MAX]
 				     [MLX5DR_TABLE_TYPE_MAX];
+	/* HW steering global tag action. */
+	struct mlx5dr_action *hw_tag[MLX5_HW_ACTION_FLAG_MAX]
+				    [MLX5DR_TABLE_TYPE_MAX];
 	struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 33094c8c07..8e65486a1f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1080,6 +1080,7 @@  struct mlx5_hw_actions {
 	struct mlx5_hw_jump_action *jump; /* Jump action. */
 	struct mlx5_hrxq *tir; /* TIR action. */
 	uint32_t acts_num:4; /* Total action number. */
+	uint32_t mark:1; /* Indicate the mark action. */
 	/* Translated DR action array from action template. */
 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
 };
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index e59d812072..a754cdd084 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -31,6 +31,50 @@  static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
 	},
 };
 
+/**
+ * Clear the mark flag on all Rx queues.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ */
+static void
+flow_hw_rxq_flag_trim(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	unsigned int i;
+
+	if (!priv->mark_enabled)
+		return;
+	for (i = 0; i < priv->rxqs_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
+
+		rxq_ctrl->rxq.mark = 0;
+	}
+	priv->mark_enabled = 0;
+}
+
+/**
+ * Set the mark flag on all Rx queues.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ */
+static void
+flow_hw_rxq_flag_set(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	unsigned int i;
+
+	if (priv->mark_enabled)
+		return;
+	for (i = 0; i < priv->rxqs_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
+
+		rxq_ctrl->rxq.mark = 1;
+	}
+	priv->mark_enabled = 1;
+}
+
 /**
  * Register destination table DR jump action.
  *
@@ -292,6 +336,20 @@  flow_hw_actions_translate(struct rte_eth_dev *dev,
 			acts->rule_acts[i++].action =
 				priv->hw_drop[!!attr->group][type];
 			break;
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			acts->mark = true;
+			if (masks->conf)
+				acts->rule_acts[i].tag.value =
+					mlx5_flow_mark_set
+					(((const struct rte_flow_action_mark *)
+					(masks->conf))->id);
+			else if (__flow_hw_act_data_general_append(priv, acts,
+				actions->type, actions - action_start, i))
+				goto err;
+			acts->rule_acts[i++].action =
+				priv->hw_tag[!!attr->group][type];
+			flow_hw_rxq_flag_set(dev);
+			break;
 		case RTE_FLOW_ACTION_TYPE_JUMP:
 			if (masks->conf) {
 				uint32_t jump_group =
@@ -418,6 +476,7 @@  flow_hw_actions_construct(struct rte_eth_dev *dev,
 	}
 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
 		uint32_t jump_group;
+		uint32_t tag;
 		struct mlx5_hw_jump_action *jump;
 		struct mlx5_hrxq *hrxq;
 
@@ -429,6 +488,12 @@  flow_hw_actions_construct(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ACTION_TYPE_VOID:
 			break;
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			tag = mlx5_flow_mark_set
+			      (((const struct rte_flow_action_mark *)
+			      (action->conf))->id);
+			rule_acts[act_data->action_dst].tag.value = tag;
+			break;
 		case RTE_FLOW_ACTION_TYPE_JUMP:
 			jump_group = ((const struct rte_flow_action_jump *)
 						action->conf)->group;
@@ -998,6 +1063,8 @@  flow_hw_table_destroy(struct rte_eth_dev *dev,
 		__atomic_sub_fetch(&table->its[i]->refcnt,
 				   1, __ATOMIC_RELAXED);
 	for (i = 0; i < table->nb_action_templates; i++) {
+		if (table->ats[i].acts.mark)
+			flow_hw_rxq_flag_trim(dev);
 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
 		__atomic_sub_fetch(&table->ats[i].action_template->refcnt,
 				   1, __ATOMIC_RELAXED);
@@ -1499,15 +1566,21 @@  flow_hw_configure(struct rte_eth_dev *dev,
 				(priv->dr_ctx, mlx5_hw_act_flag[i][j]);
 			if (!priv->hw_drop[i][j])
 				goto err;
+			priv->hw_tag[i][j] = mlx5dr_action_create_tag
+				(priv->dr_ctx, mlx5_hw_act_flag[i][j]);
+			if (!priv->hw_tag[i][j])
+				goto err;
 		}
 	}
 	return 0;
 err:
 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
 		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
-			if (!priv->hw_drop[i][j])
-				continue;
-			mlx5dr_action_destroy(priv->hw_drop[i][j]);
+			if (priv->hw_drop[i][j])
+				mlx5dr_action_destroy(priv->hw_drop[i][j]);
+			if (priv->hw_tag[i][j])
+				mlx5dr_action_destroy(priv->hw_tag[i][j]);
+
 		}
 	}
 	if (dr_ctx)
@@ -1556,9 +1629,11 @@  flow_hw_resource_release(struct rte_eth_dev *dev)
 	}
 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
 		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
-			if (!priv->hw_drop[i][j])
-				continue;
-			mlx5dr_action_destroy(priv->hw_drop[i][j]);
+			if (priv->hw_drop[i][j])
+				mlx5dr_action_destroy(priv->hw_drop[i][j]);
+			if (priv->hw_tag[i][j])
+				mlx5dr_action_destroy(priv->hw_tag[i][j]);
+
 		}
 	}
 	if (priv->acts_ipool) {