[v4,07/14] net/mlx5: add table management

Message ID 20220224134051.18167-8-suanmingm@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers
Series net/mlx5: add hardware steering |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Suanming Mou Feb. 24, 2022, 1:40 p.m. UTC
  Flow table is a group of flows with the same matching criteria
and the same actions defined for them. The table defines rules
that have the same matching fields but with different matching
values. For example, matching on 5 tuple, the table will be
(IPv4 source + IPv4 dest + s_port + d_port + next_proto)
while the values for each rule will be different.

The templates' relevant matching criteria and action instances
will be created in the table creation and saved in the table.
As table attributes indicate the supported flow number, the flow
memory will also be allocated at the same time.

This commit adds the table management functions.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |  45 ++-
 drivers/net/mlx5/mlx5.h         |  21 +-
 drivers/net/mlx5/mlx5_flow.c    |  93 ++++++
 drivers/net/mlx5/mlx5_flow.h    |  73 +++++
 drivers/net/mlx5/mlx5_flow_hw.c | 527 ++++++++++++++++++++++++++++++++
 5 files changed, 753 insertions(+), 6 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 24bbfee519..44e3e76737 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1607,12 +1607,46 @@  void
 mlx5_free_table_hash_list(struct mlx5_priv *priv)
 {
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-
-	if (!sh->flow_tbls)
+	struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
+				   &sh->groups : &sh->flow_tbls;
+	if (*tbls == NULL)
 		return;
-	mlx5_hlist_destroy(sh->flow_tbls);
-	sh->flow_tbls = NULL;
+	mlx5_hlist_destroy(*tbls);
+	*tbls = NULL;
+}
+
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+/**
+ * Allocate HW steering group hash list.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ */
+static int
+mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
+{
+	int err = 0;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	char s[MLX5_NAME_SIZE];
+
+	MLX5_ASSERT(sh);
+	snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
+	sh->groups = mlx5_hlist_create
+			(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
+			 false, true, sh,
+			 flow_hw_grp_create_cb,
+			 flow_hw_grp_match_cb,
+			 flow_hw_grp_remove_cb,
+			 flow_hw_grp_clone_cb,
+			 flow_hw_grp_clone_free_cb);
+	if (!sh->groups) {
+		DRV_LOG(ERR, "flow groups with hash creation failed.");
+		err = ENOMEM;
+	}
+	return err;
 }
+#endif
+
 
 /**
  * Initialize flow table hash list and create the root tables entry
@@ -1628,11 +1662,14 @@  int
 mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
 {
 	int err = 0;
+
 	/* Tables are only used in DV and DR modes. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	char s[MLX5_NAME_SIZE];
 
+	if (priv->sh->config.dv_flow_en == 2)
+		return mlx5_alloc_hw_group_hash_list(priv);
 	MLX5_ASSERT(sh);
 	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
 	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1722d38aad..4eedc8859f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -64,7 +64,9 @@  enum mlx5_ipool_index {
 	MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */
 	MLX5_IPOOL_TAG, /* Pool for tag resource. */
 	MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */
-	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
+	MLX5_IPOOL_JUMP, /* Pool for SWS jump resource. */
+	/* Pool for HWS group. Jump action will be created internally. */
+	MLX5_IPOOL_HW_GRP = MLX5_IPOOL_JUMP,
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
 	MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
@@ -108,6 +110,13 @@  enum mlx5_delay_drop_mode {
 	MLX5_DELAY_DROP_HAIRPIN = RTE_BIT32(1), /* Hairpin queues enable. */
 };
 
+/* The HWS action type root/non-root. */
+enum mlx5_hw_action_flag_type {
+	MLX5_HW_ACTION_FLAG_ROOT, /* Root action. */
+	MLX5_HW_ACTION_FLAG_NONE_ROOT, /* Non-root action. */
+	MLX5_HW_ACTION_FLAG_MAX, /* Maximum action flag. */
+};
+
 /* Hlist and list callback context. */
 struct mlx5_flow_cb_ctx {
 	struct rte_eth_dev *dev;
@@ -1204,7 +1213,10 @@  struct mlx5_dev_ctx_shared {
 	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
 	/* UAR same-page access control required in 32bit implementations. */
 #endif
-	struct mlx5_hlist *flow_tbls;
+	union {
+		struct mlx5_hlist *flow_tbls; /* SWS flow table. */
+		struct mlx5_hlist *groups; /* HWS flow group. */
+	};
 	struct mlx5_flow_tunnel_hub *tunnel_hub;
 	/* Direct Rules tables for FDB, NIC TX+RX */
 	void *dr_drop_action; /* Pointer to DR drop action, any domain. */
@@ -1511,6 +1523,11 @@  struct mlx5_priv {
 	uint32_t nb_queue; /* HW steering queue number. */
 	/* HW steering queue polling mechanism job descriptor LIFO. */
 	struct mlx5_hw_q *hw_q;
+	/* HW steering rte flow table list header. */
+	LIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;
+	/* HW steering global drop action. */
+	struct mlx5dr_action *hw_drop[MLX5_HW_ACTION_FLAG_MAX]
+				     [MLX5DR_TABLE_TYPE_MAX];
 #endif
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 26af6d0106..472a5b975d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -838,6 +838,19 @@  mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
 				   struct rte_flow_actions_template *template,
 				   struct rte_flow_error *error);
 
+static struct rte_flow_template_table *
+mlx5_flow_table_create(struct rte_eth_dev *dev,
+		       const struct rte_flow_template_table_attr *attr,
+		       struct rte_flow_pattern_template *item_templates[],
+		       uint8_t nb_item_templates,
+		       struct rte_flow_actions_template *action_templates[],
+		       uint8_t nb_action_templates,
+		       struct rte_flow_error *error);
+static int
+mlx5_flow_table_destroy(struct rte_eth_dev *dev,
+			struct rte_flow_template_table *table,
+			struct rte_flow_error *error);
+
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
 	.create = mlx5_flow_create,
@@ -864,6 +877,8 @@  static const struct rte_flow_ops mlx5_flow_ops = {
 	.pattern_template_destroy = mlx5_flow_pattern_template_destroy,
 	.actions_template_create = mlx5_flow_actions_template_create,
 	.actions_template_destroy = mlx5_flow_actions_template_destroy,
+	.template_table_create = mlx5_flow_table_create,
+	.template_table_destroy = mlx5_flow_table_destroy,
 };
 
 /* Tunnel information. */
@@ -8087,6 +8102,84 @@  mlx5_flow_actions_template_destroy(struct rte_eth_dev *dev,
 	return fops->actions_template_destroy(dev, template, error);
 }
 
+/**
+ * Create flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the table attributes.
+ * @param[in] item_templates
+ *   Item template array to be bound to the table.
+ * @param[in] nb_item_templates
+ *   Number of item template.
+ * @param[in] action_templates
+ *   Action template array to be bound to the table.
+ * @param[in] nb_action_templates
+ *   Number of action template.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Table on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_template_table *
+mlx5_flow_table_create(struct rte_eth_dev *dev,
+		       const struct rte_flow_template_table_attr *attr,
+		       struct rte_flow_pattern_template *item_templates[],
+		       uint8_t nb_item_templates,
+		       struct rte_flow_actions_template *action_templates[],
+		       uint8_t nb_action_templates,
+		       struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
+		rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"table create with incorrect steering mode");
+		return NULL;
+	}
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->template_table_create(dev,
+					   attr,
+					   item_templates,
+					   nb_item_templates,
+					   action_templates,
+					   nb_action_templates,
+					   error);
+}
+
+/**
+ * PMD destroy flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] table
+ *   Pointer to the table to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_table_destroy(struct rte_eth_dev *dev,
+			struct rte_flow_template_table *table,
+			struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"table destroy with incorrect steering mode");
+	fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+	return fops->template_table_destroy(dev, table, error);
+}
+
 /**
  * Allocate a new memory for the counter values wrapped by all the needed
  * management.
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 9a643fe0a8..1579036f58 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1036,6 +1036,55 @@  struct rte_flow_actions_template {
 	uint32_t refcnt; /* Reference counter. */
 };
 
+/* Jump action struct. */
+struct mlx5_hw_jump_action {
+	/* Action jump from root. */
+	struct mlx5dr_action *root_action;
+	/* HW steering jump action. */
+	struct mlx5dr_action *hws_action;
+};
+
+/* DR action set struct. */
+struct mlx5_hw_actions {
+	struct mlx5dr_action *drop; /* Drop action. */
+};
+
+/* mlx5 action template struct. */
+struct mlx5_hw_action_template {
+	/* Action template pointer. */
+	struct rte_flow_actions_template *action_template;
+	struct mlx5_hw_actions acts; /* Template actions. */
+};
+
+/* mlx5 flow group struct. */
+struct mlx5_flow_group {
+	struct mlx5_list_entry entry;
+	struct mlx5dr_table *tbl; /* HWS table object. */
+	struct mlx5_hw_jump_action jump; /* Jump action. */
+	enum mlx5dr_table_type type; /* Table type. */
+	uint32_t group_id; /* Group id. */
+	uint32_t idx; /* Group memory index. */
+};
+
+
+#define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2
+#define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32
+
+struct rte_flow_template_table {
+	LIST_ENTRY(rte_flow_template_table) next;
+	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
+	struct mlx5dr_matcher *matcher; /* Template matcher. */
+	/* Item templates bind to the table. */
+	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
+	/* Action templates bind to the table. */
+	struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
+	struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
+	uint32_t type; /* Flow table type RX/TX/FDB. */
+	uint8_t nb_item_templates; /* Item template number. */
+	uint8_t nb_action_templates; /* Action template number. */
+	uint32_t refcnt; /* Table reference counter. */
+};
+
 #endif
 
 /*
@@ -1310,6 +1359,18 @@  typedef int (*mlx5_flow_actions_template_destroy_t)
 			(struct rte_eth_dev *dev,
 			 struct rte_flow_actions_template *template,
 			 struct rte_flow_error *error);
+typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
+		(struct rte_eth_dev *dev,
+		 const struct rte_flow_template_table_attr *attr,
+		 struct rte_flow_pattern_template *item_templates[],
+		 uint8_t nb_item_templates,
+		 struct rte_flow_actions_template *action_templates[],
+		 uint8_t nb_action_templates,
+		 struct rte_flow_error *error);
+typedef int (*mlx5_flow_table_destroy_t)
+			(struct rte_eth_dev *dev,
+			 struct rte_flow_template_table *table,
+			 struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
@@ -1354,6 +1415,8 @@  struct mlx5_flow_driver_ops {
 	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
 	mlx5_flow_actions_template_create_t actions_template_create;
 	mlx5_flow_actions_template_destroy_t actions_template_destroy;
+	mlx5_flow_table_create_t template_table_create;
+	mlx5_flow_table_destroy_t template_table_destroy;
 };
 
 /* mlx5_flow.c */
@@ -1784,6 +1847,16 @@  struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
 void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
 				      struct mlx5_list_entry *entry);
 
+struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
+void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+int flow_hw_grp_match_cb(void *tool_ctx,
+			 struct mlx5_list_entry *entry,
+			 void *cb_ctx);
+struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
+					     struct mlx5_list_entry *oentry,
+					     void *cb_ctx);
+void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+
 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
 						    uint32_t age_idx);
 int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 4214a63a73..c1e3b56f23 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -12,6 +12,308 @@ 
 
 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
 
+/* DR action flags with different table. */
+static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
+				[MLX5DR_TABLE_TYPE_MAX] = {
+	{
+		MLX5DR_ACTION_FLAG_ROOT_RX,
+		MLX5DR_ACTION_FLAG_ROOT_TX,
+		MLX5DR_ACTION_FLAG_ROOT_FDB,
+	},
+	{
+		MLX5DR_ACTION_FLAG_HWS_RX,
+		MLX5DR_ACTION_FLAG_HWS_TX,
+		MLX5DR_ACTION_FLAG_HWS_FDB,
+	},
+};
+
+/**
+ * Destroy DR actions created by action template.
+ *
+ * For DR actions created during table creation's action translate.
+ * Need to destroy the DR action when destroying the table.
+ *
+ * @param[in] acts
+ *   Pointer to the template HW steering DR actions.
+ */
+static void
+__flow_hw_action_template_destroy(struct mlx5_hw_actions *acts __rte_unused)
+{
+}
+
+/**
+ * Translate rte_flow actions to DR action.
+ *
+ * As the action template has already indicated the actions. Translate
+ * the rte_flow actions to DR action if possible. So in flow create
+ * stage we will save cycles from handling the actions' organizing.
+ * For the actions with limited information, need to add these to a
+ * list.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] table_attr
+ *   Pointer to the table attributes.
+ * @param[in] item_templates
+ *   Item template array to be bound to the table.
+ * @param[in/out] acts
+ *   Pointer to the template HW steering DR actions.
+ * @param[in] at
+ *   Action template.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_actions_translate(struct rte_eth_dev *dev,
+			  const struct rte_flow_template_table_attr *table_attr,
+			  struct mlx5_hw_actions *acts,
+			  struct rte_flow_actions_template *at,
+			  struct rte_flow_error *error __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_attr *attr = &table_attr->flow_attr;
+	struct rte_flow_action *actions = at->actions;
+	struct rte_flow_action *masks = at->masks;
+	bool actions_end = false;
+	uint32_t type;
+
+	if (attr->transfer)
+		type = MLX5DR_TABLE_TYPE_FDB;
+	else if (attr->egress)
+		type = MLX5DR_TABLE_TYPE_NIC_TX;
+	else
+		type = MLX5DR_TABLE_TYPE_NIC_RX;
+	for (; !actions_end; actions++, masks++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_INDIRECT:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			acts->drop = priv->hw_drop[!!attr->group][type];
+			break;
+		case RTE_FLOW_ACTION_TYPE_END:
+			actions_end = true;
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * Create flow table.
+ *
+ * The input item and action templates will be bound to the table.
+ * Flow memory will also be allocated. Matcher will be created based
+ * on the item template. Action will be translated to the dedicated
+ * DR action if possible.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ *   Pointer to the table attributes.
+ * @param[in] item_templates
+ *   Item template array to be bound to the table.
+ * @param[in] nb_item_templates
+ *   Number of item template.
+ * @param[in] action_templates
+ *   Action template array to be bound to the table.
+ * @param[in] nb_action_templates
+ *   Number of action template.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *    Table on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_template_table *
+flow_hw_table_create(struct rte_eth_dev *dev,
+		     const struct rte_flow_template_table_attr *attr,
+		     struct rte_flow_pattern_template *item_templates[],
+		     uint8_t nb_item_templates,
+		     struct rte_flow_actions_template *action_templates[],
+		     uint8_t nb_action_templates,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5dr_matcher_attr matcher_attr = {0};
+	struct rte_flow_template_table *tbl = NULL;
+	struct mlx5_flow_group *grp;
+	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
+	struct rte_flow_attr flow_attr = attr->flow_attr;
+	struct mlx5_flow_cb_ctx ctx = {
+		.dev = dev,
+		.error = error,
+		.data = &flow_attr,
+	};
+	struct mlx5_indexed_pool_config cfg = {
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 1 << 12,
+		.per_core_cache = 1 << 13,
+		.need_lock = 1,
+		.release_mem_en = !!priv->sh->config.reclaim_mode,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.type = "mlx5_hw_table_flow",
+	};
+	struct mlx5_list_entry *ge;
+	uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
+	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
+	int err;
+
+	/* HWS layer accepts only 1 item template with root table. */
+	if (!attr->flow_attr.group)
+		max_tpl = 1;
+	cfg.max_idx = nb_flows;
+	/* For tables with a very limited number of flows, disable the cache. */
+	if (nb_flows < cfg.trunk_size) {
+		cfg.per_core_cache = 0;
+		cfg.trunk_size = nb_flows;
+	}
+	/* Check if we require too many templates. */
+	if (nb_item_templates > max_tpl ||
+	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
+		rte_errno = EINVAL;
+		goto error;
+	}
+	/* Allocate the table memory. */
+	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
+	if (!tbl)
+		goto error;
+	/* Allocate flow indexed pool. */
+	tbl->flow = mlx5_ipool_create(&cfg);
+	if (!tbl->flow)
+		goto error;
+	/* Register the flow group. */
+	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
+	if (!ge)
+		goto error;
+	grp = container_of(ge, struct mlx5_flow_group, entry);
+	tbl->grp = grp;
+	/* Prepare matcher information. */
+	matcher_attr.priority = attr->flow_attr.priority;
+	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
+	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
+	/* Build the item template. */
+	for (i = 0; i < nb_item_templates; i++) {
+		uint32_t ret;
+
+		ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
+					 __ATOMIC_RELAXED);
+		if (ret <= 1) {
+			rte_errno = EINVAL;
+			goto it_error;
+		}
+		mt[i] = item_templates[i]->mt;
+		tbl->its[i] = item_templates[i];
+	}
+	tbl->matcher = mlx5dr_matcher_create
+		(tbl->grp->tbl, mt, nb_item_templates, &matcher_attr);
+	if (!tbl->matcher)
+		goto it_error;
+	tbl->nb_item_templates = nb_item_templates;
+	/* Build the action template. */
+	for (i = 0; i < nb_action_templates; i++) {
+		uint32_t ret;
+
+		ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
+					 __ATOMIC_RELAXED);
+		if (ret <= 1) {
+			rte_errno = EINVAL;
+			goto at_error;
+		}
+		err = flow_hw_actions_translate(dev, attr,
+						&tbl->ats[i].acts,
+						action_templates[i], error);
+		if (err) {
+			i++;
+			goto at_error;
+		}
+		tbl->ats[i].action_template = action_templates[i];
+	}
+	tbl->nb_action_templates = nb_action_templates;
+	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
+		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
+		    MLX5DR_TABLE_TYPE_NIC_RX);
+	LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
+	return tbl;
+at_error:
+	while (i--) {
+		__flow_hw_action_template_destroy(&tbl->ats[i].acts);
+		__atomic_sub_fetch(&action_templates[i]->refcnt,
+				   1, __ATOMIC_RELAXED);
+	}
+	i = nb_item_templates;
+it_error:
+	while (i--)
+		__atomic_sub_fetch(&item_templates[i]->refcnt,
+				   1, __ATOMIC_RELAXED);
+	mlx5dr_matcher_destroy(tbl->matcher);
+error:
+	err = rte_errno;
+	if (tbl) {
+		if (tbl->grp)
+			mlx5_hlist_unregister(priv->sh->groups,
+					      &tbl->grp->entry);
+		if (tbl->flow)
+			mlx5_ipool_destroy(tbl->flow);
+		mlx5_free(tbl);
+	}
+	rte_flow_error_set(error, err,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			  "fail to create rte table");
+	return NULL;
+}
+
+/**
+ * Destroy flow table.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] table
+ *   Pointer to the table to be destroyed.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_table_destroy(struct rte_eth_dev *dev,
+		      struct rte_flow_template_table *table,
+		      struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int i;
+
+	if (table->refcnt) {
+		DRV_LOG(WARNING, "Table %p is still in using.", (void *)table);
+		return rte_flow_error_set(error, EBUSY,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "table in using");
+	}
+	LIST_REMOVE(table, next);
+	for (i = 0; i < table->nb_item_templates; i++)
+		__atomic_sub_fetch(&table->its[i]->refcnt,
+				   1, __ATOMIC_RELAXED);
+	for (i = 0; i < table->nb_action_templates; i++) {
+		__flow_hw_action_template_destroy(&table->ats[i].acts);
+		__atomic_sub_fetch(&table->ats[i].action_template->refcnt,
+				   1, __ATOMIC_RELAXED);
+	}
+	mlx5dr_matcher_destroy(table->matcher);
+	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
+	mlx5_ipool_destroy(table->flow);
+	mlx5_free(table);
+	return 0;
+}
+
 /**
  * Create flow action template.
  *
@@ -228,6 +530,199 @@  flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+/**
+ * Create group callback.
+ *
+ * @param[in] tool_ctx
+ *   Pointer to the hash list related context.
+ * @param[in] cb_ctx
+ *   Pointer to the group creation context.
+ *
+ * @return
+ *   Group entry on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_list_entry *
+flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = tool_ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct rte_eth_dev *dev = ctx->dev;
+	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5dr_table_attr dr_tbl_attr = {0};
+	struct rte_flow_error *error = ctx->error;
+	struct mlx5_flow_group *grp_data;
+	struct mlx5dr_table *tbl = NULL;
+	struct mlx5dr_action *jump;
+	uint32_t idx = 0;
+
+	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
+	if (!grp_data) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot allocate flow table data entry");
+		return NULL;
+	}
+	dr_tbl_attr.level = attr->group;
+	if (attr->transfer)
+		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
+	else if (attr->egress)
+		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
+	else
+		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
+	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
+	if (!tbl)
+		goto error;
+	grp_data->tbl = tbl;
+	if (attr->group) {
+		/* Jump action to be used by the non-root table. */
+		jump = mlx5dr_action_create_dest_table
+			(priv->dr_ctx, tbl,
+			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
+		if (!jump)
+			goto error;
+		grp_data->jump.hws_action = jump;
+		/* Jump action to be used by the root table. */
+		jump = mlx5dr_action_create_dest_table
+			(priv->dr_ctx, tbl,
+			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
+					 [dr_tbl_attr.type]);
+		if (!jump)
+			goto error;
+		grp_data->jump.root_action = jump;
+	}
+	grp_data->idx = idx;
+	grp_data->group_id = attr->group;
+	grp_data->type = dr_tbl_attr.type;
+	return &grp_data->entry;
+error:
+	if (grp_data->jump.root_action)
+		mlx5dr_action_destroy(grp_data->jump.root_action);
+	if (grp_data->jump.hws_action)
+		mlx5dr_action_destroy(grp_data->jump.hws_action);
+	if (tbl)
+		mlx5dr_table_destroy(tbl);
+	if (idx)
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
+	rte_flow_error_set(error, ENOMEM,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL,
+			   "cannot allocate flow dr table");
+	return NULL;
+}
+
+/**
+ * Remove group callback.
+ *
+ * @param[in] tool_ctx
+ *   Pointer to the hash list related context.
+ * @param[in] entry
+ *   Pointer to the entry to be removed.
+ */
+void
+flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = tool_ctx;
+	struct mlx5_flow_group *grp_data =
+		    container_of(entry, struct mlx5_flow_group, entry);
+
+	MLX5_ASSERT(entry && sh);
+	/* To use the wrapper glue functions instead. */
+	if (grp_data->jump.hws_action)
+		mlx5dr_action_destroy(grp_data->jump.hws_action);
+	if (grp_data->jump.root_action)
+		mlx5dr_action_destroy(grp_data->jump.root_action);
+	mlx5dr_table_destroy(grp_data->tbl);
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
+}
+
+/**
+ * Match group callback.
+ *
+ * @param[in] tool_ctx
+ *   Pointer to the hash list related context.
+ * @param[in] entry
+ *   Pointer to the group to be matched.
+ * @param[in] cb_ctx
+ *   Pointer to the group matching context.
+ *
+ * @return
+ *   0 on match, non-zero on mismatch.
+ */
+int
+flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
+		     void *cb_ctx)
+{
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_group *grp_data =
+		container_of(entry, struct mlx5_flow_group, entry);
+	struct rte_flow_attr *attr =
+			(struct rte_flow_attr *)ctx->data;
+
+	return (grp_data->group_id != attr->group) ||
+		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
+		attr->transfer) ||
+		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
+		attr->egress) ||
+		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
+		attr->ingress);
+}
+
+/**
+ * Clone group entry callback.
+ *
+ * @param[in] tool_ctx
+ *   Pointer to the hash list related context.
+ * @param[in] oentry
+ *   Pointer to the group entry to be cloned.
+ * @param[in] cb_ctx
+ *   Pointer to the group clone context.
+ *
+ * @return
+ *   Pointer to the cloned group entry, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_list_entry *
+flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
+		     void *cb_ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = tool_ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_group *grp_data;
+	struct rte_flow_error *error = ctx->error;
+	uint32_t idx = 0;
+
+	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
+	if (!grp_data) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot allocate flow table data entry");
+		return NULL;
+	}
+	memcpy(grp_data, oentry, sizeof(*grp_data));
+	grp_data->idx = idx;
+	return &grp_data->entry;
+}
+
+/**
+ * Free cloned group entry callback.
+ *
+ * @param[in] tool_ctx
+ *   Pointer to the hash list related context.
+ * @param[in] entry
+ *   Pointer to the group to be freed.
+ */
+void
+flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = tool_ctx;
+	struct mlx5_flow_group *grp_data =
+		    container_of(entry, struct mlx5_flow_group, entry);
+
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
+}
+
 /**
  * Configure port HWS resources.
  *
@@ -245,6 +740,7 @@  flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
+
 static int
 flow_hw_configure(struct rte_eth_dev *dev,
 		  const struct rte_flow_port_attr *port_attr,
@@ -321,8 +817,24 @@  flow_hw_configure(struct rte_eth_dev *dev,
 		goto err;
 	priv->dr_ctx = dr_ctx;
 	priv->nb_queue = nb_queue;
+	/* Add global actions. */
+	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
+		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
+			priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
+				(priv->dr_ctx, mlx5_hw_act_flag[i][j]);
+			if (!priv->hw_drop[i][j])
+				goto err;
+		}
+	}
 	return 0;
 err:
+	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
+		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
+			if (!priv->hw_drop[i][j])
+				continue;
+			mlx5dr_action_destroy(priv->hw_drop[i][j]);
+		}
+	}
 	if (dr_ctx)
 		claim_zero(mlx5dr_context_close(dr_ctx));
 	mlx5_free(priv->hw_q);
@@ -342,11 +854,17 @@  void
 flow_hw_resource_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_template_table *tbl;
 	struct rte_flow_pattern_template *it;
 	struct rte_flow_actions_template *at;
+	int i, j;
 
 	if (!priv->dr_ctx)
 		return;
+	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
+		tbl = LIST_FIRST(&priv->flow_hw_tbl);
+		flow_hw_table_destroy(dev, tbl, NULL);
+	}
 	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
 		it = LIST_FIRST(&priv->flow_hw_itt);
 		flow_hw_pattern_template_destroy(dev, it, NULL);
@@ -355,6 +873,13 @@  flow_hw_resource_release(struct rte_eth_dev *dev)
 		at = LIST_FIRST(&priv->flow_hw_at);
 		flow_hw_actions_template_destroy(dev, at, NULL);
 	}
+	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
+		for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
+			if (!priv->hw_drop[i][j])
+				continue;
+			mlx5dr_action_destroy(priv->hw_drop[i][j]);
+		}
+	}
 	mlx5_free(priv->hw_q);
 	priv->hw_q = NULL;
 	claim_zero(mlx5dr_context_close(priv->dr_ctx));
@@ -369,6 +894,8 @@  const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.pattern_template_destroy = flow_hw_pattern_template_destroy,
 	.actions_template_create = flow_hw_actions_template_create,
 	.actions_template_destroy = flow_hw_actions_template_destroy,
+	.template_table_create = flow_hw_table_create,
+	.template_table_destroy = flow_hw_table_destroy,
 };
 
 #endif