[RFC,5/9] net/mlx5/hws: add IPv6 routing extension matching support

Message ID 20221221084304.3680690-6-rongweil@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: support ipv6 routing header matching

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Rongwei Liu Dec. 21, 2022, 8:43 a.m. UTC
  Add mlx5 HWS logic to match IPv6 routing extension header.

Once IPv6 routing extension items are detected in the pattern template
create callback, the PMD allocates a flex parser to sample the first
dword of the SRv6 header.

Only the next_hdr/segments_left/type fields are supported for now.
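
For illustration, a minimal sketch of a pattern exercising this matching
(not part of this patch; it assumes the rte_flow_item_ipv6_routing_ext
definition introduced earlier in this series, and the field values are
hypothetical):

/* Sketch only: match SRv6 (routing type 4) with segments_left == 1. */
static const struct rte_flow_item_ipv6_routing_ext srh_spec = {
	.hdr = {
		.type = 4,		/* Segment Routing Header. */
		.segments_left = 1,
	},
};
static const struct rte_flow_item_ipv6_routing_ext srh_mask = {
	.hdr = {
		.type = 0xff,
		.segments_left = 0xff,
	},
};
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT,
		.spec = &srh_spec,
		.mask = &srh_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};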

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
---
 doc/guides/nics/features/mlx5.ini     |   1 +
 doc/guides/nics/mlx5.rst              |   1 +
 drivers/net/mlx5/hws/mlx5dr.h         |  21 ++++++
 drivers/net/mlx5/hws/mlx5dr_context.c |  81 +++++++++++++++++++-
 drivers/net/mlx5/hws/mlx5dr_context.h |   1 +
 drivers/net/mlx5/hws/mlx5dr_definer.c | 103 ++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5.h               |  10 +++
 drivers/net/mlx5/mlx5_flow.h          |   3 +
 drivers/net/mlx5/mlx5_flow_hw.c       |  39 ++++++++--
 9 files changed, 250 insertions(+), 10 deletions(-)
  

Patch

diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 62fd330e2b..bd911a467b 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -87,6 +87,7 @@  vlan                 = Y
 vxlan                = Y
 vxlan_gpe            = Y
 represented_port     = Y
+ipv6_routing_ext     = Y
 
 [rte_flow actions]
 age                  = I
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 51f51259e3..98dcf9af16 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -106,6 +106,7 @@  Features
 - Sub-Function representors.
 - Sub-Function.
 - Matching on represented port.
+- Matching on IPv6 routing extension header.
 
 
 Limitations
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index f8de27c615..ba1566de9f 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -592,4 +592,25 @@  int mlx5dr_send_queue_action(struct mlx5dr_context *ctx,
  */
 int mlx5dr_debug_dump(struct mlx5dr_context *ctx, FILE *f);
 
+/* Allocate an internal flex parser for srv6 option.
+ *
+ * @param[in] dr_ctx
+ *	The dr_context which the flex parser belongs to.
+ * @param[in] config
+ *	Devx configuration per port.
+ * @param[in] ctx
+ *	Device context.
+ * @return Zero on success, non-zero otherwise.
+ */
+int mlx5dr_alloc_srh_flex_parser(struct mlx5dr_context *dr_ctx,
+				 struct mlx5_common_dev_config *config,
+				 void *ctx);
+
+/* Free srv6 flex parser.
+ *
+ * @param[in] dr_ctx
+ *	The dr_context which the flex parser belongs to.
+ * @return Zero on success, non-zero otherwise.
+ */
+int mlx5dr_free_srh_flex_parser(struct mlx5dr_context *dr_ctx);
 #endif
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.c b/drivers/net/mlx5/hws/mlx5dr_context.c
index 76ada7bb7f..6329271ff6 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.c
+++ b/drivers/net/mlx5/hws/mlx5dr_context.c
@@ -178,6 +178,76 @@  static void mlx5dr_context_uninit_hws(struct mlx5dr_context *ctx)
 	mlx5dr_context_uninit_pd(ctx);
 }
 
+int mlx5dr_alloc_srh_flex_parser(struct mlx5dr_context *dr_ctx,
+				 struct mlx5_common_dev_config *config,
+				 void *ctx)
+{
+	struct mlx5_devx_graph_node_attr node = {
+		.modify_field_select = 0,
+	};
+	struct mlx5_ext_sample_id ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+	int ret;
+
+	memset(ids, 0xff, sizeof(ids));
+	if (!config->hca_attr.parse_graph_flex_node) {
+		DR_LOG(ERR, "Dynamic flex parser is not supported");
+		return -ENOTSUP;
+	}
+	if (__atomic_add_fetch(&dr_ctx->srh_flex_parser->refcnt, 1, __ATOMIC_RELAXED) > 1)
+		return 0;
+
+	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
+	/* The first two DWs of the SRv6 header are not counted in. */
+	node.header_length_base_value = 0x8;
+	/* The unit is uint64_t. */
+	node.header_length_field_shift = 0x3;
+	/* Header length is the 2nd byte. */
+	node.header_length_field_offset = 0x8;
+	node.header_length_field_mask = 0xF;
+	/* One byte next header protocol. */
+	node.next_header_field_size = 0x8;
+	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IP;
+	node.in[0].compare_condition_value = IPPROTO_ROUTING;
+	node.sample[0].flow_match_sample_en = 1;
+	/* First come, first served, no matter inner or outer. */
+	node.sample[0].flow_match_sample_tunnel_mode = MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
+	node.out[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_TCP;
+	node.out[0].compare_condition_value = IPPROTO_TCP;
+	node.out[1].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_UDP;
+	node.out[1].compare_condition_value = IPPROTO_UDP;
+	node.out[2].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IPV6;
+	node.out[2].compare_condition_value = IPPROTO_IPV6;
+
+	dr_ctx->srh_flex_parser->fp = mlx5_devx_cmd_create_flex_parser(ctx, &node);
+	if (!dr_ctx->srh_flex_parser->fp) {
+		DR_LOG(ERR, "Failed to create flex parser node object.");
+		return (rte_errno == 0) ? -ENODEV : -rte_errno;
+	}
+	dr_ctx->srh_flex_parser->num = 1;
+	ret = mlx5_devx_cmd_query_parse_samples(dr_ctx->srh_flex_parser->fp, ids,
+						dr_ctx->srh_flex_parser->num,
+						&dr_ctx->srh_flex_parser->anchor_id);
+	if (ret) {
+		DR_LOG(ERR, "Failed to query sample IDs.");
+		return (rte_errno == 0) ? -ENODEV : -rte_errno;
+	}
+	dr_ctx->srh_flex_parser->offset[0] = 0x0;
+	dr_ctx->srh_flex_parser->ids[0].id = ids[0].id;
+	return 0;
+}
+
+int mlx5dr_free_srh_flex_parser(struct mlx5dr_context *dr_ctx)
+{
+	struct mlx5_internal_flex_parser_profile *fp = dr_ctx->srh_flex_parser;
+
+	if (__atomic_sub_fetch(&fp->refcnt, 1, __ATOMIC_RELAXED))
+		return 0;
+	if (fp->fp)
+		mlx5_devx_cmd_destroy(fp->fp);
+	fp->fp = NULL;
+	return 0;
+}
+
 struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
 					   struct mlx5dr_context_attr *attr)
 {
@@ -197,16 +267,22 @@  struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
 	if (!ctx->caps)
 		goto free_ctx;
 
+	ctx->srh_flex_parser = simple_calloc(1, sizeof(*ctx->srh_flex_parser));
+	if (!ctx->srh_flex_parser)
+		goto free_caps;
+
 	ret = mlx5dr_cmd_query_caps(ibv_ctx, ctx->caps);
 	if (ret)
-		goto free_caps;
+		goto free_flex;
 
 	ret = mlx5dr_context_init_hws(ctx, attr);
 	if (ret)
-		goto free_caps;
+		goto free_flex;
 
 	return ctx;
 
+free_flex:
+	simple_free(ctx->srh_flex_parser);
 free_caps:
 	simple_free(ctx->caps);
 free_ctx:
@@ -217,6 +293,7 @@  struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
 int mlx5dr_context_close(struct mlx5dr_context *ctx)
 {
 	mlx5dr_context_uninit_hws(ctx);
+	simple_free(ctx->srh_flex_parser);
 	simple_free(ctx->caps);
 	pthread_spin_destroy(&ctx->ctrl_lock);
 	simple_free(ctx);
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.h b/drivers/net/mlx5/hws/mlx5dr_context.h
index b0c7802daf..c1c627aced 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.h
+++ b/drivers/net/mlx5/hws/mlx5dr_context.h
@@ -35,6 +35,7 @@  struct mlx5dr_context {
 	struct mlx5dr_send_engine *send_queue;
 	size_t queues;
 	LIST_HEAD(table_head, mlx5dr_table) head;
+	struct mlx5_internal_flex_parser_profile *srh_flex_parser;
 };
 
 #endif /* MLX5DR_CONTEXT_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 10b1e43d6e..09acd5d719 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -125,6 +125,7 @@  struct mlx5dr_definer_conv_data {
 	X(SET_BE16,	ipv4_frag,		v->fragment_offset,	rte_ipv4_hdr) \
 	X(SET_BE16,	ipv6_payload_len,	v->hdr.payload_len,	rte_flow_item_ipv6) \
 	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
+	X(SET,		ipv6_routing_hdr,	IPPROTO_ROUTING,	rte_flow_item_ipv6) \
 	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
 	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr[0],	rte_flow_item_ipv6) \
 	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr[4],	rte_flow_item_ipv6) \
@@ -293,6 +294,18 @@  mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
+				    const void *item,
+				    uint8_t *tag)
+{
+	const struct rte_flow_item_ipv6_routing_ext *v = item;
+	uint32_t val = 0;
+
+	val = v->hdr.nexthdr << 24 | v->hdr.type << 8 | v->hdr.segments_left;
+	DR_SET_BE32(tag, RTE_BE32(val), fc->byte_off, 0, fc->bit_mask);
+}
+
 static void
 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
 			   const void *item_spec,
@@ -1468,6 +1481,91 @@  mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_ipv6_routing_ext(struct mlx5dr_definer_conv_data *cd,
+					  struct rte_flow_item *item,
+					  int item_idx)
+{
+	struct mlx5_internal_flex_parser_profile *fp = cd->ctx->srh_flex_parser;
+	enum mlx5dr_definer_fname i = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+	const struct rte_flow_item_ipv6_routing_ext *m = item->mask;
+	uint32_t byte_off = fp->ids[0].format_select_dw * 4;
+	struct mlx5dr_definer_fc *fc;
+	bool inner = cd->tunnel;
+
+	if (!m)
+		return 0;
+
+	if (!fp->num)
+		return -1;
+
+	if (!cd->relaxed) {
+		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, inner);
+
+		/* Overwrite - Unset ethertype if present */
+		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
+		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+		if (!fc->tag_set) {
+			fc->item_idx = item_idx;
+			fc->tag_set = &mlx5dr_definer_ipv6_routing_hdr_set;
+			fc->tag_mask_set = &mlx5dr_definer_ones_set;
+			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
+		}
+	}
+
+	if (m->hdr.nexthdr || m->hdr.type || m->hdr.segments_left) {
+		for (; i <= MLX5DR_DEFINER_FNAME_FLEX_PARSER_7; i++) {
+			switch (i) {
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_0:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_0];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_0);
+				break;
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_1:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_1];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_1);
+				break;
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_2:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_2];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_2);
+				break;
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_3:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_3];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_3);
+				break;
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_4:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_4];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_4);
+				break;
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_5:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_5];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_5);
+				break;
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_6:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_6];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_6);
+				break;
+			case MLX5DR_DEFINER_FNAME_FLEX_PARSER_7:
+			default:
+				fc = &cd->fc[MLX5DR_DEFINER_FNAME_FLEX_PARSER_7];
+				DR_CALC_SET_HDR(fc, flex_parser, flex_parser_7);
+				break;
+			}
+			if (fc->byte_off == byte_off)
+				break;
+		}
+		if (i > MLX5DR_DEFINER_FNAME_FLEX_PARSER_7)
+			return -ENOTSUP;
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ipv6_routing_ext_set;
+		fc->fname = i;
+	}
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1584,6 +1682,11 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
+			ret = mlx5dr_definer_conv_item_ipv6_routing_ext(&cd, items, i);
+			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT :
+						  MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1c11b77ac3..6dbd5f9622 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -543,6 +543,16 @@  struct mlx5_counter_stats_raw {
 	volatile struct flow_counter_stats *data;
 };
 
+/* Mlx5 internal flex parser profile structure. */
+struct mlx5_internal_flex_parser_profile {
+	uint32_t num; /* Actual number of samples. */
+	struct mlx5_ext_sample_id ids[MLX5_FLEX_ITEM_MAPPING_NUM]; /* Sample IDs for this profile. */
+	uint32_t offset[MLX5_FLEX_ITEM_MAPPING_NUM]; /* Each ID sample offset. */
+	uint8_t anchor_id;
+	uint32_t refcnt;
+	void *fp; /* DevX flex parser object. */
+};
+
 TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
 
 /* Counter global management structure. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 1f57ecd6e1..81e2bc47a0 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -218,6 +218,9 @@  enum mlx5_feature_name {
 
 /* Meter color item */
 #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
+/* IPv6 routing extension item */
+#define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
+#define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)
 
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 20c71ff7f0..ff52eb28f0 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -213,23 +213,25 @@  flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
 }
 
 /**
- * Generate the pattern item flags.
+ * Generate the matching pattern item flags.
  * Will be used for shared RSS action.
  *
  * @param[in] items
  *   Pointer to the list of items.
+ * @param[out] flags
+ *   Flags superset including non-RSS items.
  *
  * @return
- *   Item flags.
+ *   RSS item flags.
  */
 static uint64_t
-flow_hw_rss_item_flags_get(const struct rte_flow_item items[])
+flow_hw_matching_item_flags_get(const struct rte_flow_item items[], uint64_t *flags)
 {
-	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 
+	*flags = 0;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
-		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+		int tunnel = !!(*flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
 
 		switch (item_type) {
@@ -249,6 +251,10 @@  flow_hw_rss_item_flags_get(const struct rte_flow_item items[])
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
+			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
+					     MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
+			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
@@ -273,9 +279,10 @@  flow_hw_rss_item_flags_get(const struct rte_flow_item items[])
 		default:
 			break;
 		}
-		item_flags |= last_item;
+		*flags |= last_item;
 	}
-	return item_flags;
+	return *flags & ~(MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT |
+			  MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT);
 }
 
 /**
@@ -4732,6 +4739,7 @@  flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ICMP:
 		case RTE_FLOW_ITEM_TYPE_ICMP6:
 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
@@ -4809,6 +4817,8 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		.mask = &tag_m,
 		.last = NULL
 	};
+	struct mlx5dr_context *dr_ctx = priv->dr_ctx;
+	uint64_t flags;
 
 	if (flow_hw_pattern_validate(dev, attr, items, error))
 		return NULL;
@@ -4860,7 +4870,7 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 				   "cannot create match template");
 		return NULL;
 	}
-	it->item_flags = flow_hw_rss_item_flags_get(tmpl_items);
+	it->item_flags = flow_hw_matching_item_flags_get(tmpl_items, &flags);
 	if (copied_items) {
 		if (attr->ingress)
 			it->implicit_port = true;
@@ -4868,6 +4878,15 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			it->implicit_tag = true;
 		mlx5_free(copied_items);
 	}
+	if (flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
+		     MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
+		if (mlx5dr_alloc_srh_flex_parser(dr_ctx, &priv->sh->cdev->config,
+						 priv->sh->cdev->ctx)) {
+			claim_zero(mlx5dr_match_template_destroy(it->mt));
+			mlx5_free(it);
+			return NULL;
+		}
+	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
@@ -4891,6 +4910,9 @@  flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 			      struct rte_flow_pattern_template *template,
 			      struct rte_flow_error *error __rte_unused)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5dr_context *dr_ctx = priv->dr_ctx;
+
 	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
 		DRV_LOG(WARNING, "Item template %p is still in use.",
 			(void *)template);
@@ -4899,6 +4921,7 @@  flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				   NULL,
 				   "item template in using");
 	}
+	mlx5dr_free_srh_flex_parser(dr_ctx);
 	LIST_REMOVE(template, next);
 	claim_zero(mlx5dr_match_template_destroy(template->mt));
 	mlx5_free(template);
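
Note on the sampled layout (illustration only, not part of the patch):
the flex parser samples the first dword of the SRH, whose wire format is
next_hdr (byte 0), hdr_ext_len (byte 1), type (byte 2) and segments_left
(byte 3); mlx5dr_definer_ipv6_routing_ext_set() packs the match value in
the same order, leaving hdr_ext_len unmatched. A minimal sketch of that
encoding:

#include <stdint.h>
#include <rte_byteorder.h>

/* Pack the matched SRH fields as the flex parser samples them.
 * hdr_ext_len (byte 1) stays zero: the parser only uses it to derive
 * the header length, 8 + hdr_ext_len * 8 bytes, which the base value
 * 0x8, field offset 0x8 and shift 0x3 above express.
 */
static inline rte_be32_t
srh_first_dw(uint8_t next_hdr, uint8_t type, uint8_t segments_left)
{
	uint32_t val = (uint32_t)next_hdr << 24 | (uint32_t)type << 8 |
		       segments_left;

	return rte_cpu_to_be_32(val);
}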