[v1,3/5] net/mlx5/hws: add IPv6 routing extension matching support

Message ID 20230202101128.2446928-4-rongweil@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series: add IPv6 routing extension implementation

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Rongwei Liu Feb. 2, 2023, 10:11 a.m. UTC
  Add mlx5 HWS logic to match the IPv6 routing extension header.

Once an IPv6 routing extension item is detected in the pattern template
creation callback, the PMD allocates a flex parser to sample the first
dword of the SRv6 header.

Only the next_hdr/segments_left/type fields are supported for now.
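
As an illustration, here is a minimal sketch (not part of this patch) of
how an application could match such a header, using the IPV6_ROUTING_EXT
item added earlier in this series; the field values are hypothetical:

    /* Match an SRH (type 4) with one segment left in an IPv6 packet. */
    struct rte_flow_item_ipv6_routing_ext srh_spec = {
        .hdr = { .type = 4, .segments_left = 1 },
    };
    struct rte_flow_item_ipv6_routing_ext srh_mask = {
        .hdr = { .type = 0xff, .segments_left = 0xff },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
        { .type = RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT,
          .spec = &srh_spec, .mask = &srh_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };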

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c  |  7 +-
 drivers/net/mlx5/hws/mlx5dr_definer.c | 91 ++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_definer.h | 15 +++++
 drivers/net/mlx5/mlx5.c               | 92 ++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5.h               | 16 +++++
 drivers/net/mlx5/mlx5_flow.h          | 28 ++++++++
 drivers/net/mlx5/mlx5_flow_hw.c       | 29 +++++++--
 7 files changed, 268 insertions(+), 10 deletions(-)
  

Patch

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 1f65ea7dcb..22a94c1e1a 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -607,7 +607,7 @@  mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
 
 int
 mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
-				  struct mlx5_ext_sample_id ids[],
+				  struct mlx5_ext_sample_id *ids,
 				  uint32_t num, uint8_t *anchor)
 {
 	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
@@ -637,8 +637,9 @@  mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
 			(void *)flex_obj);
 		return -rte_errno;
 	}
-	*anchor = MLX5_GET(parse_graph_flex, flex, head_anchor_id);
-	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+	if (anchor)
+		*anchor = MLX5_GET(parse_graph_flex, flex, head_anchor_id);
+	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM && idx <= num; i++) {
 		void *s_off = (void *)((char *)sample + i *
 			      MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
 		uint32_t en;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 0f1cab7e07..142fc545eb 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -125,6 +125,7 @@  struct mlx5dr_definer_conv_data {
 	X(SET_BE16,	ipv4_frag,		v->fragment_offset,	rte_ipv4_hdr) \
 	X(SET_BE16,	ipv6_payload_len,	v->hdr.payload_len,	rte_flow_item_ipv6) \
 	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
+	X(SET,		ipv6_routing_hdr,	IPPROTO_ROUTING,	rte_flow_item_ipv6) \
 	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
 	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr[0],	rte_flow_item_ipv6) \
 	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr[4],	rte_flow_item_ipv6) \
@@ -293,6 +294,21 @@  mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
+				    const void *item,
+				    uint8_t *tag)
+{
+	const struct rte_flow_item_ipv6_routing_ext *v = item;
+	uint32_t val;
+
+	val = v->hdr.next_hdr << __mlx5_dw_bit_off(header_ipv6_routing_ext, next_hdr);
+	val |= v->hdr.type << __mlx5_dw_bit_off(header_ipv6_routing_ext, type);
+	val |= v->hdr.segments_left <<
+		__mlx5_dw_bit_off(header_ipv6_routing_ext, segments_left);
+	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
+}
+
 static void
 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
 			   const void *item_spec,
@@ -1468,6 +1484,76 @@  mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static struct mlx5dr_definer_fc *
+mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd, uint32_t byte_off)
+{
+	uint32_t byte_off_fp7 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_7);
+	uint32_t byte_off_fp0 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	enum mlx5dr_definer_fname fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+	struct mlx5dr_definer_fc *fc;
+	uint32_t idx;
+
+	if (byte_off < byte_off_fp7 || byte_off > byte_off_fp0) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	idx = (byte_off_fp0 - byte_off) / (sizeof(uint32_t));
+	fname += (enum mlx5dr_definer_fname)idx;
+	fc = &cd->fc[fname];
+	fc->byte_off = byte_off;
+	fc->bit_mask = UINT32_MAX;
+	return fc;
+}
+
+static int
+mlx5dr_definer_conv_item_ipv6_routing_ext(struct mlx5dr_definer_conv_data *cd,
+					  struct rte_flow_item *item,
+					  int item_idx)
+{
+	const struct rte_flow_item_ipv6_routing_ext *m = item->mask;
+	struct mlx5dr_definer_fc *fc;
+	bool inner = cd->tunnel;
+	uint32_t byte_off;
+
+	if (!cd->relaxed) {
+		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, inner);
+
+		/* Overwrite - Unset ethertype if present */
+		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
+
+		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+		if (!fc->tag_set) {
+			fc->item_idx = item_idx;
+			fc->tag_set = &mlx5dr_definer_ipv6_routing_hdr_set;
+			fc->tag_mask_set = &mlx5dr_definer_ones_set;
+			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
+		}
+	}
+
+	if (!m)
+		return 0;
+
+	if (m->hdr.hdr_len || m->hdr.flags) {
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
+	if (m->hdr.next_hdr || m->hdr.type || m->hdr.segments_left) {
+		byte_off = flow_hw_get_srh_flex_parser_byte_off_from_ctx(cd->ctx);
+		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off);
+		if (!fc)
+			return rte_errno;
+
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ipv6_routing_ext_set;
+	}
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1583,6 +1669,11 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
+			ret = mlx5dr_definer_conv_item_ipv6_routing_ext(&cd, items, i);
+			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
+						  MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index d52c6b0627..c857848a28 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -511,6 +511,21 @@  struct mlx5_ifc_header_ipv6_vtc_bits {
 	u8 flow_label[0x14];
 };
 
+struct mlx5_ifc_header_ipv6_routing_ext_bits {
+	u8 next_hdr[0x8];
+	u8 hdr_len[0x8];
+	u8 type[0x8];
+	u8 segments_left[0x8];
+	union {
+		u8 flags[0x20];
+		struct {
+			u8 last_entry[0x8];
+			u8 flag[0x8];
+			u8 tag[0x10];
+		};
+	};
+};
+
 struct mlx5_ifc_header_vxlan_bits {
 	u8 flags[0x8];
 	u8 reserved1[0x18];
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0b97c4e78d..94fd5a91e3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -970,7 +970,6 @@  mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 		.modify_field_select = 0,
 	};
 	struct mlx5_ext_sample_id ids[8];
-	uint8_t anchor_id;
 	int ret;
 
 	if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
@@ -1006,7 +1005,7 @@  mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 		return (rte_errno == 0) ? -ENODEV : -rte_errno;
 	}
 	prf->num = 2;
-	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num, &anchor_id);
+	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num, NULL);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to query sample IDs.");
 		return (rte_errno == 0) ? -ENODEV : -rte_errno;
@@ -1041,6 +1040,95 @@  mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 	prf->obj = NULL;
 }
 
+/*
+ * Allocate a flex parser for the SRv6 routing header (SRH). Once the refcnt
+ * drops back to zero, the resources held by this parser are freed.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev)
+{
+	struct mlx5_devx_graph_node_attr node = {
+		.modify_field_select = 0,
+	};
+	struct mlx5_ext_sample_id ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_common_dev_config *config = &priv->sh->cdev->config;
+	void *ibv_ctx = priv->sh->cdev->ctx;
+	int ret;
+
+	memset(ids, 0xff, sizeof(ids));
+	if (!config->hca_attr.parse_graph_flex_node) {
+		DRV_LOG(ERR, "Dynamic flex parser is not supported");
+		return -ENOTSUP;
+	}
+	if (__atomic_add_fetch(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) > 1)
+		return 0;
+
+	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
+	/* The first two DWs of the SRv6 header are not counted in the length. */
+	node.header_length_base_value = 0x8;
+	/* The length field unit is 8 bytes (uint64_t). */
+	node.header_length_field_shift = 0x3;
+	/* The header length field is in the 2nd byte. */
+	node.header_length_field_offset = 0x8;
+	node.header_length_field_mask = 0xF;
+	/* One byte next header protocol. */
+	node.next_header_field_size = 0x8;
+	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IP;
+	node.in[0].compare_condition_value = IPPROTO_ROUTING;
+	node.sample[0].flow_match_sample_en = 1;
+	/* First come first served, no matter inner or outer. */
+	node.sample[0].flow_match_sample_tunnel_mode = MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
+	node.out[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_TCP;
+	node.out[0].compare_condition_value = IPPROTO_TCP;
+	node.out[1].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_UDP;
+	node.out[1].compare_condition_value = IPPROTO_UDP;
+	node.out[2].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_IPV6;
+	node.out[2].compare_condition_value = IPPROTO_IPV6;
+	priv->sh->srh_flex_parser.fp = mlx5_devx_cmd_create_flex_parser(ibv_ctx, &node);
+	if (!priv->sh->srh_flex_parser.fp) {
+		DRV_LOG(ERR, "Failed to create flex parser node object.");
+		return (rte_errno == 0) ? -ENODEV : -rte_errno;
+	}
+	priv->sh->srh_flex_parser.num = 1;
+	ret = mlx5_devx_cmd_query_parse_samples(priv->sh->srh_flex_parser.fp, ids,
+						priv->sh->srh_flex_parser.num,
+						&priv->sh->srh_flex_parser.anchor_id);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to query sample IDs.");
+		return (rte_errno == 0) ? -ENODEV : -rte_errno;
+	}
+	priv->sh->srh_flex_parser.offset[0] = 0x0;
+	priv->sh->srh_flex_parser.ids[0].id = ids[0].id;
+	return 0;
+}
+
+/*
+ * Destroy the flex parser node, including the parser itself, its input/output
+ * arcs and DW samples. The resources can then be reused.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+void
+mlx5_free_srh_flex_parser(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
+
+	if (__atomic_sub_fetch(&fp->refcnt, 1, __ATOMIC_RELAXED))
+		return;
+	if (fp->fp)
+		mlx5_devx_cmd_destroy(fp->fp);
+	fp->fp = NULL;
+	fp->num = 0;
+}
+
 uint32_t
 mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
 {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 83fb316ad8..bea1f62ea8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -543,6 +543,17 @@  struct mlx5_counter_stats_raw {
 	volatile struct flow_counter_stats *data;
 };
 
+/* Mlx5 internal flex parser profile structure. */
+struct mlx5_internal_flex_parser_profile {
+	uint32_t num; /* Actual number of samples. */
+	/* Sample IDs for this profile. */
+	struct mlx5_ext_sample_id ids[MLX5_FLEX_ITEM_MAPPING_NUM];
+	uint32_t offset[MLX5_FLEX_ITEM_MAPPING_NUM]; /* Each ID sample offset. */
+	uint8_t anchor_id;
+	uint32_t refcnt;
+	void *fp; /* DevX flex parser object. */
+};
+
 TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
 
 /* Counter global management structure. */
@@ -1436,6 +1447,7 @@  struct mlx5_dev_ctx_shared {
 	struct mlx5_uar rx_uar; /* DevX UAR for Rx. */
 	struct mlx5_proc_priv *pppriv; /* Pointer to primary private process. */
 	struct mlx5_ecpri_parser_profile ecpri_parser;
+	struct mlx5_internal_flex_parser_profile srh_flex_parser; /* SRH flex parser structure. */
 	/* Flex parser profiles information. */
 	LIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */
 	struct mlx5_aso_age_mng *aso_age_mng;
@@ -2258,4 +2270,8 @@  struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
 						  void *ctx);
 void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
 				    struct mlx5_list_entry *entry);
+
+int mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev);
+
+void mlx5_free_srh_flex_parser(struct rte_eth_dev *dev);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e376dcae93..1f359cfb12 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -219,6 +219,10 @@  enum mlx5_feature_name {
 /* Meter color item */
 #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
 
+/* IPv6 routing extension item */
+#define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
+#define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -2611,4 +2615,28 @@  int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
 			   enum rte_flow_field_id field, int inherit,
 			   const struct rte_flow_attr *attr,
 			   struct rte_flow_error *error);
+
+static __rte_always_inline int
+flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint16_t port;
+
+	MLX5_ETH_FOREACH_DEV(port, NULL) {
+		struct mlx5_priv *priv;
+		struct mlx5_hca_flex_attr *attr;
+
+		priv = rte_eth_devices[port].data->dev_private;
+		attr = &priv->sh->cdev->config.hca_attr.flex;
+		if (priv->dr_ctx == dr_ctx && attr->ext_sample_id) {
+			if (priv->sh->srh_flex_parser.num)
+				return priv->sh->srh_flex_parser.ids[0].format_select_dw *
+					sizeof(uint32_t);
+			else
+				return UINT32_MAX;
+		}
+	}
+#endif
+	return UINT32_MAX;
+}
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 952247d3cf..d0e07acc4e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -213,17 +213,17 @@  flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
 }
 
 /**
- * Generate the pattern item flags.
- * Will be used for shared RSS action.
+ * Generate the matching pattern item flags.
  *
  * @param[in] items
  *   Pointer to the list of items.
  *
  * @return
- *   Item flags.
+ *   Matching item flags. The RSS hash field function
+ *   silently ignores unsupported flags.
  */
 static uint64_t
-flow_hw_rss_item_flags_get(const struct rte_flow_item items[])
+flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
 {
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
@@ -249,6 +249,10 @@  flow_hw_rss_item_flags_get(const struct rte_flow_item items[])
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
+			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
+					     MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
+			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
@@ -4736,6 +4740,7 @@  flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ICMP:
 		case RTE_FLOW_ITEM_TYPE_ICMP6:
 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
@@ -4864,7 +4869,7 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 				   "cannot create match template");
 		return NULL;
 	}
-	it->item_flags = flow_hw_rss_item_flags_get(tmpl_items);
+	it->item_flags = flow_hw_matching_item_flags_get(tmpl_items);
 	if (copied_items) {
 		if (attr->ingress)
 			it->implicit_port = true;
@@ -4872,6 +4877,17 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			it->implicit_tag = true;
 		mlx5_free(copied_items);
 	}
+	/* Either inner or outer, but not both. */
+	if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
+			      MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
+		if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
+		     (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
+		    (mlx5_alloc_srh_flex_parser(dev))) {
+			claim_zero(mlx5dr_match_template_destroy(it->mt));
+			mlx5_free(it);
+			return NULL;
+		}
+	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
@@ -4903,6 +4919,9 @@  flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				   NULL,
 				   "item template in using");
 	}
+	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
+				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
+		mlx5_free_srh_flex_parser(dev);
 	LIST_REMOVE(template, next);
 	claim_zero(mlx5dr_match_template_destroy(template->mt));
 	mlx5_free(template);
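
For reference, the tag setter mlx5dr_definer_ipv6_routing_ext_set() above packs
the three matchable fields into the single sampled dword according to the
mlx5_ifc_header_ipv6_routing_ext_bits layout: next_hdr lands in bits 31..24,
type in bits 15..8 and segments_left in bits 7..0. A standalone sketch of that
packing, with hypothetical field values:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the dword packing in mlx5dr_definer_ipv6_routing_ext_set(). */
    int main(void)
    {
        uint8_t next_hdr = 59;       /* no next header after the SRH */
        uint8_t type = 4;            /* Segment Routing Header */
        uint8_t segments_left = 2;
        uint32_t val = ((uint32_t)next_hdr << 24) |   /* bits 31..24 */
                       ((uint32_t)type << 8) |        /* bits 15..8 */
                       (uint32_t)segments_left;       /* bits 7..0 */

        printf("sampled DW: 0x%08x\n", val);          /* 0x3b000402 */
        return 0;
    }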