[v2,12/16] net/mlx5/hws: add range definer creation support

Message ID: 20230201072815.1329101-13-valex@nvidia.com (mailing list archive)
State: Accepted, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5/hws: support range and partial hash matching

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Alex Vesker Feb. 1, 2023, 7:28 a.m. UTC
Calculate and create an additional definer used for range checks
during matcher creation. In that case two definers are created:
one for exact matching and one for range matching. Since the HW GTA
WQE doesn't support the needed range insertion, rule insertion is
done using the FW GTA WQE command.

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 255 +++++++++++++++++++++++---
 drivers/net/mlx5/hws/mlx5dr_definer.h |  16 +-
 drivers/net/mlx5/hws/mlx5dr_matcher.c |  27 ++-
 drivers/net/mlx5/hws/mlx5dr_matcher.h |  17 +-
 4 files changed, 281 insertions(+), 34 deletions(-)
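
The range matching added by this series is driven by rte_flow items that
carry a "last" value in addition to "spec" (mlx5dr_definer_create_tag_range()
below reads both). A minimal sketch of such a pattern, for illustration only
and not part of this patch; whether a particular field can actually be
range-matched is device and driver dependent:

/*
 * Illustration only: request a TCP destination port range 1000-2000 by
 * setting both spec and last on the TCP item.
 */
#include <rte_byteorder.h>
#include <rte_flow.h>

static const struct rte_flow_item_tcp tcp_spec = {
	.hdr.dst_port = RTE_BE16(1000),		/* range MIN (item->spec) */
};
static const struct rte_flow_item_tcp tcp_last = {
	.hdr.dst_port = RTE_BE16(2000),		/* range MAX (item->last) */
};
static const struct rte_flow_item_tcp tcp_mask = {
	.hdr.dst_port = RTE_BE16(0xffff),
};

static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.spec = &tcp_spec,
		.last = &tcp_last,	/* last != spec -> range match */
		.mask = &tcp_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};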
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 0da38a818c..ed91a0998c 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -1508,26 +1508,33 @@  mlx5dr_definer_mt_set_fc(struct mlx5dr_match_template *mt,
 			 struct mlx5dr_definer_fc *fc,
 			 uint8_t *hl)
 {
-	uint32_t fc_sz = 0;
+	uint32_t fc_sz = 0, fcr_sz = 0;
 	int i;
 
 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++)
 		if (fc[i].tag_set)
-			fc_sz++;
+			fc[i].is_range ? fcr_sz++ : fc_sz++;
 
-	mt->fc = simple_calloc(fc_sz, sizeof(*mt->fc));
+	mt->fc = simple_calloc(fc_sz + fcr_sz, sizeof(*mt->fc));
 	if (!mt->fc) {
 		rte_errno = ENOMEM;
 		return rte_errno;
 	}
 
+	mt->fcr = mt->fc + fc_sz;
+
 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {
 		if (!fc[i].tag_set)
 			continue;
 
 		fc[i].fname = i;
-		memcpy(&mt->fc[mt->fc_sz++], &fc[i], sizeof(*mt->fc));
-		DR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+
+		if (fc[i].is_range) {
+			memcpy(&mt->fcr[mt->fcr_sz++], &fc[i], sizeof(*mt->fcr));
+		} else {
+			memcpy(&mt->fc[mt->fc_sz++], &fc[i], sizeof(*mt->fc));
+			DR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+		}
 	}
 
 	return 0;
@@ -1686,7 +1693,7 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 
 	mt->item_flags = item_flags;
 
-	/* Fill in headers layout and allocate fc array on mt */
+	/* Fill in headers layout and allocate fc & fcr array on mt */
 	ret = mlx5dr_definer_mt_set_fc(mt, fc, hl);
 	if (ret) {
 		DR_LOG(ERR, "Failed to set field copy to match template");
@@ -1855,9 +1862,92 @@  mlx5dr_definer_copy_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,
 }
 
 static int
-mlx5dr_definer_find_best_hl_fit(struct mlx5dr_context *ctx,
-				struct mlx5dr_definer *definer,
-				uint8_t *hl)
+mlx5dr_definer_find_best_range_fit(struct mlx5dr_definer *definer,
+				   struct mlx5dr_matcher *matcher)
+{
+	uint8_t tag_byte_offset[MLX5DR_DEFINER_FNAME_MAX] = {0};
+	uint8_t field_select[MLX5DR_DEFINER_FNAME_MAX] = {0};
+	struct mlx5dr_definer_sel_ctrl ctrl = {0};
+	uint32_t byte_offset, algn_byte_off;
+	struct mlx5dr_definer_fc *fcr;
+	bool require_dw;
+	int idx, i, j;
+
+	/* Try to create a range definer */
+	ctrl.allowed_full_dw = DW_SELECTORS_RANGE;
+	ctrl.allowed_bytes = BYTE_SELECTORS_RANGE;
+
+	/* Multiple fields cannot share the same DW for range match.
+	 * The HW doesn't recognize each field but compares the full dw.
+	 * For example definer DW consists of FieldA_FieldB
+	 * FieldA: Mask 0xFFFF range 0x1 to 0x2
+	 * FieldB: Mask 0xFFFF range 0x3 to 0x4
+	 * STE DW range will be 0x00010003 - 0x00020004
+	 * This will cause invalid match for FieldB if FieldA=1 and FieldB=8
+	 * Since 0x10003 < 0x10008 < 0x20004
+	 */
+	for (i = 0; i < matcher->num_of_mt; i++) {
+		for (j = 0; j < matcher->mt[i].fcr_sz; j++) {
+			fcr = &matcher->mt[i].fcr[j];
+
+			/* Found - Reuse previous mt binding */
+			if (field_select[fcr->fname]) {
+				fcr->byte_off = tag_byte_offset[fcr->fname];
+				continue;
+			}
+
+			/* Not found */
+			require_dw = fcr->byte_off >= (64 * DW_SIZE);
+			if (require_dw || ctrl.used_bytes == ctrl.allowed_bytes) {
+				/* Try to cover using DW selector */
+				if (ctrl.used_full_dw == ctrl.allowed_full_dw)
+					goto not_supported;
+
+				ctrl.full_dw_selector[ctrl.used_full_dw++] =
+					fcr->byte_off / DW_SIZE;
+
+				/* Bind DW */
+				idx = ctrl.used_full_dw - 1;
+				byte_offset = fcr->byte_off % DW_SIZE;
+				byte_offset += DW_SIZE * (DW_SELECTORS - idx - 1);
+			} else {
+				/* Try to cover using Bytes selectors */
+				if (ctrl.used_bytes == ctrl.allowed_bytes)
+					goto not_supported;
+
+				algn_byte_off = DW_SIZE * (fcr->byte_off / DW_SIZE);
+				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 3;
+				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 2;
+				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 1;
+				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off;
+
+				/* Bind BYTE */
+				byte_offset = DW_SIZE * DW_SELECTORS;
+				byte_offset += BYTE_SELECTORS - ctrl.used_bytes;
+				byte_offset += fcr->byte_off % DW_SIZE;
+			}
+
+			fcr->byte_off = byte_offset;
+			tag_byte_offset[fcr->fname] = byte_offset;
+			field_select[fcr->fname] = 1;
+		}
+	}
+
+	mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
+	definer->type = MLX5DR_DEFINER_TYPE_RANGE;
+
+	return 0;
+
+not_supported:
+	DR_LOG(ERR, "Unable to find supporting range definer combination");
+	rte_errno = ENOTSUP;
+	return rte_errno;
+}
+
+static int
+mlx5dr_definer_find_best_match_fit(struct mlx5dr_context *ctx,
+				   struct mlx5dr_definer *definer,
+				   uint8_t *hl)
 {
 	struct mlx5dr_definer_sel_ctrl ctrl = {0};
 	bool found;
@@ -1923,6 +2013,43 @@  void mlx5dr_definer_create_tag(const struct rte_flow_item *items,
 	}
 }
 
+static uint32_t mlx5dr_definer_get_range_byte_off(uint32_t match_byte_off)
+{
+	uint8_t curr_dw_idx = match_byte_off / DW_SIZE;
+	uint8_t new_dw_idx;
+
+	/* Range DW can have the following values 7,8,9,10
+	 * -DW7 is mapped to DW9
+	 * -DW8 is mapped to DW7
+	 * -DW9 is mapped to DW5
+	 * -DW10 is mapped to DW3
+	 * To reduce calculation the following formula is used:
+	 */
+	new_dw_idx = curr_dw_idx * (-2) + 23;
+
+	return new_dw_idx * DW_SIZE + match_byte_off % DW_SIZE;
+}
+
+void mlx5dr_definer_create_tag_range(const struct rte_flow_item *items,
+				     struct mlx5dr_definer_fc *fc,
+				     uint32_t fc_sz,
+				     uint8_t *tag)
+{
+	struct mlx5dr_definer_fc tmp_fc;
+	uint32_t i;
+
+	for (i = 0; i < fc_sz; i++) {
+		tmp_fc = *fc;
+		/* Set MAX value */
+		tmp_fc.byte_off = mlx5dr_definer_get_range_byte_off(fc->byte_off);
+		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].last, tag);
+		/* Set MIN value */
+		tmp_fc.byte_off += DW_SIZE;
+		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].spec, tag);
+		fc++;
+	}
+}
+
 int mlx5dr_definer_get_id(struct mlx5dr_definer *definer)
 {
 	return definer->obj->id;
@@ -1951,27 +2078,26 @@  mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
 
 static int
 mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
-			   struct mlx5dr_definer *match_definer)
+			   struct mlx5dr_definer *match_definer,
+			   struct mlx5dr_definer *range_definer)
 {
 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
 	struct mlx5dr_match_template *mt = matcher->mt;
-	uint8_t *match_hl, *hl_buff;
+	uint8_t *match_hl;
 	int i, ret;
 
 	/* Union header-layout (hl) is used for creating a single definer
 	 * field layout used with different bitmasks for hash and match.
 	 */
-	hl_buff = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));
-	if (!hl_buff) {
+	match_hl = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));
+	if (!match_hl) {
 		DR_LOG(ERR, "Failed to allocate memory for header layout");
 		rte_errno = ENOMEM;
 		return rte_errno;
 	}
 
-	match_hl = hl_buff;
-
 	/* Convert all mt items to header layout (hl)
-	 * and allocate the match field copy array (fc).
+	 * and allocate the match and range field copy array (fc & fcr).
 	 */
 	for (i = 0; i < matcher->num_of_mt; i++) {
 		ret = mlx5dr_definer_conv_items_to_hl(ctx, &mt[i], match_hl);
@@ -1982,13 +2108,20 @@  mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
 	}
 
 	/* Find the match definer layout for header layout match union */
-	ret = mlx5dr_definer_find_best_hl_fit(ctx, match_definer, match_hl);
+	ret = mlx5dr_definer_find_best_match_fit(ctx, match_definer, match_hl);
 	if (ret) {
 		DR_LOG(ERR, "Failed to create match definer from header layout");
 		goto free_fc;
 	}
 
-	simple_free(hl_buff);
+	/* Find the range definer layout for match templates fcrs */
+	ret = mlx5dr_definer_find_best_range_fit(range_definer, matcher);
+	if (ret) {
+		DR_LOG(ERR, "Failed to create range definer from header layout");
+		goto free_fc;
+	}
+
+	simple_free(match_hl);
 	return 0;
 
 free_fc:
@@ -1996,7 +2129,7 @@  mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
 		if (mt[i].fc)
 			simple_free(mt[i].fc);
 
-	simple_free(hl_buff);
+	simple_free(match_hl);
 	return rte_errno;
 }
 
@@ -2005,7 +2138,8 @@  mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
 		     struct mlx5dr_definer_fc *fc,
 		     int fc_sz,
 		     struct rte_flow_item *items,
-		     struct mlx5dr_definer *layout)
+		     struct mlx5dr_definer *layout,
+		     bool bind_fc)
 {
 	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
 	struct mlx5dr_definer *definer;
@@ -2021,10 +2155,12 @@  mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
 	memcpy(definer, layout, sizeof(*definer));
 
 	/* Align field copy array based on given layout */
-	ret = mlx5dr_definer_fc_bind(definer, fc, fc_sz);
-	if (ret) {
-		DR_LOG(ERR, "Failed to bind field copy to definer");
-		goto free_definer;
+	if (bind_fc) {
+		ret = mlx5dr_definer_fc_bind(definer, fc, fc_sz);
+		if (ret) {
+			DR_LOG(ERR, "Failed to bind field copy to definer");
+			goto free_definer;
+		}
 	}
 
 	/* Create the tag mask used for definer creation */
@@ -2067,7 +2203,8 @@  mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
 						     mt[i].fc,
 						     mt[i].fc_sz,
 						     mt[i].items,
-						     match_layout);
+						     match_layout,
+						     true);
 		if (!mt[i].definer) {
 			DR_LOG(ERR, "Failed to create match definer");
 			goto free_definers;
@@ -2091,6 +2228,58 @@  mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
 		mlx5dr_definer_free(matcher->mt[i].definer);
 }
 
+static int
+mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
+				  struct mlx5dr_matcher *matcher,
+				  struct mlx5dr_definer *range_layout)
+{
+	struct mlx5dr_match_template *mt = matcher->mt;
+	int i;
+
+	/* Create optional range definers */
+	for (i = 0; i < matcher->num_of_mt; i++) {
+		if (!mt[i].fcr_sz)
+			continue;
+
+		/* All must use range if requested */
+		if (i && !mt[i - 1].range_definer) {
+			DR_LOG(ERR, "Using range and non range templates is not allowed");
+			goto free_definers;
+		}
+
+		matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
+		/* Create definer without fcr binding, already bound */
+		mt[i].range_definer = mlx5dr_definer_alloc(ctx->ibv_ctx,
+							   mt[i].fcr,
+							   mt[i].fcr_sz,
+							   mt[i].items,
+							   range_layout,
+							   false);
+		if (!mt[i].range_definer) {
+			DR_LOG(ERR, "Failed to create range definer");
+			goto free_definers;
+		}
+	}
+	return 0;
+
+free_definers:
+	while (i--)
+		if (mt[i].range_definer)
+			mlx5dr_definer_free(mt[i].range_definer);
+
+	return rte_errno;
+}
+
+static void
+mlx5dr_definer_matcher_range_uninit(struct mlx5dr_matcher *matcher)
+{
+	int i;
+
+	for (i = 0; i < matcher->num_of_mt; i++)
+		if (matcher->mt[i].range_definer)
+			mlx5dr_definer_free(matcher->mt[i].range_definer);
+}
+
 static int
 mlx5dr_definer_matcher_hash_init(struct mlx5dr_context *ctx,
 				 struct mlx5dr_matcher *matcher)
@@ -2169,13 +2358,13 @@  int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
 				struct mlx5dr_matcher *matcher)
 {
 	struct mlx5dr_definer match_layout = {0};
+	struct mlx5dr_definer range_layout = {0};
 	int ret, i;
 
 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION)
 		return 0;
 
-	/* Calculate header layout based on matcher items */
-	ret = mlx5dr_definer_calc_layout(matcher, &match_layout);
+	ret = mlx5dr_definer_calc_layout(matcher, &match_layout, &range_layout);
 	if (ret) {
 		DR_LOG(ERR, "Failed to calculate matcher definer layout");
 		return ret;
@@ -2188,15 +2377,24 @@  int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
 		goto free_fc;
 	}
 
+	/* Calculate definers needed for range */
+	ret = mlx5dr_definer_matcher_range_init(ctx, matcher, &range_layout);
+	if (ret) {
+		DR_LOG(ERR, "Failed to init range definers");
+		goto uninit_match_definer;
+	}
+
 	/* Calculate partial hash definer */
 	ret = mlx5dr_definer_matcher_hash_init(ctx, matcher);
 	if (ret) {
 		DR_LOG(ERR, "Failed to init hash definer");
-		goto uninit_match_definer;
+		goto uninit_range_definer;
 	}
 
 	return 0;
 
+uninit_range_definer:
+	mlx5dr_definer_matcher_range_uninit(matcher);
 uninit_match_definer:
 	mlx5dr_definer_matcher_match_uninit(matcher);
 free_fc:
@@ -2214,6 +2412,7 @@  void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher)
 		return;
 
 	mlx5dr_definer_matcher_hash_uninit(matcher);
+	mlx5dr_definer_matcher_range_uninit(matcher);
 	mlx5dr_definer_matcher_match_uninit(matcher);
 
 	for (i = 0; i < matcher->num_of_mt; i++)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index a14a08838a..dd9a297007 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -5,11 +5,17 @@ 
 #ifndef MLX5DR_DEFINER_H_
 #define MLX5DR_DEFINER_H_
 
+/* Max available selectors */
+#define DW_SELECTORS 9
+#define BYTE_SELECTORS 8
+
 /* Selectors based on match TAG */
 #define DW_SELECTORS_MATCH 6
 #define DW_SELECTORS_LIMITED 3
-#define DW_SELECTORS 9
-#define BYTE_SELECTORS 8
+
+/* Selectors based on range TAG */
+#define DW_SELECTORS_RANGE 2
+#define BYTE_SELECTORS_RANGE 8
 
 enum mlx5dr_definer_fname {
 	MLX5DR_DEFINER_FNAME_ETH_SMAC_48_16_O,
@@ -112,6 +118,7 @@  enum mlx5dr_definer_fname {
 enum mlx5dr_definer_type {
 	MLX5DR_DEFINER_TYPE_MATCH,
 	MLX5DR_DEFINER_TYPE_JUMBO,
+	MLX5DR_DEFINER_TYPE_RANGE,
 };
 
 struct mlx5dr_definer_fc {
@@ -573,6 +580,11 @@  void mlx5dr_definer_create_tag(const struct rte_flow_item *items,
 			       uint32_t fc_sz,
 			       uint8_t *tag);
 
+void mlx5dr_definer_create_tag_range(const struct rte_flow_item *items,
+				     struct mlx5dr_definer_fc *fc,
+				     uint32_t fc_sz,
+				     uint8_t *tag);
+
 int mlx5dr_definer_get_id(struct mlx5dr_definer *definer);
 
 int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.c b/drivers/net/mlx5/hws/mlx5dr_matcher.c
index 0158a60ac3..5508cfe230 100644
--- a/drivers/net/mlx5/hws/mlx5dr_matcher.c
+++ b/drivers/net/mlx5/hws/mlx5dr_matcher.c
@@ -360,6 +360,12 @@  static bool mlx5dr_matcher_supp_fw_wqe(struct mlx5dr_matcher *matcher)
 		return false;
 	}
 
+	if ((matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER) &&
+	    !IS_BIT_SET(caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
+		DR_LOG(INFO, "Extended match gen wqe RANGE format not supported");
+		return false;
+	}
+
 	if (!(caps->supp_type_gen_wqe & MLX5_GENERATE_WQE_TYPE_FLOW_UPDATE)) {
 		DR_LOG(ERR, "Gen WQE command not supporting GTA");
 		return false;
@@ -460,14 +466,20 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 		ste = &matcher->match_ste.ste;
 		ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
 
+		/* Add additional rows due to additional range STE */
+		if (mlx5dr_matcher_mt_is_range(mt))
+			ste->order++;
+
 		rtc_attr.log_size = attr->table.sz_row_log;
 		rtc_attr.log_depth = attr->table.sz_col_log;
 		rtc_attr.is_frst_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
+		rtc_attr.is_scnd_range = mlx5dr_matcher_mt_is_range(mt);
 		rtc_attr.miss_ft_id = matcher->end_ft->id;
 
 		if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_HASH) {
 			/* The usual Hash Table */
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
 			if (matcher->hash_definer) {
 				/* Specify definer_id_0 is used for hashing */
 				rtc_attr.fw_gen_wqe = true;
@@ -477,6 +489,16 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 			} else {
 				/* The first mt is used since all share the same definer */
 				rtc_attr.match_definer_0 = mlx5dr_definer_get_id(mt->definer);
+
+				/* This is tricky: instead of passing two definers for
+				 * match and range, we specify that this RTC uses a hash
+				 * definer. This allows us to use any range definer,
+				 * since only the first STE is used for hashing anyway.
+				 */
+				if (matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER) {
+					rtc_attr.fw_gen_wqe = true;
+					rtc_attr.num_hash_definer = 1;
+				}
 			}
 		} else if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
@@ -751,7 +773,7 @@  static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher)
 	struct mlx5dr_pool_attr pool_attr = {0};
 	int ret;
 
-	/* Calculate match and hash definers */
+	/* Calculate match, range and hash definers */
 	ret = mlx5dr_definer_matcher_init(ctx, matcher);
 	if (ret) {
 		DR_LOG(ERR, "Failed to set matcher templates with match definers");
@@ -772,6 +794,9 @@  static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher)
 	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_MATCHER_STE_POOL;
 	pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
 				 matcher->attr.table.sz_row_log;
+	/* Add additional rows due to additional range STE */
+	if (matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER)
+		pool_attr.alloc_log_sz++;
 	mlx5dr_matcher_set_pool_attr(&pool_attr, matcher);
 
 	matcher->match_ste.pool = mlx5dr_pool_create(ctx, &pool_attr);
diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.h b/drivers/net/mlx5/hws/mlx5dr_matcher.h
index 8dadc0ee66..4759068ab4 100644
--- a/drivers/net/mlx5/hws/mlx5dr_matcher.h
+++ b/drivers/net/mlx5/hws/mlx5dr_matcher.h
@@ -23,8 +23,9 @@ 
 #define MLX5DR_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
 
 enum mlx5dr_matcher_flags {
-	MLX5DR_MATCHER_FLAGS_HASH_DEFINER	= 1 << 0,
-	MLX5DR_MATCHER_FLAGS_COLLISION		= 1 << 1,
+	MLX5DR_MATCHER_FLAGS_RANGE_DEFINER	= 1 << 0,
+	MLX5DR_MATCHER_FLAGS_HASH_DEFINER	= 1 << 1,
+	MLX5DR_MATCHER_FLAGS_COLLISION		= 1 << 2,
 };
 
 struct mlx5dr_match_template {
@@ -32,7 +33,9 @@  struct mlx5dr_match_template {
 	struct mlx5dr_definer *definer;
 	struct mlx5dr_definer *range_definer;
 	struct mlx5dr_definer_fc *fc;
+	struct mlx5dr_definer_fc *fcr;
 	uint16_t fc_sz;
+	uint16_t fcr_sz;
 	uint64_t item_flags;
 	uint8_t vport_item_id;
 	enum mlx5dr_match_template_flags flags;
@@ -80,10 +83,18 @@  mlx5dr_matcher_mt_is_jumbo(struct mlx5dr_match_template *mt)
 	return mlx5dr_definer_is_jumbo(mt->definer);
 }
 
+static inline bool
+mlx5dr_matcher_mt_is_range(struct mlx5dr_match_template *mt)
+{
+	return (!!mt->range_definer);
+}
+
 static inline bool mlx5dr_matcher_req_fw_wqe(struct mlx5dr_matcher *matcher)
 {
 	/* Currently HWS doesn't support hash different from match or range */
-	return unlikely(matcher->flags & MLX5DR_MATCHER_FLAGS_HASH_DEFINER);
+	return unlikely(matcher->flags &
+			(MLX5DR_MATCHER_FLAGS_HASH_DEFINER |
+			 MLX5DR_MATCHER_FLAGS_RANGE_DEFINER));
 }
 
 int mlx5dr_matcher_conv_items_to_prm(uint64_t *match_buf,
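
A small standalone sanity sketch (not part of the patch) double-checking two
points from the comments above: the DW remapping formula used in
mlx5dr_definer_get_range_byte_off(), and the reason two range fields must not
share a DW (the FieldA/FieldB example in mlx5dr_definer_find_best_range_fit()):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Match DWs 7,8,9,10 map to range DWs 9,7,5,3 via dw * (-2) + 23. */
	const int expected[4] = { 9, 7, 5, 3 };
	int dw;

	for (dw = 7; dw <= 10; dw++) {
		int mapped = dw * (-2) + 23;

		assert(mapped == expected[dw - 7]);
		printf("match DW%d -> range DW%d\n", dw, mapped);
	}

	/* With FieldA and FieldB packed into a single DW, the DW-level range
	 * 0x00010003..0x00020004 wrongly accepts FieldA=1, FieldB=8
	 * (0x00010008), although FieldB=8 is outside its own range 3..4.
	 */
	unsigned int lo = 0x00010003, hi = 0x00020004, pkt = 0x00010008;

	assert(pkt >= lo && pkt <= hi);				/* DW compare matches */
	assert(!((pkt & 0xffff) >= 3 && (pkt & 0xffff) <= 4));	/* FieldB misses */

	printf("0x%08x falls inside [0x%08x, 0x%08x] at DW level\n", pkt, lo, hi);
	return 0;
}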