diff mbox series

[v2] net/mlx5: enlarge maximal flow priority value

Message ID 1610433600-171600-1-git-send-email-dongzhou@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers show
Series [v2] net/mlx5: enlarge maximal flow priority value | expand

Checks

Context Check Description
ci/intel-Testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/checkpatch success coding style OK

Commit Message

Dong Zhou Jan. 12, 2021, 6:40 a.m. UTC
Currently, the maximal flow priority in non-root table to user
is 4, it's not enough for user to do some flow match by priority,
such as LPM, for one IPV4 address, we need 32 priorities for each
bit of 32 mask length.

PMD will manage 3 sub-priorities per user priority according to L2,
L3 and L4. The internal priority is 16 bits, user can use priorities
from 0 - 21843.

Those enlarged flow priorities are only used for ingress or egress
flow groups greater than 0 and for any transfer flow group.

Signed-off-by: Dong Zhou <dongzhou@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
v2:
* fix coding style issues, 'prority' to 'priority'.
---
 doc/guides/nics/mlx5.rst               |  2 ++
 doc/guides/rel_notes/release_21_02.rst |  7 ++++
 drivers/net/mlx5/mlx5_flow.c           | 62 +++++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow.h           | 16 +++++++--
 drivers/net/mlx5/mlx5_flow_dv.c        | 14 +++-----
 drivers/net/mlx5/mlx5_flow_verbs.c     |  9 +++--
 6 files changed, 91 insertions(+), 19 deletions(-)

Comments

Raslan Darawsheh Jan. 20, 2021, 4:11 p.m. UTC | #1
Hi,

> -----Original Message-----
> From: Dong Zhou <dongzhou@nvidia.com>
> Sent: Tuesday, January 12, 2021 8:40 AM
> To: Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v2] net/mlx5: enlarge maximal flow priority value
> 
> Currently, the maximal flow priority in non-root table to user
> is 4, it's not enough for user to do some flow match by priority,
> such as LPM, for one IPV4 address, we need 32 priorities for each
> bit of 32 mask length.
> 
> PMD will manage 3 sub-priorities per user priority according to L2,
> L3 and L4. The internal priority is 16 bits, user can use priorities
> from 0 - 21843.
> 
> Those enlarged flow priorities are only used for ingress or egress
> flow groups greater than 0 and for any transfer flow group.
> 
> Signed-off-by: Dong Zhou <dongzhou@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> ---
> v2:
> * fix coding style issues, 'prority' to 'priority'.
> ---
Patch rebased and applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
diff mbox series

Patch

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 3bda0f8..8e25ccd 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -98,6 +98,8 @@  Features
 - Hardware LRO.
 - Hairpin.
 - Multiple-thread flow insertion.
+- 21844 flow priorities for ingress or egress flow groups greater than 0 and for any transfer
+  flow group.
 
 Limitations
 -----------
diff --git a/doc/guides/rel_notes/release_21_02.rst b/doc/guides/rel_notes/release_21_02.rst
index 706cbf8..559aed7 100644
--- a/doc/guides/rel_notes/release_21_02.rst
+++ b/doc/guides/rel_notes/release_21_02.rst
@@ -55,6 +55,13 @@  New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Mellanox mlx5 driver.**
+
+  Updated the Mellanox mlx5 driver with new features and improvements, including:
+
+  * Enlarge the number of flow priorities to 21844 (0 - 21843) for ingress or egress
+    flow groups greater than 0 and for any transfer flow group.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2a4073c..7c0e985 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -861,6 +861,58 @@  struct mlx5_flow_tunnel_info {
 }
 
 /**
+ * Get the lowest priority.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ *   Pointer to device flow rule attributes.
+ *
+ * @return
+ *   The value of lowest priority of flow.
+ */
+uint32_t
+mlx5_get_lowest_priority(struct rte_eth_dev *dev,
+			  const struct rte_flow_attr *attr)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!attr->group && !attr->transfer)
+		return priv->config.flow_prio - 2;
+	return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
+}
+
+/**
+ * Calculate matcher priority of the flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attr
+ *   Pointer to device flow rule attributes.
+ * @param[in] subpriority
+ *   The priority based on the items.
+ * @return
+ *   The matcher priority of the flow.
+ */
+uint16_t
+mlx5_get_matcher_priority(struct rte_eth_dev *dev,
+			  const struct rte_flow_attr *attr,
+			  uint32_t subpriority)
+{
+	uint16_t priority = (uint16_t)attr->priority;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!attr->group && !attr->transfer) {
+		if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+			priority = priv->config.flow_prio - 1;
+		return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+	}
+	if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
+		priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
+	return priority * 3 + subpriority;
+}
+
+/**
  * Verify the @p item specifications (spec, last, mask) are compatible with the
  * NIC capabilities.
  *
@@ -1674,7 +1726,7 @@  struct mlx5_flow_tunnel_info {
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
 					  NULL, "groups is not supported");
-	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
 	    attributes->priority >= priority_max)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -3687,7 +3739,7 @@  struct mlx5_hlist_entry *
 		};
 	} else {
 		/* Default rule, wildcard match. */
-		attr.priority = MLX5_FLOW_PRIO_RSVD;
+		attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
 		items[0] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_END,
 		};
@@ -5456,7 +5508,7 @@  struct mlx5_hlist_entry *
 	 */
 	if (external || dev->data->dev_started ||
 	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
-	     attr->priority == MLX5_FLOW_PRIO_RSVD)) {
+	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
 		ret = flow_drv_apply(dev, flow, error);
 		if (ret < 0)
 			goto error;
@@ -5970,7 +6022,7 @@  struct mlx5_flow_workspace*
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_attr attr = {
 		.ingress = 1,
-		.priority = MLX5_FLOW_PRIO_RSVD,
+		.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
 	};
 	struct rte_flow_item items[] = {
 		{
@@ -6855,7 +6907,7 @@  struct mlx5_meter_domains_infos *
 	for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
 		struct rte_flow_attr attr = {
 			.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
-			.priority = MLX5_FLOW_PRIO_RSVD,
+			.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
 			.ingress = 1,
 		};
 		struct rte_flow_item items[] = {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ee85c9d..b6b2123 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -266,8 +266,15 @@  enum mlx5_feature_name {
 /* UDP port numbers for GENEVE. */
 #define MLX5_UDP_PORT_GENEVE 6081
 
-/* Priority reserved for default flows. */
-#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+/* Lowest priority indicator. */
+#define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
+
+/*
+ * Max priority for ingress/egress flow groups
+ * greater than 0 and for any transfer flow group.
+ * From user configuration: 0 - 21843.
+ */
+#define MLX5_NON_ROOT_FLOW_MAX_PRIO	(21843 + 1)
 
 /*
  * Number of sub priorities.
@@ -1284,6 +1291,11 @@  uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 				   uint32_t subpriority);
+uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
+					const struct rte_flow_attr *attr);
+uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
+				     const struct rte_flow_attr *attr,
+				     uint32_t subpriority);
 int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 				     enum mlx5_feature_name feature,
 				     uint32_t id,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index e4736ee..a54ee3c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5150,7 +5150,7 @@  struct mlx5_hlist_entry *
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t priority_max = priv->config.flow_prio - 1;
+	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
 	int ret = 0;
 
 #ifndef HAVE_MLX5DV_DR
@@ -5171,8 +5171,8 @@  struct mlx5_hlist_entry *
 	if (!table)
 		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 #endif
-	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
-	    attributes->priority >= priority_max)
+	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
+	    attributes->priority > lowest_priority)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 					  NULL,
@@ -9665,7 +9665,6 @@  struct mlx5_cache_entry *
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
-	uint64_t priority = attr->priority;
 	struct mlx5_flow_dv_matcher matcher = {
 		.mask = {
 			.size = sizeof(matcher.mask.buf) -
@@ -9736,8 +9735,6 @@  struct mlx5_cache_entry *
 	dev_flow->dv.group = table;
 	if (attr->transfer)
 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
-	if (priority == MLX5_FLOW_PRIO_RSVD)
-		priority = dev_conf->flow_prio - 1;
 	/* number of actions must be set to 0 in case of dirty stack. */
 	mhdr_res->actions_num = 0;
 	if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
@@ -10591,9 +10588,8 @@  struct mlx5_cache_entry *
 	/* Register matcher. */
 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 				    matcher.mask.size);
-	matcher.priority = mlx5_os_flow_adjust_priority(dev,
-							priority,
-							matcher.priority);
+	matcher.priority = mlx5_get_matcher_priority(dev, attr,
+					matcher.priority);
 	/* reserved field no needs to be set to 0 here. */
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 2d43819..9a2af64 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -109,8 +109,11 @@  struct ibv_spec_header {
 			dev->data->port_id, priority);
 		return -rte_errno;
 	}
-	DRV_LOG(INFO, "port %u flow maximum priority: %d",
-		dev->data->port_id, priority);
+	DRV_LOG(INFO, "port %u supported flow priorities:"
+		" 0-%d for ingress or egress root table,"
+		" 0-%d for non-root table or transfer root table.",
+		dev->data->port_id, priority - 2,
+		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
 	return priority;
 }
 
@@ -1710,7 +1713,7 @@  struct ibv_spec_header {
 
 	MLX5_ASSERT(wks);
 	rss_desc = &wks->rss_desc;
-	if (priority == MLX5_FLOW_PRIO_RSVD)
+	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
 		priority = priv->config.flow_prio - 1;
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		int ret;