[v5,3/6] net/mlx5: fix structure passing method in function call

Message ID 20201116140224.8464-4-getelson@nvidia.com
State Accepted, archived
Delegated to: Raslan Darawsheh
Series: restore tunnel offload functionality in mlx5

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Gregory Etelson Nov. 16, 2020, 2:02 p.m. UTC
The tunnel offload implementation introduced the flow_grp_info bit-field
structure. Since the structure occupies only 64 bits, the code passed it
by value in function calls.

This patch changes the passing method to a reference (a pointer to const),
so the structure is no longer copied on every call.
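
Below is a minimal, self-contained C sketch of the calling-convention change,
for illustration only. The flow_grp_info here is a reduced, hypothetical
version keeping just the bit-fields visible in this patch, and the
group-to-table translation is a placeholder rather than the driver's real
logic.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced flow_grp_info: only the bit-fields that appear in this patch. */
struct flow_grp_info {
	uint64_t external:1;
	uint64_t transfer:1;
	uint64_t fdb_def_rule:1;
	uint64_t std_tbl_fix:1;
	uint64_t skip_scale:1;
};

/* Before the patch: the 64-bit structure is copied on every call. */
static int
group_to_table_by_value(uint32_t group, uint32_t *table,
			struct flow_grp_info grp_info)
{
	/* Placeholder translation, not the driver's real logic. */
	*table = grp_info.external ? group + 1 : group;
	return 0;
}

/* After the patch: the structure is passed as a pointer to const. */
static int
group_to_table_by_ref(uint32_t group, uint32_t *table,
		      const struct flow_grp_info *grp_info)
{
	/* Placeholder translation, not the driver's real logic. */
	*table = grp_info->external ? group + 1 : group;
	return 0;
}

int
main(void)
{
	struct flow_grp_info grp_info = { .external = 1, .transfer = 1 };
	uint32_t table;

	group_to_table_by_value(5, &table, grp_info);
	printf("by value:     table=%" PRIu32 "\n", table);
	group_to_table_by_ref(5, &table, &grp_info);
	printf("by reference: table=%" PRIu32 "\n", table);
	return 0;
}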

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 20 +++++++++++---------
 drivers/net/mlx5/mlx5_flow.h    |  4 ++--
 drivers/net/mlx5/mlx5_flow_dv.c | 10 +++++-----
 3 files changed, 18 insertions(+), 16 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c8152cb8b1..bc93aa6377 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6699,9 +6699,11 @@  mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
-		    struct flow_grp_info grp_info, struct rte_flow_error *error)
+		    const struct flow_grp_info *grp_info,
+		    struct rte_flow_error *error)
 {
-	if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+	if (grp_info->transfer && grp_info->external &&
+	    grp_info->fdb_def_rule) {
 		if (group == UINT32_MAX)
 			return rte_flow_error_set
 						(error, EINVAL,
@@ -6758,25 +6760,25 @@  int
 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			 const struct mlx5_flow_tunnel *tunnel,
 			 uint32_t group, uint32_t *table,
-			 struct flow_grp_info grp_info,
+			 const struct flow_grp_info *grp_info,
 			 struct rte_flow_error *error)
 {
 	int ret;
 	bool standard_translation;
 
-	if (!grp_info.skip_scale && grp_info.external &&
+	if (!grp_info->skip_scale && grp_info->external &&
 	    group < MLX5_MAX_TABLES_EXTERNAL)
 		group *= MLX5_FLOW_TABLE_FACTOR;
 	if (is_tunnel_offload_active(dev)) {
-		standard_translation = !grp_info.external ||
-					grp_info.std_tbl_fix;
+		standard_translation = !grp_info->external ||
+					grp_info->std_tbl_fix;
 	} else {
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
 		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
-		dev->data->port_id, group, grp_info.transfer,
-		grp_info.external, grp_info.fdb_def_rule,
+		dev->data->port_id, group, grp_info->transfer,
+		grp_info->external, grp_info->fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
 	if (standard_translation)
 		ret = flow_group_to_table(dev->data->port_id, group, table,
@@ -7273,7 +7275,7 @@  flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
 	miss_attr.group = jump_data->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
+				       &flow_table, &grp_info, error);
 	if (ret)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index fbc6173fcb..c33c0fee7c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1273,8 +1273,8 @@  tunnel_use_standard_attr_group_translate
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			     const struct mlx5_flow_tunnel *tunnel,
 			     uint32_t group, uint32_t *table,
-			     struct flow_grp_info flags,
-				 struct rte_flow_error *error);
+			     const struct flow_grp_info *flags,
+			     struct rte_flow_error *error);
 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
 				     int tunnel, uint64_t layer_types,
 				     uint64_t hash_fields);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 62d9ca9ffb..25ab9adee6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3935,7 +3935,7 @@  flow_dv_validate_action_jump(struct rte_eth_dev *dev,
 	target_group =
 		((const struct rte_flow_action_jump *)action->conf)->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	if (attributes->group == target_group &&
@@ -5103,7 +5103,7 @@  static int
 flow_dv_validate_attributes(struct rte_eth_dev *dev,
 			    const struct mlx5_flow_tunnel *tunnel,
 			    const struct rte_flow_attr *attributes,
-			    struct flow_grp_info grp_info,
+			    const struct flow_grp_info *grp_info,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -5258,7 +5258,7 @@  flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	}
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
-	ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
+	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
 	if (ret < 0)
 		return ret;
 	is_root = (uint64_t)ret;
@@ -9597,7 +9597,7 @@  flow_dv_translate(struct rte_eth_dev *dev,
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
 	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	dev_flow->dv.group = table;
@@ -9944,7 +9944,7 @@  flow_dv_translate(struct rte_eth_dev *dev,
 			ret = mlx5_flow_group_to_table(dev, tunnel,
 						       jump_group,
 						       &table,
-						       grp_info, error);
+						       &grp_info, error);
 			if (ret)
 				return ret;
 			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,