[14/71] net/mlx5: replace use of fixed size rte_memcpy

Message ID 20240229225936.483472-15-stephen@networkplumber.org (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: replace use of fixed size rte_memcpy

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Stephen Hemminger Feb. 29, 2024, 10:58 p.m. UTC
  Automatically generated by devtools/cocci/rte_memcpy.cocci

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/mlx5/mlx5_devx.c     |  4 ++--
 drivers/net/mlx5/mlx5_flow.c     | 38 ++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow_aso.c |  6 ++---
 drivers/net/mlx5/mlx5_flow_hw.c  | 16 +++++++-------
 drivers/net/mlx5/mlx5_rx.c       |  6 ++---
 drivers/net/mlx5/mlx5_rxtx_vec.c |  8 +++----
 6 files changed, 39 insertions(+), 39 deletions(-)
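
 [Editor's note: for readers unfamiliar with the tooling, the change was produced
 by a Coccinelle semantic patch. The rules below are a minimal hypothetical sketch
 of the kind of transformation involved, not the actual contents of
 devtools/cocci/rte_memcpy.cocci; they assume the script only rewrites calls whose
 length argument is a compile-time constant (a sizeof expression or a literal),
 where the compiler can expand plain memcpy() at least as well as rte_memcpy().

  // Sketch only: rewrite fixed-size rte_memcpy() calls to memcpy().
  @@
  expression dst, src;
  type T;
  @@
  - rte_memcpy(dst, src, sizeof(T))
  + memcpy(dst, src, sizeof(T))

  @@
  expression dst, src, e;
  @@
  - rte_memcpy(dst, src, sizeof(e))
  + memcpy(dst, src, sizeof(e))

  @@
  expression dst, src;
  constant c;
  @@
  - rte_memcpy(dst, src, c)
  + memcpy(dst, src, c)

 A rule of this shape would be applied in-place with something like
 "spatch --sp-file rte_memcpy.cocci --in-place --dir drivers/net/mlx5",
 yielding hunks of the form shown in the patch below.]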
  

Patch

diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 9fa400fc48ef..6380a5c83cd3 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -512,7 +512,7 @@  mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
 		 * during queue setup.
 		 */
 		MLX5_ASSERT(hca_attr->hairpin_data_buffer_locked);
-		rte_memcpy(&locked_attr, &unlocked_attr, sizeof(locked_attr));
+		memcpy(&locked_attr, &unlocked_attr, sizeof(locked_attr));
 		locked_attr.hairpin_data_buffer_type =
 				MLX5_RQC_HAIRPIN_DATA_BUFFER_TYPE_LOCKED_INTERNAL_BUFFER;
 		tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &locked_attr,
@@ -1289,7 +1289,7 @@  mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 		 */
 		MLX5_ASSERT(hca_attr->hairpin_sq_wq_in_host_mem);
 		MLX5_ASSERT(hca_attr->hairpin_sq_wqe_bb_size > 0);
-		rte_memcpy(&host_mem_attr, &dev_mem_attr, sizeof(host_mem_attr));
+		memcpy(&host_mem_attr, &dev_mem_attr, sizeof(host_mem_attr));
 		umem_size = MLX5_WQE_SIZE *
 			RTE_BIT32(host_mem_attr.wq_attr.log_hairpin_num_packets);
 		umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2b2ae626186e..53c20592b91a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4495,8 +4495,8 @@  flow_action_handles_translate(struct rte_eth_dev *dev,
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
 				 NULL, "too many shared actions");
 		}
-		rte_memcpy(&handle[copied_n].action, &actions[n].conf,
-			   sizeof(actions[n].conf));
+		memcpy(&handle[copied_n].action, &actions[n].conf,
+		       sizeof(actions[n].conf));
 		handle[copied_n].index = n;
 		copied_n++;
 	}
@@ -5362,29 +5362,29 @@  flow_hairpin_split(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
-			rte_memcpy(actions_tx, actions,
+			memcpy(actions_tx, actions,
 			       sizeof(struct rte_flow_action));
 			actions_tx++;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
 			if (push_vlan) {
-				rte_memcpy(actions_tx, actions,
-					   sizeof(struct rte_flow_action));
+				memcpy(actions_tx, actions,
+				       sizeof(struct rte_flow_action));
 				actions_tx++;
 			} else {
-				rte_memcpy(actions_rx, actions,
-					   sizeof(struct rte_flow_action));
+				memcpy(actions_rx, actions,
+				       sizeof(struct rte_flow_action));
 				actions_rx++;
 			}
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 			if (encap) {
-				rte_memcpy(actions_tx, actions,
-					   sizeof(struct rte_flow_action));
+				memcpy(actions_tx, actions,
+				       sizeof(struct rte_flow_action));
 				actions_tx++;
 			} else {
-				rte_memcpy(actions_rx, actions,
-					   sizeof(struct rte_flow_action));
+				memcpy(actions_rx, actions,
+				       sizeof(struct rte_flow_action));
 				actions_rx++;
 			}
 			break;
@@ -5396,8 +5396,8 @@  flow_hairpin_split(struct rte_eth_dev *dev,
 				actions_tx++;
 				encap = 1;
 			} else {
-				rte_memcpy(actions_rx, actions,
-					   sizeof(struct rte_flow_action));
+				memcpy(actions_rx, actions,
+				       sizeof(struct rte_flow_action));
 				actions_rx++;
 			}
 			break;
@@ -5408,14 +5408,14 @@  flow_hairpin_split(struct rte_eth_dev *dev,
 				       sizeof(struct rte_flow_action));
 				actions_tx++;
 			} else {
-				rte_memcpy(actions_rx, actions,
-					   sizeof(struct rte_flow_action));
+				memcpy(actions_rx, actions,
+				       sizeof(struct rte_flow_action));
 				actions_rx++;
 			}
 			break;
 		default:
-			rte_memcpy(actions_rx, actions,
-				   sizeof(struct rte_flow_action));
+			memcpy(actions_rx, actions,
+			       sizeof(struct rte_flow_action));
 			actions_rx++;
 			break;
 		}
@@ -5425,7 +5425,7 @@  flow_hairpin_split(struct rte_eth_dev *dev,
 	tag_action->type = (enum rte_flow_action_type)
 			   MLX5_RTE_FLOW_ACTION_TYPE_TAG;
 	actions_rx++;
-	rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
+	memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
 	actions_rx++;
 	set_tag = (void *)actions_rx;
 	*set_tag = (struct mlx5_rte_flow_action_set_tag) {
@@ -5435,7 +5435,7 @@  flow_hairpin_split(struct rte_eth_dev *dev,
 	MLX5_ASSERT(set_tag->id > REG_NON);
 	tag_action->conf = set_tag;
 	/* Create Tx item list. */
-	rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
+	memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
 	addr = (void *)&pattern_tx[2];
 	item = pattern_tx;
 	item->type = (enum rte_flow_item_type)
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index f3114434729e..49de92c675cd 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -1327,9 +1327,9 @@  mlx5_aso_ct_status_update(struct mlx5_aso_sq *sq, uint16_t num)
 		MLX5_ASSERT(ct);
 		MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_READY);
 		if (sq->elts[idx].query_data)
-			rte_memcpy(sq->elts[idx].query_data,
-				   (char *)((uintptr_t)sq->mr.addr + idx * 64),
-				   64);
+			memcpy(sq->elts[idx].query_data,
+			       (char *)((uintptr_t)sq->mr.addr + idx * 64),
+			       64);
 	}
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index bcf43f545779..04b12bba95a9 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1970,7 +1970,7 @@  mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "translate modify_header: no memory for modify header context");
-	rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
+	memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
 	pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
 	if (mhdr->shared) {
 		uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
@@ -2823,9 +2823,9 @@  flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
 	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
 		return 0;
 	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
-		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
+		memcpy(values, &mhdr_action->src.value, sizeof(values));
 	else
-		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
+		memcpy(values, mhdr_action->src.pvalue, sizeof(values));
 	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
 	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
 	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
@@ -4467,7 +4467,7 @@  flow_hw_table_create(struct rte_eth_dev *dev,
 			rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					   "Failed to create template table");
 		else
-			rte_memcpy(error, &sub_error, sizeof(sub_error));
+			memcpy(error, &sub_error, sizeof(sub_error));
 	}
 	return NULL;
 }
@@ -7773,8 +7773,8 @@  flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
 	struct rte_flow_action actions_m[4] = { { 0 } };
 	unsigned int idx = 0;
 
-	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
-	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
+	memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
+	memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
 				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
 				   &set_tag_v, &set_tag_m);
@@ -8181,8 +8181,8 @@  flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
 	};
 
 	set_reg_v.dst.offset = rte_bsf32(marker_mask);
-	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
-	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
+	memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
+	memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
 }
 
diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 5bf1a679b2d8..e35f9abd2064 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -761,9 +761,9 @@  mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 					ret = check_cqe_iteration(next, rxq->cqe_n, rxq->cq_ci);
 					if (ret != MLX5_CQE_STATUS_SW_OWN ||
 					    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-						rte_memcpy(&rxq->title_cqe,
-							   (const void *)(uintptr_t)cqe,
-							   sizeof(struct mlx5_cqe));
+						memcpy(&rxq->title_cqe,
+						       (const void *)(uintptr_t)cqe,
+						       sizeof(struct mlx5_cqe));
 				}
 			}
 		}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 2363d7ed27a7..c3bcd3ef16de 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -349,8 +349,8 @@  rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 		ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
 		if (ret != MLX5_CQE_STATUS_SW_OWN ||
 		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+			memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+			       sizeof(struct rte_mbuf));
 	}
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
@@ -499,8 +499,8 @@  rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 		ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
 		if (ret != MLX5_CQE_STATUS_SW_OWN ||
 		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+			memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+			       sizeof(struct rte_mbuf));
 	}
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {