@@ -560,7 +560,7 @@ cn10k_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
/* Set model info */
memset(info, 0, sizeof(struct rte_ml_model_info));
- rte_memcpy(info->name, metadata->model.name, MRVL_ML_MODEL_NAME_LEN);
+ memcpy(info->name, metadata->model.name, MRVL_ML_MODEL_NAME_LEN);
snprintf(info->version, RTE_ML_STR_MAX, "%u.%u.%u.%u", metadata->model.version[0],
metadata->model.version[1], metadata->model.version[2],
metadata->model.version[3]);
@@ -579,7 +579,7 @@ cn10k_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
/* Set input info */
for (i = 0; i < info->nb_inputs; i++) {
- rte_memcpy(input[i].name, io_info->input[i].name, MRVL_ML_INPUT_NAME_LEN);
+ memcpy(input[i].name, io_info->input[i].name, MRVL_ML_INPUT_NAME_LEN);
input[i].nb_dims = io_info->input[i].nb_dims;
input[i].shape = &io_info->input[i].shape[0];
input[i].type = io_info->input[i].qtype;
@@ -590,7 +590,7 @@ cn10k_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
/* Set output info */
for (i = 0; i < info->nb_outputs; i++) {
- rte_memcpy(output[i].name, io_info->output[i].name, MRVL_ML_INPUT_NAME_LEN);
+ memcpy(output[i].name, io_info->output[i].name, MRVL_ML_INPUT_NAME_LEN);
output[i].nb_dims = io_info->output[i].nb_dims;
output[i].shape = &io_info->output[i].shape[0];
output[i].type = io_info->output[i].qtype;
@@ -668,11 +668,11 @@ cn10k_ml_layer_load(void *device, uint16_t model_id, const char *layer_name, uin
}
/* Copy metadata to internal buffer */
- rte_memcpy(&layer->glow.metadata, buffer, sizeof(struct cn10k_ml_model_metadata));
+ memcpy(&layer->glow.metadata, buffer, sizeof(struct cn10k_ml_model_metadata));
cn10k_ml_model_metadata_update(&layer->glow.metadata);
/* Set layer name */
- rte_memcpy(layer->name, layer->glow.metadata.model.name, MRVL_ML_MODEL_NAME_LEN);
+ memcpy(layer->name, layer->glow.metadata.model.name, MRVL_ML_MODEL_NAME_LEN);
/* Enable support for batch_size of 256 */
if (layer->glow.metadata.model.batch_size == 0)
@@ -748,11 +748,11 @@ cn10k_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *
model->subtype = ML_CNXK_MODEL_SUBTYPE_GLOW_MRVL;
/* Copy metadata to internal buffer */
- rte_memcpy(&model->glow.metadata, params->addr, sizeof(struct cn10k_ml_model_metadata));
+ memcpy(&model->glow.metadata, params->addr, sizeof(struct cn10k_ml_model_metadata));
cn10k_ml_model_metadata_update(&model->glow.metadata);
/* Set model name */
- rte_memcpy(model->name, (char *)model->glow.metadata.model.name, 64);
+ memcpy(model->name, (char *)model->glow.metadata.model.name, 64);
/* Enable support for batch_size of 256 */
if (model->glow.metadata.model.batch_size == 0)
@@ -1400,7 +1400,7 @@ cnxk_ml_model_info_get(struct rte_ml_dev *dev, uint16_t model_id,
}
info = (struct rte_ml_model_info *)model->info;
- rte_memcpy(model_info, info, sizeof(struct rte_ml_model_info));
+ memcpy(model_info, info, sizeof(struct rte_ml_model_info));
model_info->input_info = info->input_info;
model_info->output_info = info->output_info;
@@ -310,7 +310,7 @@ mvtvm_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
goto tvm_mrvl_model;
metadata = &model->mvtvm.metadata;
- rte_memcpy(info->name, metadata->model.name, TVMDP_NAME_STRLEN);
+ memcpy(info->name, metadata->model.name, TVMDP_NAME_STRLEN);
snprintf(info->version, RTE_ML_STR_MAX, "%u.%u.%u.%u", metadata->model.version[0],
metadata->model.version[1], metadata->model.version[2],
metadata->model.version[3]);
@@ -327,7 +327,7 @@ mvtvm_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
/* Set input info */
for (i = 0; i < info->nb_inputs; i++) {
- rte_memcpy(input[i].name, metadata->input[i].name, MRVL_ML_INPUT_NAME_LEN);
+ memcpy(input[i].name, metadata->input[i].name, MRVL_ML_INPUT_NAME_LEN);
input[i].nb_dims = metadata->input[i].ndim;
input[i].shape = &model->mvtvm.info.input[i].shape[0];
input[i].type = model->mvtvm.info.input[i].qtype;
@@ -338,7 +338,7 @@ mvtvm_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
/* Set output info */
for (i = 0; i < info->nb_outputs; i++) {
- rte_memcpy(output[i].name, metadata->output[i].name, MRVL_ML_OUTPUT_NAME_LEN);
+ memcpy(output[i].name, metadata->output[i].name, MRVL_ML_OUTPUT_NAME_LEN);
output[i].nb_dims = metadata->output[i].ndim;
output[i].shape = &model->mvtvm.info.output[i].shape[0];
output[i].type = model->mvtvm.info.output[i].qtype;
@@ -194,7 +194,7 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *
/* Copy mod.so */
model->mvtvm.object.so.addr = mz->addr;
model->mvtvm.object.so.size = object[0].size;
- rte_memcpy(model->mvtvm.object.so.name, object[0].name, TVMDP_NAME_STRLEN);
+ memcpy(model->mvtvm.object.so.name, object[0].name, TVMDP_NAME_STRLEN);
rte_memcpy(model->mvtvm.object.so.addr, object[0].buffer, object[0].size);
rte_free(object[0].buffer);
@@ -203,7 +203,7 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *
RTE_PTR_ADD(model->mvtvm.object.so.addr,
RTE_ALIGN_CEIL(model->mvtvm.object.so.size, RTE_CACHE_LINE_MIN_SIZE));
model->mvtvm.object.json.size = object[1].size;
- rte_memcpy(model->mvtvm.object.json.name, object[1].name, TVMDP_NAME_STRLEN);
+ memcpy(model->mvtvm.object.json.name, object[1].name, TVMDP_NAME_STRLEN);
rte_memcpy(model->mvtvm.object.json.addr, object[1].buffer, object[1].size);
rte_free(object[1].buffer);
@@ -212,7 +212,7 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *
RTE_PTR_ADD(model->mvtvm.object.json.addr,
RTE_ALIGN_CEIL(model->mvtvm.object.json.size, RTE_CACHE_LINE_MIN_SIZE));
model->mvtvm.object.params.size = object[2].size;
- rte_memcpy(model->mvtvm.object.params.name, object[2].name, TVMDP_NAME_STRLEN);
+ memcpy(model->mvtvm.object.params.name, object[2].name, TVMDP_NAME_STRLEN);
rte_memcpy(model->mvtvm.object.params.addr, object[2].buffer, object[2].size);
rte_free(object[2].buffer);