[v2,19/37] ml/cnxk: enable support to get model information

Message ID: 20221208201806.21893-20-syalavarthi@marvell.com (mailing list archive)
State: Superseded, archived
Delegated to: Thomas Monjalon
Series: Implementation of ML CNXK driver

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Srikanth Yalavarthi, Dec. 8, 2022, 8:17 p.m. UTC
Added driver functions to get model information. Added internal functions to set and get model info.

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_model.c | 54 ++++++++++++++++++++++++++++++++
 drivers/ml/cnxk/cn10k_ml_model.h |  9 ++++++
 drivers/ml/cnxk/cn10k_ml_ops.c   | 37 ++++++++++++++++++++--
 3 files changed, 98 insertions(+), 2 deletions(-)
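
For context, the sketch below shows how an application might query this information once the new dev op is wired up. It is a minimal example, assuming the rte_ml_model_info_get() prototype and the rte_mldev.h header name from the accompanying mldev library series; the fields used (name, version, nb_inputs, input_info, nb_outputs, output_info) are the ones this patch populates.

#include <stdio.h>
#include <stdint.h>
#include <rte_mldev.h>  /* public mldev header from the library series (assumed name) */

/* Hypothetical helper: dump basic details of a loaded model. */
static int
dump_model_info(int16_t dev_id, int16_t model_id)
{
	struct rte_ml_model_info info;
	uint32_t i;

	/* Assumed prototype: rte_ml_model_info_get(dev_id, model_id, &info) */
	if (rte_ml_model_info_get(dev_id, model_id, &info) != 0)
		return -1;

	printf("model %s, version %s: %u input(s), %u output(s)\n", info.name, info.version,
	       (unsigned int)info.nb_inputs, (unsigned int)info.nb_outputs);

	/* input_info/output_info point into the driver's internal info buffer */
	for (i = 0; i < info.nb_inputs; i++)
		printf("  input[%u]:  %s\n", i, info.input_info[i].name);
	for (i = 0; i < info.nb_outputs; i++)
		printf("  output[%u]: %s\n", i, info.output_info[i].name);

	return 0;
}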
  

Patch

diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c
index 11b52af68c..19595656ae 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.c
+++ b/drivers/ml/cnxk/cn10k_ml_model.c
@@ -340,3 +340,57 @@  cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, int16_t model_id, uin
 
 	return 0;
 }
+
+void
+cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model)
+{
+	struct rte_ml_model_info *info;
+	struct rte_ml_io_info *output;
+	struct rte_ml_io_info *input;
+	uint8_t i;
+
+	info = PLT_PTR_CAST(model->info);
+	input = PLT_PTR_ADD(info, sizeof(struct rte_ml_model_info));
+	output =
+		PLT_PTR_ADD(input, model->metadata.model.num_input * sizeof(struct rte_ml_io_info));
+
+	/* Set model info */
+	memset(info, 0, sizeof(struct rte_ml_model_info));
+	memcpy(info->name, model->metadata.model.name, MRVL_ML_MODEL_NAME_LEN);
+	snprintf(info->version, RTE_ML_STR_MAX, "%u.%u.%u.%u", model->metadata.model.version[0],
+		 model->metadata.model.version[1], model->metadata.model.version[2],
+		 model->metadata.model.version[3]);
+	info->model_id = model->model_id;
+	info->device_id = dev->data->dev_id;
+	info->batch_size = model->batch_size;
+	info->nb_inputs = model->metadata.model.num_input;
+	info->input_info = input;
+	info->nb_outputs = model->metadata.model.num_output;
+	info->output_info = output;
+	info->wb_size = model->metadata.weights_bias.file_size;
+
+	/* Set input info */
+	for (i = 0; i < info->nb_inputs; i++) {
+		memcpy(input[i].name, model->metadata.input[i].input_name, MRVL_ML_INPUT_NAME_LEN);
+		input[i].dtype = model->metadata.input[i].input_type;
+		input[i].qtype = model->metadata.input[i].model_input_type;
+		input[i].shape.format = model->metadata.input[i].shape.format;
+		input[i].shape.w = model->metadata.input[i].shape.w;
+		input[i].shape.x = model->metadata.input[i].shape.x;
+		input[i].shape.y = model->metadata.input[i].shape.y;
+		input[i].shape.z = model->metadata.input[i].shape.z;
+	}
+
+	/* Set output info */
+	for (i = 0; i < info->nb_outputs; i++) {
+		memcpy(output[i].name, model->metadata.output[i].output_name,
+		       MRVL_ML_OUTPUT_NAME_LEN);
+		output[i].dtype = model->metadata.output[i].output_type;
+		output[i].qtype = model->metadata.output[i].model_output_type;
+		output[i].shape.format = RTE_ML_IO_FORMAT_1D;
+		output[i].shape.w = model->metadata.output[i].size;
+		output[i].shape.x = 1;
+		output[i].shape.y = 1;
+		output[i].shape.z = 1;
+	}
+}
diff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h
index 64160032c1..2372ac9b72 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.h
+++ b/drivers/ml/cnxk/cn10k_ml_model.h
@@ -425,6 +425,14 @@  struct cn10k_ml_model {
 	/* Tile and memory information object */
 	struct cn10k_ml_ocm_model_map model_mem_map;
 
+	/* Internal model information structure
+	 * Size of the buffer = sizeof(struct rte_ml_model_info)
+	 *                    + num_inputs * sizeof(struct rte_ml_io_info)
+	 *                    + num_outputs * sizeof(struct rte_ml_io_info).
+	 * Structures would be arranged in the same order in the buffer.
+	 */
+	uint8_t *info;
+
 	/* Model lock, used to update model state */
 	plt_spinlock_t lock;
 
@@ -441,5 +449,6 @@  void cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer,
 				uint8_t *base_dma_addr);
 int cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, int16_t model_id, uint8_t *buffer,
 				   uint16_t *wb_pages, uint16_t *scratch_pages);
+void cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model);
 
 #endif /* _CN10K_ML_MODEL_H_ */
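
As a worked illustration of the buffer layout described by the new comment above (a sketch only, not driver code; the struct definitions are assumed to come from the mldev library's rte_mldev.h):

#include <stdint.h>
#include <stddef.h>
#include <rte_mldev.h>  /* struct rte_ml_model_info / rte_ml_io_info (assumed header name) */

/* Size of the flat info buffer, before the ML_CN10K_ALIGN_SIZE round-up
 * applied in cn10k_ml_model_load().
 */
static size_t
info_buffer_size(uint8_t num_input, uint8_t num_output)
{
	return sizeof(struct rte_ml_model_info) +
	       (size_t)(num_input + num_output) * sizeof(struct rte_ml_io_info);
}

/* The three regions sit back to back, in the same order that
 * cn10k_ml_model_info_set() derives them: the info struct, then the
 * input array, then the output array.
 */
static void
info_buffer_layout(uint8_t *buf, uint8_t num_input, struct rte_ml_model_info **info,
		   struct rte_ml_io_info **input, struct rte_ml_io_info **output)
{
	*info = (struct rte_ml_model_info *)buf;
	*input = (struct rte_ml_io_info *)(buf + sizeof(struct rte_ml_model_info));
	*output = *input + num_input;
}
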
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index a0b0fc7e1f..f26cfcfd06 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -506,6 +506,7 @@  cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	char str[RTE_MEMZONE_NAMESIZE];
 	const struct plt_memzone *mz;
 	size_t model_data_size;
+	size_t model_info_size;
 	uint8_t *base_dma_addr;
 	uint16_t scratch_pages;
 	uint16_t wb_pages;
@@ -544,8 +545,13 @@  cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	model_data_size = metadata->init_model.file_size + metadata->main_model.file_size +
 			  metadata->finish_model.file_size + metadata->weights_bias.file_size;
 	model_data_size = PLT_ALIGN_CEIL(model_data_size, ML_CN10K_ALIGN_SIZE);
+	model_info_size = sizeof(struct rte_ml_model_info) +
+			  metadata->model.num_input * sizeof(struct rte_ml_io_info) +
+			  metadata->model.num_output * sizeof(struct rte_ml_io_info);
+	model_info_size = PLT_ALIGN_CEIL(model_info_size, ML_CN10K_ALIGN_SIZE);
+
 	mz_size = PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_model), ML_CN10K_ALIGN_SIZE) +
-		  2 * model_data_size +
+		  2 * model_data_size + model_info_size +
 		  PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_req), ML_CN10K_ALIGN_SIZE);
 
 	/* Allocate memzone for model object and model data */
@@ -559,6 +565,9 @@  cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	model = mz->addr;
 	model->mldev = mldev;
 	model->model_id = idx;
+	model->info = PLT_PTR_ADD(
+		mz->addr, PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_model), ML_CN10K_ALIGN_SIZE) +
+				  2 * model_data_size);
 
 	memcpy(&model->metadata, params->addr, sizeof(struct cn10k_ml_model_metadata));
 	cn10k_ml_model_metadata_update(&model->metadata);
@@ -587,7 +596,10 @@  cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	/* Set slow-path request address and state */
 	model->req = PLT_PTR_ADD(
 		mz->addr, PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_model), ML_CN10K_ALIGN_SIZE) +
-				  2 * model_data_size);
+				  2 * model_data_size + model_info_size);
+
+	/* Set model info */
+	cn10k_ml_model_info_set(dev, model);
 
 	plt_spinlock_init(&model->lock);
 	model->state = ML_CN10K_MODEL_STATE_LOADED;
@@ -877,6 +889,26 @@  cn10k_ml_model_stop(struct rte_ml_dev *dev, int16_t model_id)
 	return ret;
 }
 
+static int
+cn10k_ml_model_info_get(struct rte_ml_dev *dev, int16_t model_id,
+			struct rte_ml_model_info *model_info)
+{
+	struct cn10k_ml_model *model;
+
+	model = dev->data->models[model_id];
+
+	if (model == NULL) {
+		plt_err("Invalid model_id = %d", model_id);
+		return -EINVAL;
+	}
+
+	memcpy(model_info, model->info, sizeof(struct rte_ml_model_info));
+	model_info->input_info = ((struct rte_ml_model_info *)model->info)->input_info;
+	model_info->output_info = ((struct rte_ml_model_info *)model->info)->output_info;
+
+	return 0;
+}
+
 struct rte_ml_dev_ops cn10k_ml_ops = {
 	/* Device control ops */
 	.dev_info_get = cn10k_ml_dev_info_get,
@@ -894,4 +926,5 @@  struct rte_ml_dev_ops cn10k_ml_ops = {
 	.model_unload = cn10k_ml_model_unload,
 	.model_start = cn10k_ml_model_start,
 	.model_stop = cn10k_ml_model_stop,
+	.model_info_get = cn10k_ml_model_info_get,
 };