[v5,20/39] ml/cnxk: enable support to get model information

Message ID 20230207160719.1307-21-syalavarthi@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series Implementation of ML CNXK driver

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Srikanth Yalavarthi Feb. 7, 2023, 4:07 p.m. UTC
  Added driver functions to get model information. Added
internal functions to set and get model information.

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_model.c | 55 ++++++++++++++++++++++++++++++++
 drivers/ml/cnxk/cn10k_ml_model.h |  9 ++++++
 drivers/ml/cnxk/cn10k_ml_ops.c   | 37 ++++++++++++++++++---
 3 files changed, 97 insertions(+), 4 deletions(-)
  

Patch

diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c
index 69d6306104..0ded355d81 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.c
+++ b/drivers/ml/cnxk/cn10k_ml_model.c
@@ -356,3 +356,58 @@  cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui
 
 	return 0;
 }
+
+void
+cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model)
+{
+	struct rte_ml_model_info *info;
+	struct rte_ml_io_info *output;
+	struct rte_ml_io_info *input;
+	uint8_t i;
+
+	info = PLT_PTR_CAST(model->info);
+	input = PLT_PTR_ADD(info, sizeof(struct rte_ml_model_info));
+	output =
+		PLT_PTR_ADD(input, model->metadata.model.num_input * sizeof(struct rte_ml_io_info));
+
+	/* Set model info */
+	memset(info, 0, sizeof(struct rte_ml_model_info));
+	rte_memcpy(info->name, model->metadata.model.name, MRVL_ML_MODEL_NAME_LEN);
+	snprintf(info->version, RTE_ML_STR_MAX, "%u.%u.%u.%u", model->metadata.model.version[0],
+		 model->metadata.model.version[1], model->metadata.model.version[2],
+		 model->metadata.model.version[3]);
+	info->model_id = model->model_id;
+	info->device_id = dev->data->dev_id;
+	info->batch_size = model->batch_size;
+	info->nb_inputs = model->metadata.model.num_input;
+	info->input_info = input;
+	info->nb_outputs = model->metadata.model.num_output;
+	info->output_info = output;
+	info->wb_size = model->metadata.weights_bias.file_size;
+
+	/* Set input info */
+	for (i = 0; i < info->nb_inputs; i++) {
+		rte_memcpy(input[i].name, model->metadata.input[i].input_name,
+			   MRVL_ML_INPUT_NAME_LEN);
+		input[i].dtype = model->metadata.input[i].input_type;
+		input[i].qtype = model->metadata.input[i].model_input_type;
+		input[i].shape.format = model->metadata.input[i].shape.format;
+		input[i].shape.w = model->metadata.input[i].shape.w;
+		input[i].shape.x = model->metadata.input[i].shape.x;
+		input[i].shape.y = model->metadata.input[i].shape.y;
+		input[i].shape.z = model->metadata.input[i].shape.z;
+	}
+
+	/* Set output info */
+	for (i = 0; i < info->nb_outputs; i++) {
+		rte_memcpy(output[i].name, model->metadata.output[i].output_name,
+			   MRVL_ML_OUTPUT_NAME_LEN);
+		output[i].dtype = model->metadata.output[i].output_type;
+		output[i].qtype = model->metadata.output[i].model_output_type;
+		output[i].shape.format = RTE_ML_IO_FORMAT_1D;
+		output[i].shape.w = model->metadata.output[i].size;
+		output[i].shape.x = 1;
+		output[i].shape.y = 1;
+		output[i].shape.z = 1;
+	}
+}
diff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h
index 355915deeb..75990fe1e4 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.h
+++ b/drivers/ml/cnxk/cn10k_ml_model.h
@@ -422,6 +422,14 @@  struct cn10k_ml_model {
 	/* Tile and memory information object */
 	struct cn10k_ml_ocm_model_map model_mem_map;
 
+	/* Internal model information structure
+	 * Size of the buffer = sizeof(struct rte_ml_model_info)
+	 *                    + num_inputs * sizeof(struct rte_ml_io_info)
+	 *                    + num_outputs * sizeof(struct rte_ml_io_info).
+	 * Structures would be arranged in the same order in the buffer.
+	 */
+	uint8_t *info;
+
 	/* Spinlock, used to update model state */
 	plt_spinlock_t lock;
 
@@ -438,5 +446,6 @@  void cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer,
 				uint8_t *base_dma_addr);
 int cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, uint8_t *buffer,
 				   uint16_t *wb_pages, uint16_t *scratch_pages);
+void cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model);
 
 #endif /* _CN10K_ML_MODEL_H_ */
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index 77d3728d8d..ad9b3dfd21 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -506,6 +506,7 @@  cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	char str[RTE_MEMZONE_NAMESIZE];
 	const struct plt_memzone *mz;
 	size_t model_data_size;
+	size_t model_info_size;
 	uint8_t *base_dma_addr;
 	uint16_t scratch_pages;
 	uint16_t wb_pages;
@@ -544,8 +545,13 @@  cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	model_data_size = metadata->init_model.file_size + metadata->main_model.file_size +
 			  metadata->finish_model.file_size + metadata->weights_bias.file_size;
 	model_data_size = PLT_ALIGN_CEIL(model_data_size, ML_CN10K_ALIGN_SIZE);
+	model_info_size = sizeof(struct rte_ml_model_info) +
+			  metadata->model.num_input * sizeof(struct rte_ml_io_info) +
+			  metadata->model.num_output * sizeof(struct rte_ml_io_info);
+	model_info_size = PLT_ALIGN_CEIL(model_info_size, ML_CN10K_ALIGN_SIZE);
+
 	mz_size = PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_model), ML_CN10K_ALIGN_SIZE) +
-		  2 * model_data_size +
+		  2 * model_data_size + model_info_size +
 		  PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_req), ML_CN10K_ALIGN_SIZE);
 
 	/* Allocate memzone for model object and model data */
@@ -585,10 +591,12 @@  cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	model->model_mem_map.wb_pages = wb_pages;
 	model->model_mem_map.scratch_pages = scratch_pages;
 
+	/* Set model info */
+	model->info = PLT_PTR_ADD(base_dma_addr, 2 * model_data_size);
+	cn10k_ml_model_info_set(dev, model);
+
 	/* Set slow-path request address and state */
-	model->req = PLT_PTR_ADD(
-		mz->addr, PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_model), ML_CN10K_ALIGN_SIZE) +
-				  2 * model_data_size);
+	model->req = PLT_PTR_ADD(model->info, model_info_size);
 
 	plt_spinlock_init(&model->lock);
 	model->state = ML_CN10K_MODEL_STATE_LOADED;
@@ -877,6 +885,26 @@  cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)
 	return ret;
 }
 
+static int
+cn10k_ml_model_info_get(struct rte_ml_dev *dev, uint16_t model_id,
+			struct rte_ml_model_info *model_info)
+{
+	struct cn10k_ml_model *model;
+
+	model = dev->data->models[model_id];
+
+	if (model == NULL) {
+		plt_err("Invalid model_id = %u", model_id);
+		return -EINVAL;
+	}
+
+	rte_memcpy(model_info, model->info, sizeof(struct rte_ml_model_info));
+	model_info->input_info = ((struct rte_ml_model_info *)model->info)->input_info;
+	model_info->output_info = ((struct rte_ml_model_info *)model->info)->output_info;
+
+	return 0;
+}
+
 struct rte_ml_dev_ops cn10k_ml_ops = {
 	/* Device control ops */
 	.dev_info_get = cn10k_ml_dev_info_get,
@@ -894,4 +922,5 @@  struct rte_ml_dev_ops cn10k_ml_ops = {
 	.model_unload = cn10k_ml_model_unload,
 	.model_start = cn10k_ml_model_start,
 	.model_stop = cn10k_ml_model_stop,
+	.model_info_get = cn10k_ml_model_info_get,
 };