[v1,06/12] mldev: support input and output data handling

Message ID: 20221114120238.2143832-7-jerinj@marvell.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Thomas Monjalon
Series: mldev: introduce machine learning device library

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Jerin Jacob Kollanukkaran Nov. 14, 2022, 12:02 p.m. UTC
  From: Srikanth Yalavarthi <syalavarthi@marvell.com>

Added RTE library functions to handle model input and
output data. The APIs can be used to get the size of I/O
buffers, quantize input data and dequantize output data.

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
---
 lib/mldev/rte_mldev.c      |  94 ++++++++++++++++++++++++++++++++
 lib/mldev/rte_mldev_core.h | 106 +++++++++++++++++++++++++++++++++++++
 lib/mldev/version.map      |   4 ++
 3 files changed, 204 insertions(+)
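
For context, a minimal usage sketch of the new APIs, as an application might
call them around an inference run; this is not part of the patch. The helper
name, single-batch count and buffer handling are illustrative assumptions
(d_input/d_output are expected to hold at least input_dsize/output_dsize bytes):

#include <errno.h>
#include <stdint.h>

#include <rte_malloc.h>
#include <rte_mldev.h>

/* Hypothetical helper: quantize one batch of input before inference and
 * de-quantize the output afterwards. Error handling is kept minimal.
 */
static int
ml_io_example(int16_t dev_id, int16_t model_id, void *d_input, void *d_output)
{
	uint64_t in_qsize, in_dsize, out_qsize, out_dsize;
	void *q_input = NULL, *q_output = NULL;
	int ret;

	/* Query quantized / de-quantized buffer sizes for a single batch. */
	ret = rte_ml_io_input_size_get(dev_id, model_id, 1, &in_qsize, &in_dsize);
	if (ret != 0)
		return ret;

	ret = rte_ml_io_output_size_get(dev_id, model_id, 1, &out_qsize, &out_dsize);
	if (ret != 0)
		return ret;

	q_input = rte_malloc("ml_q_input", in_qsize, 0);
	q_output = rte_malloc("ml_q_output", out_qsize, 0);
	if (q_input == NULL || q_output == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Convert de-quantized application data to the model's quantized format. */
	ret = rte_ml_io_quantize(dev_id, model_id, 1, d_input, q_input);
	if (ret != 0)
		goto exit;

	/* ... enqueue the inference with q_input, dequeue the result into q_output ... */

	/* Convert the quantized output back for the application. */
	ret = rte_ml_io_dequantize(dev_id, model_id, 1, q_output, d_output);

exit:
	rte_free(q_input);
	rte_free(q_output);
	return ret;
}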

Patch

diff --git a/lib/mldev/rte_mldev.c b/lib/mldev/rte_mldev.c
index 327ed7144d..13b7e93943 100644
--- a/lib/mldev/rte_mldev.c
+++ b/lib/mldev/rte_mldev.c
@@ -462,3 +462,97 @@  rte_ml_model_params_update(int16_t dev_id, int16_t model_id, void *buffer)
 
 	return (*dev->dev_ops->model_params_update)(dev, model_id, buffer);
 }
+
+int
+rte_ml_io_input_size_get(int16_t dev_id, int16_t model_id, uint32_t nb_batches,
+			 uint64_t *input_qsize, uint64_t *input_dsize)
+{
+	struct rte_ml_dev *dev;
+
+	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+		ML_DEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
+		return -EINVAL;
+	}
+
+	dev = rte_ml_dev_pmd_get_dev(dev_id);
+	if (*dev->dev_ops->io_input_size_get == NULL)
+		return -ENOTSUP;
+
+	return (*dev->dev_ops->io_input_size_get)(dev, model_id, nb_batches, input_qsize,
+						  input_dsize);
+}
+
+int
+rte_ml_io_output_size_get(int16_t dev_id, int16_t model_id, uint32_t nb_batches,
+			  uint64_t *output_qsize, uint64_t *output_dsize)
+{
+	struct rte_ml_dev *dev;
+
+	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+		ML_DEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
+		return -EINVAL;
+	}
+
+	dev = rte_ml_dev_pmd_get_dev(dev_id);
+	if (*dev->dev_ops->io_output_size_get == NULL)
+		return -ENOTSUP;
+
+	return (*dev->dev_ops->io_output_size_get)(dev, model_id, nb_batches, output_qsize,
+						   output_dsize);
+}
+
+int
+rte_ml_io_quantize(int16_t dev_id, int16_t model_id, uint16_t nb_batches, void *dbuffer,
+		   void *qbuffer)
+{
+	struct rte_ml_dev *dev;
+
+	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+		ML_DEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
+		return -EINVAL;
+	}
+
+	dev = rte_ml_dev_pmd_get_dev(dev_id);
+	if (*dev->dev_ops->io_quantize == NULL)
+		return -ENOTSUP;
+
+	if (dbuffer == NULL) {
+		ML_DEV_LOG(ERR, "Dev %d, dbuffer cannot be NULL\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (qbuffer == NULL) {
+		ML_DEV_LOG(ERR, "Dev %d, qbuffer cannot be NULL\n", dev_id);
+		return -EINVAL;
+	}
+
+	return (*dev->dev_ops->io_quantize)(dev, model_id, nb_batches, dbuffer, qbuffer);
+}
+
+int
+rte_ml_io_dequantize(int16_t dev_id, int16_t model_id, uint16_t nb_batches, void *qbuffer,
+		     void *dbuffer)
+{
+	struct rte_ml_dev *dev;
+
+	if (!rte_ml_dev_is_valid_dev(dev_id)) {
+		ML_DEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
+		return -EINVAL;
+	}
+
+	dev = rte_ml_dev_pmd_get_dev(dev_id);
+	if (*dev->dev_ops->io_dequantize == NULL)
+		return -ENOTSUP;
+
+	if (qbuffer == NULL) {
+		ML_DEV_LOG(ERR, "Dev %d, qbuffer cannot be NULL\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (dbuffer == NULL) {
+		ML_DEV_LOG(ERR, "Dev %d, dbuffer cannot be NULL\n", dev_id);
+		return -EINVAL;
+	}
+
+	return (*dev->dev_ops->io_dequantize)(dev, model_id, nb_batches, qbuffer, dbuffer);
+}
diff --git a/lib/mldev/rte_mldev_core.h b/lib/mldev/rte_mldev_core.h
index 172454c2aa..b388553a96 100644
--- a/lib/mldev/rte_mldev_core.h
+++ b/lib/mldev/rte_mldev_core.h
@@ -259,6 +259,100 @@  typedef int (*mldev_model_info_get_t)(struct rte_ml_dev *dev, int16_t model_id,
  */
 typedef int (*mldev_model_params_update_t)(struct rte_ml_dev *dev, int16_t model_id, void *buffer);
 
+/**
+ * @internal
+ *
+ * Get size of input buffers.
+ *
+ * @param dev
+ *	ML device pointer.
+ * @param model_id
+ *	Model ID to use.
+ * @param nb_batches
+ *	Number of batches.
+ * @param input_qsize
+ *	Size of quantized input.
+ * @param input_dsize
+ *	Size of dequantized input.
+ *
+ * @return
+ *	- 0 on success.
+ *	- <0, error on failure.
+ */
+typedef int (*mldev_io_input_size_get_t)(struct rte_ml_dev *dev, int16_t model_id,
+					 uint32_t nb_batches, uint64_t *input_qsize,
+					 uint64_t *input_dsize);
+
+/**
+ * @internal
+ *
+ * Get size of output buffers.
+ *
+ * @param dev
+ *	ML device pointer.
+ * @param model_id
+ *	Model ID to use.
+ * @param nb_batches
+ *	Number of batches.
+ * @param output_qsize
+ *	Size of quantized output.
+ * @param output_dsize
+ *	Size of dequantized output.
+ *
+ * @return
+ *	- 0 on success.
+ *	- <0, error on failure.
+ */
+typedef int (*mldev_io_output_size_get_t)(struct rte_ml_dev *dev, int16_t model_id,
+					  uint32_t nb_batches, uint64_t *output_qsize,
+					  uint64_t *output_dsize);
+
+/**
+ * @internal
+ *
+ * Quantize model data.
+ *
+ * @param dev
+ *	ML device pointer.
+ * @param model_id
+ *	Model ID to use.
+ * @param nb_batches
+ *	Number of batches.
+ * @param dbuffer
+ *	Pointer to de-quantized data buffer.
+ * @param qbuffer
+ *	Pointer to quantized data buffer.
+ *
+ * @return
+ *	- 0 on success.
+ *	- <0, error on failure.
+ */
+typedef int (*mldev_io_quantize_t)(struct rte_ml_dev *dev, int16_t model_id, uint16_t nb_batches,
+				   void *dbuffer, void *qbuffer);
+
+/**
+ * @internal
+ *
+ * De-quantize model data.
+ *
+ * @param dev
+ *	ML device pointer.
+ * @param model_id
+ *	Model ID to use.
+ * @param nb_batches
+ *	Number of batches.
+ * @param qbuffer
+ *	Pointer to quantized data buffer.
+ * @param dbuffer
+ *	Pointer to de-quantized data buffer.
+ *
+ * @return
+ *	- 0 on success.
+ *	- <0, error on failure.
+ */
+typedef int (*mldev_io_dequantize_t)(struct rte_ml_dev *dev, int16_t model_id, uint16_t nb_batches,
+				     void *qbuffer, void *dbuffer);
+
 /**
  * @internal
  *
@@ -303,6 +397,18 @@  struct rte_ml_dev_ops {
 
 	/** Update model params. */
 	mldev_model_params_update_t model_params_update;
+
+	/** Get input buffer size. */
+	mldev_io_input_size_get_t io_input_size_get;
+
+	/** Get output buffer size. */
+	mldev_io_output_size_get_t io_output_size_get;
+
+	/** Quantize data. */
+	mldev_io_quantize_t io_quantize;
+
+	/** De-quantize data. */
+	mldev_io_dequantize_t io_dequantize;
 };
 
 /**
diff --git a/lib/mldev/version.map b/lib/mldev/version.map
index 4459f02925..0b180020db 100644
--- a/lib/mldev/version.map
+++ b/lib/mldev/version.map
@@ -10,6 +10,10 @@  EXPERIMENTAL {
 	rte_ml_dev_socket_id;
 	rte_ml_dev_start;
 	rte_ml_dev_stop;
+	rte_ml_io_dequantize;
+	rte_ml_io_input_size_get;
+	rte_ml_io_output_size_get;
+	rte_ml_io_quantize;
 	rte_ml_model_info_get;
 	rte_ml_model_load;
 	rte_ml_model_params_update;
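
For reference, a minimal sketch of the driver side: how a PMD might implement
one of the new io_* callbacks and hook it into its rte_ml_dev_ops. The mydrv_*
names and the size computation are hypothetical and not part of this patch;
unimplemented callbacks may be left NULL, in which case the library returns
-ENOTSUP:

#include <stdint.h>

#include <rte_common.h>
#include <rte_mldev_core.h>

/* Hypothetical PMD callback: report per-batch input buffer sizes. */
static int
mydrv_io_input_size_get(struct rte_ml_dev *dev, int16_t model_id, uint32_t nb_batches,
			uint64_t *input_qsize, uint64_t *input_dsize)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(model_id);

	/* A real driver would derive these sizes from the loaded model's metadata. */
	if (input_qsize != NULL)
		*input_qsize = 1024 * nb_batches;
	if (input_dsize != NULL)
		*input_dsize = 4096 * nb_batches;

	return 0;
}

/* The remaining callbacks (io_output_size_get, io_quantize, io_dequantize)
 * follow the typedefs added in rte_mldev_core.h.
 */
static struct rte_ml_dev_ops mydrv_ml_ops = {
	/* ... existing callbacks ... */
	.io_input_size_get = mydrv_io_input_size_get,
	.io_output_size_get = NULL, /* illustrative: fill in as implemented */
	.io_quantize = NULL,
	.io_dequantize = NULL,
};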