[v1,3/3] mldev: drop input and output size get APIs

Message ID 20230830155303.30380-4-syalavarthi@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series: Spec changes to support multi I/O models

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/github-robot: build success github build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-sample-apps-testing success Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS

Commit Message

Srikanth Yalavarthi Aug. 30, 2023, 3:53 p.m. UTC
  Drop support and use of ML input and output size get functions,
rte_ml_io_input_size_get and rte_ml_io_output_size_get.

These functions are not required, as the model buffer size can
be computed from the fields of the updated rte_ml_io_info structure.

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_ops.c | 50 ----------------------------
 lib/mldev/rte_mldev.c          | 38 ---------------------
 lib/mldev/rte_mldev.h          | 60 ----------------------------------
 lib/mldev/rte_mldev_core.h     | 54 ------------------------------
 lib/mldev/version.map          |  2 --
 5 files changed, 204 deletions(-)
  

Patch

diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index 1d72fb52a6a..4abf4ae0d39 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -2110,54 +2110,6 @@  cn10k_ml_model_params_update(struct rte_ml_dev *dev, uint16_t model_id, void *bu
 	return 0;
 }
 
-static int
-cn10k_ml_io_input_size_get(struct rte_ml_dev *dev, uint16_t model_id, uint32_t nb_batches,
-			   uint64_t *input_qsize, uint64_t *input_dsize)
-{
-	struct cn10k_ml_model *model;
-
-	model = dev->data->models[model_id];
-
-	if (model == NULL) {
-		plt_err("Invalid model_id = %u", model_id);
-		return -EINVAL;
-	}
-
-	if (input_qsize != NULL)
-		*input_qsize = PLT_U64_CAST(model->addr.total_input_sz_q *
-					    PLT_DIV_CEIL(nb_batches, model->batch_size));
-
-	if (input_dsize != NULL)
-		*input_dsize = PLT_U64_CAST(model->addr.total_input_sz_d *
-					    PLT_DIV_CEIL(nb_batches, model->batch_size));
-
-	return 0;
-}
-
-static int
-cn10k_ml_io_output_size_get(struct rte_ml_dev *dev, uint16_t model_id, uint32_t nb_batches,
-			    uint64_t *output_qsize, uint64_t *output_dsize)
-{
-	struct cn10k_ml_model *model;
-
-	model = dev->data->models[model_id];
-
-	if (model == NULL) {
-		plt_err("Invalid model_id = %u", model_id);
-		return -EINVAL;
-	}
-
-	if (output_qsize != NULL)
-		*output_qsize = PLT_U64_CAST(model->addr.total_output_sz_q *
-					     PLT_DIV_CEIL(nb_batches, model->batch_size));
-
-	if (output_dsize != NULL)
-		*output_dsize = PLT_U64_CAST(model->addr.total_output_sz_d *
-					     PLT_DIV_CEIL(nb_batches, model->batch_size));
-
-	return 0;
-}
-
 static int
 cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buff_seg **dbuffer,
 		     struct rte_ml_buff_seg **qbuffer)
@@ -2636,8 +2588,6 @@  struct rte_ml_dev_ops cn10k_ml_ops = {
 	.model_params_update = cn10k_ml_model_params_update,
 
 	/* I/O ops */
-	.io_input_size_get = cn10k_ml_io_input_size_get,
-	.io_output_size_get = cn10k_ml_io_output_size_get,
 	.io_quantize = cn10k_ml_io_quantize,
 	.io_dequantize = cn10k_ml_io_dequantize,
 };
diff --git a/lib/mldev/rte_mldev.c b/lib/mldev/rte_mldev.c
index 9a48ed3e944..cc5f2e0cc63 100644
--- a/lib/mldev/rte_mldev.c
+++ b/lib/mldev/rte_mldev.c
@@ -691,44 +691,6 @@  rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer)
 	return (*dev->dev_ops->model_params_update)(dev, model_id, buffer);
 }
 
-int
-rte_ml_io_input_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
-			 uint64_t *input_qsize, uint64_t *input_dsize)
-{
-	struct rte_ml_dev *dev;
-
-	if (!rte_ml_dev_is_valid_dev(dev_id)) {
-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
-		return -EINVAL;
-	}
-
-	dev = rte_ml_dev_pmd_get_dev(dev_id);
-	if (*dev->dev_ops->io_input_size_get == NULL)
-		return -ENOTSUP;
-
-	return (*dev->dev_ops->io_input_size_get)(dev, model_id, nb_batches, input_qsize,
-						  input_dsize);
-}
-
-int
-rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
-			  uint64_t *output_qsize, uint64_t *output_dsize)
-{
-	struct rte_ml_dev *dev;
-
-	if (!rte_ml_dev_is_valid_dev(dev_id)) {
-		RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
-		return -EINVAL;
-	}
-
-	dev = rte_ml_dev_pmd_get_dev(dev_id);
-	if (*dev->dev_ops->io_output_size_get == NULL)
-		return -ENOTSUP;
-
-	return (*dev->dev_ops->io_output_size_get)(dev, model_id, nb_batches, output_qsize,
-						   output_dsize);
-}
-
 int
 rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **dbuffer,
 		   struct rte_ml_buff_seg **qbuffer)
diff --git a/lib/mldev/rte_mldev.h b/lib/mldev/rte_mldev.h
index 316c6fd0188..63b2670bb04 100644
--- a/lib/mldev/rte_mldev.h
+++ b/lib/mldev/rte_mldev.h
@@ -1008,66 +1008,6 @@  rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer);
 
 /* IO operations */
 
-/**
- * Get size of quantized and dequantized input buffers.
- *
- * Calculate the size of buffers required for quantized and dequantized input data.
- * This API would return the buffer sizes for the number of batches provided and would
- * consider the alignment requirements as per the PMD. Input sizes computed by this API can
- * be used by the application to allocate buffers.
- *
- * @param[in] dev_id
- *   The identifier of the device.
- * @param[in] model_id
- *   Identifier for the model created
- * @param[in] nb_batches
- *   Number of batches of input to be processed in a single inference job
- * @param[out] input_qsize
- *   Quantized input size pointer.
- * NULL value is allowed, in which case input_qsize is not calculated by the driver.
- * @param[out] input_dsize
- *   Dequantized input size pointer.
- * NULL value is allowed, in which case input_dsize is not calculated by the driver.
- *
- * @return
- *   - Returns 0 on success
- *   - Returns negative value on failure
- */
-__rte_experimental
-int
-rte_ml_io_input_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
-			 uint64_t *input_qsize, uint64_t *input_dsize);
-
-/**
- * Get size of quantized and dequantized output buffers.
- *
- * Calculate the size of buffers required for quantized and dequantized output data.
- * This API would return the buffer sizes for the number of batches provided and would consider
- * the alignment requirements as per the PMD. Output sizes computed by this API can be used by the
- * application to allocate buffers.
- *
- * @param[in] dev_id
- *   The identifier of the device.
- * @param[in] model_id
- *   Identifier for the model created
- * @param[in] nb_batches
- *   Number of batches of input to be processed in a single inference job
- * @param[out] output_qsize
- *   Quantized output size pointer.
- * NULL value is allowed, in which case output_qsize is not calculated by the driver.
- * @param[out] output_dsize
- *   Dequantized output size pointer.
- * NULL value is allowed, in which case output_dsize is not calculated by the driver.
- *
- * @return
- *   - Returns 0 on success
- *   - Returns negative value on failure
- */
-__rte_experimental
-int
-rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
-			  uint64_t *output_qsize, uint64_t *output_dsize);
-
 /**
  * Quantize input data.
  *
diff --git a/lib/mldev/rte_mldev_core.h b/lib/mldev/rte_mldev_core.h
index 8530b073162..2279b1dcecb 100644
--- a/lib/mldev/rte_mldev_core.h
+++ b/lib/mldev/rte_mldev_core.h
@@ -466,54 +466,6 @@  typedef int (*mldev_model_info_get_t)(struct rte_ml_dev *dev, uint16_t model_id,
  */
 typedef int (*mldev_model_params_update_t)(struct rte_ml_dev *dev, uint16_t model_id, void *buffer);
 
-/**
- * @internal
- *
- * Get size of input buffers.
- *
- * @param dev
- *	ML device pointer.
- * @param model_id
- *	Model ID to use.
- * @param nb_batches
- *	Number of batches.
- * @param input_qsize
- *	Size of quantized input.
- * @param input_dsize
- *	Size of dequantized input.
- *
- * @return
- *	- 0 on success.
- *	- <0, error on failure.
- */
-typedef int (*mldev_io_input_size_get_t)(struct rte_ml_dev *dev, uint16_t model_id,
-					 uint32_t nb_batches, uint64_t *input_qsize,
-					 uint64_t *input_dsize);
-
-/**
- * @internal
- *
- * Get size of output buffers.
- *
- * @param dev
- *	ML device pointer.
- * @param model_id
- *	Model ID to use.
- * @param nb_batches
- *	Number of batches.
- * @param output_qsize
- *	Size of quantized output.
- * @param output_dsize
- *	Size of dequantized output.
- *
- * @return
- *	- 0 on success.
- *	- <0, error on failure.
- */
-typedef int (*mldev_io_output_size_get_t)(struct rte_ml_dev *dev, uint16_t model_id,
-					  uint32_t nb_batches, uint64_t *output_qsize,
-					  uint64_t *output_dsize);
-
 /**
  * @internal
  *
@@ -627,12 +579,6 @@  struct rte_ml_dev_ops {
 	/** Update model params. */
 	mldev_model_params_update_t model_params_update;
 
-	/** Get input buffer size. */
-	mldev_io_input_size_get_t io_input_size_get;
-
-	/** Get output buffer size. */
-	mldev_io_output_size_get_t io_output_size_get;
-
 	/** Quantize data */
 	mldev_io_quantize_t io_quantize;
 
diff --git a/lib/mldev/version.map b/lib/mldev/version.map
index 40ff27f4b95..99841db6aa9 100644
--- a/lib/mldev/version.map
+++ b/lib/mldev/version.map
@@ -23,8 +23,6 @@  EXPERIMENTAL {
 	rte_ml_dev_xstats_reset;
 	rte_ml_enqueue_burst;
 	rte_ml_io_dequantize;
-	rte_ml_io_input_size_get;
-	rte_ml_io_output_size_get;
 	rte_ml_io_quantize;
 	rte_ml_model_info_get;
 	rte_ml_model_load;