[v6,09/12] app/mldev: enable support for inference batches

Message ID 20230311150905.26824-10-syalavarthi@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: Implementation of mldev test application

Checks

Context         Check      Description
ci/checkpatch   warning    coding style issues

Commit Message

Srikanth Yalavarthi March 11, 2023, 3:09 p.m. UTC
  Enable support for executing multiple batches of inferences
per enqueue request. Input and reference data for the test
should be provided appropriately for a multi-batch run. The
number of batches can be specified through the "--batches"
option.
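
For reference, a possible invocation exercising the new option (the test
name, device address and file names below are illustrative placeholders,
not taken from this patch):

    dpdk-test-mldev -a 0000:01:00.0 -- \
        --test=inference_ordered \
        --filelist=model.bin,input.bin,output.bin \
        --batches=4 --repetitions=100 --burst_size=1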

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
Acked-by: Anup Prabhu <aprabhu@marvell.com>
---
 app/test-mldev/ml_options.c            | 15 ++++++++++++---
 app/test-mldev/ml_options.h            |  2 ++
 app/test-mldev/test_inference_common.c | 22 +++++++++++++---------
 app/test-mldev/test_model_common.c     |  6 ++++++
 app/test-mldev/test_model_common.h     |  1 +
 5 files changed, 34 insertions(+), 12 deletions(-)
  

Comments

Thomas Monjalon March 16, 2023, 5:47 p.m. UTC | #1
11/03/2023 16:09, Srikanth Yalavarthi:
> @@ -528,8 +533,8 @@ ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned
>  	req->niters = 0;
>  
>  	/* quantize data */
> -	rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id,
> -			   t->model[t->fid].info.batch_size, t->model[t->fid].input, req->input);
> +	rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id, t->model[t->fid].nb_batches,
> +			   t->model[t->fid].input, req->input);
>  }
>  
>  int
> @@ -547,7 +552,7 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
>  	int ret;
>  
>  	/* get input buffer size */
> -	ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].info.batch_size,
> +	ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
>  				       &t->model[fid].inp_qsize, &t->model[fid].inp_dsize);
>  	if (ret != 0) {
>  		ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
> @@ -555,9 +560,8 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
>  	}
>  
>  	/* get output buffer size */
> -	ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id,
> -					t->model[fid].info.batch_size, &t->model[fid].out_qsize,
> -					&t->model[fid].out_dsize);
> +	ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
> +					&t->model[fid].out_qsize, &t->model[fid].out_dsize);
>  	if (ret != 0) {
>  		ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
>  		return ret;
> @@ -702,7 +706,7 @@ ml_request_finish(struct rte_mempool *mp, void *opaque, void *obj, unsigned int
>  		return;
>  
>  	t->nb_used++;
> -	rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].info.batch_size,
> +	rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].nb_batches,
>  			     req->output, model->output);

These changes look unrelated with the topic of the patch.
You should probably fix it when adding those lines at first.
  
Srikanth Yalavarthi March 16, 2023, 5:52 p.m. UTC | #2
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: 16 March 2023 23:18
> To: Srikanth Yalavarthi <syalavarthi@marvell.com>
> Cc: dev@dpdk.org; Shivah Shankar Shankar Narayan Rao
> <sshankarnara@marvell.com>; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>; Anup Prabhu <aprabhu@marvell.com>; Prince Takkar
> <ptakkar@marvell.com>; Parijat Shukla <pshukla@marvell.com>
> Subject: [EXT] Re: [PATCH v6 09/12] app/mldev: enable support for inference
> batches
> 
> 11/03/2023 16:09, Srikanth Yalavarthi:
> > [...]
> 
> These changes look unrelated with the topic of the patch.
> You should probably fix it when adding those lines at first.

The changes are related to the patch. Initially, the number of batches run per inference is set to the model's default batch_size value, which is reported to the user through rte_ml_model_info_get.

This patch adds support to specify the number of batches to be run per inference. Hence, the default batch_size is replaced with the nb_batches value specified by the user.
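
A minimal sketch of the default being described, i.e. reading the model's
native batch_size that --batches overrides when non-zero (dev_id and
model_id are assumed to be valid handles obtained earlier; error handling
is trimmed):

    #include <stdio.h>
    #include <rte_mldev.h>

    static void
    show_default_batch_size(int16_t dev_id, uint16_t model_id)
    {
        struct rte_ml_model_info info;

        /* rte_ml_model_info_get() reports the model's native batch_size;
         * the test app uses it as nb_batches when --batches is 0,
         * otherwise the user-supplied opt->batches takes precedence. */
        if (rte_ml_model_info_get(dev_id, model_id, &info) == 0)
            printf("default batch_size = %u\n",
                   (unsigned int)info.batch_size);
    }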
  

Patch

diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index c81dec6e30..499bfde899 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -27,6 +27,7 @@  ml_options_default(struct ml_options *opt)
 	opt->burst_size = 1;
 	opt->queue_pairs = 1;
 	opt->queue_size = 1;
+	opt->batches = 0;
 	opt->debug = false;
 }
 
@@ -170,6 +171,12 @@  ml_parse_queue_size(struct ml_options *opt, const char *arg)
 	return parser_read_uint16(&opt->queue_size, arg);
 }
 
+static int
+ml_parse_batches(struct ml_options *opt, const char *arg)
+{
+	return parser_read_uint16(&opt->batches, arg);
+}
+
 static void
 ml_dump_test_options(const char *testname)
 {
@@ -190,7 +197,8 @@  ml_dump_test_options(const char *testname)
 		       "\t\t--repetitions      : number of inference repetitions\n"
 		       "\t\t--burst_size       : inference burst size\n"
 		       "\t\t--queue_pairs      : number of queue pairs to create\n"
-		       "\t\t--queue_size       : size fo queue-pair\n");
+		       "\t\t--queue_size       : size fo queue-pair\n"
+		       "\t\t--batches          : number of batches of input\n");
 		printf("\n");
 	}
 }
@@ -214,7 +222,8 @@  static struct option lgopts[] = {
 	{ML_TEST, 1, 0, 0},	  {ML_DEVICE_ID, 1, 0, 0},   {ML_SOCKET_ID, 1, 0, 0},
 	{ML_MODELS, 1, 0, 0},	  {ML_FILELIST, 1, 0, 0},    {ML_REPETITIONS, 1, 0, 0},
 	{ML_BURST_SIZE, 1, 0, 0}, {ML_QUEUE_PAIRS, 1, 0, 0}, {ML_QUEUE_SIZE, 1, 0, 0},
-	{ML_DEBUG, 0, 0, 0},	  {ML_HELP, 0, 0, 0},	     {NULL, 0, 0, 0}};
+	{ML_BATCHES, 1, 0, 0},	  {ML_DEBUG, 0, 0, 0},	     {ML_HELP, 0, 0, 0},
+	{NULL, 0, 0, 0}};
 
 static int
 ml_opts_parse_long(int opt_idx, struct ml_options *opt)
@@ -226,7 +235,7 @@  ml_opts_parse_long(int opt_idx, struct ml_options *opt)
 		{ML_SOCKET_ID, ml_parse_socket_id},   {ML_MODELS, ml_parse_models},
 		{ML_FILELIST, ml_parse_filelist},     {ML_REPETITIONS, ml_parse_repetitions},
 		{ML_BURST_SIZE, ml_parse_burst_size}, {ML_QUEUE_PAIRS, ml_parse_queue_pairs},
-		{ML_QUEUE_SIZE, ml_parse_queue_size},
+		{ML_QUEUE_SIZE, ml_parse_queue_size}, {ML_BATCHES, ml_parse_batches},
 	};
 
 	for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index c4018ee9d1..48fe064150 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -21,6 +21,7 @@ 
 #define ML_BURST_SIZE  ("burst_size")
 #define ML_QUEUE_PAIRS ("queue_pairs")
 #define ML_QUEUE_SIZE  ("queue_size")
+#define ML_BATCHES     ("batches")
 #define ML_DEBUG       ("debug")
 #define ML_HELP	       ("help")
 
@@ -40,6 +41,7 @@  struct ml_options {
 	uint16_t burst_size;
 	uint16_t queue_pairs;
 	uint16_t queue_size;
+	uint16_t batches;
 	bool debug;
 };
 
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index b4ad3c4b72..0f281aed6c 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -50,7 +50,7 @@  ml_enqueue_single(void *arg)
 		goto retry;
 
 	op->model_id = t->model[fid].id;
-	op->nb_batches = t->model[fid].info.batch_size;
+	op->nb_batches = t->model[fid].nb_batches;
 	op->mempool = t->op_pool;
 
 	op->input.addr = req->input;
@@ -163,7 +163,7 @@  ml_enqueue_burst(void *arg)
 
 	for (i = 0; i < ops_count; i++) {
 		args->enq_ops[i]->model_id = t->model[fid].id;
-		args->enq_ops[i]->nb_batches = t->model[fid].info.batch_size;
+		args->enq_ops[i]->nb_batches = t->model[fid].nb_batches;
 		args->enq_ops[i]->mempool = t->op_pool;
 
 		args->enq_ops[i]->input.addr = args->reqs[i]->input;
@@ -359,6 +359,11 @@  test_inference_opt_dump(struct ml_options *opt)
 	ml_dump("queue_pairs", "%u", opt->queue_pairs);
 	ml_dump("queue_size", "%u", opt->queue_size);
 
+	if (opt->batches == 0)
+		ml_dump("batches", "%u (default)", opt->batches);
+	else
+		ml_dump("batches", "%u", opt->batches);
+
 	ml_dump_begin("filelist");
 	for (i = 0; i < opt->nb_filelist; i++) {
 		ml_dump_list("model", i, opt->filelist[i].model);
@@ -528,8 +533,8 @@  ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned
 	req->niters = 0;
 
 	/* quantize data */
-	rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id,
-			   t->model[t->fid].info.batch_size, t->model[t->fid].input, req->input);
+	rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id, t->model[t->fid].nb_batches,
+			   t->model[t->fid].input, req->input);
 }
 
 int
@@ -547,7 +552,7 @@  ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 	int ret;
 
 	/* get input buffer size */
-	ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].info.batch_size,
+	ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
 				       &t->model[fid].inp_qsize, &t->model[fid].inp_dsize);
 	if (ret != 0) {
 		ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
@@ -555,9 +560,8 @@  ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 	}
 
 	/* get output buffer size */
-	ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id,
-					t->model[fid].info.batch_size, &t->model[fid].out_qsize,
-					&t->model[fid].out_dsize);
+	ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
+					&t->model[fid].out_qsize, &t->model[fid].out_dsize);
 	if (ret != 0) {
 		ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
 		return ret;
@@ -702,7 +706,7 @@  ml_request_finish(struct rte_mempool *mp, void *opaque, void *obj, unsigned int
 		return;
 
 	t->nb_used++;
-	rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].info.batch_size,
+	rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].nb_batches,
 			     req->output, model->output);
 }
 
diff --git a/app/test-mldev/test_model_common.c b/app/test-mldev/test_model_common.c
index b94d46154d..c28e452f29 100644
--- a/app/test-mldev/test_model_common.c
+++ b/app/test-mldev/test_model_common.c
@@ -71,6 +71,12 @@  ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *mod
 		return ret;
 	}
 
+	/* Update number of batches */
+	if (opt->batches == 0)
+		model->nb_batches = model->info.batch_size;
+	else
+		model->nb_batches = opt->batches;
+
 	model->state = MODEL_LOADED;
 
 	return 0;
diff --git a/app/test-mldev/test_model_common.h b/app/test-mldev/test_model_common.h
index 5ee975109d..19429ce142 100644
--- a/app/test-mldev/test_model_common.h
+++ b/app/test-mldev/test_model_common.h
@@ -30,6 +30,7 @@  struct ml_model {
 	uint8_t *output;
 
 	struct rte_mempool *io_pool;
+	uint32_t nb_batches;
 };
 
 int ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *model,