get:
Show a patch.

patch:
Partially update a patch; only the fields included in the request are modified.

put:
Fully update a patch; all writable fields are replaced by the request body.

GET /api/patches/131707/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131707,
    "url": "http://patches.dpdk.org/api/patches/131707/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230920072528.14185-34-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230920072528.14185-34-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230920072528.14185-34-syalavarthi@marvell.com",
    "date": "2023-09-20T07:25:24",
    "name": "[v2,33/34] ml/cnxk: enable fast-path ops for TVM models",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "5ff0bf8b89644bfe973824b89d2695858b798007",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230920072528.14185-34-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 29567,
            "url": "http://patches.dpdk.org/api/series/29567/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29567",
            "date": "2023-09-20T07:24:51",
            "name": "Implemenation of revised ml/cnxk driver",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/29567/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/131707/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/131707/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6FF05425E4;\n\tWed, 20 Sep 2023 09:29:36 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C45A242E2F;\n\tWed, 20 Sep 2023 09:26:19 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 9022B40ED1\n for <dev@dpdk.org>; Wed, 20 Sep 2023 09:25:45 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 38K631id001603 for <dev@dpdk.org>; Wed, 20 Sep 2023 00:25:44 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3t7u4d89jw-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Wed, 20 Sep 2023 00:25:44 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Wed, 20 Sep 2023 00:25:42 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Wed, 20 Sep 2023 00:25:42 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id C05975B6922;\n Wed, 20 Sep 2023 00:25:42 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=Wd2QYxQ2phGB9JI/TnXnc1DJtQtC9F7OnXwZBKnctCE=;\n b=RmaY9/SbmLjNCIURXariJ6jrMXoTfmNtTE1/LQMLsT5f/hIcCBddKjV5RjTuRgpXjjfT\n rPrEJStKNOZD+C126hVXHchRVQiOBwsF3qNuJJJliCYjDaXTbVnpEbPCC6Ws8NVYTDg4\n wzewBc5JCxF/Y5h7BoPw5MxJ2plrHFOf+4aEmaVMaEBam+Biwz229b6Fi88e6ytLpfga\n m9LW5YZkADkFNqPphsqW06D8q2JAwrSH7rkNL8ZqFNNA5pX9q58j/3/XadwUgp/hXU2A\n S/eW9dU/hycTeXfX6ZojZody4QbKkJ5Mi7T8TvtoDZNsbPRNPOJ4zk4xl5Xr8A77Iiwh ZA==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v2 33/34] ml/cnxk: enable fast-path ops for TVM models",
        "Date": "Wed, 20 Sep 2023 00:25:24 -0700",
        "Message-ID": "<20230920072528.14185-34-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.41.0",
        "In-Reply-To": "<20230920072528.14185-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20230920072528.14185-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "4Vjb1xmdjcgkOJQwxhaP7TdW34Arvu01",
        "X-Proofpoint-GUID": "4Vjb1xmdjcgkOJQwxhaP7TdW34Arvu01",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.601,FMLib:17.11.176.26\n definitions=2023-09-20_02,2023-09-19_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Anup Prabhu <aprabhu@marvell.com>\n\nEnable fast-path ops support for TVM models. Models would\nuse TVMDP library function calls to execute inference\noperations for Hybrid and LLVM model sub-types.\n\nFor TVM MRVL model subtypes that have a single MRVL layer,\nthe inference requests are directly enqueued to hardware\nby the driver.\n\nSigned-off-by: Anup Prabhu <aprabhu@marvell.com>\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_ops.c   |   4 -\n drivers/ml/cnxk/cnxk_ml_io.h     |   6 ++\n drivers/ml/cnxk/cnxk_ml_ops.c    |   4 +\n drivers/ml/cnxk/cnxk_ml_ops.h    |   9 +++\n drivers/ml/cnxk/mvtvm_ml_model.c |  20 +++++\n drivers/ml/cnxk/mvtvm_ml_model.h |   6 ++\n drivers/ml/cnxk/mvtvm_ml_ops.c   | 124 +++++++++++++++++++++++++++++++\n drivers/ml/cnxk/mvtvm_ml_ops.h   |  43 +++++++++++\n 8 files changed, 212 insertions(+), 4 deletions(-)",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex 140f7a343f..c1353fb0c8 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -287,10 +287,6 @@ cn10k_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_c\n \telse\n \t\tcn10k_mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_lf;\n \n-\tcnxk_mldev->mldev->enqueue_burst = cnxk_ml_enqueue_burst;\n-\tcnxk_mldev->mldev->dequeue_burst = cnxk_ml_dequeue_burst;\n-\tcnxk_mldev->mldev->op_error_get = cn10k_ml_op_error_get;\n-\n \treturn 0;\n }\n \ndiff --git a/drivers/ml/cnxk/cnxk_ml_io.h b/drivers/ml/cnxk/cnxk_ml_io.h\nindex 5de166c252..6d5d25a7c9 100644\n--- a/drivers/ml/cnxk/cnxk_ml_io.h\n+++ b/drivers/ml/cnxk/cnxk_ml_io.h\n@@ -47,6 +47,12 @@ struct cnxk_ml_io {\n \n \t/* Scale */\n \tfloat scale;\n+\n+\t/* Dequantized offset */\n+\tuint32_t offset_d;\n+\n+\t/* Quantized offset */\n+\tuint32_t offset_q;\n };\n \n /* Model / Layer IO structure */\ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.c b/drivers/ml/cnxk/cnxk_ml_ops.c\nindex f281e6070f..274d152b81 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.c\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.c\n@@ -770,6 +770,10 @@ cnxk_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *co\n \tcnxk_mldev->max_nb_layers =\n \t\tcnxk_mldev->cn10k_mldev.fw.req->cn10k_req.jd.fw_load.cap.s.max_models;\n \n+\tcnxk_mldev->mldev->enqueue_burst = cnxk_ml_enqueue_burst;\n+\tcnxk_mldev->mldev->dequeue_burst = cnxk_ml_dequeue_burst;\n+\tcnxk_mldev->mldev->op_error_get = cn10k_ml_op_error_get;\n+\n \t/* Allocate and initialize index_map */\n \tif (cnxk_mldev->index_map == NULL) {\n \t\tcnxk_mldev->index_map =\ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.h b/drivers/ml/cnxk/cnxk_ml_ops.h\nindex 2575f4c6e1..62e2b17e35 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.h\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.h\n@@ -12,12 +12,21 @@\n \n #include \"cn10k_ml_ops.h\"\n \n+#ifdef RTE_MLDEV_CNXK_ENABLE_MVTVM\n+#include 
\"mvtvm_ml_ops.h\"\n+#endif\n+\n /* Request structure */\n struct cnxk_ml_req {\n \t/* Device specific request */\n \tunion {\n \t\t/* CN10K */\n \t\tstruct cn10k_ml_req cn10k_req;\n+\n+#ifdef RTE_MLDEV_CNXK_ENABLE_MVTVM\n+\t\t/* MVTVM */\n+\t\tstruct mvtvm_ml_req mvtvm_req;\n+#endif\n \t};\n \n \t/* Address of status field */\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_model.c b/drivers/ml/cnxk/mvtvm_ml_model.c\nindex 7086c7a407..8af84b6972 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_model.c\n+++ b/drivers/ml/cnxk/mvtvm_ml_model.c\n@@ -136,6 +136,16 @@ mvtvm_ml_model_io_info_update(struct cnxk_ml_model *model)\n \t\tmodel->mvtvm.info.total_input_sz_d += model->mvtvm.info.input[i].sz_d;\n \t\tmodel->mvtvm.info.total_input_sz_q += model->mvtvm.info.input[i].sz_q;\n \n+\t\tmodel->mvtvm.info.input[i].offset_d = model->mvtvm.info.total_input_sz_d;\n+\t\tmodel->mvtvm.info.input[i].offset_q = model->mvtvm.info.total_input_sz_q;\n+\n+\t\tmodel->mvtvm.input_tensor[i].device = metadata->input[i].device;\n+\t\tmodel->mvtvm.input_tensor[i].ndim = metadata->input[i].ndim;\n+\t\tmodel->mvtvm.input_tensor[i].dtype = metadata->input[i].datatype;\n+\t\tmodel->mvtvm.input_tensor[i].shape = metadata->input[i].shape;\n+\t\tmodel->mvtvm.input_tensor[i].strides = NULL;\n+\t\tmodel->mvtvm.input_tensor[i].byte_offset = model->mvtvm.info.input[i].offset_q;\n+\n \t\tplt_ml_dbg(\"model_id = %u, input[%u] - sz_d = %u sz_q = %u\", model->model_id, i,\n \t\t\t   model->mvtvm.info.input[i].sz_d, model->mvtvm.info.input[i].sz_q);\n \t}\n@@ -169,6 +179,16 @@ mvtvm_ml_model_io_info_update(struct cnxk_ml_model *model)\n \t\tmodel->mvtvm.info.total_output_sz_d += model->mvtvm.info.output[i].sz_d;\n \t\tmodel->mvtvm.info.total_output_sz_q += model->mvtvm.info.output[i].sz_q;\n \n+\t\tmodel->mvtvm.info.output[i].offset_d = model->mvtvm.info.total_output_sz_d;\n+\t\tmodel->mvtvm.info.output[i].offset_q = model->mvtvm.info.total_output_sz_q;\n+\n+\t\tmodel->mvtvm.output_tensor[i].device = 
metadata->output[i].device;\n+\t\tmodel->mvtvm.output_tensor[i].ndim = metadata->output[i].ndim;\n+\t\tmodel->mvtvm.output_tensor[i].dtype = metadata->output[i].datatype;\n+\t\tmodel->mvtvm.output_tensor[i].shape = metadata->output[i].shape;\n+\t\tmodel->mvtvm.output_tensor[i].strides = NULL;\n+\t\tmodel->mvtvm.output_tensor[i].byte_offset = model->mvtvm.info.output[i].offset_q;\n+\n \t\tplt_ml_dbg(\"model_id = %u, output[%u] - sz_d = %u sz_q = %u\", model->model_id, i,\n \t\t\t   model->mvtvm.info.output[i].sz_d, model->mvtvm.info.output[i].sz_q);\n \t}\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_model.h b/drivers/ml/cnxk/mvtvm_ml_model.h\nindex 57a6ce0bb1..08e101bbe7 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_model.h\n+++ b/drivers/ml/cnxk/mvtvm_ml_model.h\n@@ -71,6 +71,12 @@ struct mvtvm_ml_model_data {\n \n \t/* Stats for burst ops */\n \tstruct mvtvm_ml_model_xstats *burst_xstats;\n+\n+\t/* Input Tensor */\n+\tDLTensor input_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n+\n+\t/* Output Tensor */\n+\tDLTensor output_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n };\n \n int mvtvm_ml_model_blob_parse(struct rte_ml_model_params *params,\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_ops.c b/drivers/ml/cnxk/mvtvm_ml_ops.c\nindex 5292ac97fe..2baac8f72f 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_ops.c\n+++ b/drivers/ml/cnxk/mvtvm_ml_ops.c\n@@ -21,6 +21,12 @@\n /* ML model macros */\n #define MVTVM_ML_MODEL_MEMZONE_NAME \"ml_mvtvm_model_mz\"\n \n+__rte_hot static void\n+mvtvm_ml_set_poll_addr(struct cnxk_ml_req *req)\n+{\n+\treq->status = &req->mvtvm_req.status;\n+}\n+\n int\n mvtvm_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_config *conf)\n {\n@@ -172,6 +178,7 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *\n \t\tcallback->tvmrt_free = cn10k_ml_free;\n \t\tcallback->tvmrt_quantize = mvtvm_ml_io_quantize;\n \t\tcallback->tvmrt_dequantize = mvtvm_ml_io_dequantize;\n+\t\tcallback->tvmrt_inference = cn10k_ml_inference_sync;\n \t} else 
{\n \t\tcallback = NULL;\n \t}\n@@ -215,6 +222,19 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *\n \t\tmodel->mvtvm.burst_xstats[qp_id].dequeued_count = 0;\n \t}\n \n+\t/* Set model specific fast path functions */\n+\tif (model->subtype == ML_CNXK_MODEL_SUBTYPE_TVM_MRVL) {\n+\t\tmodel->enqueue_single = cn10k_ml_enqueue_single;\n+\t\tmodel->result_update = cn10k_ml_result_update;\n+\t\tmodel->set_error_code = cn10k_ml_set_error_code;\n+\t\tmodel->set_poll_addr = cn10k_ml_set_poll_addr;\n+\t} else {\n+\t\tmodel->enqueue_single = mvtvm_ml_enqueue_single;\n+\t\tmodel->result_update = mvtvm_ml_result_update;\n+\t\tmodel->set_error_code = mvtvm_ml_set_error_code;\n+\t\tmodel->set_poll_addr = mvtvm_ml_set_poll_addr;\n+\t}\n+\n \treturn 0;\n \n error:\n@@ -425,3 +445,107 @@ mvtvm_ml_io_dequantize(void *device, uint16_t model_id, const char *layer_name,\n \n \treturn 0;\n }\n+\n+static int\n+mvtvm_ml_model_run(struct cnxk_ml_model *model, struct rte_ml_op *op, struct cnxk_ml_req *req)\n+{\n+\tuint8_t i;\n+\n+\trte_memcpy(req->mvtvm_req.input_tensor, model->mvtvm.input_tensor,\n+\t\t   model->mvtvm.metadata.model.num_input * sizeof(DLTensor));\n+\tfor (i = 0; i < model->mvtvm.metadata.model.num_input; i++) {\n+\t\treq->mvtvm_req.input_tensor[i].data = op->input[i]->addr;\n+\t\treq->mvtvm_req.input_tensor[i].byte_offset = 0;\n+\t}\n+\n+\trte_memcpy(req->mvtvm_req.output_tensor, model->mvtvm.output_tensor,\n+\t\t   model->mvtvm.metadata.model.num_output * sizeof(DLTensor));\n+\tfor (i = 0; i < model->mvtvm.metadata.model.num_output; i++) {\n+\t\treq->mvtvm_req.output_tensor[i].data = op->output[i]->addr;\n+\t\treq->mvtvm_req.output_tensor[i].byte_offset = 0;\n+\t}\n+\n+\ttvmdp_model_run(model->model_id, model->mvtvm.metadata.model.num_input,\n+\t\t\treq->mvtvm_req.input_tensor, model->mvtvm.metadata.model.num_output,\n+\t\t\treq->mvtvm_req.output_tensor, 
&req->mvtvm_req.result,\n+\t\t\t&req->mvtvm_req.status);\n+\n+\tplt_write64(ML_CNXK_POLL_JOB_FINISH, req->status);\n+\n+\treturn 0;\n+}\n+\n+__rte_hot void\n+mvtvm_ml_set_error_code(struct cnxk_ml_req *req, uint64_t etype, uint64_t stype)\n+{\n+\tRTE_SET_USED(stype);\n+\n+\treq->mvtvm_req.result.error_code = etype;\n+}\n+\n+__rte_hot bool\n+mvtvm_ml_enqueue_single(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_op *op, uint16_t layer_id,\n+\t\t\tstruct cnxk_ml_qp *qp, uint64_t head)\n+{\n+\tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_queue *queue;\n+\tstruct cnxk_ml_req *req;\n+\n+\tRTE_SET_USED(layer_id);\n+\n+\tqueue = &qp->queue;\n+\treq = &queue->reqs[head];\n+\tmodel = cnxk_mldev->mldev->data->models[op->model_id];\n+\n+\tmodel->set_poll_addr(req);\n+\tmemset(&req->mvtvm_req.result, 0, sizeof(struct mvtvm_ml_result));\n+\treq->mvtvm_req.result.error_code = 0x0;\n+\treq->mvtvm_req.result.user_ptr = op->user_ptr;\n+\n+\tcnxk_ml_set_poll_ptr(req);\n+\tmvtvm_ml_model_run(model, op, req);\n+\treq->timeout = plt_tsc_cycles() + queue->wait_cycles;\n+\treq->op = op;\n+\n+\treturn true;\n+}\n+\n+__rte_hot void\n+mvtvm_ml_result_update(struct cnxk_ml_dev *cnxk_mldev, int qp_id, void *request)\n+{\n+\tstruct mvtvm_ml_model_xstats *xstats;\n+\tstruct mvtvm_ml_result *result;\n+\tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_req *req;\n+\tuint64_t tvm_rt_latency;\n+\tstruct cnxk_ml_qp *qp;\n+\tstruct rte_ml_op *op;\n+\n+\treq = (struct cnxk_ml_req *)request;\n+\tresult = &req->mvtvm_req.result;\n+\top = req->op;\n+\tqp = cnxk_mldev->mldev->data->queue_pairs[qp_id];\n+\top->impl_opaque = result->error_code;\n+\n+\tif (likely(result->error_code == 0)) {\n+\t\tqp->stats.dequeued_count++;\n+\t\top->status = RTE_ML_OP_STATUS_SUCCESS;\n+\n+\t\tmodel = cnxk_mldev->mldev->data->models[op->model_id];\n+\t\txstats = &model->mvtvm.burst_xstats[qp_id];\n+\n+\t\tif (unlikely(xstats->dequeued_count == xstats->tvm_rt_reset_count)) {\n+\t\t\txstats->tvm_rt_latency_min = 
UINT64_MAX;\n+\t\t\txstats->tvm_rt_latency_max = 0;\n+\t\t}\n+\t\ttvm_rt_latency = result->stats.end_ns - result->stats.start_ns;\n+\t\txstats->tvm_rt_latency = tvm_rt_latency;\n+\t\txstats->tvm_rt_latency_tot += tvm_rt_latency;\n+\t\txstats->tvm_rt_latency_min = RTE_MIN(xstats->tvm_rt_latency_min, tvm_rt_latency);\n+\t\txstats->tvm_rt_latency_max = RTE_MAX(xstats->tvm_rt_latency_max, tvm_rt_latency);\n+\t\txstats->dequeued_count++;\n+\t} else {\n+\t\tqp->stats.dequeue_err_count++;\n+\t\top->status = RTE_ML_OP_STATUS_ERROR;\n+\t}\n+}\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_ops.h b/drivers/ml/cnxk/mvtvm_ml_ops.h\nindex a1a868ef4b..82292ceadd 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_ops.h\n+++ b/drivers/ml/cnxk/mvtvm_ml_ops.h\n@@ -13,6 +13,44 @@\n \n struct cnxk_ml_dev;\n struct cnxk_ml_model;\n+struct cnxk_ml_qp;\n+struct cnxk_ml_req;\n+\n+/* Inference stats */\n+struct mvtvm_ml_stats {\n+\t/* Start ns */\n+\tuint64_t start_ns;\n+\n+\t/* Start ns */\n+\tuint64_t end_ns;\n+};\n+\n+/* Result structure */\n+struct mvtvm_ml_result {\n+\t/* Job error code */\n+\tuint64_t error_code;\n+\n+\t/* Inference stats */\n+\tstruct mvtvm_ml_stats stats;\n+\n+\t/* User context pointer */\n+\tvoid *user_ptr;\n+};\n+\n+/* MVTVM specific request */\n+struct mvtvm_ml_req {\n+\t/* Input tensors */\n+\tDLTensor input_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n+\n+\t/* Output tensors */\n+\tDLTensor output_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n+\n+\t/* Status field for poll mode requests */\n+\tvolatile uint64_t status;\n+\n+\t/* Result */\n+\tstruct mvtvm_ml_result result;\n+};\n \n int mvtvm_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_config *conf);\n int mvtvm_ml_dev_close(struct cnxk_ml_dev *cnxk_mldev);\n@@ -26,4 +64,9 @@ int mvtvm_ml_io_quantize(void *device, uint16_t model_id, const char *layer_name\n int mvtvm_ml_io_dequantize(void *device, uint16_t model_id, const char *layer_name, void *qbuffer,\n \t\t\t   const DLTensor **deq_tensor);\n \n+__rte_hot 
bool mvtvm_ml_enqueue_single(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_op *op,\n+\t\t\t\t       uint16_t layer_id, struct cnxk_ml_qp *qp, uint64_t head);\n+__rte_hot void mvtvm_ml_result_update(struct cnxk_ml_dev *cnxk_mldev, int qp_id, void *request);\n+__rte_hot void mvtvm_ml_set_error_code(struct cnxk_ml_req *req, uint64_t etype, uint64_t stype);\n+\n #endif /* _MVTVM_ML_OPS_H_ */\n",
    "prefixes": [
        "v2",
        "33/34"
    ]
}