get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/133416/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133416,
    "url": "http://patches.dpdk.org/api/patches/133416/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231026124347.22477-34-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231026124347.22477-34-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231026124347.22477-34-syalavarthi@marvell.com",
    "date": "2023-10-26T12:43:42",
    "name": "[v9,33/34] ml/cnxk: enable fast-path ops for TVM models",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "1be7b7db487b844dc95695ecb65b5489245fb0a2",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231026124347.22477-34-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 30002,
            "url": "http://patches.dpdk.org/api/series/30002/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=30002",
            "date": "2023-10-26T12:43:09",
            "name": "Implementation of revised ml/cnxk driver",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/30002/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/133416/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/133416/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DB60C43208;\n\tThu, 26 Oct 2023 14:49:39 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1317442EBE;\n\tThu, 26 Oct 2023 14:44:33 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id C92AB42E2B\n for <dev@dpdk.org>; Thu, 26 Oct 2023 14:44:05 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 39QAKqcf006841 for <dev@dpdk.org>; Thu, 26 Oct 2023 05:44:05 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3txcsr25pj-17\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 26 Oct 2023 05:44:05 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Thu, 26 Oct 2023 05:44:02 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Thu, 26 Oct 2023 05:44:02 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 718A13F70D5;\n Thu, 26 Oct 2023 05:44:02 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=QlHjXFjFlsbKsWlIptbQkw6s970lInWfk47Bn884Yy0=;\n b=POlEokzQKK73mp8SUecE0Vi/E53OH9T+t3htoyMU0PfaEfXOYKnLF4lgqFkYl5oG448v\n VvglNJQ6D3mbb6L5/r8V5cz0hcyPRslXnD+YCINCKDu/1NMfycTY/MuLJvYDwlYLKOop\n QXP5VU8X4dukKWOGu6O9YNUWKBJBvOuywoSR1sOl7PX5Jqp4IUwFmId/VoSqlpBZsydG\n bCLYqo+/d8Xuj/Ad1Y+z7ZuM/LtjYbk0HnazASGSvrjq50v8weAw1cPgSBcZYN1gMAOW\n /CRqOYu/hm15pmZj8trLsHI8RzD1f/mS7qjBvl+INQEfQ4c7jIqYZe3M7dqNNPJDsA3O lA==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v9 33/34] ml/cnxk: enable fast-path ops for TVM models",
        "Date": "Thu, 26 Oct 2023 05:43:42 -0700",
        "Message-ID": "<20231026124347.22477-34-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.42.0",
        "In-Reply-To": "<20231026124347.22477-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20231026124347.22477-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "Omdbyolg-u_VJder6Eyu00QxAz09lXsq",
        "X-Proofpoint-GUID": "Omdbyolg-u_VJder6Eyu00QxAz09lXsq",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.272,Aquarius:18.0.987,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-10-26_10,2023-10-26_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Anup Prabhu <aprabhu@marvell.com>\n\nEnable fast-path ops support for TVM models. Models would\nuse TVMDP library function calls to execute inference\noperations for Hybrid and LLVM model sub-types.\n\nFor TVM MRVL model subtypes that have a single MRVL layer,\nthe inference requests are directly enqueued to hardware\nby the driver.\n\nSigned-off-by: Anup Prabhu <aprabhu@marvell.com>\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n doc/guides/rel_notes/release_23_11.rst |   3 +\n drivers/ml/cnxk/cn10k_ml_ops.c         |   4 -\n drivers/ml/cnxk/cnxk_ml_ops.c          |   4 +\n drivers/ml/cnxk/cnxk_ml_ops.h          |   5 +\n drivers/ml/cnxk/mvtvm_ml_model.c       |  14 +++\n drivers/ml/cnxk/mvtvm_ml_model.h       |   6 ++\n drivers/ml/cnxk/mvtvm_ml_ops.c         | 124 +++++++++++++++++++++++++\n drivers/ml/cnxk/mvtvm_ml_ops.h         |  43 +++++++++\n 8 files changed, 199 insertions(+), 4 deletions(-)",
    "diff": "diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst\nindex 0a6fc76a9d..5fcf2a1897 100644\n--- a/doc/guides/rel_notes/release_23_11.rst\n+++ b/doc/guides/rel_notes/release_23_11.rst\n@@ -243,6 +243,9 @@ New Features\n   Added dispatcher library which purpose is to help decouple different\n   parts (modules) of an eventdev-based application.\n \n+* **Updated Marvell cnxk mldev driver.**\n+\n+  * Added support for models compiled using TVM framework.\n \n Removed Items\n -------------\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex 01b0a44caa..b9d30278c6 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -371,10 +371,6 @@ cn10k_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_c\n \telse\n \t\tcn10k_mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_lf;\n \n-\tcnxk_mldev->mldev->enqueue_burst = cnxk_ml_enqueue_burst;\n-\tcnxk_mldev->mldev->dequeue_burst = cnxk_ml_dequeue_burst;\n-\tcnxk_mldev->mldev->op_error_get = cn10k_ml_op_error_get;\n-\n \treturn 0;\n }\n \ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.c b/drivers/ml/cnxk/cnxk_ml_ops.c\nindex 2632d70d8c..bf266d4d6e 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.c\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.c\n@@ -632,6 +632,10 @@ cnxk_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *co\n \tcnxk_mldev->max_nb_layers =\n \t\tcnxk_mldev->cn10k_mldev.fw.req->cn10k_req.jd.fw_load.cap.s.max_models;\n \n+\tcnxk_mldev->mldev->enqueue_burst = cnxk_ml_enqueue_burst;\n+\tcnxk_mldev->mldev->dequeue_burst = cnxk_ml_dequeue_burst;\n+\tcnxk_mldev->mldev->op_error_get = cn10k_ml_op_error_get;\n+\n \t/* Allocate and initialize index_map */\n \tif (cnxk_mldev->index_map == NULL) {\n \t\tcnxk_mldev->index_map =\ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.h b/drivers/ml/cnxk/cnxk_ml_ops.h\nindex ab32676b3e..7b49793a57 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.h\n+++ 
b/drivers/ml/cnxk/cnxk_ml_ops.h\n@@ -24,6 +24,11 @@ struct cnxk_ml_req {\n \tunion {\n \t\t/* CN10K */\n \t\tstruct cn10k_ml_req cn10k_req;\n+\n+#ifdef RTE_MLDEV_CNXK_ENABLE_MVTVM\n+\t\t/* MVTVM */\n+\t\tstruct mvtvm_ml_req mvtvm_req;\n+#endif\n \t};\n \n \t/* Address of status field */\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_model.c b/drivers/ml/cnxk/mvtvm_ml_model.c\nindex e5ba672788..d28bd88a08 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_model.c\n+++ b/drivers/ml/cnxk/mvtvm_ml_model.c\n@@ -220,6 +220,13 @@ mvtvm_ml_model_io_info_set(struct cnxk_ml_model *model)\n \t\tmodel->mvtvm.info.total_input_sz_d += model->mvtvm.info.input[i].sz_d;\n \t\tmodel->mvtvm.info.total_input_sz_q += model->mvtvm.info.input[i].sz_q;\n \n+\t\tmodel->mvtvm.input_tensor[i].device = metadata->input[i].device;\n+\t\tmodel->mvtvm.input_tensor[i].ndim = metadata->input[i].ndim;\n+\t\tmodel->mvtvm.input_tensor[i].dtype = metadata->input[i].datatype;\n+\t\tmodel->mvtvm.input_tensor[i].shape = metadata->input[i].shape;\n+\t\tmodel->mvtvm.input_tensor[i].strides = NULL;\n+\t\tmodel->mvtvm.input_tensor[i].byte_offset = 0;\n+\n \t\tplt_ml_dbg(\"model_id = %u, input[%u] - sz_d = %u sz_q = %u\", model->model_id, i,\n \t\t\t   model->mvtvm.info.input[i].sz_d, model->mvtvm.info.input[i].sz_q);\n \t}\n@@ -253,6 +260,13 @@ mvtvm_ml_model_io_info_set(struct cnxk_ml_model *model)\n \t\tmodel->mvtvm.info.total_output_sz_d += model->mvtvm.info.output[i].sz_d;\n \t\tmodel->mvtvm.info.total_output_sz_q += model->mvtvm.info.output[i].sz_q;\n \n+\t\tmodel->mvtvm.output_tensor[i].device = metadata->output[i].device;\n+\t\tmodel->mvtvm.output_tensor[i].ndim = metadata->output[i].ndim;\n+\t\tmodel->mvtvm.output_tensor[i].dtype = metadata->output[i].datatype;\n+\t\tmodel->mvtvm.output_tensor[i].shape = metadata->output[i].shape;\n+\t\tmodel->mvtvm.output_tensor[i].strides = NULL;\n+\t\tmodel->mvtvm.output_tensor[i].byte_offset = 0;\n+\n \t\tplt_ml_dbg(\"model_id = %u, output[%u] - sz_d = %u sz_q = %u\", 
model->model_id, i,\n \t\t\t   model->mvtvm.info.output[i].sz_d, model->mvtvm.info.output[i].sz_q);\n \t}\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_model.h b/drivers/ml/cnxk/mvtvm_ml_model.h\nindex 66c3af18e1..7ffce38094 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_model.h\n+++ b/drivers/ml/cnxk/mvtvm_ml_model.h\n@@ -69,6 +69,12 @@ struct mvtvm_ml_model_data {\n \n \t/* Stats for burst ops */\n \tstruct mvtvm_ml_model_xstats *burst_xstats;\n+\n+\t/* Input Tensor */\n+\tDLTensor input_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n+\n+\t/* Output Tensor */\n+\tDLTensor output_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n };\n \n enum cnxk_ml_model_type mvtvm_ml_model_type_get(struct rte_ml_model_params *params);\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_ops.c b/drivers/ml/cnxk/mvtvm_ml_ops.c\nindex 39c8bf0f04..6b88491371 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_ops.c\n+++ b/drivers/ml/cnxk/mvtvm_ml_ops.c\n@@ -19,6 +19,12 @@\n /* ML model macros */\n #define MVTVM_ML_MODEL_MEMZONE_NAME \"ml_mvtvm_model_mz\"\n \n+__rte_hot static void\n+mvtvm_ml_set_poll_addr(struct cnxk_ml_req *req)\n+{\n+\treq->status = &req->mvtvm_req.status;\n+}\n+\n void\n mvtvm_ml_model_xstat_name_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n \t\t\t      uint16_t stat_id, uint16_t entry, char *suffix)\n@@ -242,6 +248,7 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *\n \t\tcallback->tvmrt_free = cn10k_ml_free;\n \t\tcallback->tvmrt_quantize = mvtvm_ml_io_quantize;\n \t\tcallback->tvmrt_dequantize = mvtvm_ml_io_dequantize;\n+\t\tcallback->tvmrt_inference = cn10k_ml_inference_sync;\n \t} else {\n \t\tcallback = NULL;\n \t}\n@@ -285,6 +292,19 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *\n \t\tmodel->mvtvm.burst_xstats[qp_id].dequeued_count = 0;\n \t}\n \n+\t/* Set model specific fast path functions */\n+\tif (model->subtype == ML_CNXK_MODEL_SUBTYPE_TVM_MRVL) {\n+\t\tmodel->enqueue_single = 
cn10k_ml_enqueue_single;\n+\t\tmodel->result_update = cn10k_ml_result_update;\n+\t\tmodel->set_error_code = cn10k_ml_set_error_code;\n+\t\tmodel->set_poll_addr = cn10k_ml_set_poll_addr;\n+\t} else {\n+\t\tmodel->enqueue_single = mvtvm_ml_enqueue_single;\n+\t\tmodel->result_update = mvtvm_ml_result_update;\n+\t\tmodel->set_error_code = mvtvm_ml_set_error_code;\n+\t\tmodel->set_poll_addr = mvtvm_ml_set_poll_addr;\n+\t}\n+\n \treturn 0;\n \n error:\n@@ -495,3 +515,107 @@ mvtvm_ml_io_dequantize(void *device, uint16_t model_id, const char *layer_name,\n \n \treturn 0;\n }\n+\n+static int\n+mvtvm_ml_model_run(struct cnxk_ml_model *model, struct rte_ml_op *op, struct cnxk_ml_req *req)\n+{\n+\tuint8_t i;\n+\n+\trte_memcpy(req->mvtvm_req.input_tensor, model->mvtvm.input_tensor,\n+\t\t   model->mvtvm.metadata.model.num_input * sizeof(DLTensor));\n+\tfor (i = 0; i < model->mvtvm.metadata.model.num_input; i++) {\n+\t\treq->mvtvm_req.input_tensor[i].data = op->input[i]->addr;\n+\t\treq->mvtvm_req.input_tensor[i].byte_offset = 0;\n+\t}\n+\n+\trte_memcpy(req->mvtvm_req.output_tensor, model->mvtvm.output_tensor,\n+\t\t   model->mvtvm.metadata.model.num_output * sizeof(DLTensor));\n+\tfor (i = 0; i < model->mvtvm.metadata.model.num_output; i++) {\n+\t\treq->mvtvm_req.output_tensor[i].data = op->output[i]->addr;\n+\t\treq->mvtvm_req.output_tensor[i].byte_offset = 0;\n+\t}\n+\n+\ttvmdp_model_run(model->model_id, model->mvtvm.metadata.model.num_input,\n+\t\t\treq->mvtvm_req.input_tensor, model->mvtvm.metadata.model.num_output,\n+\t\t\treq->mvtvm_req.output_tensor, &req->mvtvm_req.result,\n+\t\t\t&req->mvtvm_req.status);\n+\n+\tplt_write64(ML_CNXK_POLL_JOB_FINISH, req->status);\n+\n+\treturn 0;\n+}\n+\n+__rte_hot void\n+mvtvm_ml_set_error_code(struct cnxk_ml_req *req, uint64_t etype, uint64_t stype)\n+{\n+\tRTE_SET_USED(stype);\n+\n+\treq->mvtvm_req.result.error_code = etype;\n+}\n+\n+__rte_hot bool\n+mvtvm_ml_enqueue_single(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_op *op, 
uint16_t layer_id,\n+\t\t\tstruct cnxk_ml_qp *qp, uint64_t head)\n+{\n+\tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_queue *queue;\n+\tstruct cnxk_ml_req *req;\n+\n+\tRTE_SET_USED(layer_id);\n+\n+\tqueue = &qp->queue;\n+\treq = &queue->reqs[head];\n+\tmodel = cnxk_mldev->mldev->data->models[op->model_id];\n+\n+\tmodel->set_poll_addr(req);\n+\tmemset(&req->mvtvm_req.result, 0, sizeof(struct mvtvm_ml_result));\n+\treq->mvtvm_req.result.error_code = 0x0;\n+\treq->mvtvm_req.result.user_ptr = op->user_ptr;\n+\n+\tcnxk_ml_set_poll_ptr(req);\n+\tmvtvm_ml_model_run(model, op, req);\n+\treq->timeout = plt_tsc_cycles() + queue->wait_cycles;\n+\treq->op = op;\n+\n+\treturn true;\n+}\n+\n+__rte_hot void\n+mvtvm_ml_result_update(struct cnxk_ml_dev *cnxk_mldev, int qp_id, void *request)\n+{\n+\tstruct mvtvm_ml_model_xstats *xstats;\n+\tstruct mvtvm_ml_result *result;\n+\tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_req *req;\n+\tuint64_t tvm_rt_latency;\n+\tstruct cnxk_ml_qp *qp;\n+\tstruct rte_ml_op *op;\n+\n+\treq = (struct cnxk_ml_req *)request;\n+\tresult = &req->mvtvm_req.result;\n+\top = req->op;\n+\tqp = cnxk_mldev->mldev->data->queue_pairs[qp_id];\n+\top->impl_opaque = result->error_code;\n+\n+\tif (likely(result->error_code == 0)) {\n+\t\tqp->stats.dequeued_count++;\n+\t\top->status = RTE_ML_OP_STATUS_SUCCESS;\n+\n+\t\tmodel = cnxk_mldev->mldev->data->models[op->model_id];\n+\t\txstats = &model->mvtvm.burst_xstats[qp_id];\n+\n+\t\tif (unlikely(xstats->dequeued_count == xstats->tvm_rt_reset_count)) {\n+\t\t\txstats->tvm_rt_latency_min = UINT64_MAX;\n+\t\t\txstats->tvm_rt_latency_max = 0;\n+\t\t}\n+\t\ttvm_rt_latency = result->stats.end_ns - result->stats.start_ns;\n+\t\txstats->tvm_rt_latency = tvm_rt_latency;\n+\t\txstats->tvm_rt_latency_tot += tvm_rt_latency;\n+\t\txstats->tvm_rt_latency_min = RTE_MIN(xstats->tvm_rt_latency_min, tvm_rt_latency);\n+\t\txstats->tvm_rt_latency_max = RTE_MAX(xstats->tvm_rt_latency_max, 
tvm_rt_latency);\n+\t\txstats->dequeued_count++;\n+\t} else {\n+\t\tqp->stats.dequeue_err_count++;\n+\t\top->status = RTE_ML_OP_STATUS_ERROR;\n+\t}\n+}\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_ops.h b/drivers/ml/cnxk/mvtvm_ml_ops.h\nindex 4cabe30a82..cb4b219743 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_ops.h\n+++ b/drivers/ml/cnxk/mvtvm_ml_ops.h\n@@ -16,6 +16,44 @@\n struct cnxk_ml_dev;\n struct cnxk_ml_model;\n struct cnxk_ml_layer;\n+struct cnxk_ml_qp;\n+struct cnxk_ml_req;\n+\n+/* Inference stats */\n+struct mvtvm_ml_stats {\n+\t/* Start ns */\n+\tuint64_t start_ns;\n+\n+\t/* Start ns */\n+\tuint64_t end_ns;\n+};\n+\n+/* Result structure */\n+struct mvtvm_ml_result {\n+\t/* Job error code */\n+\tuint64_t error_code;\n+\n+\t/* Inference stats */\n+\tstruct mvtvm_ml_stats stats;\n+\n+\t/* User context pointer */\n+\tvoid *user_ptr;\n+};\n+\n+/* MVTVM specific request */\n+struct mvtvm_ml_req {\n+\t/* Input tensors */\n+\tDLTensor input_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n+\n+\t/* Output tensors */\n+\tDLTensor output_tensor[ML_CNXK_MODEL_MAX_INPUT_OUTPUT];\n+\n+\t/* Status field for poll mode requests */\n+\tvolatile uint64_t status;\n+\n+\t/* Result */\n+\tstruct mvtvm_ml_result result;\n+};\n \n int mvtvm_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_config *conf);\n int mvtvm_ml_dev_close(struct cnxk_ml_dev *cnxk_mldev);\n@@ -29,6 +67,11 @@ int mvtvm_ml_io_quantize(void *device, uint16_t model_id, const char *layer_name\n int mvtvm_ml_io_dequantize(void *device, uint16_t model_id, const char *layer_name, void *qbuffer,\n \t\t\t   const DLTensor **deq_tensor);\n \n+__rte_hot bool mvtvm_ml_enqueue_single(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_op *op,\n+\t\t\t\t       uint16_t layer_id, struct cnxk_ml_qp *qp, uint64_t head);\n+__rte_hot void mvtvm_ml_result_update(struct cnxk_ml_dev *cnxk_mldev, int qp_id, void *request);\n+__rte_hot void mvtvm_ml_set_error_code(struct cnxk_ml_req *req, uint64_t etype, uint64_t stype);\n+\n void 
mvtvm_ml_model_xstat_name_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n \t\t\t\t   uint16_t stat_id, uint16_t entry, char *suffix);\n uint64_t mvtvm_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n",
    "prefixes": [
        "v9",
        "33/34"
    ]
}