get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update; all writable fields are replaced).
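
For illustration, a client could exercise these operations with a few lines of Python. This is a minimal sketch, assuming the third-party requests library; the API token, target state, and archived value below are placeholders, not values taken from this page, and write access additionally requires maintainer rights on the project:

import requests

URL = "http://patches.dpdk.org/api/patches/132944/"

# get: read access needs no authentication.
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# patch: send only the fields to change, authenticating with a
# Patchwork API token (the token below is a placeholder).
resp = requests.patch(
    URL,
    headers={"Authorization": "Token 0123456789abcdef"},
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()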

GET /api/patches/132944/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132944,
    "url": "http://patches.dpdk.org/api/patches/132944/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231019041726.19243-11-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231019041726.19243-11-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231019041726.19243-11-syalavarthi@marvell.com",
    "date": "2023-10-19T04:16:59",
    "name": "[v7,10/34] ml/cnxk: update model start and stop functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "527bab385356b1ec5d5f19054a1c33e1b3f52c46",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231019041726.19243-11-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 29913,
            "url": "http://patches.dpdk.org/api/series/29913/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29913",
            "date": "2023-10-19T04:16:49",
            "name": "Implementation of revised ml/cnxk driver",
            "version": 7,
            "mbox": "http://patches.dpdk.org/series/29913/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/132944/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/132944/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9C1CA431A4;\n\tThu, 19 Oct 2023 06:19:23 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CBA2342DA1;\n\tThu, 19 Oct 2023 06:17:48 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 44BD440A84\n for <dev@dpdk.org>; Thu, 19 Oct 2023 06:17:36 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 39J01Wv6008136 for <dev@dpdk.org>; Wed, 18 Oct 2023 21:17:35 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3ttshu8rva-9\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Wed, 18 Oct 2023 21:17:35 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Wed, 18 Oct 2023 21:17:32 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Wed, 18 Oct 2023 21:17:32 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 34D715B6926;\n Wed, 18 Oct 2023 21:17:32 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=UwFkRBrlecUL0pNorlPwbQ0WzcroCIVug1/8/fpMCp0=;\n b=QGIrPtzTNBz21hrkf74m7NyEJ2OSj3mQrRwWpWmfx+nJAC1eQ07NHHo8MD8iDSw0E3M/\n 8DRtXaH50jjmIJGy91cQ5kWLhdjPWDScgiY3qjjMTqvUiGqpvHOcPh1p+iPqM1ooMSUT\n wPwQspDtR6Xg5g1SN1wJ6gvn83FHNh9bH2kjpZEWdoavY89u1SUZPNFauQWorrESWTPc\n NlhILoAWa/Ep5u4sOkwwbvyfBvuAktfoY3wzjskZE5K3d1WgJ30NaEcEPZDwU2CqM8Vb\n ZvhKXE3DjECl66qgAZRfOFjv2Mnz5a5jI+FYTLw1qkWnk+WFM0CzFNaxtsLbivpOurxT xA==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v7 10/34] ml/cnxk: update model start and stop functions",
        "Date": "Wed, 18 Oct 2023 21:16:59 -0700",
        "Message-ID": "<20231019041726.19243-11-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.42.0",
        "In-Reply-To": "<20231019041726.19243-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20231019041726.19243-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "P8o0XlN7PDzQnLh_5zAdc8xaxdf51cbN",
        "X-Proofpoint-ORIG-GUID": "P8o0XlN7PDzQnLh_5zAdc8xaxdf51cbN",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.272,Aquarius:18.0.980,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-10-19_02,2023-10-18_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Implemented cnxk wrapper functions to start and stop\nML models. Wrapper functions would invoke the cn10k\nmodel start and stop functions.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_ocm.c |  28 ++--\n drivers/ml/cnxk/cn10k_ml_ocm.h |  12 +-\n drivers/ml/cnxk/cn10k_ml_ops.c | 282 ++++++++++++++++++++-------------\n drivers/ml/cnxk/cn10k_ml_ops.h |   8 +-\n drivers/ml/cnxk/cnxk_ml_ops.c  |  48 +++++-\n drivers/ml/cnxk/cnxk_ml_ops.h  |   1 +\n 6 files changed, 240 insertions(+), 139 deletions(-)",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_ocm.c b/drivers/ml/cnxk/cn10k_ml_ocm.c\nindex d71c36eae6..2197e5e0ed 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ocm.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ocm.c\n@@ -215,11 +215,10 @@ cn10k_ml_ocm_tilecount(uint64_t tilemask, int *start, int *end)\n  * scratch & WB pages and OCM allocation mode.\n  */\n int\n-cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t wb_pages,\n+cn10k_ml_ocm_tilemask_find(struct cnxk_ml_dev *cnxk_mldev, uint8_t num_tiles, uint16_t wb_pages,\n \t\t\t   uint16_t scratch_pages, uint64_t *tilemask)\n {\n \tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \n \tuint16_t used_scratch_pages_max;\n@@ -238,7 +237,6 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w\n \tint max_slot_sz;\n \tint page_id;\n \n-\tcnxk_mldev = dev->data->dev_private;\n \tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tocm = &cn10k_mldev->ocm;\n \n@@ -333,12 +331,10 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w\n }\n \n void\n-cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t layer_id,\n+cn10k_ml_ocm_reserve_pages(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id, uint16_t layer_id,\n \t\t\t   uint64_t tilemask, int wb_page_start, uint16_t wb_pages,\n \t\t\t   uint16_t scratch_pages)\n {\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n \tstruct cnxk_ml_layer *layer;\n \tstruct cn10k_ml_ocm *ocm;\n@@ -351,10 +347,8 @@ cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t l\n \tint tile_id;\n \tint page_id;\n \n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tocm = &cn10k_mldev->ocm;\n-\tmodel = dev->data->models[model_id];\n+\tocm = &cnxk_mldev->cn10k_mldev.ocm;\n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n \tlayer = &model->layer[layer_id];\n \n \t/* Get first set bit, tile_start */\n@@ -396,12 +390,10 @@ cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t l\n }\n \n void\n-cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t layer_id)\n+cn10k_ml_ocm_free_pages(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id, uint16_t layer_id)\n {\n \tstruct cnxk_ml_model *local_model;\n \tstruct cnxk_ml_layer *local_layer;\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n \tstruct cnxk_ml_layer *layer;\n \tstruct cn10k_ml_ocm *ocm;\n@@ -416,10 +408,8 @@ cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t laye\n \tuint16_t i;\n \tuint16_t j;\n \n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tocm = &cn10k_mldev->ocm;\n-\tmodel = dev->data->models[model_id];\n+\tocm = &cnxk_mldev->cn10k_mldev.ocm;\n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n \tlayer = &model->layer[layer_id];\n \n \t/* Update OCM info for WB memory */\n@@ -438,8 +428,8 @@ cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t laye\n \n \t\t/* Get max scratch pages required, excluding the current model */\n \t\tscratch_resize_pages = 0;\n-\t\tfor (i = 0; i < dev->data->nb_models; i++) {\n-\t\t\tlocal_model = dev->data->models[i];\n+\t\tfor (i = 0; i < cnxk_mldev->mldev->data->nb_models; i++) {\n+\t\t\tlocal_model = cnxk_mldev->mldev->data->models[i];\n \t\t\tif (local_model == NULL)\n 
\t\t\t\tcontinue;\n \ndiff --git a/drivers/ml/cnxk/cn10k_ml_ocm.h b/drivers/ml/cnxk/cn10k_ml_ocm.h\nindex 720f8caf76..97b723a56a 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ocm.h\n+++ b/drivers/ml/cnxk/cn10k_ml_ocm.h\n@@ -8,6 +8,8 @@\n #include <rte_mldev.h>\n #include <rte_mldev_pmd.h>\n \n+struct cnxk_ml_dev;\n+\n /* Number of OCM tiles. */\n #define ML_CN10K_OCM_NUMTILES 0x8\n \n@@ -75,12 +77,12 @@ struct cn10k_ml_ocm {\n };\n \n int cn10k_ml_ocm_tilecount(uint64_t tilemask, int *start, int *end);\n-int cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t wb_pages,\n+int cn10k_ml_ocm_tilemask_find(struct cnxk_ml_dev *cnxk_mldev, uint8_t num_tiles, uint16_t wb_pages,\n \t\t\t       uint16_t scratch_pages, uint64_t *tilemask);\n-void cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t layer_id,\n-\t\t\t\tuint64_t tilemask, int wb_page_start, uint16_t wb_pages,\n-\t\t\t\tuint16_t scratch_pages);\n-void cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id, uint16_t layer_id);\n+void cn10k_ml_ocm_reserve_pages(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id,\n+\t\t\t\tuint16_t layer_id, uint64_t tilemask, int wb_page_start,\n+\t\t\t\tuint16_t wb_pages, uint16_t scratch_pages);\n+void cn10k_ml_ocm_free_pages(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id, uint16_t layer_id);\n void cn10k_ml_ocm_print(struct rte_ml_dev *dev, FILE *fp);\n \n #endif /* _CN10K_ML_OCM_H_ */\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex ab05896b5e..40f484158a 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -248,26 +248,28 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n }\n \n static void\n-cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml_model *model,\n+cn10k_ml_prep_sp_job_descriptor(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer,\n \t\t\t\tstruct cnxk_ml_req *req, enum cn10k_ml_job_type job_type)\n {\n \tstruct cn10k_ml_model_metadata *metadata;\n \tstruct cn10k_ml_layer_addr *addr;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n \n-\tmetadata = &model->glow.metadata;\n-\taddr = &model->layer[0].glow.addr;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tmetadata = &layer->glow.metadata;\n+\taddr = &layer->glow.addr;\n \n \tmemset(&req->cn10k_req.jd, 0, sizeof(struct cn10k_ml_jd));\n \treq->cn10k_req.jd.hdr.jce.w0.u64 = 0;\n \treq->cn10k_req.jd.hdr.jce.w1.u64 = PLT_U64_CAST(&req->cn10k_req.status);\n-\treq->cn10k_req.jd.hdr.model_id = model->model_id;\n+\treq->cn10k_req.jd.hdr.model_id = layer->index;\n \treq->cn10k_req.jd.hdr.job_type = job_type;\n \treq->cn10k_req.jd.hdr.fp_flags = 0x0;\n \treq->cn10k_req.jd.hdr.result =\n \t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->cn10k_req.result);\n \n \tif (job_type == ML_CN10K_JOB_TYPE_MODEL_START) {\n-\t\tif (!model->glow.metadata.model.ocm_relocatable)\n+\t\tif (!layer->glow.metadata.model.ocm_relocatable)\n \t\t\treq->cn10k_req.jd.hdr.sp_flags = ML_CN10K_SP_FLAGS_OCM_NONRELOCATABLE;\n \t\telse\n \t\t\treq->cn10k_req.jd.hdr.sp_flags = 0x0;\n@@ -291,7 +293,7 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml\n \t\treq->cn10k_req.jd.model_start.num_gather_entries = 0;\n \t\treq->cn10k_req.jd.model_start.num_scatter_entries = 0;\n \t\treq->cn10k_req.jd.model_start.tilemask = 0; /* Updated after reserving pages */\n-\t\treq->cn10k_req.jd.model_start.batch_size = model->batch_size;\n+\t\treq->cn10k_req.jd.model_start.batch_size = 
layer->batch_size;\n \t\treq->cn10k_req.jd.model_start.ocm_wb_base_address =\n \t\t\t0; /* Updated after reserving pages */\n \t\treq->cn10k_req.jd.model_start.ocm_wb_range_start =\n@@ -323,9 +325,13 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml\n }\n \n static __rte_always_inline void\n-cn10k_ml_prep_fp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml_req *req,\n+cn10k_ml_prep_fp_job_descriptor(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_req *req,\n \t\t\t\tstruct rte_ml_op *op)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\n \treq->cn10k_req.jd.hdr.jce.w0.u64 = 0;\n \treq->cn10k_req.jd.hdr.jce.w1.u64 = PLT_U64_CAST(req->status);\n \treq->cn10k_req.jd.hdr.model_id = op->model_id;\n@@ -714,10 +720,8 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint\n }\n \n static int\n-cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id)\n+cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer)\n {\n-\tstruct rte_ml_model_info *info;\n-\tstruct cnxk_ml_model *model;\n \tstruct rte_ml_buff_seg seg[2];\n \tstruct rte_ml_buff_seg *inp;\n \tstruct rte_ml_buff_seg *out;\n@@ -730,22 +734,20 @@ cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id)\n \tint ret = 0;\n \tuint32_t i;\n \n-\tmodel = dev->data->models[model_id];\n-\tinfo = (struct rte_ml_model_info *)model->info;\n \tinp = &seg[0];\n \tout = &seg[1];\n \n \t/* Create input and output buffers. */\n-\tfor (i = 0; i < info->nb_inputs; i++)\n-\t\tisize += info->input_info[i].size;\n+\tfor (i = 0; i < layer->info.nb_inputs; i++)\n+\t\tisize += layer->info.input[i].sz_q;\n \n-\tfor (i = 0; i < info->nb_outputs; i++)\n-\t\tosize += info->output_info[i].size;\n+\tfor (i = 0; i < layer->info.nb_outputs; i++)\n+\t\tosize += layer->info.output[i].sz_q;\n \n-\tisize = model->batch_size * isize;\n-\tosize = model->batch_size * osize;\n+\tisize = layer->batch_size * isize;\n+\tosize = layer->batch_size * osize;\n \n-\tsnprintf(str, RTE_MEMZONE_NAMESIZE, \"%s_%u\", \"ml_dummy_io\", model_id);\n+\tsnprintf(str, RTE_MEMZONE_NAMESIZE, \"%s_%u\", \"ml_dummy_io\", layer->index);\n \tmz = plt_memzone_reserve_aligned(str, isize + osize, 0, ML_CN10K_ALIGN_SIZE);\n \tif (mz == NULL)\n \t\treturn -ENOMEM;\n@@ -761,15 +763,15 @@ cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id)\n \tseg[1].length = osize;\n \tseg[1].next = NULL;\n \n-\top.model_id = model_id;\n-\top.nb_batches = model->batch_size;\n+\top.model_id = layer->index;\n+\top.nb_batches = layer->batch_size;\n \top.mempool = NULL;\n \n \top.input = &inp;\n \top.output = &out;\n \n-\tmemset(model->layer[0].glow.req, 0, sizeof(struct cnxk_ml_req));\n-\tret = cn10k_ml_inference_sync(dev, &op);\n+\tmemset(layer->glow.req, 0, sizeof(struct cnxk_ml_req));\n+\tret = cn10k_ml_inference_sync(cnxk_mldev, &op);\n \tplt_memzone_free(mz);\n \n \treturn ret;\n@@ -1506,14 +1508,16 @@ cn10k_ml_model_unload(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mode\n }\n \n int\n-cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n+cn10k_ml_layer_start(void *device, uint16_t model_id, const char *layer_name)\n {\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_layer *layer;\n \tstruct cn10k_ml_ocm *ocm;\n \tstruct cnxk_ml_req *req;\n \n+\tuint16_t layer_id = 0;\n \tbool job_enqueued;\n \tbool job_dequeued;\n \tuint8_t num_tiles;\n@@ 
-1524,85 +1528,89 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \tbool locked;\n \tint ret = 0;\n \n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tocm = &cn10k_mldev->ocm;\n-\tmodel = dev->data->models[model_id];\n+\tPLT_SET_USED(layer_name);\n \n+\tcnxk_mldev = (struct cnxk_ml_dev *)device;\n+\tif (cnxk_mldev == NULL) {\n+\t\tplt_err(\"Invalid device = %p\", device);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n \tif (model == NULL) {\n \t\tplt_err(\"Invalid model_id = %u\", model_id);\n \t\treturn -EINVAL;\n \t}\n \n+\tlayer = &model->layer[layer_id];\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n+\n \t/* Prepare JD */\n-\treq = model->layer[0].glow.req;\n-\tcn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_START);\n+\treq = layer->glow.req;\n+\tcn10k_ml_prep_sp_job_descriptor(cnxk_mldev, layer, req, ML_CN10K_JOB_TYPE_MODEL_START);\n \treq->cn10k_req.result.error_code = 0x0;\n \treq->cn10k_req.result.user_ptr = NULL;\n \n \tplt_write64(ML_CNXK_POLL_JOB_START, &req->cn10k_req.status);\n \tplt_wmb();\n \n-\tnum_tiles = model->layer[0].glow.metadata.model.tile_end -\n-\t\t    model->layer[0].glow.metadata.model.tile_start + 1;\n+\tnum_tiles = layer->glow.metadata.model.tile_end - layer->glow.metadata.model.tile_start + 1;\n \n \tlocked = false;\n \twhile (!locked) {\n \t\tif (plt_spinlock_trylock(&model->lock) != 0) {\n-\t\t\tif (model->state == ML_CNXK_MODEL_STATE_STARTED) {\n-\t\t\t\tplt_ml_dbg(\"Model already started, model = 0x%016lx\",\n-\t\t\t\t\t   PLT_U64_CAST(model));\n+\t\t\tif (layer->state == ML_CNXK_LAYER_STATE_STARTED) {\n+\t\t\t\tplt_ml_dbg(\"Layer already started, model_id = %u, layer_id = %u\",\n+\t\t\t\t\t   model->model_id, layer_id);\n \t\t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\t\treturn 1;\n \t\t\t}\n \n-\t\t\tif (model->state == ML_CNXK_MODEL_STATE_JOB_ACTIVE) {\n-\t\t\t\tplt_err(\"A slow-path job is active for the model = 0x%016lx\",\n-\t\t\t\t\tPLT_U64_CAST(model));\n+\t\t\tif (layer->state == ML_CNXK_LAYER_STATE_JOB_ACTIVE) {\n+\t\t\t\tplt_err(\"A slow-path job is active for the model_id = %u\",\n+\t\t\t\t\tmodel->model_id);\n \t\t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\t\treturn -EBUSY;\n \t\t\t}\n \n-\t\t\tmodel->state = ML_CNXK_MODEL_STATE_JOB_ACTIVE;\n+\t\t\tlayer->state = ML_CNXK_LAYER_STATE_JOB_ACTIVE;\n \t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\tlocked = true;\n \t\t}\n \t}\n \n-\twhile (!model->layer[0].glow.ocm_map.ocm_reserved) {\n+\twhile (!layer->glow.ocm_map.ocm_reserved) {\n \t\tif (plt_spinlock_trylock(&ocm->lock) != 0) {\n \t\t\twb_page_start = cn10k_ml_ocm_tilemask_find(\n-\t\t\t\tdev, num_tiles, model->layer[0].glow.ocm_map.wb_pages,\n-\t\t\t\tmodel->layer[0].glow.ocm_map.scratch_pages, &tilemask);\n+\t\t\t\tcnxk_mldev, num_tiles, layer->glow.ocm_map.wb_pages,\n+\t\t\t\tlayer->glow.ocm_map.scratch_pages, &tilemask);\n \n \t\t\tif (wb_page_start == -1) {\n \t\t\t\tplt_err(\"Free pages not available on OCM tiles\");\n-\t\t\t\tplt_err(\"Failed to start model = 0x%016lx, name = %s\",\n-\t\t\t\t\tPLT_U64_CAST(model),\n-\t\t\t\t\tmodel->layer[0].glow.metadata.model.name);\n-\n+\t\t\t\tplt_err(\"Failed to start layer, model_id = %u, layer_id = %u\",\n+\t\t\t\t\tmodel->model_id, layer_id);\n \t\t\t\tplt_spinlock_unlock(&ocm->lock);\n \t\t\t\treturn -ENOMEM;\n \t\t\t}\n \n-\t\t\tmodel->layer[0].glow.ocm_map.tilemask = 
tilemask;\n-\t\t\tmodel->layer[0].glow.ocm_map.wb_page_start = wb_page_start;\n+\t\t\tlayer->glow.ocm_map.tilemask = tilemask;\n+\t\t\tlayer->glow.ocm_map.wb_page_start = wb_page_start;\n \n-\t\t\tcn10k_ml_ocm_reserve_pages(dev, model->model_id, 0,\n-\t\t\t\t\t\t   model->layer[0].glow.ocm_map.tilemask,\n-\t\t\t\t\t\t   model->layer[0].glow.ocm_map.wb_page_start,\n-\t\t\t\t\t\t   model->layer[0].glow.ocm_map.wb_pages,\n-\t\t\t\t\t\t   model->layer[0].glow.ocm_map.scratch_pages);\n-\t\t\tmodel->layer[0].glow.ocm_map.ocm_reserved = true;\n+\t\t\tcn10k_ml_ocm_reserve_pages(\n+\t\t\t\tcnxk_mldev, model->model_id, layer_id, layer->glow.ocm_map.tilemask,\n+\t\t\t\tlayer->glow.ocm_map.wb_page_start, layer->glow.ocm_map.wb_pages,\n+\t\t\t\tlayer->glow.ocm_map.scratch_pages);\n+\t\t\tlayer->glow.ocm_map.ocm_reserved = true;\n \t\t\tplt_spinlock_unlock(&ocm->lock);\n \t\t}\n \t}\n \n \t/* Update JD */\n-\tcn10k_ml_ocm_tilecount(model->layer[0].glow.ocm_map.tilemask, &tile_start, &tile_end);\n+\tcn10k_ml_ocm_tilecount(layer->glow.ocm_map.tilemask, &tile_start, &tile_end);\n \treq->cn10k_req.jd.model_start.tilemask = GENMASK_ULL(tile_end, tile_start);\n \treq->cn10k_req.jd.model_start.ocm_wb_base_address =\n-\t\tmodel->layer[0].glow.ocm_map.wb_page_start * ocm->page_size;\n+\t\tlayer->glow.ocm_map.wb_page_start * ocm->page_size;\n \n \tjob_enqueued = false;\n \tjob_dequeued = false;\n@@ -1636,66 +1644,94 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \tlocked = false;\n \twhile (!locked) {\n \t\tif (plt_spinlock_trylock(&model->lock) != 0) {\n-\t\t\tif (ret == 0) {\n-\t\t\t\tmodel->state = ML_CNXK_MODEL_STATE_STARTED;\n-\t\t\t\tcnxk_mldev->nb_models_started++;\n-\t\t\t} else {\n-\t\t\t\tmodel->state = ML_CNXK_MODEL_STATE_UNKNOWN;\n-\t\t\t}\n+\t\t\tif (ret == 0)\n+\t\t\t\tlayer->state = ML_CNXK_LAYER_STATE_STARTED;\n+\t\t\telse\n+\t\t\t\tlayer->state = ML_CNXK_LAYER_STATE_UNKNOWN;\n \n \t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\tlocked = true;\n \t\t}\n \t}\n \n-\tif (model->state == ML_CNXK_MODEL_STATE_UNKNOWN) {\n-\t\twhile (model->layer[0].glow.ocm_map.ocm_reserved) {\n+\tif (layer->state == ML_CNXK_LAYER_STATE_UNKNOWN) {\n+\t\twhile (layer->glow.ocm_map.ocm_reserved) {\n \t\t\tif (plt_spinlock_trylock(&ocm->lock) != 0) {\n-\t\t\t\tcn10k_ml_ocm_free_pages(dev, model->model_id, 0);\n-\t\t\t\tmodel->layer[0].glow.ocm_map.ocm_reserved = false;\n-\t\t\t\tmodel->layer[0].glow.ocm_map.tilemask = 0x0;\n+\t\t\t\tcn10k_ml_ocm_free_pages(cnxk_mldev, model->model_id, layer_id);\n+\t\t\t\tlayer->glow.ocm_map.ocm_reserved = false;\n+\t\t\t\tlayer->glow.ocm_map.tilemask = 0x0;\n \t\t\t\tplt_spinlock_unlock(&ocm->lock);\n \t\t\t}\n \t\t}\n \t}\n \n-\tif (ret < 0) { /* Call unload to update model and FW state, ignore error */\n-\t\trte_ml_model_stop(dev->data->dev_id, model_id);\n+\tif (ret < 0) {\n+\t\tcn10k_ml_layer_stop(device, model_id, layer_name);\n \t} else {\n-\t\tif (cn10k_mldev->cache_model_data && roc_model_is_cn10ka())\n-\t\t\tret = cn10k_ml_cache_model_data(dev, model_id);\n+\t\tif (cn10k_mldev->cache_model_data)\n+\t\t\tret = cn10k_ml_cache_model_data(cnxk_mldev, layer);\n \t}\n \n \treturn ret;\n }\n \n int\n-cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n+cn10k_ml_model_start(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model)\n+{\n+\tstruct cnxk_ml_layer *layer;\n+\tint ret;\n+\n+\tlayer = &model->layer[0];\n+\tret = cn10k_ml_layer_start(cnxk_mldev, model->model_id, layer->name);\n+\tif (ret != 0) {\n+\t\tplt_err(\"CN10K Model start failed, 
model_id = %u, error = %d\", model->model_id,\n+\t\t\tret);\n+\t\treturn ret;\n+\t}\n+\n+\tcnxk_mldev->nb_models_started++;\n+\tmodel->state = ML_CNXK_MODEL_STATE_STARTED;\n+\n+\treturn 0;\n+}\n+\n+int\n+cn10k_ml_layer_stop(void *device, uint16_t model_id, const char *layer_name)\n {\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_layer *layer;\n \tstruct cn10k_ml_ocm *ocm;\n \tstruct cnxk_ml_req *req;\n \n+\tuint16_t layer_id = 0;\n \tbool job_enqueued;\n \tbool job_dequeued;\n \tbool locked;\n \tint ret = 0;\n \n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tocm = &cn10k_mldev->ocm;\n-\tmodel = dev->data->models[model_id];\n+\tPLT_SET_USED(layer_name);\n+\n+\tcnxk_mldev = (struct cnxk_ml_dev *)device;\n+\tif (cnxk_mldev == NULL) {\n+\t\tplt_err(\"Invalid device = %p\", device);\n+\t\treturn -EINVAL;\n+\t}\n \n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n \tif (model == NULL) {\n \t\tplt_err(\"Invalid model_id = %u\", model_id);\n \t\treturn -EINVAL;\n \t}\n \n+\tlayer = &model->layer[layer_id];\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n+\n \t/* Prepare JD */\n-\treq = model->layer[0].glow.req;\n-\tcn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_STOP);\n+\treq = layer->glow.req;\n+\tcn10k_ml_prep_sp_job_descriptor(cnxk_mldev, layer, req, ML_CN10K_JOB_TYPE_MODEL_STOP);\n \treq->cn10k_req.result.error_code = 0x0;\n \treq->cn10k_req.result.user_ptr = NULL;\n \n@@ -1705,31 +1741,31 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \tlocked = false;\n \twhile (!locked) {\n \t\tif (plt_spinlock_trylock(&model->lock) != 0) {\n-\t\t\tif (model->state == ML_CNXK_MODEL_STATE_LOADED) {\n-\t\t\t\tplt_ml_dbg(\"Model not started, model = 0x%016lx\",\n-\t\t\t\t\t   PLT_U64_CAST(model));\n+\t\t\tif (layer->state == ML_CNXK_LAYER_STATE_LOADED) {\n+\t\t\t\tplt_ml_dbg(\"Layer not started, model_id = %u, layer_id = %u\",\n+\t\t\t\t\t   model->model_id, layer_id);\n \t\t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\t\treturn 1;\n \t\t\t}\n \n-\t\t\tif (model->state == ML_CNXK_MODEL_STATE_JOB_ACTIVE) {\n-\t\t\t\tplt_err(\"A slow-path job is active for the model = 0x%016lx\",\n-\t\t\t\t\tPLT_U64_CAST(model));\n+\t\t\tif (layer->state == ML_CNXK_LAYER_STATE_JOB_ACTIVE) {\n+\t\t\t\tplt_err(\"A slow-path job is active for the layer, model_id = %u, layer_id = %u\",\n+\t\t\t\t\tmodel->model_id, layer_id);\n \t\t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\t\treturn -EBUSY;\n \t\t\t}\n \n-\t\t\tmodel->state = ML_CNXK_MODEL_STATE_JOB_ACTIVE;\n+\t\t\tlayer->state = ML_CNXK_LAYER_STATE_JOB_ACTIVE;\n \t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\tlocked = true;\n \t\t}\n \t}\n \n-\twhile (model->layer[0].glow.ocm_map.ocm_reserved) {\n+\twhile (layer->glow.ocm_map.ocm_reserved) {\n \t\tif (plt_spinlock_trylock(&ocm->lock) != 0) {\n-\t\t\tcn10k_ml_ocm_free_pages(dev, model->model_id, 0);\n-\t\t\tmodel->layer[0].glow.ocm_map.ocm_reserved = false;\n-\t\t\tmodel->layer[0].glow.ocm_map.tilemask = 0x0;\n+\t\t\tcn10k_ml_ocm_free_pages(cnxk_mldev, model->model_id, layer_id);\n+\t\t\tlayer->glow.ocm_map.ocm_reserved = false;\n+\t\t\tlayer->glow.ocm_map.tilemask = 0x0;\n \t\t\tplt_spinlock_unlock(&ocm->lock);\n \t\t}\n \t}\n@@ -1766,8 +1802,11 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \tlocked = false;\n \twhile (!locked) {\n \t\tif (plt_spinlock_trylock(&model->lock) != 0) 
{\n-\t\t\tcnxk_mldev->nb_models_stopped++;\n-\t\t\tmodel->state = ML_CNXK_MODEL_STATE_LOADED;\n+\t\t\tif (ret == 0)\n+\t\t\t\tlayer->state = ML_CNXK_LAYER_STATE_LOADED;\n+\t\t\telse\n+\t\t\t\tlayer->state = ML_CNXK_LAYER_STATE_UNKNOWN;\n+\n \t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\tlocked = true;\n \t\t}\n@@ -1776,6 +1815,25 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \treturn ret;\n }\n \n+int\n+cn10k_ml_model_stop(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model)\n+{\n+\tstruct cnxk_ml_layer *layer;\n+\tint ret;\n+\n+\tlayer = &model->layer[0];\n+\tret = cn10k_ml_layer_stop(cnxk_mldev, model->model_id, layer->name);\n+\tif (ret != 0) {\n+\t\tplt_err(\"CN10K Model stop failed, model_id = %u, error = %d\", model->model_id, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tcnxk_mldev->nb_models_stopped++;\n+\tmodel->state = ML_CNXK_MODEL_STATE_LOADED;\n+\n+\treturn 0;\n+}\n+\n int\n cn10k_ml_model_info_get(struct rte_ml_dev *dev, uint16_t model_id,\n \t\t\tstruct rte_ml_model_info *model_info)\n@@ -2003,30 +2061,35 @@ queue_free_count(uint64_t head, uint64_t tail, uint64_t nb_desc)\n }\n \n static __rte_always_inline void\n-cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cnxk_ml_req *req)\n+cn10k_ml_result_update(struct cnxk_ml_dev *cnxk_mldev, int qp_id, struct cnxk_ml_req *req)\n {\n \tunion cn10k_ml_error_code *error_code;\n \tstruct cn10k_ml_layer_xstats *xstats;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_result *result;\n \tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_layer *layer;\n \tstruct cnxk_ml_qp *qp;\n \tstruct rte_ml_op *op;\n \tuint64_t hw_latency;\n \tuint64_t fw_latency;\n+\tuint16_t model_id;\n+\tuint16_t layer_id;\n \n \tresult = &req->cn10k_req.result;\n \top = req->op;\n \n \tif (likely(result->error_code == 0)) {\n-\t\tmodel = dev->data->models[op->model_id];\n+\t\tmodel_id = cnxk_mldev->index_map[op->model_id].model_id;\n+\t\tlayer_id = cnxk_mldev->index_map[op->model_id].layer_id;\n+\t\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\t\tlayer = &model->layer[layer_id];\n \t\tif (likely(qp_id >= 0)) {\n-\t\t\tqp = dev->data->queue_pairs[qp_id];\n+\t\t\tqp = cnxk_mldev->mldev->data->queue_pairs[qp_id];\n \t\t\tqp->stats.dequeued_count++;\n-\t\t\txstats = &model->layer[0].glow.burst_xstats[qp_id];\n+\t\t\txstats = &layer->glow.burst_xstats[qp_id];\n \t\t} else {\n-\t\t\txstats = model->layer[0].glow.sync_xstats;\n+\t\t\txstats = layer->glow.sync_xstats;\n \t\t}\n \n \t\tif (unlikely(xstats->dequeued_count == xstats->hw_reset_count)) {\n@@ -2054,14 +2117,13 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cnxk_ml_req *re\n \t\top->status = RTE_ML_OP_STATUS_SUCCESS;\n \t} else {\n \t\tif (likely(qp_id >= 0)) {\n-\t\t\tqp = dev->data->queue_pairs[qp_id];\n+\t\t\tqp = cnxk_mldev->mldev->data->queue_pairs[qp_id];\n \t\t\tqp->stats.dequeue_err_count++;\n \t\t}\n \n \t\t/* Handle driver error */\n \t\terror_code = (union cn10k_ml_error_code *)&result->error_code;\n \t\tif (error_code->s.etype == ML_ETYPE_DRIVER) {\n-\t\t\tcnxk_mldev = dev->data->dev_private;\n \t\t\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \t\t\t/* Check for exception */\n@@ -2116,7 +2178,7 @@ cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \treq = &queue->reqs[head];\n \n \tcn10k_mldev->set_poll_addr(req);\n-\tcn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);\n+\tcn10k_ml_prep_fp_job_descriptor(cnxk_mldev, req, op);\n \n 
\tmemset(&req->cn10k_req.result, 0, sizeof(struct cn10k_ml_result));\n \terror_code = (union cn10k_ml_error_code *)&req->cn10k_req.result.error_code;\n@@ -2183,7 +2245,7 @@ cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \t\t}\n \t}\n \n-\tcn10k_ml_result_update(dev, qp_id, req);\n+\tcn10k_ml_result_update(cnxk_mldev, qp_id, req);\n \tops[count] = req->op;\n \n \tqueue_index_advance(&tail, qp->nb_desc);\n@@ -2232,23 +2294,27 @@ cn10k_ml_op_error_get(struct rte_ml_dev *dev, struct rte_ml_op *op, struct rte_m\n }\n \n __rte_hot int\n-cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)\n+cn10k_ml_inference_sync(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_op *op)\n {\n \tunion cn10k_ml_error_code *error_code;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_layer *layer;\n \tstruct cnxk_ml_req *req;\n+\tuint16_t model_id;\n+\tuint16_t layer_id;\n \tbool timeout;\n \tint ret = 0;\n \n-\tcnxk_mldev = dev->data->dev_private;\n \tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tmodel = dev->data->models[op->model_id];\n-\treq = model->layer[0].glow.req;\n+\tmodel_id = cnxk_mldev->index_map[op->model_id].model_id;\n+\tlayer_id = cnxk_mldev->index_map[op->model_id].layer_id;\n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\tlayer = &model->layer[layer_id];\n+\treq = layer->glow.req;\n \n \tcn10k_ml_set_poll_addr(req);\n-\tcn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);\n+\tcn10k_ml_prep_fp_job_descriptor(cnxk_mldev, req, op);\n \n \tmemset(&req->cn10k_req.result, 0, sizeof(struct cn10k_ml_result));\n \terror_code = (union cn10k_ml_error_code *)&req->cn10k_req.result.error_code;\n@@ -2284,7 +2350,7 @@ cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)\n \tif (timeout)\n \t\tret = -ETIME;\n \telse\n-\t\tcn10k_ml_result_update(dev, -1, req);\n+\t\tcn10k_ml_result_update(cnxk_mldev, -1, req);\n \n error_enqueue:\n \treturn ret;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.h b/drivers/ml/cnxk/cn10k_ml_ops.h\nindex 677219dfdf..a222a43d55 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.h\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.h\n@@ -315,8 +315,8 @@ int cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mod\n int cn10k_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *params,\n \t\t\tstruct cnxk_ml_model *model);\n int cn10k_ml_model_unload(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model);\n-int cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id);\n-int cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id);\n+int cn10k_ml_model_start(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model);\n+int cn10k_ml_model_stop(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model);\n int cn10k_ml_model_info_get(struct rte_ml_dev *dev, uint16_t model_id,\n \t\t\t    struct rte_ml_model_info *model_info);\n int cn10k_ml_model_params_update(struct rte_ml_dev *dev, uint16_t model_id, void *buffer);\n@@ -335,7 +335,7 @@ __rte_hot uint16_t cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id\n \t\t\t\t\t  struct rte_ml_op **ops, uint16_t nb_ops);\n __rte_hot int cn10k_ml_op_error_get(struct rte_ml_dev *dev, struct rte_ml_op *op,\n \t\t\t\t    struct rte_ml_op_error *error);\n-__rte_hot int cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op);\n+__rte_hot int cn10k_ml_inference_sync(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_op *op);\n \n /* Misc ops */\n void 
cn10k_ml_qp_initialize(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_qp *qp);\n@@ -344,5 +344,7 @@ void cn10k_ml_qp_initialize(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_qp *q\n int cn10k_ml_layer_load(void *device, uint16_t model_id, const char *layer_name, uint8_t *buffer,\n \t\t\tsize_t size, uint16_t *index);\n int cn10k_ml_layer_unload(void *device, uint16_t model_id, const char *layer_name);\n+int cn10k_ml_layer_start(void *device, uint16_t model_id, const char *layer_name);\n+int cn10k_ml_layer_stop(void *device, uint16_t model_id, const char *layer_name);\n \n #endif /* _CN10K_ML_OPS_H_ */\ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.c b/drivers/ml/cnxk/cnxk_ml_ops.c\nindex 1d8b84269d..b61ed45876 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.c\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.c\n@@ -240,7 +240,7 @@ cnxk_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *co\n \t\t\tmodel = dev->data->models[model_id];\n \t\t\tif (model != NULL) {\n \t\t\t\tif (model->state == ML_CNXK_MODEL_STATE_STARTED) {\n-\t\t\t\t\tif (cn10k_ml_model_stop(dev, model_id) != 0)\n+\t\t\t\t\tif (cnxk_ml_model_stop(dev, model_id) != 0)\n \t\t\t\t\t\tplt_err(\"Could not stop model %u\", model_id);\n \t\t\t\t}\n \t\t\t\tif (model->state == ML_CNXK_MODEL_STATE_LOADED) {\n@@ -332,7 +332,7 @@ cnxk_ml_dev_close(struct rte_ml_dev *dev)\n \t\tmodel = dev->data->models[model_id];\n \t\tif (model != NULL) {\n \t\t\tif (model->state == ML_CNXK_MODEL_STATE_STARTED) {\n-\t\t\t\tif (cn10k_ml_model_stop(dev, model_id) != 0)\n+\t\t\t\tif (cnxk_ml_model_stop(dev, model_id) != 0)\n \t\t\t\t\tplt_err(\"Could not stop model %u\", model_id);\n \t\t\t}\n \t\t\tif (model->state == ML_CNXK_MODEL_STATE_LOADED) {\n@@ -564,6 +564,46 @@ cnxk_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id)\n \treturn plt_memzone_free(plt_memzone_lookup(str));\n }\n \n+static int\n+cnxk_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n+{\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n+\tstruct cnxk_ml_model *model;\n+\n+\tif (dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\tcnxk_mldev = dev->data->dev_private;\n+\n+\tmodel = dev->data->models[model_id];\n+\tif (model == NULL) {\n+\t\tplt_err(\"Invalid model_id = %u\", model_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn cn10k_ml_model_start(cnxk_mldev, model);\n+}\n+\n+int\n+cnxk_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n+{\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n+\tstruct cnxk_ml_model *model;\n+\n+\tif (dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\tcnxk_mldev = dev->data->dev_private;\n+\n+\tmodel = dev->data->models[model_id];\n+\tif (model == NULL) {\n+\t\tplt_err(\"Invalid model_id = %u\", model_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn cn10k_ml_model_stop(cnxk_mldev, model);\n+}\n+\n struct rte_ml_dev_ops cnxk_ml_ops = {\n \t/* Device control ops */\n \t.dev_info_get = cnxk_ml_dev_info_get,\n@@ -589,8 +629,8 @@ struct rte_ml_dev_ops cnxk_ml_ops = {\n \t/* Model ops */\n \t.model_load = cnxk_ml_model_load,\n \t.model_unload = cnxk_ml_model_unload,\n-\t.model_start = cn10k_ml_model_start,\n-\t.model_stop = cn10k_ml_model_stop,\n+\t.model_start = cnxk_ml_model_start,\n+\t.model_stop = cnxk_ml_model_stop,\n \t.model_info_get = cn10k_ml_model_info_get,\n \t.model_params_update = cn10k_ml_model_params_update,\n \ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.h b/drivers/ml/cnxk/cnxk_ml_ops.h\nindex bc14f6e5b9..d27ca0d0cb 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.h\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.h\n@@ -63,5 +63,6 @@ struct cnxk_ml_qp {\n extern struct rte_ml_dev_ops 
cnxk_ml_ops;\n \n int cnxk_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id);\n+int cnxk_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id);\n \n #endif /* _CNXK_ML_OPS_H_ */\n",
    "prefixes": [
        "v7",
        "10/34"
    ]
}
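
The mbox URL in the response serves the raw patch email, so the patch can be applied to a local tree without copying the diff out of the JSON. A minimal sketch (assuming the working directory is a git clone of DPDK; the local filename is arbitrary):

import subprocess
import urllib.request

MBOX = "http://patches.dpdk.org/project/dpdk/patch/20231019041726.19243-11-syalavarthi@marvell.com/mbox/"

# Save the patch email locally, then apply it with git-am.
urllib.request.urlretrieve(MBOX, "patch.mbox")
subprocess.run(["git", "am", "patch.mbox"], check=True)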