get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
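
A minimal sketch of reading this endpoint programmatically (an illustration, assuming Python with the third-party requests package installed); it uses the patch URL and only prints fields that appear in the JSON response below:

import requests

# Fetch the patch shown below as JSON from the Patchwork REST API.
resp = requests.get("http://patches.dpdk.org/api/patches/133390/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[v9,04/34] ml/cnxk: add generic cnxk request structure"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # URL of the raw mbox, e.g. for feeding to `git am`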

GET /api/patches/133390/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133390,
    "url": "http://patches.dpdk.org/api/patches/133390/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231026124347.22477-5-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231026124347.22477-5-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231026124347.22477-5-syalavarthi@marvell.com",
    "date": "2023-10-26T12:43:13",
    "name": "[v9,04/34] ml/cnxk: add generic cnxk request structure",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "4590e7bcac36fb95847a697d374ab2da93618f64",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231026124347.22477-5-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 30002,
            "url": "http://patches.dpdk.org/api/series/30002/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=30002",
            "date": "2023-10-26T12:43:09",
            "name": "Implementation of revised ml/cnxk driver",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/30002/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/133390/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/133390/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7640443208;\n\tThu, 26 Oct 2023 14:44:21 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A342C42DF0;\n\tThu, 26 Oct 2023 14:44:00 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 55C41427D7\n for <dev@dpdk.org>; Thu, 26 Oct 2023 14:43:56 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 39QAKqcQ006841 for <dev@dpdk.org>; Thu, 26 Oct 2023 05:43:55 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3txcsr25pj-2\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 26 Oct 2023 05:43:55 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Thu, 26 Oct 2023 05:43:53 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Thu, 26 Oct 2023 05:43:53 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 060FB3F709C;\n Thu, 26 Oct 2023 05:43:53 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=P1zSH+foOzFeeRkHfps1+SFEBB9XgjT2Z9KxNkxtac4=;\n b=c/Oze0wckjSaXRecBPeJty6AdU6EgVhIV90f5Vv7ABcrhZgA6mzLDEkldErJm8+qJqPj\n I0ij4srOVjTc6JsAw+iAkYQ3GuqGQ/NEO10mDivTgU8zl6SlXvHKqiCT+4IEbygAv3LC\n 1jI9C4frGQw+v2n14ZWdhXOW+QKCCf/xHLfxs8bXivvMXvJ4f+8ZSLbgegquA7pldbQd\n 9e0M24EQlAg3F3LMBwhk6d28rrN+4AhBZq9jzgj3BQ1bzEvX/2hnSeHouWfd9P2G8H9h\n Xkcg13sS1F7Mft3NY0AbZa7VTR0QgQQyK4+ZPGsrnubrNBbPkN6aP/pMalCbxMgFMlJ3 eQ==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v9 04/34] ml/cnxk: add generic cnxk request structure",
        "Date": "Thu, 26 Oct 2023 05:43:13 -0700",
        "Message-ID": "<20231026124347.22477-5-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.42.0",
        "In-Reply-To": "<20231026124347.22477-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20231026124347.22477-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "vHrqxEWeZVZp7Aqvj6tZisWZTShmpgm4",
        "X-Proofpoint-GUID": "vHrqxEWeZVZp7Aqvj6tZisWZTShmpgm4",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.272,Aquarius:18.0.987,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-10-26_10,2023-10-26_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Added generic cnxk request structure. Moved common fields\nfrom cn10k structures to cnxk structure. Moved job related\nstructures and enumerations to ops headers.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_dev.c   |  72 +++----\n drivers/ml/cnxk/cn10k_ml_dev.h   | 269 +------------------------\n drivers/ml/cnxk/cn10k_ml_model.c |   6 +-\n drivers/ml/cnxk/cn10k_ml_model.h |   4 +-\n drivers/ml/cnxk/cn10k_ml_ops.c   | 331 +++++++++++++++++--------------\n drivers/ml/cnxk/cn10k_ml_ops.h   | 296 +++++++++++++++++++++++----\n drivers/ml/cnxk/cnxk_ml_ops.c    |   7 +\n drivers/ml/cnxk/cnxk_ml_ops.h    |  63 ++++++\n drivers/ml/cnxk/meson.build      |   1 +\n 9 files changed, 557 insertions(+), 492 deletions(-)\n create mode 100644 drivers/ml/cnxk/cnxk_ml_ops.c\n create mode 100644 drivers/ml/cnxk/cnxk_ml_ops.h",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_dev.c b/drivers/ml/cnxk/cn10k_ml_dev.c\nindex 3bc61443d8..fc6f78d414 100644\n--- a/drivers/ml/cnxk/cn10k_ml_dev.c\n+++ b/drivers/ml/cnxk/cn10k_ml_dev.c\n@@ -14,9 +14,8 @@\n \n #include <roc_api.h>\n \n-#include \"cn10k_ml_ops.h\"\n-\n #include \"cnxk_ml_dev.h\"\n+#include \"cnxk_ml_ops.h\"\n \n #define CN10K_ML_FW_PATH\t\t\"fw_path\"\n #define CN10K_ML_FW_ENABLE_DPE_WARNINGS \"enable_dpe_warnings\"\n@@ -400,20 +399,23 @@ cn10k_ml_pci_remove(struct rte_pci_device *pci_dev)\n static void\n cn10k_ml_fw_print_info(struct cn10k_ml_fw *fw)\n {\n-\tplt_info(\"ML Firmware Version = %s\", fw->req->jd.fw_load.version);\n-\n-\tplt_ml_dbg(\"Firmware capabilities = 0x%016lx\", fw->req->jd.fw_load.cap.u64);\n-\tplt_ml_dbg(\"Version = %s\", fw->req->jd.fw_load.version);\n-\tplt_ml_dbg(\"core0_debug_ptr = 0x%016lx\", fw->req->jd.fw_load.debug.core0_debug_ptr);\n-\tplt_ml_dbg(\"core1_debug_ptr = 0x%016lx\", fw->req->jd.fw_load.debug.core1_debug_ptr);\n-\tplt_ml_dbg(\"debug_buffer_size = %u bytes\", fw->req->jd.fw_load.debug.debug_buffer_size);\n+\tplt_info(\"ML Firmware Version = %s\", fw->req->cn10k_req.jd.fw_load.version);\n+\n+\tplt_ml_dbg(\"Firmware capabilities = 0x%016lx\", fw->req->cn10k_req.jd.fw_load.cap.u64);\n+\tplt_ml_dbg(\"Version = %s\", fw->req->cn10k_req.jd.fw_load.version);\n+\tplt_ml_dbg(\"core0_debug_ptr = 0x%016lx\",\n+\t\t   fw->req->cn10k_req.jd.fw_load.debug.core0_debug_ptr);\n+\tplt_ml_dbg(\"core1_debug_ptr = 0x%016lx\",\n+\t\t   fw->req->cn10k_req.jd.fw_load.debug.core1_debug_ptr);\n+\tplt_ml_dbg(\"debug_buffer_size = %u bytes\",\n+\t\t   fw->req->cn10k_req.jd.fw_load.debug.debug_buffer_size);\n \tplt_ml_dbg(\"core0_exception_buffer = 0x%016lx\",\n-\t\t   fw->req->jd.fw_load.debug.core0_exception_buffer);\n+\t\t   fw->req->cn10k_req.jd.fw_load.debug.core0_exception_buffer);\n \tplt_ml_dbg(\"core1_exception_buffer = 0x%016lx\",\n-\t\t   fw->req->jd.fw_load.debug.core1_exception_buffer);\n+\t\t   fw->req->cn10k_req.jd.fw_load.debug.core1_exception_buffer);\n \tplt_ml_dbg(\"exception_state_size = %u bytes\",\n-\t\t   fw->req->jd.fw_load.debug.exception_state_size);\n-\tplt_ml_dbg(\"flags = 0x%016lx\", fw->req->jd.fw_load.flags);\n+\t\t   fw->req->cn10k_req.jd.fw_load.debug.exception_state_size);\n+\tplt_ml_dbg(\"flags = 0x%016lx\", fw->req->cn10k_req.jd.fw_load.flags);\n }\n \n uint64_t\n@@ -458,29 +460,30 @@ cn10k_ml_fw_load_asim(struct cn10k_ml_fw *fw)\n \troc_ml_reg_save(&cn10k_mldev->roc, ML_MLR_BASE);\n \n \t/* Update FW load completion structure */\n-\tfw->req->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->status);\n-\tfw->req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;\n-\tfw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->result);\n-\tfw->req->jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);\n-\tplt_write64(ML_CNXK_POLL_JOB_START, &fw->req->status);\n+\tfw->req->cn10k_req.jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->cn10k_req.status);\n+\tfw->req->cn10k_req.jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;\n+\tfw->req->cn10k_req.jd.hdr.result =\n+\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->cn10k_req.result);\n+\tfw->req->cn10k_req.jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &fw->req->cn10k_req.status);\n \tplt_wmb();\n \n \t/* Enqueue FW load through scratch registers */\n \ttimeout = true;\n \ttimeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n-\troc_ml_scratch_enqueue(&cn10k_mldev->roc, 
&fw->req->jd);\n+\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &fw->req->cn10k_req.jd);\n \n \tplt_rmb();\n \tdo {\n \t\tif (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&\n-\t\t    (plt_read64(&fw->req->status) == ML_CNXK_POLL_JOB_FINISH)) {\n+\t\t    (plt_read64(&fw->req->cn10k_req.status) == ML_CNXK_POLL_JOB_FINISH)) {\n \t\t\ttimeout = false;\n \t\t\tbreak;\n \t\t}\n \t} while (plt_tsc_cycles() < timeout_cycle);\n \n \t/* Check firmware load status, clean-up and exit on failure. */\n-\tif ((!timeout) && (fw->req->result.error_code.u64 == 0)) {\n+\tif ((!timeout) && (fw->req->cn10k_req.result.error_code == 0)) {\n \t\tcn10k_ml_fw_print_info(fw);\n \t} else {\n \t\t/* Set ML to disable new jobs */\n@@ -654,29 +657,30 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \tplt_ml_dbg(\"ML_SW_RST_CTRL => 0x%08x\", reg_val32);\n \n \t/* (12) Wait for notification from firmware that ML is ready for job execution. */\n-\tfw->req->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->status);\n-\tfw->req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;\n-\tfw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->result);\n-\tfw->req->jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);\n-\tplt_write64(ML_CNXK_POLL_JOB_START, &fw->req->status);\n+\tfw->req->cn10k_req.jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->cn10k_req.status);\n+\tfw->req->cn10k_req.jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;\n+\tfw->req->cn10k_req.jd.hdr.result =\n+\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->cn10k_req.result);\n+\tfw->req->cn10k_req.jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &fw->req->cn10k_req.status);\n \tplt_wmb();\n \n \t/* Enqueue FW load through scratch registers */\n \ttimeout = true;\n \ttimeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n-\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &fw->req->jd);\n+\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &fw->req->cn10k_req.jd);\n \n \tplt_rmb();\n \tdo {\n \t\tif (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&\n-\t\t    (plt_read64(&fw->req->status) == ML_CNXK_POLL_JOB_FINISH)) {\n+\t\t    (plt_read64(&fw->req->cn10k_req.status) == ML_CNXK_POLL_JOB_FINISH)) {\n \t\t\ttimeout = false;\n \t\t\tbreak;\n \t\t}\n \t} while (plt_tsc_cycles() < timeout_cycle);\n \n \t/* Check firmware load status, clean-up and exit on failure. 
*/\n-\tif ((!timeout) && (fw->req->result.error_code.u64 == 0)) {\n+\tif ((!timeout) && (fw->req->cn10k_req.result.error_code == 0)) {\n \t\tcn10k_ml_fw_print_info(fw);\n \t} else {\n \t\t/* Set ML to disable new jobs */\n@@ -766,11 +770,11 @@ cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev)\n \t\t}\n \n \t\t/* Reserve memzone for firmware load completion and data */\n-\t\tmz_size = sizeof(struct cn10k_ml_req) + fw_size + FW_STACK_BUFFER_SIZE +\n+\t\tmz_size = sizeof(struct cnxk_ml_req) + fw_size + FW_STACK_BUFFER_SIZE +\n \t\t\t  FW_DEBUG_BUFFER_SIZE + FW_EXCEPTION_BUFFER_SIZE;\n \t} else if (roc_env_is_asim()) {\n \t\t/* Reserve memzone for firmware load completion */\n-\t\tmz_size = sizeof(struct cn10k_ml_req);\n+\t\tmz_size = sizeof(struct cnxk_ml_req);\n \t}\n \n \tmz = plt_memzone_reserve_aligned(FW_MEMZONE_NAME, mz_size, 0, ML_CN10K_ALIGN_SIZE);\n@@ -782,8 +786,8 @@ cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev)\n \tfw->req = mz->addr;\n \n \t/* Reset firmware load completion structure */\n-\tmemset(&fw->req->jd, 0, sizeof(struct cn10k_ml_jd));\n-\tmemset(&fw->req->jd.fw_load.version[0], '\\0', MLDEV_FIRMWARE_VERSION_LENGTH);\n+\tmemset(&fw->req->cn10k_req.jd, 0, sizeof(struct cn10k_ml_jd));\n+\tmemset(&fw->req->cn10k_req.jd.fw_load.version[0], '\\0', MLDEV_FIRMWARE_VERSION_LENGTH);\n \n \t/* Reset device, if in active state */\n \tif (roc_ml_mlip_is_enabled(&cn10k_mldev->roc))\n@@ -791,7 +795,7 @@ cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev)\n \n \t/* Load firmware */\n \tif (roc_env_is_emulator() || roc_env_is_hw()) {\n-\t\tfw->data = PLT_PTR_ADD(mz->addr, sizeof(struct cn10k_ml_req));\n+\t\tfw->data = PLT_PTR_ADD(mz->addr, sizeof(struct cnxk_ml_req));\n \t\tret = cn10k_ml_fw_load_cn10ka(fw, fw_buffer, fw_size);\n \t\tfree(fw_buffer);\n \t} else if (roc_env_is_asim()) {\ndiff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h\nindex 99ff0a344a..1852d4f6c9 100644\n--- a/drivers/ml/cnxk/cn10k_ml_dev.h\n+++ b/drivers/ml/cnxk/cn10k_ml_dev.h\n@@ -17,9 +17,6 @@ extern struct rte_ml_dev_ops ml_dev_dummy_ops;\n /* Marvell OCTEON CN10K ML PMD device name */\n #define MLDEV_NAME_CN10K_PMD ml_cn10k\n \n-/* Firmware version string length */\n-#define MLDEV_FIRMWARE_VERSION_LENGTH 32\n-\n /* Device alignment size */\n #define ML_CN10K_ALIGN_SIZE 128\n \n@@ -52,17 +49,8 @@ extern struct rte_ml_dev_ops ml_dev_dummy_ops;\n #endif\n \n struct cnxk_ml_dev;\n-struct cn10k_ml_req;\n-struct cn10k_ml_qp;\n-\n-/* Job types */\n-enum cn10k_ml_job_type {\n-\tML_CN10K_JOB_TYPE_MODEL_RUN = 0,\n-\tML_CN10K_JOB_TYPE_MODEL_STOP,\n-\tML_CN10K_JOB_TYPE_MODEL_START,\n-\tML_CN10K_JOB_TYPE_FIRMWARE_LOAD,\n-\tML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST,\n-};\n+struct cnxk_ml_req;\n+struct cnxk_ml_qp;\n \n /* Error types enumeration */\n enum cn10k_ml_error_etype {\n@@ -112,251 +100,6 @@ union cn10k_ml_error_code {\n \tuint64_t u64;\n };\n \n-/* Firmware stats */\n-struct cn10k_ml_fw_stats {\n-\t/* Firmware start cycle */\n-\tuint64_t fw_start;\n-\n-\t/* Firmware end cycle */\n-\tuint64_t fw_end;\n-\n-\t/* Hardware start cycle */\n-\tuint64_t hw_start;\n-\n-\t/* Hardware end cycle */\n-\tuint64_t hw_end;\n-};\n-\n-/* Result structure */\n-struct cn10k_ml_result {\n-\t/* Job error code */\n-\tunion cn10k_ml_error_code error_code;\n-\n-\t/* Firmware stats */\n-\tstruct cn10k_ml_fw_stats stats;\n-\n-\t/* User context pointer */\n-\tvoid *user_ptr;\n-};\n-\n-/* Firmware capability structure */\n-union cn10k_ml_fw_cap {\n-\tuint64_t u64;\n-\n-\tstruct {\n-\t\t/* CMPC completion support */\n-\t\tuint64_t 
cmpc_completions : 1;\n-\n-\t\t/* Poll mode completion support */\n-\t\tuint64_t poll_completions : 1;\n-\n-\t\t/* SSO completion support */\n-\t\tuint64_t sso_completions : 1;\n-\n-\t\t/* Support for model side loading */\n-\t\tuint64_t side_load_model : 1;\n-\n-\t\t/* Batch execution */\n-\t\tuint64_t batch_run : 1;\n-\n-\t\t/* Max number of models to be loaded in parallel */\n-\t\tuint64_t max_models : 8;\n-\n-\t\t/* Firmware statistics */\n-\t\tuint64_t fw_stats : 1;\n-\n-\t\t/* Hardware statistics */\n-\t\tuint64_t hw_stats : 1;\n-\n-\t\t/* Max number of batches */\n-\t\tuint64_t max_num_batches : 16;\n-\n-\t\tuint64_t rsvd : 33;\n-\t} s;\n-};\n-\n-/* Firmware debug info structure */\n-struct cn10k_ml_fw_debug {\n-\t/* ACC core 0 debug buffer */\n-\tuint64_t core0_debug_ptr;\n-\n-\t/* ACC core 1 debug buffer */\n-\tuint64_t core1_debug_ptr;\n-\n-\t/* ACC core 0 exception state buffer */\n-\tuint64_t core0_exception_buffer;\n-\n-\t/* ACC core 1 exception state buffer */\n-\tuint64_t core1_exception_buffer;\n-\n-\t/* Debug buffer size per core */\n-\tuint32_t debug_buffer_size;\n-\n-\t/* Exception state dump size */\n-\tuint32_t exception_state_size;\n-};\n-\n-/* Job descriptor header (32 bytes) */\n-struct cn10k_ml_jd_header {\n-\t/* Job completion structure */\n-\tstruct ml_jce_s jce;\n-\n-\t/* Model ID */\n-\tuint64_t model_id : 8;\n-\n-\t/* Job type */\n-\tuint64_t job_type : 8;\n-\n-\t/* Flags for fast-path jobs */\n-\tuint64_t fp_flags : 16;\n-\n-\t/* Flags for slow-path jobs */\n-\tuint64_t sp_flags : 16;\n-\tuint64_t rsvd : 16;\n-\n-\t/* Job result pointer */\n-\tuint64_t *result;\n-};\n-\n-/* Extra arguments for job descriptor */\n-union cn10k_ml_jd_extended_args {\n-\tstruct cn10k_ml_jd_extended_args_section_start {\n-\t\t/** DDR Scratch base address */\n-\t\tuint64_t ddr_scratch_base_address;\n-\n-\t\t/** DDR Scratch range start */\n-\t\tuint64_t ddr_scratch_range_start;\n-\n-\t\t/** DDR Scratch range end */\n-\t\tuint64_t ddr_scratch_range_end;\n-\n-\t\tuint8_t rsvd[104];\n-\t} start;\n-};\n-\n-/* Job descriptor structure */\n-struct cn10k_ml_jd {\n-\t/* Job descriptor header (32 bytes) */\n-\tstruct cn10k_ml_jd_header hdr;\n-\n-\tunion {\n-\t\tstruct cn10k_ml_jd_section_fw_load {\n-\t\t\t/* Firmware capability structure (8 bytes) */\n-\t\t\tunion cn10k_ml_fw_cap cap;\n-\n-\t\t\t/* Firmware version (32 bytes) */\n-\t\t\tuint8_t version[MLDEV_FIRMWARE_VERSION_LENGTH];\n-\n-\t\t\t/* Debug capability structure (40 bytes) */\n-\t\t\tstruct cn10k_ml_fw_debug debug;\n-\n-\t\t\t/* Flags to control error handling */\n-\t\t\tuint64_t flags;\n-\n-\t\t\tuint8_t rsvd[8];\n-\t\t} fw_load;\n-\n-\t\tstruct cn10k_ml_jd_section_model_start {\n-\t\t\t/* Extended arguments */\n-\t\t\tuint64_t extended_args;\n-\n-\t\t\t/* Destination model start address in DDR relative to ML_MLR_BASE */\n-\t\t\tuint64_t model_dst_ddr_addr;\n-\n-\t\t\t/* Offset to model init section in the model */\n-\t\t\tuint64_t model_init_offset : 32;\n-\n-\t\t\t/* Size of init section in the model */\n-\t\t\tuint64_t model_init_size : 32;\n-\n-\t\t\t/* Offset to model main section in the model */\n-\t\t\tuint64_t model_main_offset : 32;\n-\n-\t\t\t/* Size of main section in the model */\n-\t\t\tuint64_t model_main_size : 32;\n-\n-\t\t\t/* Offset to model finish section in the model */\n-\t\t\tuint64_t model_finish_offset : 32;\n-\n-\t\t\t/* Size of finish section in the model */\n-\t\t\tuint64_t model_finish_size : 32;\n-\n-\t\t\t/* Offset to WB in model bin */\n-\t\t\tuint64_t model_wb_offset : 32;\n-\n-\t\t\t/* Number of 
model layers */\n-\t\t\tuint64_t num_layers : 8;\n-\n-\t\t\t/* Number of gather entries, 0 means linear input mode (= no gather) */\n-\t\t\tuint64_t num_gather_entries : 8;\n-\n-\t\t\t/* Number of scatter entries 0 means linear input mode (= no scatter) */\n-\t\t\tuint64_t num_scatter_entries : 8;\n-\n-\t\t\t/* Tile mask to load model */\n-\t\t\tuint64_t tilemask : 8;\n-\n-\t\t\t/* Batch size of model  */\n-\t\t\tuint64_t batch_size : 32;\n-\n-\t\t\t/* OCM WB base address */\n-\t\t\tuint64_t ocm_wb_base_address : 32;\n-\n-\t\t\t/* OCM WB range start */\n-\t\t\tuint64_t ocm_wb_range_start : 32;\n-\n-\t\t\t/* OCM WB range End */\n-\t\t\tuint64_t ocm_wb_range_end : 32;\n-\n-\t\t\t/* DDR WB address */\n-\t\t\tuint64_t ddr_wb_base_address;\n-\n-\t\t\t/* DDR WB range start */\n-\t\t\tuint64_t ddr_wb_range_start : 32;\n-\n-\t\t\t/* DDR WB range end */\n-\t\t\tuint64_t ddr_wb_range_end : 32;\n-\n-\t\t\tunion {\n-\t\t\t\t/* Points to gather list if num_gather_entries > 0 */\n-\t\t\t\tvoid *gather_list;\n-\t\t\t\tstruct {\n-\t\t\t\t\t/* Linear input mode */\n-\t\t\t\t\tuint64_t ddr_range_start : 32;\n-\t\t\t\t\tuint64_t ddr_range_end : 32;\n-\t\t\t\t} s;\n-\t\t\t} input;\n-\n-\t\t\tunion {\n-\t\t\t\t/* Points to scatter list if num_scatter_entries > 0 */\n-\t\t\t\tvoid *scatter_list;\n-\t\t\t\tstruct {\n-\t\t\t\t\t/* Linear output mode */\n-\t\t\t\t\tuint64_t ddr_range_start : 32;\n-\t\t\t\t\tuint64_t ddr_range_end : 32;\n-\t\t\t\t} s;\n-\t\t\t} output;\n-\t\t} model_start;\n-\n-\t\tstruct cn10k_ml_jd_section_model_stop {\n-\t\t\tuint8_t rsvd[96];\n-\t\t} model_stop;\n-\n-\t\tstruct cn10k_ml_jd_section_model_run {\n-\t\t\t/* Address of the input for the run relative to ML_MLR_BASE */\n-\t\t\tuint64_t input_ddr_addr;\n-\n-\t\t\t/* Address of the output for the run relative to ML_MLR_BASE */\n-\t\t\tuint64_t output_ddr_addr;\n-\n-\t\t\t/* Number of batches to run in variable batch processing */\n-\t\t\tuint16_t num_batches;\n-\n-\t\t\tuint8_t rsvd[78];\n-\t\t} model_run;\n-\t};\n-};\n-\n /* ML firmware structure */\n struct cn10k_ml_fw {\n \t/* Device reference */\n@@ -375,7 +118,7 @@ struct cn10k_ml_fw {\n \tuint8_t *data;\n \n \t/* Firmware load / handshake request structure */\n-\tstruct cn10k_ml_req *req;\n+\tstruct cnxk_ml_req *req;\n };\n \n /* Extended stats types enum */\n@@ -488,9 +231,9 @@ struct cn10k_ml_dev {\n \tbool (*ml_jcmdq_enqueue)(struct roc_ml *roc_ml, struct ml_job_cmd_s *job_cmd);\n \n \t/* Poll handling function pointers */\n-\tvoid (*set_poll_addr)(struct cn10k_ml_req *req);\n-\tvoid (*set_poll_ptr)(struct cn10k_ml_req *req);\n-\tuint64_t (*get_poll_ptr)(struct cn10k_ml_req *req);\n+\tvoid (*set_poll_addr)(struct cnxk_ml_req *req);\n+\tvoid (*set_poll_ptr)(struct cnxk_ml_req *req);\n+\tuint64_t (*get_poll_ptr)(struct cnxk_ml_req *req);\n };\n \n uint64_t cn10k_ml_fw_flags_get(struct cn10k_ml_fw *fw);\ndiff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c\nindex d033d6deff..d2f1c761be 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.c\n+++ b/drivers/ml/cnxk/cn10k_ml_model.c\n@@ -10,6 +10,7 @@\n \n #include \"cnxk_ml_dev.h\"\n #include \"cnxk_ml_model.h\"\n+#include \"cnxk_ml_ops.h\"\n \n static enum rte_ml_io_type\n cn10k_ml_io_type_map(uint8_t type)\n@@ -551,7 +552,6 @@ void\n cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cnxk_ml_model *model)\n {\n \tstruct cn10k_ml_model_metadata *metadata;\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct rte_ml_model_info *info;\n \tstruct rte_ml_io_info *output;\n@@ -560,7 
+560,6 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cnxk_ml_model *model)\n \tuint8_t i;\n \n \tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tmetadata = &model->glow.metadata;\n \tinfo = PLT_PTR_CAST(model->info);\n \tinput = PLT_PTR_ADD(info, sizeof(struct rte_ml_model_info));\n@@ -577,7 +576,8 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cnxk_ml_model *model)\n \tinfo->io_layout = RTE_ML_IO_LAYOUT_PACKED;\n \tinfo->min_batches = model->batch_size;\n \tinfo->max_batches =\n-\t\tcn10k_mldev->fw.req->jd.fw_load.cap.s.max_num_batches / model->batch_size;\n+\t\tcnxk_mldev->cn10k_mldev.fw.req->cn10k_req.jd.fw_load.cap.s.max_num_batches /\n+\t\tmodel->batch_size;\n \tinfo->nb_inputs = metadata->model.num_input;\n \tinfo->input_info = input;\n \tinfo->nb_outputs = metadata->model.num_output;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h\nindex 206a369ca7..74ada1531a 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.h\n+++ b/drivers/ml/cnxk/cn10k_ml_model.h\n@@ -11,10 +11,10 @@\n \n #include \"cn10k_ml_dev.h\"\n #include \"cn10k_ml_ocm.h\"\n-#include \"cn10k_ml_ops.h\"\n \n struct cnxk_ml_model;\n struct cnxk_ml_layer;\n+struct cnxk_ml_req;\n \n /* Model Metadata : v 2.3.0.1 */\n #define MRVL_ML_MODEL_MAGIC_STRING \"MRVL\"\n@@ -444,7 +444,7 @@ struct cn10k_ml_layer_data {\n \tstruct cn10k_ml_ocm_layer_map ocm_map;\n \n \t/* Layer: Slow-path operations request pointer */\n-\tstruct cn10k_ml_req *req;\n+\tstruct cnxk_ml_req *req;\n \n \t/* Layer: Stats for burst ops */\n \tstruct cn10k_ml_layer_stats *burst_stats;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex b226a9b5a2..25ebb28993 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -7,10 +7,9 @@\n \n #include <mldev_utils.h>\n \n-#include \"cn10k_ml_ops.h\"\n-\n #include \"cnxk_ml_dev.h\"\n #include \"cnxk_ml_model.h\"\n+#include \"cnxk_ml_ops.h\"\n \n /* ML model macros */\n #define CN10K_ML_MODEL_MEMZONE_NAME \"ml_cn10k_model_mz\"\n@@ -78,31 +77,31 @@ print_line(FILE *fp, int len)\n }\n \n static inline void\n-cn10k_ml_set_poll_addr(struct cn10k_ml_req *req)\n+cn10k_ml_set_poll_addr(struct cnxk_ml_req *req)\n {\n-\treq->compl_W1 = PLT_U64_CAST(&req->status);\n+\treq->status = &req->cn10k_req.status;\n }\n \n static inline void\n-cn10k_ml_set_poll_ptr(struct cn10k_ml_req *req)\n+cn10k_ml_set_poll_ptr(struct cnxk_ml_req *req)\n {\n-\tplt_write64(ML_CNXK_POLL_JOB_START, req->compl_W1);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, req->status);\n }\n \n static inline uint64_t\n-cn10k_ml_get_poll_ptr(struct cn10k_ml_req *req)\n+cn10k_ml_get_poll_ptr(struct cnxk_ml_req *req)\n {\n-\treturn plt_read64(req->compl_W1);\n+\treturn plt_read64(req->status);\n }\n \n static void\n qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)\n {\n-\tsnprintf(name, size, \"cn10k_ml_qp_mem_%u:%u\", dev_id, qp_id);\n+\tsnprintf(name, size, \"cnxk_ml_qp_mem_%u:%u\", dev_id, qp_id);\n }\n \n static int\n-cn10k_ml_qp_destroy(const struct rte_ml_dev *dev, struct cn10k_ml_qp *qp)\n+cnxk_ml_qp_destroy(const struct rte_ml_dev *dev, struct cnxk_ml_qp *qp)\n {\n \tconst struct rte_memzone *qp_mem;\n \tchar name[RTE_MEMZONE_NAMESIZE];\n@@ -122,14 +121,14 @@ cn10k_ml_qp_destroy(const struct rte_ml_dev *dev, struct cn10k_ml_qp *qp)\n static int\n cn10k_ml_dev_queue_pair_release(struct rte_ml_dev *dev, uint16_t queue_pair_id)\n {\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n \tint ret;\n \n \tqp = 
dev->data->queue_pairs[queue_pair_id];\n \tif (qp == NULL)\n \t\treturn -EINVAL;\n \n-\tret = cn10k_ml_qp_destroy(dev, qp);\n+\tret = cnxk_ml_qp_destroy(dev, qp);\n \tif (ret) {\n \t\tplt_err(\"Could not destroy queue pair %u\", queue_pair_id);\n \t\treturn ret;\n@@ -140,18 +139,18 @@ cn10k_ml_dev_queue_pair_release(struct rte_ml_dev *dev, uint16_t queue_pair_id)\n \treturn 0;\n }\n \n-static struct cn10k_ml_qp *\n-cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_desc, int socket_id)\n+static struct cnxk_ml_qp *\n+cnxk_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_desc, int socket_id)\n {\n \tconst struct rte_memzone *qp_mem;\n \tchar name[RTE_MEMZONE_NAMESIZE];\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n \tuint32_t len;\n \tuint8_t *va;\n \tuint64_t i;\n \n \t/* Allocate queue pair */\n-\tqp = rte_zmalloc_socket(\"cn10k_ml_pmd_queue_pair\", sizeof(struct cn10k_ml_qp), ROC_ALIGN,\n+\tqp = rte_zmalloc_socket(\"cn10k_ml_pmd_queue_pair\", sizeof(struct cnxk_ml_qp), ROC_ALIGN,\n \t\t\t\tsocket_id);\n \tif (qp == NULL) {\n \t\tplt_err(\"Could not allocate queue pair\");\n@@ -159,7 +158,7 @@ cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_des\n \t}\n \n \t/* For request queue */\n-\tlen = nb_desc * sizeof(struct cn10k_ml_req);\n+\tlen = nb_desc * sizeof(struct cnxk_ml_req);\n \tqp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id, qp_id);\n \tqp_mem = rte_memzone_reserve_aligned(\n \t\tname, len, socket_id, RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB, ROC_ALIGN);\n@@ -173,7 +172,7 @@ cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_des\n \n \t/* Initialize Request queue */\n \tqp->id = qp_id;\n-\tqp->queue.reqs = (struct cn10k_ml_req *)va;\n+\tqp->queue.reqs = (struct cnxk_ml_req *)va;\n \tqp->queue.head = 0;\n \tqp->queue.tail = 0;\n \tqp->queue.wait_cycles = ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n@@ -185,8 +184,9 @@ cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_des\n \n \t/* Initialize job command */\n \tfor (i = 0; i < qp->nb_desc; i++) {\n-\t\tmemset(&qp->queue.reqs[i].jd, 0, sizeof(struct cn10k_ml_jd));\n-\t\tqp->queue.reqs[i].jcmd.w1.s.jobptr = PLT_U64_CAST(&qp->queue.reqs[i].jd);\n+\t\tmemset(&qp->queue.reqs[i].cn10k_req.jd, 0, sizeof(struct cn10k_ml_jd));\n+\t\tqp->queue.reqs[i].cn10k_req.jcmd.w1.s.jobptr =\n+\t\t\tPLT_U64_CAST(&qp->queue.reqs[i].cn10k_req.jd);\n \t}\n \n \treturn qp;\n@@ -333,7 +333,7 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n \n static void\n cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml_model *model,\n-\t\t\t\tstruct cn10k_ml_req *req, enum cn10k_ml_job_type job_type)\n+\t\t\t\tstruct cnxk_ml_req *req, enum cn10k_ml_job_type job_type)\n {\n \tstruct cn10k_ml_model_metadata *metadata;\n \tstruct cn10k_ml_layer_addr *addr;\n@@ -341,79 +341,88 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml\n \tmetadata = &model->glow.metadata;\n \taddr = &model->layer[0].glow.addr;\n \n-\tmemset(&req->jd, 0, sizeof(struct cn10k_ml_jd));\n-\treq->jd.hdr.jce.w0.u64 = 0;\n-\treq->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&req->status);\n-\treq->jd.hdr.model_id = model->model_id;\n-\treq->jd.hdr.job_type = job_type;\n-\treq->jd.hdr.fp_flags = 0x0;\n-\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);\n+\tmemset(&req->cn10k_req.jd, 0, sizeof(struct cn10k_ml_jd));\n+\treq->cn10k_req.jd.hdr.jce.w0.u64 = 
0;\n+\treq->cn10k_req.jd.hdr.jce.w1.u64 = PLT_U64_CAST(&req->cn10k_req.status);\n+\treq->cn10k_req.jd.hdr.model_id = model->model_id;\n+\treq->cn10k_req.jd.hdr.job_type = job_type;\n+\treq->cn10k_req.jd.hdr.fp_flags = 0x0;\n+\treq->cn10k_req.jd.hdr.result =\n+\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->cn10k_req.result);\n \n \tif (job_type == ML_CN10K_JOB_TYPE_MODEL_START) {\n \t\tif (!model->glow.metadata.model.ocm_relocatable)\n-\t\t\treq->jd.hdr.sp_flags = ML_CN10K_SP_FLAGS_OCM_NONRELOCATABLE;\n+\t\t\treq->cn10k_req.jd.hdr.sp_flags = ML_CN10K_SP_FLAGS_OCM_NONRELOCATABLE;\n \t\telse\n-\t\t\treq->jd.hdr.sp_flags = 0x0;\n+\t\t\treq->cn10k_req.jd.hdr.sp_flags = 0x0;\n \n-\t\treq->jd.hdr.sp_flags |= ML_CN10K_SP_FLAGS_EXTENDED_LOAD_JD;\n-\t\treq->jd.model_start.extended_args =\n-\t\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->extended_args));\n-\t\treq->jd.model_start.model_dst_ddr_addr =\n+\t\treq->cn10k_req.jd.hdr.sp_flags |= ML_CN10K_SP_FLAGS_EXTENDED_LOAD_JD;\n+\t\treq->cn10k_req.jd.model_start.extended_args = PLT_U64_CAST(\n+\t\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->cn10k_req.extended_args));\n+\t\treq->cn10k_req.jd.model_start.model_dst_ddr_addr =\n \t\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, addr->init_run_addr));\n-\t\treq->jd.model_start.model_init_offset = 0x0;\n-\t\treq->jd.model_start.model_main_offset = metadata->init_model.file_size;\n-\t\treq->jd.model_start.model_finish_offset =\n+\t\treq->cn10k_req.jd.model_start.model_init_offset = 0x0;\n+\t\treq->cn10k_req.jd.model_start.model_main_offset = metadata->init_model.file_size;\n+\t\treq->cn10k_req.jd.model_start.model_finish_offset =\n \t\t\tmetadata->init_model.file_size + metadata->main_model.file_size;\n-\t\treq->jd.model_start.model_init_size = metadata->init_model.file_size;\n-\t\treq->jd.model_start.model_main_size = metadata->main_model.file_size;\n-\t\treq->jd.model_start.model_finish_size = metadata->finish_model.file_size;\n-\t\treq->jd.model_start.model_wb_offset = metadata->init_model.file_size +\n-\t\t\t\t\t\t      metadata->main_model.file_size +\n-\t\t\t\t\t\t      metadata->finish_model.file_size;\n-\t\treq->jd.model_start.num_layers = metadata->model.num_layers;\n-\t\treq->jd.model_start.num_gather_entries = 0;\n-\t\treq->jd.model_start.num_scatter_entries = 0;\n-\t\treq->jd.model_start.tilemask = 0; /* Updated after reserving pages */\n-\t\treq->jd.model_start.batch_size = model->batch_size;\n-\t\treq->jd.model_start.ocm_wb_base_address = 0; /* Updated after reserving pages */\n-\t\treq->jd.model_start.ocm_wb_range_start = metadata->model.ocm_wb_range_start;\n-\t\treq->jd.model_start.ocm_wb_range_end = metadata->model.ocm_wb_range_end;\n-\t\treq->jd.model_start.ddr_wb_base_address = PLT_U64_CAST(roc_ml_addr_ap2mlip(\n-\t\t\t&cn10k_mldev->roc,\n-\t\t\tPLT_PTR_ADD(addr->finish_load_addr, metadata->finish_model.file_size)));\n-\t\treq->jd.model_start.ddr_wb_range_start = metadata->model.ddr_wb_range_start;\n-\t\treq->jd.model_start.ddr_wb_range_end = metadata->model.ddr_wb_range_end;\n-\t\treq->jd.model_start.input.s.ddr_range_start = metadata->model.ddr_input_range_start;\n-\t\treq->jd.model_start.input.s.ddr_range_end = metadata->model.ddr_input_range_end;\n-\t\treq->jd.model_start.output.s.ddr_range_start =\n+\t\treq->cn10k_req.jd.model_start.model_init_size = metadata->init_model.file_size;\n+\t\treq->cn10k_req.jd.model_start.model_main_size = metadata->main_model.file_size;\n+\t\treq->cn10k_req.jd.model_start.model_finish_size = 
metadata->finish_model.file_size;\n+\t\treq->cn10k_req.jd.model_start.model_wb_offset = metadata->init_model.file_size +\n+\t\t\t\t\t\t\t\tmetadata->main_model.file_size +\n+\t\t\t\t\t\t\t\tmetadata->finish_model.file_size;\n+\t\treq->cn10k_req.jd.model_start.num_layers = metadata->model.num_layers;\n+\t\treq->cn10k_req.jd.model_start.num_gather_entries = 0;\n+\t\treq->cn10k_req.jd.model_start.num_scatter_entries = 0;\n+\t\treq->cn10k_req.jd.model_start.tilemask = 0; /* Updated after reserving pages */\n+\t\treq->cn10k_req.jd.model_start.batch_size = model->batch_size;\n+\t\treq->cn10k_req.jd.model_start.ocm_wb_base_address =\n+\t\t\t0; /* Updated after reserving pages */\n+\t\treq->cn10k_req.jd.model_start.ocm_wb_range_start =\n+\t\t\tmetadata->model.ocm_wb_range_start;\n+\t\treq->cn10k_req.jd.model_start.ocm_wb_range_end = metadata->model.ocm_wb_range_end;\n+\t\treq->cn10k_req.jd.model_start.ddr_wb_base_address =\n+\t\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(\n+\t\t\t\t&cn10k_mldev->roc, PLT_PTR_ADD(addr->finish_load_addr,\n+\t\t\t\t\t\t\t       metadata->finish_model.file_size)));\n+\t\treq->cn10k_req.jd.model_start.ddr_wb_range_start =\n+\t\t\tmetadata->model.ddr_wb_range_start;\n+\t\treq->cn10k_req.jd.model_start.ddr_wb_range_end = metadata->model.ddr_wb_range_end;\n+\t\treq->cn10k_req.jd.model_start.input.s.ddr_range_start =\n+\t\t\tmetadata->model.ddr_input_range_start;\n+\t\treq->cn10k_req.jd.model_start.input.s.ddr_range_end =\n+\t\t\tmetadata->model.ddr_input_range_end;\n+\t\treq->cn10k_req.jd.model_start.output.s.ddr_range_start =\n \t\t\tmetadata->model.ddr_output_range_start;\n-\t\treq->jd.model_start.output.s.ddr_range_end = metadata->model.ddr_output_range_end;\n+\t\treq->cn10k_req.jd.model_start.output.s.ddr_range_end =\n+\t\t\tmetadata->model.ddr_output_range_end;\n \n-\t\treq->extended_args.start.ddr_scratch_base_address = PLT_U64_CAST(\n+\t\treq->cn10k_req.extended_args.start.ddr_scratch_base_address = PLT_U64_CAST(\n \t\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, addr->scratch_base_addr));\n-\t\treq->extended_args.start.ddr_scratch_range_start =\n+\t\treq->cn10k_req.extended_args.start.ddr_scratch_range_start =\n \t\t\tmetadata->model.ddr_scratch_range_start;\n-\t\treq->extended_args.start.ddr_scratch_range_end =\n+\t\treq->cn10k_req.extended_args.start.ddr_scratch_range_end =\n \t\t\tmetadata->model.ddr_scratch_range_end;\n \t}\n }\n \n static __rte_always_inline void\n-cn10k_ml_prep_fp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cn10k_ml_req *req,\n+cn10k_ml_prep_fp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cnxk_ml_req *req,\n \t\t\t\tstruct rte_ml_op *op)\n {\n-\treq->jd.hdr.jce.w0.u64 = 0;\n-\treq->jd.hdr.jce.w1.u64 = req->compl_W1;\n-\treq->jd.hdr.model_id = op->model_id;\n-\treq->jd.hdr.job_type = ML_CN10K_JOB_TYPE_MODEL_RUN;\n-\treq->jd.hdr.fp_flags = ML_FLAGS_POLL_COMPL;\n-\treq->jd.hdr.sp_flags = 0x0;\n-\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);\n-\treq->jd.model_run.input_ddr_addr =\n+\treq->cn10k_req.jd.hdr.jce.w0.u64 = 0;\n+\treq->cn10k_req.jd.hdr.jce.w1.u64 = PLT_U64_CAST(req->status);\n+\treq->cn10k_req.jd.hdr.model_id = op->model_id;\n+\treq->cn10k_req.jd.hdr.job_type = ML_CN10K_JOB_TYPE_MODEL_RUN;\n+\treq->cn10k_req.jd.hdr.fp_flags = ML_FLAGS_POLL_COMPL;\n+\treq->cn10k_req.jd.hdr.sp_flags = 0x0;\n+\treq->cn10k_req.jd.hdr.result =\n+\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->cn10k_req.result);\n+\treq->cn10k_req.jd.model_run.input_ddr_addr =\n 
\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, op->input[0]->addr));\n-\treq->jd.model_run.output_ddr_addr =\n+\treq->cn10k_req.jd.model_run.output_ddr_addr =\n \t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, op->output[0]->addr));\n-\treq->jd.model_run.num_batches = op->nb_batches;\n+\treq->cn10k_req.jd.model_run.num_batches = op->nb_batches;\n }\n \n struct xstat_info {\n@@ -861,7 +870,7 @@ cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id)\n \top.input = &inp;\n \top.output = &out;\n \n-\tmemset(model->layer[0].glow.req, 0, sizeof(struct cn10k_ml_req));\n+\tmemset(model->layer[0].glow.req, 0, sizeof(struct cnxk_ml_req));\n \tret = cn10k_ml_inference_sync(dev, &op);\n \tplt_memzone_free(mz);\n \n@@ -904,7 +913,7 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n \tstruct cn10k_ml_ocm *ocm;\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n \tuint16_t model_id;\n \tuint32_t mz_size;\n \tuint16_t tile_id;\n@@ -1101,7 +1110,7 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n \tuint16_t model_id;\n \tuint16_t qp_id;\n \n@@ -1136,7 +1145,7 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)\n \tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {\n \t\tqp = dev->data->queue_pairs[qp_id];\n \t\tif (qp != NULL) {\n-\t\t\tif (cn10k_ml_qp_destroy(dev, qp) != 0)\n+\t\t\tif (cnxk_ml_qp_destroy(dev, qp) != 0)\n \t\t\t\tplt_err(\"Could not destroy queue pair %u\", qp_id);\n \t\t\tdev->data->queue_pairs[qp_id] = NULL;\n \t\t}\n@@ -1213,7 +1222,7 @@ cn10k_ml_dev_queue_pair_setup(struct rte_ml_dev *dev, uint16_t queue_pair_id,\n \t\t\t      const struct rte_ml_dev_qp_conf *qp_conf, int socket_id)\n {\n \tstruct rte_ml_dev_info dev_info;\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n \tuint32_t nb_desc;\n \n \tif (queue_pair_id >= dev->data->nb_queue_pairs) {\n@@ -1239,7 +1248,7 @@ cn10k_ml_dev_queue_pair_setup(struct rte_ml_dev *dev, uint16_t queue_pair_id,\n \t */\n \tnb_desc =\n \t\t(qp_conf->nb_desc == dev_info.max_desc) ? 
dev_info.max_desc : qp_conf->nb_desc + 1;\n-\tqp = cn10k_ml_qp_create(dev, queue_pair_id, nb_desc, socket_id);\n+\tqp = cnxk_ml_qp_create(dev, queue_pair_id, nb_desc, socket_id);\n \tif (qp == NULL) {\n \t\tplt_err(\"Could not create queue pair %u\", queue_pair_id);\n \t\treturn -ENOMEM;\n@@ -1252,7 +1261,7 @@ cn10k_ml_dev_queue_pair_setup(struct rte_ml_dev *dev, uint16_t queue_pair_id,\n static int\n cn10k_ml_dev_stats_get(struct rte_ml_dev *dev, struct rte_ml_dev_stats *stats)\n {\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n \tint qp_id;\n \n \tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {\n@@ -1269,7 +1278,7 @@ cn10k_ml_dev_stats_get(struct rte_ml_dev *dev, struct rte_ml_dev_stats *stats)\n static void\n cn10k_ml_dev_stats_reset(struct rte_ml_dev *dev)\n {\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n \tint qp_id;\n \n \tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {\n@@ -1485,20 +1494,22 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)\n \n \t/* Dump debug buffer */\n \tfor (core_id = 0; core_id <= 1; core_id++) {\n-\t\tbufsize = fw->req->jd.fw_load.debug.debug_buffer_size;\n+\t\tbufsize = fw->req->cn10k_req.jd.fw_load.debug.debug_buffer_size;\n \t\tif (core_id == 0) {\n \t\t\thead_loc =\n \t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n \t\t\ttail_loc =\n \t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n-\t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core0_debug_ptr);\n+\t\t\thead_ptr =\n+\t\t\t\tPLT_PTR_CAST(fw->req->cn10k_req.jd.fw_load.debug.core0_debug_ptr);\n \t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);\n \t\t} else {\n \t\t\thead_loc =\n \t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n \t\t\ttail_loc =\n \t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n-\t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core1_debug_ptr);\n+\t\t\thead_ptr =\n+\t\t\t\tPLT_PTR_CAST(fw->req->cn10k_req.jd.fw_load.debug.core1_debug_ptr);\n \t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);\n \t\t}\n \t\tif (head_loc < tail_loc) {\n@@ -1511,17 +1522,19 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)\n \n \t/* Dump exception info */\n \tfor (core_id = 0; core_id <= 1; core_id++) {\n-\t\tbufsize = fw->req->jd.fw_load.debug.exception_state_size;\n+\t\tbufsize = fw->req->cn10k_req.jd.fw_load.debug.exception_state_size;\n \t\tif ((core_id == 0) &&\n \t\t    (roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) != 0)) {\n-\t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core0_exception_buffer);\n+\t\t\thead_ptr = PLT_PTR_CAST(\n+\t\t\t\tfw->req->cn10k_req.jd.fw_load.debug.core0_exception_buffer);\n \t\t\tfprintf(fp, \"ML_SCRATCH_EXCEPTION_SP_C0 = 0x%016lx\",\n \t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0));\n \t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);\n \t\t\tfprintf(fp, \"%.*s\", bufsize, head_ptr);\n \t\t} else if ((core_id == 1) && (roc_ml_reg_read64(&cn10k_mldev->roc,\n \t\t\t\t\t\t\t\tML_SCRATCH_EXCEPTION_SP_C1) != 0)) {\n-\t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core1_exception_buffer);\n+\t\t\thead_ptr = PLT_PTR_CAST(\n+\t\t\t\tfw->req->cn10k_req.jd.fw_load.debug.core1_exception_buffer);\n \t\t\tfprintf(fp, \"ML_SCRATCH_EXCEPTION_SP_C1 = 0x%016lx\",\n \t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1));\n \t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, 
head_ptr);\n@@ -1538,14 +1551,14 @@ cn10k_ml_dev_selftest(struct rte_ml_dev *dev)\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tconst struct plt_memzone *mz;\n-\tstruct cn10k_ml_req *req;\n+\tstruct cnxk_ml_req *req;\n \tuint64_t timeout_cycle;\n \tbool timeout;\n \tint ret;\n \n \tcnxk_mldev = dev->data->dev_private;\n \tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tmz = plt_memzone_reserve_aligned(\"dev_selftest\", sizeof(struct cn10k_ml_req), 0,\n+\tmz = plt_memzone_reserve_aligned(\"dev_selftest\", sizeof(struct cnxk_ml_req), 0,\n \t\t\t\t\t ML_CN10K_ALIGN_SIZE);\n \tif (mz == NULL) {\n \t\tplt_err(\"Could not allocate reserved memzone\");\n@@ -1554,23 +1567,24 @@ cn10k_ml_dev_selftest(struct rte_ml_dev *dev)\n \treq = mz->addr;\n \n \t/* Prepare load completion structure */\n-\tmemset(&req->jd, 0, sizeof(struct cn10k_ml_jd));\n-\treq->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&req->status);\n-\treq->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST;\n-\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);\n-\treq->jd.fw_load.flags = cn10k_ml_fw_flags_get(&cn10k_mldev->fw);\n-\tplt_write64(ML_CNXK_POLL_JOB_START, &req->status);\n+\tmemset(&req->cn10k_req.jd, 0, sizeof(struct cn10k_ml_jd));\n+\treq->cn10k_req.jd.hdr.jce.w1.u64 = PLT_U64_CAST(&req->cn10k_req.status);\n+\treq->cn10k_req.jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST;\n+\treq->cn10k_req.jd.hdr.result =\n+\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->cn10k_req.result);\n+\treq->cn10k_req.jd.fw_load.flags = cn10k_ml_fw_flags_get(&cn10k_mldev->fw);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &req->cn10k_req.status);\n \tplt_wmb();\n \n \t/* Enqueue firmware selftest request through scratch registers */\n \ttimeout = true;\n \ttimeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n-\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);\n+\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->cn10k_req.jd);\n \n \tplt_rmb();\n \tdo {\n \t\tif (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&\n-\t\t    (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH)) {\n+\t\t    (plt_read64(&req->cn10k_req.status) == ML_CNXK_POLL_JOB_FINISH)) {\n \t\t\ttimeout = false;\n \t\t\tbreak;\n \t\t}\n@@ -1581,7 +1595,7 @@ cn10k_ml_dev_selftest(struct rte_ml_dev *dev)\n \tif (timeout) {\n \t\tret = -ETIME;\n \t} else {\n-\t\tif (req->result.error_code.u64 != 0)\n+\t\tif (req->cn10k_req.result.error_code != 0)\n \t\t\tret = -1;\n \t}\n \n@@ -1654,7 +1668,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \n \tmz_size = PLT_ALIGN_CEIL(sizeof(struct cnxk_ml_model), ML_CN10K_ALIGN_SIZE) +\n \t\t  2 * model_data_size + model_scratch_size + model_info_size +\n-\t\t  PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_req), ML_CN10K_ALIGN_SIZE) +\n+\t\t  PLT_ALIGN_CEIL(sizeof(struct cnxk_ml_req), ML_CN10K_ALIGN_SIZE) +\n \t\t  model_stats_size;\n \n \t/* Allocate memzone for model object and model data */\n@@ -1726,7 +1740,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \t/* Reset burst and sync stats */\n \tmodel->layer[0].glow.burst_stats =\n \t\tPLT_PTR_ADD(model->layer[0].glow.req,\n-\t\t\t    PLT_ALIGN_CEIL(sizeof(struct cn10k_ml_req), ML_CN10K_ALIGN_SIZE));\n+\t\t\t    PLT_ALIGN_CEIL(sizeof(struct cnxk_ml_req), ML_CN10K_ALIGN_SIZE));\n \tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs + 1; qp_id++) {\n \t\tmodel->layer[0].glow.burst_stats[qp_id].hw_latency_tot = 0;\n 
\t\tmodel->layer[0].glow.burst_stats[qp_id].hw_latency_min = UINT64_MAX;\n@@ -1790,7 +1804,7 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n \tstruct cn10k_ml_ocm *ocm;\n-\tstruct cn10k_ml_req *req;\n+\tstruct cnxk_ml_req *req;\n \n \tbool job_enqueued;\n \tbool job_dequeued;\n@@ -1815,10 +1829,10 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \t/* Prepare JD */\n \treq = model->layer[0].glow.req;\n \tcn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_START);\n-\treq->result.error_code.u64 = 0x0;\n-\treq->result.user_ptr = NULL;\n+\treq->cn10k_req.result.error_code = 0x0;\n+\treq->cn10k_req.result.user_ptr = NULL;\n \n-\tplt_write64(ML_CNXK_POLL_JOB_START, &req->status);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &req->cn10k_req.status);\n \tplt_wmb();\n \n \tnum_tiles = model->layer[0].glow.metadata.model.tile_end -\n@@ -1878,8 +1892,8 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \n \t/* Update JD */\n \tcn10k_ml_ocm_tilecount(model->layer[0].glow.ocm_map.tilemask, &tile_start, &tile_end);\n-\treq->jd.model_start.tilemask = GENMASK_ULL(tile_end, tile_start);\n-\treq->jd.model_start.ocm_wb_base_address =\n+\treq->cn10k_req.jd.model_start.tilemask = GENMASK_ULL(tile_end, tile_start);\n+\treq->cn10k_req.jd.model_start.ocm_wb_base_address =\n \t\tmodel->layer[0].glow.ocm_map.wb_page_start * ocm->page_size;\n \n \tjob_enqueued = false;\n@@ -1887,19 +1901,21 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \tdo {\n \t\tif (!job_enqueued) {\n \t\t\treq->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n-\t\t\tjob_enqueued = roc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);\n+\t\t\tjob_enqueued =\n+\t\t\t\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->cn10k_req.jd);\n \t\t}\n \n \t\tif (job_enqueued && !job_dequeued)\n-\t\t\tjob_dequeued = roc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->jd);\n+\t\t\tjob_dequeued =\n+\t\t\t\troc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->cn10k_req.jd);\n \n \t\tif (job_dequeued)\n \t\t\tbreak;\n \t} while (plt_tsc_cycles() < req->timeout);\n \n \tif (job_dequeued) {\n-\t\tif (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH) {\n-\t\t\tif (req->result.error_code.u64 == 0)\n+\t\tif (plt_read64(&req->cn10k_req.status) == ML_CNXK_POLL_JOB_FINISH) {\n+\t\t\tif (req->cn10k_req.result.error_code == 0)\n \t\t\t\tret = 0;\n \t\t\telse\n \t\t\t\tret = -1;\n@@ -1952,7 +1968,7 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n \tstruct cn10k_ml_ocm *ocm;\n-\tstruct cn10k_ml_req *req;\n+\tstruct cnxk_ml_req *req;\n \n \tbool job_enqueued;\n \tbool job_dequeued;\n@@ -1972,10 +1988,10 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \t/* Prepare JD */\n \treq = model->layer[0].glow.req;\n \tcn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_STOP);\n-\treq->result.error_code.u64 = 0x0;\n-\treq->result.user_ptr = NULL;\n+\treq->cn10k_req.result.error_code = 0x0;\n+\treq->cn10k_req.result.user_ptr = NULL;\n \n-\tplt_write64(ML_CNXK_POLL_JOB_START, &req->status);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &req->cn10k_req.status);\n \tplt_wmb();\n \n \tlocked = false;\n@@ -2015,19 +2031,21 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \tdo {\n \t\tif (!job_enqueued) {\n \t\t\treq->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT 
* plt_tsc_hz();\n-\t\t\tjob_enqueued = roc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);\n+\t\t\tjob_enqueued =\n+\t\t\t\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->cn10k_req.jd);\n \t\t}\n \n \t\tif (job_enqueued && !job_dequeued)\n-\t\t\tjob_dequeued = roc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->jd);\n+\t\t\tjob_dequeued =\n+\t\t\t\troc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->cn10k_req.jd);\n \n \t\tif (job_dequeued)\n \t\t\tbreak;\n \t} while (plt_tsc_cycles() < req->timeout);\n \n \tif (job_dequeued) {\n-\t\tif (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH) {\n-\t\t\tif (req->result.error_code.u64 == 0x0)\n+\t\tif (plt_read64(&req->cn10k_req.status) == ML_CNXK_POLL_JOB_FINISH) {\n+\t\t\tif (req->cn10k_req.result.error_code == 0x0)\n \t\t\t\tret = 0;\n \t\t\telse\n \t\t\t\tret = -1;\n@@ -2287,18 +2305,23 @@ queue_free_count(uint64_t head, uint64_t tail, uint64_t nb_desc)\n }\n \n static __rte_always_inline void\n-cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result *result,\n-\t\t       struct rte_ml_op *op)\n+cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cnxk_ml_req *req)\n {\n+\tunion cn10k_ml_error_code *error_code;\n \tstruct cn10k_ml_layer_stats *stats;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n+\tstruct cn10k_ml_result *result;\n \tstruct cnxk_ml_model *model;\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_qp *qp;\n+\tstruct rte_ml_op *op;\n \tuint64_t hw_latency;\n \tuint64_t fw_latency;\n \n-\tif (likely(result->error_code.u64 == 0)) {\n+\tresult = &req->cn10k_req.result;\n+\top = req->op;\n+\n+\tif (likely(result->error_code == 0)) {\n \t\tmodel = dev->data->models[op->model_id];\n \t\tif (likely(qp_id >= 0)) {\n \t\t\tqp = dev->data->queue_pairs[qp_id];\n@@ -2329,7 +2352,7 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result\n \t\tstats->fw_latency_max = PLT_MAX(stats->fw_latency_max, fw_latency);\n \t\tstats->dequeued_count++;\n \n-\t\top->impl_opaque = result->error_code.u64;\n+\t\top->impl_opaque = result->error_code;\n \t\top->status = RTE_ML_OP_STATUS_SUCCESS;\n \t} else {\n \t\tif (likely(qp_id >= 0)) {\n@@ -2338,7 +2361,8 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result\n \t\t}\n \n \t\t/* Handle driver error */\n-\t\tif (result->error_code.s.etype == ML_ETYPE_DRIVER) {\n+\t\terror_code = (union cn10k_ml_error_code *)&result->error_code;\n+\t\tif (error_code->s.etype == ML_ETYPE_DRIVER) {\n \t\t\tcnxk_mldev = dev->data->dev_private;\n \t\t\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n@@ -2346,15 +2370,15 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result\n \t\t\tif ((roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) !=\n \t\t\t     0) ||\n \t\t\t    (roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1) != 0))\n-\t\t\t\tresult->error_code.s.stype = ML_DRIVER_ERR_EXCEPTION;\n+\t\t\t\terror_code->s.stype = ML_DRIVER_ERR_EXCEPTION;\n \t\t\telse if ((roc_ml_reg_read64(&cn10k_mldev->roc, ML_CORE_INT_LO) != 0) ||\n \t\t\t\t (roc_ml_reg_read64(&cn10k_mldev->roc, ML_CORE_INT_HI) != 0))\n-\t\t\t\tresult->error_code.s.stype = ML_DRIVER_ERR_FW_ERROR;\n+\t\t\t\terror_code->s.stype = ML_DRIVER_ERR_FW_ERROR;\n \t\t\telse\n-\t\t\t\tresult->error_code.s.stype = ML_DRIVER_ERR_UNKNOWN;\n+\t\t\t\terror_code->s.stype = ML_DRIVER_ERR_UNKNOWN;\n \t\t}\n \n-\t\top->impl_opaque = result->error_code.u64;\n+\t\top->impl_opaque = result->error_code;\n \t\top->status = 
RTE_ML_OP_STATUS_ERROR;\n \t}\n \n@@ -2365,11 +2389,12 @@ __rte_hot uint16_t\n cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,\n \t\t       uint16_t nb_ops)\n {\n+\tunion cn10k_ml_error_code *error_code;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n-\tstruct cn10k_ml_queue *queue;\n-\tstruct cn10k_ml_req *req;\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_queue *queue;\n+\tstruct cnxk_ml_req *req;\n+\tstruct cnxk_ml_qp *qp;\n \tstruct rte_ml_op *op;\n \n \tuint16_t count;\n@@ -2395,12 +2420,13 @@ cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \tcn10k_mldev->set_poll_addr(req);\n \tcn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);\n \n-\tmemset(&req->result, 0, sizeof(struct cn10k_ml_result));\n-\treq->result.error_code.s.etype = ML_ETYPE_UNKNOWN;\n-\treq->result.user_ptr = op->user_ptr;\n+\tmemset(&req->cn10k_req.result, 0, sizeof(struct cn10k_ml_result));\n+\terror_code = (union cn10k_ml_error_code *)&req->cn10k_req.result.error_code;\n+\terror_code->s.etype = ML_ETYPE_UNKNOWN;\n+\treq->cn10k_req.result.user_ptr = op->user_ptr;\n \n \tcn10k_mldev->set_poll_ptr(req);\n-\tenqueued = cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->jcmd);\n+\tenqueued = cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->cn10k_req.jcmd);\n \tif (unlikely(!enqueued))\n \t\tgoto jcmdq_full;\n \n@@ -2424,11 +2450,12 @@ __rte_hot uint16_t\n cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,\n \t\t       uint16_t nb_ops)\n {\n+\tunion cn10k_ml_error_code *error_code;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n-\tstruct cn10k_ml_queue *queue;\n-\tstruct cn10k_ml_req *req;\n-\tstruct cn10k_ml_qp *qp;\n+\tstruct cnxk_ml_queue *queue;\n+\tstruct cnxk_ml_req *req;\n+\tstruct cnxk_ml_qp *qp;\n \n \tuint64_t status;\n \tuint16_t count;\n@@ -2450,13 +2477,15 @@ cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \treq = &queue->reqs[tail];\n \tstatus = cn10k_mldev->get_poll_ptr(req);\n \tif (unlikely(status != ML_CNXK_POLL_JOB_FINISH)) {\n-\t\tif (plt_tsc_cycles() < req->timeout)\n+\t\tif (plt_tsc_cycles() < req->timeout) {\n \t\t\tgoto empty_or_active;\n-\t\telse /* Timeout, set indication of driver error */\n-\t\t\treq->result.error_code.s.etype = ML_ETYPE_DRIVER;\n+\t\t} else { /* Timeout, set indication of driver error */\n+\t\t\terror_code = (union cn10k_ml_error_code *)&req->cn10k_req.result.error_code;\n+\t\t\terror_code->s.etype = ML_ETYPE_DRIVER;\n+\t\t}\n \t}\n \n-\tcn10k_ml_result_update(dev, qp_id, &req->result, req->op);\n+\tcn10k_ml_result_update(dev, qp_id, req);\n \tops[count] = req->op;\n \n \tqueue_index_advance(&tail, qp->nb_desc);\n@@ -2507,10 +2536,11 @@ cn10k_ml_op_error_get(struct rte_ml_dev *dev, struct rte_ml_op *op, struct rte_m\n __rte_hot int\n cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)\n {\n+\tunion cn10k_ml_error_code *error_code;\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n-\tstruct cn10k_ml_req *req;\n+\tstruct cnxk_ml_req *req;\n \tbool timeout;\n \tint ret = 0;\n \n@@ -2522,17 +2552,18 @@ cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)\n \tcn10k_ml_set_poll_addr(req);\n \tcn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);\n \n-\tmemset(&req->result, 0, sizeof(struct cn10k_ml_result));\n-\treq->result.error_code.s.etype = 
ML_ETYPE_UNKNOWN;\n-\treq->result.user_ptr = op->user_ptr;\n+\tmemset(&req->cn10k_req.result, 0, sizeof(struct cn10k_ml_result));\n+\terror_code = (union cn10k_ml_error_code *)&req->cn10k_req.result.error_code;\n+\terror_code->s.etype = ML_ETYPE_UNKNOWN;\n+\treq->cn10k_req.result.user_ptr = op->user_ptr;\n \n \tcn10k_mldev->set_poll_ptr(req);\n-\treq->jcmd.w1.s.jobptr = PLT_U64_CAST(&req->jd);\n+\treq->cn10k_req.jcmd.w1.s.jobptr = PLT_U64_CAST(&req->cn10k_req.jd);\n \n \ttimeout = true;\n \treq->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n \tdo {\n-\t\tif (cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->jcmd)) {\n+\t\tif (cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->cn10k_req.jcmd)) {\n \t\t\treq->op = op;\n \t\t\ttimeout = false;\n \t\t\tbreak;\n@@ -2555,7 +2586,7 @@ cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)\n \tif (timeout)\n \t\tret = -ETIME;\n \telse\n-\t\tcn10k_ml_result_update(dev, -1, &req->result, req->op);\n+\t\tcn10k_ml_result_update(dev, -1, req);\n \n error_enqueue:\n \treturn ret;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.h b/drivers/ml/cnxk/cn10k_ml_ops.h\nindex 005b093e45..fd5992e192 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.h\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.h\n@@ -10,63 +10,279 @@\n \n #include <roc_api.h>\n \n-#include \"cn10k_ml_dev.h\"\n+/* Firmware version string length */\n+#define MLDEV_FIRMWARE_VERSION_LENGTH 32\n \n-/* Request structure */\n-struct cn10k_ml_req {\n-\t/* Job descriptor */\n-\tstruct cn10k_ml_jd jd;\n+/* Job types */\n+enum cn10k_ml_job_type {\n+\tML_CN10K_JOB_TYPE_MODEL_RUN = 0,\n+\tML_CN10K_JOB_TYPE_MODEL_STOP,\n+\tML_CN10K_JOB_TYPE_MODEL_START,\n+\tML_CN10K_JOB_TYPE_FIRMWARE_LOAD,\n+\tML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST,\n+};\n \n-\t/* Job descriptor extra arguments */\n-\tunion cn10k_ml_jd_extended_args extended_args;\n+/* Firmware stats */\n+struct cn10k_ml_stats {\n+\t/* Firmware start cycle */\n+\tuint64_t fw_start;\n \n-\t/* Job result */\n-\tstruct cn10k_ml_result result;\n+\t/* Firmware end cycle */\n+\tuint64_t fw_end;\n \n-\t/* Status field for poll mode requests */\n-\tvolatile uint64_t status;\n+\t/* Hardware start cycle */\n+\tuint64_t hw_start;\n \n-\t/* Job command */\n-\tstruct ml_job_cmd_s jcmd;\n+\t/* Hardware end cycle */\n+\tuint64_t hw_end;\n+};\n+\n+/* Result structure */\n+struct cn10k_ml_result {\n+\t/* Job error code */\n+\tuint64_t error_code;\n+\n+\t/* Stats */\n+\tstruct cn10k_ml_stats stats;\n+\n+\t/* User context pointer */\n+\tvoid *user_ptr;\n+};\n+\n+/* Firmware capability structure */\n+union cn10k_ml_fw_cap {\n+\tuint64_t u64;\n+\n+\tstruct {\n+\t\t/* CMPC completion support */\n+\t\tuint64_t cmpc_completions : 1;\n+\n+\t\t/* Poll mode completion support */\n+\t\tuint64_t poll_completions : 1;\n+\n+\t\t/* SSO completion support */\n+\t\tuint64_t sso_completions : 1;\n+\n+\t\t/* Support for model side loading */\n+\t\tuint64_t side_load_model : 1;\n \n-\t/* Job completion W1 */\n-\tuint64_t compl_W1;\n+\t\t/* Batch execution */\n+\t\tuint64_t batch_run : 1;\n \n-\t/* Timeout cycle */\n-\tuint64_t timeout;\n+\t\t/* Max number of models to be loaded in parallel */\n+\t\tuint64_t max_models : 8;\n \n-\t/* Op */\n-\tstruct rte_ml_op *op;\n-} __rte_aligned(ROC_ALIGN);\n+\t\t/* Firmware statistics */\n+\t\tuint64_t fw_stats : 1;\n \n-/* Request queue */\n-struct cn10k_ml_queue {\n-\t/* Array of requests */\n-\tstruct cn10k_ml_req *reqs;\n+\t\t/* Hardware statistics */\n+\t\tuint64_t hw_stats : 1;\n \n-\t/* Head of the queue, used for enqueue 
*/\n-\tuint64_t head;\n+\t\t/* Max number of batches */\n+\t\tuint64_t max_num_batches : 16;\n \n-\t/* Tail of the queue, used for dequeue */\n-\tuint64_t tail;\n+\t\tuint64_t rsvd : 33;\n+\t} s;\n+};\n+\n+/* Firmware debug info structure */\n+struct cn10k_ml_fw_debug {\n+\t/* ACC core 0 debug buffer */\n+\tuint64_t core0_debug_ptr;\n+\n+\t/* ACC core 1 debug buffer */\n+\tuint64_t core1_debug_ptr;\n+\n+\t/* ACC core 0 exception state buffer */\n+\tuint64_t core0_exception_buffer;\n+\n+\t/* ACC core 1 exception state buffer */\n+\tuint64_t core1_exception_buffer;\n+\n+\t/* Debug buffer size per core */\n+\tuint32_t debug_buffer_size;\n \n-\t/* Wait cycles before timeout */\n-\tuint64_t wait_cycles;\n+\t/* Exception state dump size */\n+\tuint32_t exception_state_size;\n };\n \n-/* Queue-pair structure */\n-struct cn10k_ml_qp {\n-\t/* ID */\n-\tuint32_t id;\n+/* Job descriptor header (32 bytes) */\n+struct cn10k_ml_jd_header {\n+\t/* Job completion structure */\n+\tstruct ml_jce_s jce;\n+\n+\t/* Model ID */\n+\tuint64_t model_id : 8;\n+\n+\t/* Job type */\n+\tuint64_t job_type : 8;\n+\n+\t/* Flags for fast-path jobs */\n+\tuint64_t fp_flags : 16;\n+\n+\t/* Flags for slow-path jobs */\n+\tuint64_t sp_flags : 16;\n+\tuint64_t rsvd : 16;\n+\n+\t/* Job result pointer */\n+\tuint64_t *result;\n+};\n+\n+/* Extra arguments for job descriptor */\n+union cn10k_ml_jd_extended_args {\n+\tstruct cn10k_ml_jd_extended_args_section_start {\n+\t\t/* DDR Scratch base address */\n+\t\tuint64_t ddr_scratch_base_address;\n+\n+\t\t/* DDR Scratch range start */\n+\t\tuint64_t ddr_scratch_range_start;\n+\n+\t\t/* DDR Scratch range end */\n+\t\tuint64_t ddr_scratch_range_end;\n+\n+\t\tuint8_t rsvd[104];\n+\t} start;\n+};\n+\n+/* Job descriptor structure */\n+struct cn10k_ml_jd {\n+\t/* Job descriptor header (32 bytes) */\n+\tstruct cn10k_ml_jd_header hdr;\n+\n+\tunion {\n+\t\tstruct cn10k_ml_jd_section_fw_load {\n+\t\t\t/* Firmware capability structure (8 bytes) */\n+\t\t\tunion cn10k_ml_fw_cap cap;\n+\n+\t\t\t/* Firmware version (32 bytes) */\n+\t\t\tuint8_t version[MLDEV_FIRMWARE_VERSION_LENGTH];\n+\n+\t\t\t/* Debug capability structure (40 bytes) */\n+\t\t\tstruct cn10k_ml_fw_debug debug;\n \n-\t/* Number of descriptors */\n-\tuint64_t nb_desc;\n+\t\t\t/* Flags to control error handling */\n+\t\t\tuint64_t flags;\n \n-\t/* Request queue */\n-\tstruct cn10k_ml_queue queue;\n+\t\t\tuint8_t rsvd[8];\n+\t\t} fw_load;\n \n-\t/* Statistics per queue-pair */\n-\tstruct rte_ml_dev_stats stats;\n+\t\tstruct cn10k_ml_jd_section_model_start {\n+\t\t\t/* Extended arguments */\n+\t\t\tuint64_t extended_args;\n+\n+\t\t\t/* Destination model start address in DDR relative to ML_MLR_BASE */\n+\t\t\tuint64_t model_dst_ddr_addr;\n+\n+\t\t\t/* Offset to model init section in the model */\n+\t\t\tuint64_t model_init_offset : 32;\n+\n+\t\t\t/* Size of init section in the model */\n+\t\t\tuint64_t model_init_size : 32;\n+\n+\t\t\t/* Offset to model main section in the model */\n+\t\t\tuint64_t model_main_offset : 32;\n+\n+\t\t\t/* Size of main section in the model */\n+\t\t\tuint64_t model_main_size : 32;\n+\n+\t\t\t/* Offset to model finish section in the model */\n+\t\t\tuint64_t model_finish_offset : 32;\n+\n+\t\t\t/* Size of finish section in the model */\n+\t\t\tuint64_t model_finish_size : 32;\n+\n+\t\t\t/* Offset to WB in model bin */\n+\t\t\tuint64_t model_wb_offset : 32;\n+\n+\t\t\t/* Number of model layers */\n+\t\t\tuint64_t num_layers : 8;\n+\n+\t\t\t/* Number of gather entries, 0 means linear input mode (= no gather) 
*/\n+\t\t\tuint64_t num_gather_entries : 8;\n+\n+\t\t\t/* Number of scatter entries 0 means linear input mode (= no scatter) */\n+\t\t\tuint64_t num_scatter_entries : 8;\n+\n+\t\t\t/* Tile mask to load model */\n+\t\t\tuint64_t tilemask : 8;\n+\n+\t\t\t/* Batch size of model  */\n+\t\t\tuint64_t batch_size : 32;\n+\n+\t\t\t/* OCM WB base address */\n+\t\t\tuint64_t ocm_wb_base_address : 32;\n+\n+\t\t\t/* OCM WB range start */\n+\t\t\tuint64_t ocm_wb_range_start : 32;\n+\n+\t\t\t/* OCM WB range End */\n+\t\t\tuint64_t ocm_wb_range_end : 32;\n+\n+\t\t\t/* DDR WB address */\n+\t\t\tuint64_t ddr_wb_base_address;\n+\n+\t\t\t/* DDR WB range start */\n+\t\t\tuint64_t ddr_wb_range_start : 32;\n+\n+\t\t\t/* DDR WB range end */\n+\t\t\tuint64_t ddr_wb_range_end : 32;\n+\n+\t\t\tunion {\n+\t\t\t\t/* Points to gather list if num_gather_entries > 0 */\n+\t\t\t\tvoid *gather_list;\n+\t\t\t\tstruct {\n+\t\t\t\t\t/* Linear input mode */\n+\t\t\t\t\tuint64_t ddr_range_start : 32;\n+\t\t\t\t\tuint64_t ddr_range_end : 32;\n+\t\t\t\t} s;\n+\t\t\t} input;\n+\n+\t\t\tunion {\n+\t\t\t\t/* Points to scatter list if num_scatter_entries > 0 */\n+\t\t\t\tvoid *scatter_list;\n+\t\t\t\tstruct {\n+\t\t\t\t\t/* Linear output mode */\n+\t\t\t\t\tuint64_t ddr_range_start : 32;\n+\t\t\t\t\tuint64_t ddr_range_end : 32;\n+\t\t\t\t} s;\n+\t\t\t} output;\n+\t\t} model_start;\n+\n+\t\tstruct cn10k_ml_jd_section_model_stop {\n+\t\t\tuint8_t rsvd[96];\n+\t\t} model_stop;\n+\n+\t\tstruct cn10k_ml_jd_section_model_run {\n+\t\t\t/* Address of the input for the run relative to ML_MLR_BASE */\n+\t\t\tuint64_t input_ddr_addr;\n+\n+\t\t\t/* Address of the output for the run relative to ML_MLR_BASE */\n+\t\t\tuint64_t output_ddr_addr;\n+\n+\t\t\t/* Number of batches to run in variable batch processing */\n+\t\t\tuint16_t num_batches;\n+\n+\t\t\tuint8_t rsvd[78];\n+\t\t} model_run;\n+\t};\n+} __plt_aligned(ROC_ALIGN);\n+\n+/* CN10K specific request */\n+struct cn10k_ml_req {\n+\t/* Job descriptor */\n+\tstruct cn10k_ml_jd jd;\n+\n+\t/* Job descriptor extra arguments */\n+\tunion cn10k_ml_jd_extended_args extended_args;\n+\n+\t/* Status field for poll mode requests */\n+\tvolatile uint64_t status;\n+\n+\t/* Job command */\n+\tstruct ml_job_cmd_s jcmd;\n+\n+\t/* Result */\n+\tstruct cn10k_ml_result result;\n };\n \n /* Device ops */\ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.c b/drivers/ml/cnxk/cnxk_ml_ops.c\nnew file mode 100644\nindex 0000000000..f1872dcf7c\n--- /dev/null\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.c\n@@ -0,0 +1,7 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 Marvell.\n+ */\n+\n+#include <rte_mldev.h>\n+\n+#include \"cnxk_ml_ops.h\"\ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.h b/drivers/ml/cnxk/cnxk_ml_ops.h\nnew file mode 100644\nindex 0000000000..b953fb0f5f\n--- /dev/null\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.h\n@@ -0,0 +1,63 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 Marvell.\n+ */\n+\n+#ifndef _CNXK_ML_OPS_H_\n+#define _CNXK_ML_OPS_H_\n+\n+#include <rte_mldev.h>\n+#include <rte_mldev_core.h>\n+\n+#include <roc_api.h>\n+\n+#include \"cn10k_ml_ops.h\"\n+\n+/* Request structure */\n+struct cnxk_ml_req {\n+\t/* Device specific request */\n+\tunion {\n+\t\t/* CN10K */\n+\t\tstruct cn10k_ml_req cn10k_req;\n+\t};\n+\n+\t/* Address of status field */\n+\tvolatile uint64_t *status;\n+\n+\t/* Timeout cycle */\n+\tuint64_t timeout;\n+\n+\t/* Op */\n+\tstruct rte_ml_op *op;\n+} __rte_aligned(ROC_ALIGN);\n+\n+/* Request queue */\n+struct cnxk_ml_queue {\n+\t/* Array of requests */\n+\tstruct 
cnxk_ml_req *reqs;\n+\n+\t/* Head of the queue, used for enqueue */\n+\tuint64_t head;\n+\n+\t/* Tail of the queue, used for dequeue */\n+\tuint64_t tail;\n+\n+\t/* Wait cycles before timeout */\n+\tuint64_t wait_cycles;\n+};\n+\n+/* Queue-pair structure */\n+struct cnxk_ml_qp {\n+\t/* ID */\n+\tuint32_t id;\n+\n+\t/* Number of descriptors */\n+\tuint64_t nb_desc;\n+\n+\t/* Request queue */\n+\tstruct cnxk_ml_queue queue;\n+\n+\t/* Statistics per queue-pair */\n+\tstruct rte_ml_dev_stats stats;\n+};\n+\n+#endif /* _CNXK_ML_OPS_H_ */\ndiff --git a/drivers/ml/cnxk/meson.build b/drivers/ml/cnxk/meson.build\nindex a70956cceb..d652543912 100644\n--- a/drivers/ml/cnxk/meson.build\n+++ b/drivers/ml/cnxk/meson.build\n@@ -14,6 +14,7 @@ sources = files(\n         'cn10k_ml_ocm.c',\n         'cnxk_ml_dev.c',\n         'cnxk_ml_model.c',\n+        'cnxk_ml_ops.c',\n )\n \n deps += ['mldev', 'common_cnxk', 'kvargs', 'hash']\n",
    "prefixes": [
        "v9",
        "04/34"
    ]
}