get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all of its writable fields.
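A minimal read sketch in Python (the requests library is an assumed client choice; any HTTP client works). The "?format=api" suffix seen in the request below only selects the browsable HTML renderer, so the same URL returns plain JSON when asked for it via the Accept header:

import requests

# Fetch the patch record shown below and read a few fields from the JSON body.
resp = requests.get(
    "https://patches.dpdk.org/api/patches/131678/",
    headers={"Accept": "application/json"},
    timeout=30,
)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])               # "[v2,03/34] ml/cnxk: add generic cnxk device structure"
print(patch["state"])              # "changes-requested"
print(patch["series"][0]["name"])  # parent series of this patch

PUT and PATCH require an authenticated account with update rights on the project. A sketch of a partial update, assuming a Patchwork API token (the token value is hypothetical, and whether the write succeeds depends on the instance's permissions):

import requests

API_TOKEN = "0123456789abcdef"  # hypothetical; tokens are issued per user account

# Mark the patch accepted and archive it; fields not listed are left unchanged.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/131678/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted", "archived": True},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])  # "accepted"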

GET /api/patches/131678/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131678,
    "url": "http://patches.dpdk.org/api/patches/131678/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230920072528.14185-4-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230920072528.14185-4-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230920072528.14185-4-syalavarthi@marvell.com",
    "date": "2023-09-20T07:24:54",
    "name": "[v2,03/34] ml/cnxk: add generic cnxk device structure",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "ff34842b43f5c041b6b8fb1a59aca1dfb35d6aa7",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230920072528.14185-4-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 29567,
            "url": "http://patches.dpdk.org/api/series/29567/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29567",
            "date": "2023-09-20T07:24:51",
            "name": "Implemenation of revised ml/cnxk driver",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/29567/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/131678/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/131678/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 765BF425E4;\n\tWed, 20 Sep 2023 09:26:02 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 31AF840EE5;\n\tWed, 20 Sep 2023 09:25:41 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id DA44940E72\n for <dev@dpdk.org>; Wed, 20 Sep 2023 09:25:36 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 38K7JW0r008355 for <dev@dpdk.org>; Wed, 20 Sep 2023 00:25:36 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3t7htasykw-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Wed, 20 Sep 2023 00:25:35 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Wed, 20 Sep 2023 00:25:33 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Wed, 20 Sep 2023 00:25:33 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id EA3A85B692B;\n Wed, 20 Sep 2023 00:25:32 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=Ce0FsptH5L7Ah0h40Dm2/Kc8yHbxwxuZutoV5gbkT7c=;\n b=FzVOQzsL2cbYw6x4c5iV+xGe/hfH+NppUyS63WHT9h0JCSt2Wj/2qTUy+oSspNDAk2YH\n bZCNunrmFQAn9Wr9hkz3chh8o1ErVkZzhjoayob4dNsVnY1mPmAuKuTLxx6X9S06Aq3o\n RWfVl93IAGXdM1NNffmdFdjLeya6CgmsZYeWXABWkMYG2SB4lHafQ4H1lP4Ag/LU9TtX\n UYKEMx/iul0IxR0ur/mh677siihyV40JNGWwm74xUXquf7Gdu/gYc457/nXnVi140d8i\n zzXAYYxKX/DYwp2UAzCWvoRIW5iynrO4VYD0KiWMdtCct5MSVBFkpdUPe145agTuAMe7 GA==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v2 03/34] ml/cnxk: add generic cnxk device structure",
        "Date": "Wed, 20 Sep 2023 00:24:54 -0700",
        "Message-ID": "<20230920072528.14185-4-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.41.0",
        "In-Reply-To": "<20230920072528.14185-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20230920072528.14185-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "TDe9mxfF9U2wtoYNt98WGMezU8gLMFcP",
        "X-Proofpoint-GUID": "TDe9mxfF9U2wtoYNt98WGMezU8gLMFcP",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.601,FMLib:17.11.176.26\n definitions=2023-09-20_02,2023-09-19_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Introduce generic cnxk device structure. This structure is\na top level device structure for the driver, which would\nencapsulate the target / platform specific device structure.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_dev.c   | 315 ++++++++++----------\n drivers/ml/cnxk/cn10k_ml_dev.h   |  47 +--\n drivers/ml/cnxk/cn10k_ml_model.c |  14 +-\n drivers/ml/cnxk/cn10k_ml_model.h |   8 +-\n drivers/ml/cnxk/cn10k_ml_ocm.c   |  56 ++--\n drivers/ml/cnxk/cn10k_ml_ops.c   | 494 +++++++++++++++++--------------\n drivers/ml/cnxk/cnxk_ml_dev.c    |  11 +\n drivers/ml/cnxk/cnxk_ml_dev.h    |  58 ++++\n drivers/ml/cnxk/meson.build      |   2 +\n 9 files changed, 562 insertions(+), 443 deletions(-)\n create mode 100644 drivers/ml/cnxk/cnxk_ml_dev.c\n create mode 100644 drivers/ml/cnxk/cnxk_ml_dev.h",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_dev.c b/drivers/ml/cnxk/cn10k_ml_dev.c\nindex b7e6ed9a00..367fb7014c 100644\n--- a/drivers/ml/cnxk/cn10k_ml_dev.c\n+++ b/drivers/ml/cnxk/cn10k_ml_dev.c\n@@ -15,13 +15,15 @@\n #include <rte_mldev_pmd.h>\n #include <rte_pci.h>\n \n-#include <roc_api.h>\n-\n #include <eal_firmware.h>\n \n+#include <roc_api.h>\n+\n #include \"cn10k_ml_dev.h\"\n #include \"cn10k_ml_ops.h\"\n \n+#include \"cnxk_ml_dev.h\"\n+\n #define CN10K_ML_FW_PATH\t\t\"fw_path\"\n #define CN10K_ML_FW_ENABLE_DPE_WARNINGS \"enable_dpe_warnings\"\n #define CN10K_ML_FW_REPORT_DPE_WARNINGS \"report_dpe_warnings\"\n@@ -63,9 +65,6 @@ static const char *const valid_args[] = {CN10K_ML_FW_PATH,\n /* Supported OCM page sizes: 1KB, 2KB, 4KB, 8KB and 16KB */\n static const int valid_ocm_page_size[] = {1024, 2048, 4096, 8192, 16384};\n \n-/* Dummy operations for ML device */\n-struct rte_ml_dev_ops ml_dev_dummy_ops = {0};\n-\n static int\n ml_read_file(const char *file, size_t *size, char **buffer)\n {\n@@ -146,7 +145,7 @@ parse_integer_arg(const char *key __rte_unused, const char *value, void *extra_a\n }\n \n static int\n-cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mldev)\n+cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10k_mldev)\n {\n \tbool enable_dpe_warnings_set = false;\n \tbool report_dpe_warnings_set = false;\n@@ -183,7 +182,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde\n \n \tif (rte_kvargs_count(kvlist, CN10K_ML_FW_ENABLE_DPE_WARNINGS) == 1) {\n \t\tret = rte_kvargs_process(kvlist, CN10K_ML_FW_ENABLE_DPE_WARNINGS,\n-\t\t\t\t\t &parse_integer_arg, &mldev->fw.enable_dpe_warnings);\n+\t\t\t\t\t &parse_integer_arg, &cn10k_mldev->fw.enable_dpe_warnings);\n \t\tif (ret < 0) {\n \t\t\tplt_err(\"Error processing arguments, key = %s\\n\",\n \t\t\t\tCN10K_ML_FW_ENABLE_DPE_WARNINGS);\n@@ -195,7 +194,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde\n \n \tif (rte_kvargs_count(kvlist, CN10K_ML_FW_REPORT_DPE_WARNINGS) == 1) {\n \t\tret = rte_kvargs_process(kvlist, CN10K_ML_FW_REPORT_DPE_WARNINGS,\n-\t\t\t\t\t &parse_integer_arg, &mldev->fw.report_dpe_warnings);\n+\t\t\t\t\t &parse_integer_arg, &cn10k_mldev->fw.report_dpe_warnings);\n \t\tif (ret < 0) {\n \t\t\tplt_err(\"Error processing arguments, key = %s\\n\",\n \t\t\t\tCN10K_ML_FW_REPORT_DPE_WARNINGS);\n@@ -207,7 +206,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde\n \n \tif (rte_kvargs_count(kvlist, CN10K_ML_DEV_CACHE_MODEL_DATA) == 1) {\n \t\tret = rte_kvargs_process(kvlist, CN10K_ML_DEV_CACHE_MODEL_DATA, &parse_integer_arg,\n-\t\t\t\t\t &mldev->cache_model_data);\n+\t\t\t\t\t &cn10k_mldev->cache_model_data);\n \t\tif (ret < 0) {\n \t\t\tplt_err(\"Error processing arguments, key = %s\\n\",\n \t\t\t\tCN10K_ML_DEV_CACHE_MODEL_DATA);\n@@ -230,7 +229,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde\n \n \tif (rte_kvargs_count(kvlist, CN10K_ML_DEV_HW_QUEUE_LOCK) == 1) {\n \t\tret = rte_kvargs_process(kvlist, CN10K_ML_DEV_HW_QUEUE_LOCK, &parse_integer_arg,\n-\t\t\t\t\t &mldev->hw_queue_lock);\n+\t\t\t\t\t &cn10k_mldev->hw_queue_lock);\n \t\tif (ret < 0) {\n \t\t\tplt_err(\"Error processing arguments, key = %s\\n\",\n \t\t\t\tCN10K_ML_DEV_HW_QUEUE_LOCK);\n@@ -242,7 +241,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde\n \n \tif (rte_kvargs_count(kvlist, CN10K_ML_OCM_PAGE_SIZE) == 1) {\n \t\tret 
= rte_kvargs_process(kvlist, CN10K_ML_OCM_PAGE_SIZE, &parse_integer_arg,\n-\t\t\t\t\t &mldev->ocm_page_size);\n+\t\t\t\t\t &cn10k_mldev->ocm_page_size);\n \t\tif (ret < 0) {\n \t\t\tplt_err(\"Error processing arguments, key = %s\\n\", CN10K_ML_OCM_PAGE_SIZE);\n \t\t\tret = -EINVAL;\n@@ -253,49 +252,53 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde\n \n check_args:\n \tif (!fw_path_set)\n-\t\tmldev->fw.path = CN10K_ML_FW_PATH_DEFAULT;\n+\t\tcn10k_mldev->fw.path = CN10K_ML_FW_PATH_DEFAULT;\n \telse\n-\t\tmldev->fw.path = fw_path;\n-\tplt_info(\"ML: %s = %s\", CN10K_ML_FW_PATH, mldev->fw.path);\n+\t\tcn10k_mldev->fw.path = fw_path;\n+\tplt_info(\"ML: %s = %s\", CN10K_ML_FW_PATH, cn10k_mldev->fw.path);\n \n \tif (!enable_dpe_warnings_set) {\n-\t\tmldev->fw.enable_dpe_warnings = CN10K_ML_FW_ENABLE_DPE_WARNINGS_DEFAULT;\n+\t\tcn10k_mldev->fw.enable_dpe_warnings = CN10K_ML_FW_ENABLE_DPE_WARNINGS_DEFAULT;\n \t} else {\n-\t\tif ((mldev->fw.enable_dpe_warnings < 0) || (mldev->fw.enable_dpe_warnings > 1)) {\n+\t\tif ((cn10k_mldev->fw.enable_dpe_warnings < 0) ||\n+\t\t    (cn10k_mldev->fw.enable_dpe_warnings > 1)) {\n \t\t\tplt_err(\"Invalid argument, %s = %d\\n\", CN10K_ML_FW_ENABLE_DPE_WARNINGS,\n-\t\t\t\tmldev->fw.enable_dpe_warnings);\n+\t\t\t\tcn10k_mldev->fw.enable_dpe_warnings);\n \t\t\tret = -EINVAL;\n \t\t\tgoto exit;\n \t\t}\n \t}\n-\tplt_info(\"ML: %s = %d\", CN10K_ML_FW_ENABLE_DPE_WARNINGS, mldev->fw.enable_dpe_warnings);\n+\tplt_info(\"ML: %s = %d\", CN10K_ML_FW_ENABLE_DPE_WARNINGS,\n+\t\t cn10k_mldev->fw.enable_dpe_warnings);\n \n \tif (!report_dpe_warnings_set) {\n-\t\tmldev->fw.report_dpe_warnings = CN10K_ML_FW_REPORT_DPE_WARNINGS_DEFAULT;\n+\t\tcn10k_mldev->fw.report_dpe_warnings = CN10K_ML_FW_REPORT_DPE_WARNINGS_DEFAULT;\n \t} else {\n-\t\tif ((mldev->fw.report_dpe_warnings < 0) || (mldev->fw.report_dpe_warnings > 1)) {\n+\t\tif ((cn10k_mldev->fw.report_dpe_warnings < 0) ||\n+\t\t    (cn10k_mldev->fw.report_dpe_warnings > 1)) {\n \t\t\tplt_err(\"Invalid argument, %s = %d\\n\", CN10K_ML_FW_REPORT_DPE_WARNINGS,\n-\t\t\t\tmldev->fw.report_dpe_warnings);\n+\t\t\t\tcn10k_mldev->fw.report_dpe_warnings);\n \t\t\tret = -EINVAL;\n \t\t\tgoto exit;\n \t\t}\n \t}\n-\tplt_info(\"ML: %s = %d\", CN10K_ML_FW_REPORT_DPE_WARNINGS, mldev->fw.report_dpe_warnings);\n+\tplt_info(\"ML: %s = %d\", CN10K_ML_FW_REPORT_DPE_WARNINGS,\n+\t\t cn10k_mldev->fw.report_dpe_warnings);\n \n \tif (!cache_model_data_set) {\n-\t\tmldev->cache_model_data = CN10K_ML_DEV_CACHE_MODEL_DATA_DEFAULT;\n+\t\tcn10k_mldev->cache_model_data = CN10K_ML_DEV_CACHE_MODEL_DATA_DEFAULT;\n \t} else {\n-\t\tif ((mldev->cache_model_data < 0) || (mldev->cache_model_data > 1)) {\n+\t\tif ((cn10k_mldev->cache_model_data < 0) || (cn10k_mldev->cache_model_data > 1)) {\n \t\t\tplt_err(\"Invalid argument, %s = %d\\n\", CN10K_ML_DEV_CACHE_MODEL_DATA,\n-\t\t\t\tmldev->cache_model_data);\n+\t\t\t\tcn10k_mldev->cache_model_data);\n \t\t\tret = -EINVAL;\n \t\t\tgoto exit;\n \t\t}\n \t}\n-\tplt_info(\"ML: %s = %d\", CN10K_ML_DEV_CACHE_MODEL_DATA, mldev->cache_model_data);\n+\tplt_info(\"ML: %s = %d\", CN10K_ML_DEV_CACHE_MODEL_DATA, cn10k_mldev->cache_model_data);\n \n \tif (!ocm_alloc_mode_set) {\n-\t\tmldev->ocm.alloc_mode = CN10K_ML_OCM_ALLOC_MODE_DEFAULT;\n+\t\tcn10k_mldev->ocm.alloc_mode = CN10K_ML_OCM_ALLOC_MODE_DEFAULT;\n \t} else {\n \t\tif (!((strcmp(ocm_alloc_mode, \"lowest\") == 0) ||\n \t\t      (strcmp(ocm_alloc_mode, \"largest\") == 0))) {\n@@ -304,47 +307,47 @@ cn10k_mldev_parse_devargs(struct 
rte_devargs *devargs, struct cn10k_ml_dev *mlde\n \t\t\tret = -EINVAL;\n \t\t\tgoto exit;\n \t\t}\n-\t\tmldev->ocm.alloc_mode = ocm_alloc_mode;\n+\t\tcn10k_mldev->ocm.alloc_mode = ocm_alloc_mode;\n \t}\n-\tplt_info(\"ML: %s = %s\", CN10K_ML_OCM_ALLOC_MODE, mldev->ocm.alloc_mode);\n+\tplt_info(\"ML: %s = %s\", CN10K_ML_OCM_ALLOC_MODE, cn10k_mldev->ocm.alloc_mode);\n \n \tif (!hw_queue_lock_set) {\n-\t\tmldev->hw_queue_lock = CN10K_ML_DEV_HW_QUEUE_LOCK_DEFAULT;\n+\t\tcn10k_mldev->hw_queue_lock = CN10K_ML_DEV_HW_QUEUE_LOCK_DEFAULT;\n \t} else {\n-\t\tif ((mldev->hw_queue_lock < 0) || (mldev->hw_queue_lock > 1)) {\n+\t\tif ((cn10k_mldev->hw_queue_lock < 0) || (cn10k_mldev->hw_queue_lock > 1)) {\n \t\t\tplt_err(\"Invalid argument, %s = %d\\n\", CN10K_ML_DEV_HW_QUEUE_LOCK,\n-\t\t\t\tmldev->hw_queue_lock);\n+\t\t\t\tcn10k_mldev->hw_queue_lock);\n \t\t\tret = -EINVAL;\n \t\t\tgoto exit;\n \t\t}\n \t}\n-\tplt_info(\"ML: %s = %d\", CN10K_ML_DEV_HW_QUEUE_LOCK, mldev->hw_queue_lock);\n+\tplt_info(\"ML: %s = %d\", CN10K_ML_DEV_HW_QUEUE_LOCK, cn10k_mldev->hw_queue_lock);\n \n \tif (!ocm_page_size_set) {\n-\t\tmldev->ocm_page_size = CN10K_ML_OCM_PAGE_SIZE_DEFAULT;\n+\t\tcn10k_mldev->ocm_page_size = CN10K_ML_OCM_PAGE_SIZE_DEFAULT;\n \t} else {\n-\t\tif (mldev->ocm_page_size < 0) {\n+\t\tif (cn10k_mldev->ocm_page_size < 0) {\n \t\t\tplt_err(\"Invalid argument, %s = %d\\n\", CN10K_ML_OCM_PAGE_SIZE,\n-\t\t\t\tmldev->ocm_page_size);\n+\t\t\t\tcn10k_mldev->ocm_page_size);\n \t\t\tret = -EINVAL;\n \t\t\tgoto exit;\n \t\t}\n \n \t\tfound = false;\n \t\tfor (i = 0; i < PLT_DIM(valid_ocm_page_size); i++) {\n-\t\t\tif (mldev->ocm_page_size == valid_ocm_page_size[i]) {\n+\t\t\tif (cn10k_mldev->ocm_page_size == valid_ocm_page_size[i]) {\n \t\t\t\tfound = true;\n \t\t\t\tbreak;\n \t\t\t}\n \t\t}\n \n \t\tif (!found) {\n-\t\t\tplt_err(\"Unsupported ocm_page_size = %d\\n\", mldev->ocm_page_size);\n+\t\t\tplt_err(\"Unsupported ocm_page_size = %d\\n\", cn10k_mldev->ocm_page_size);\n \t\t\tret = -EINVAL;\n \t\t\tgoto exit;\n \t\t}\n \t}\n-\tplt_info(\"ML: %s = %d\", CN10K_ML_OCM_PAGE_SIZE, mldev->ocm_page_size);\n+\tplt_info(\"ML: %s = %d\", CN10K_ML_OCM_PAGE_SIZE, cn10k_mldev->ocm_page_size);\n \n exit:\n \trte_kvargs_free(kvlist);\n@@ -356,7 +359,8 @@ static int\n cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n {\n \tstruct rte_ml_dev_pmd_init_params init_params;\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tchar name[RTE_ML_STR_MAX];\n \tstruct rte_ml_dev *dev;\n \tint ret;\n@@ -364,7 +368,7 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de\n \tPLT_SET_USED(pci_drv);\n \n \tinit_params = (struct rte_ml_dev_pmd_init_params){\n-\t\t.socket_id = rte_socket_id(), .private_data_size = sizeof(struct cn10k_ml_dev)};\n+\t\t.socket_id = rte_socket_id(), .private_data_size = sizeof(struct cnxk_ml_dev)};\n \n \tret = roc_plt_init();\n \tif (ret < 0) {\n@@ -380,18 +384,20 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de\n \t}\n \n \t/* Get private data space allocated */\n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcnxk_mldev->mldev = dev;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n-\t\tmldev->roc.pci_dev = pci_dev;\n+\t\tcn10k_mldev->roc.pci_dev = pci_dev;\n \n-\t\tret = cn10k_mldev_parse_devargs(dev->device->devargs, mldev);\n+\t\tret = 
cn10k_mldev_parse_devargs(dev->device->devargs, cn10k_mldev);\n \t\tif (ret) {\n \t\t\tplt_err(\"Failed to parse devargs ret = %d\", ret);\n \t\t\tgoto pmd_destroy;\n \t\t}\n \n-\t\tret = roc_ml_dev_init(&mldev->roc);\n+\t\tret = roc_ml_dev_init(&cn10k_mldev->roc);\n \t\tif (ret) {\n \t\t\tplt_err(\"Failed to initialize ML ROC, ret = %d\", ret);\n \t\t\tgoto pmd_destroy;\n@@ -407,7 +413,7 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de\n \tdev->dequeue_burst = NULL;\n \tdev->op_error_get = NULL;\n \n-\tmldev->state = ML_CN10K_DEV_STATE_PROBED;\n+\tcnxk_mldev->state = ML_CNXK_DEV_STATE_PROBED;\n \n \treturn 0;\n \n@@ -424,7 +430,7 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de\n static int\n cn10k_ml_pci_remove(struct rte_pci_device *pci_dev)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tchar name[RTE_ML_STR_MAX];\n \tstruct rte_ml_dev *dev;\n \tint ret;\n@@ -439,8 +445,8 @@ cn10k_ml_pci_remove(struct rte_pci_device *pci_dev)\n \t\treturn -ENODEV;\n \n \tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n-\t\tmldev = dev->data->dev_private;\n-\t\tret = roc_ml_dev_fini(&mldev->roc);\n+\t\tcnxk_mldev = dev->data->dev_private;\n+\t\tret = roc_ml_dev_fini(&cnxk_mldev->cn10k_mldev.roc);\n \t\tif (ret)\n \t\t\treturn ret;\n \t}\n@@ -486,45 +492,45 @@ cn10k_ml_fw_flags_get(struct cn10k_ml_fw *fw)\n static int\n cn10k_ml_fw_load_asim(struct cn10k_ml_fw *fw)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n \tuint64_t timeout_cycle;\n \tuint64_t reg_val64;\n \tbool timeout;\n \tint ret = 0;\n \n-\tmldev = fw->mldev;\n+\tcn10k_mldev = fw->cn10k_mldev;\n \n \t/* Reset HEAD and TAIL debug pointer registers */\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);\n \n \t/* Set ML_MLR_BASE to base IOVA of the ML region in LLC/DRAM. 
*/\n \treg_val64 = rte_eal_get_baseaddr();\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_MLR_BASE);\n-\tplt_ml_dbg(\"ML_MLR_BASE = 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));\n-\troc_ml_reg_save(&mldev->roc, ML_MLR_BASE);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_MLR_BASE);\n+\tplt_ml_dbg(\"ML_MLR_BASE = 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));\n+\troc_ml_reg_save(&cn10k_mldev->roc, ML_MLR_BASE);\n \n \t/* Update FW load completion structure */\n \tfw->req->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->status);\n \tfw->req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;\n-\tfw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &fw->req->result);\n+\tfw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->result);\n \tfw->req->jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);\n-\tplt_write64(ML_CN10K_POLL_JOB_START, &fw->req->status);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &fw->req->status);\n \tplt_wmb();\n \n \t/* Enqueue FW load through scratch registers */\n \ttimeout = true;\n-\ttimeout_cycle = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();\n-\troc_ml_scratch_enqueue(&mldev->roc, &fw->req->jd);\n+\ttimeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n+\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &fw->req->jd);\n \n \tplt_rmb();\n \tdo {\n-\t\tif (roc_ml_scratch_is_done_bit_set(&mldev->roc) &&\n-\t\t    (plt_read64(&fw->req->status) == ML_CN10K_POLL_JOB_FINISH)) {\n+\t\tif (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&\n+\t\t    (plt_read64(&fw->req->status) == ML_CNXK_POLL_JOB_FINISH)) {\n \t\t\ttimeout = false;\n \t\t\tbreak;\n \t\t}\n@@ -536,11 +542,11 @@ cn10k_ml_fw_load_asim(struct cn10k_ml_fw *fw)\n \t} else {\n \t\t/* Set ML to disable new jobs */\n \t\treg_val64 = (ROC_ML_CFG_JD_SIZE | ROC_ML_CFG_MLIP_ENA);\n-\t\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n \n \t\t/* Clear scratch registers */\n-\t\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n-\t\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n \n \t\tif (timeout) {\n \t\t\tplt_err(\"Firmware load timeout\");\n@@ -554,14 +560,14 @@ cn10k_ml_fw_load_asim(struct cn10k_ml_fw *fw)\n \t}\n \n \t/* Reset scratch registers */\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n \n \t/* Disable job execution, to be enabled in start */\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 &= ~ROC_ML_CFG_ENA;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n \treturn ret;\n }\n@@ -571,7 +577,7 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n {\n \tunion ml_a35_0_rst_vector_base_s a35_0_rst_vector_base;\n \tunion ml_a35_0_rst_vector_base_s a35_1_rst_vector_base;\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n \tuint64_t timeout_cycle;\n 
\tuint64_t reg_val64;\n \tuint32_t reg_val32;\n@@ -580,24 +586,24 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \tint ret = 0;\n \tuint8_t i;\n \n-\tmldev = fw->mldev;\n+\tcn10k_mldev = fw->cn10k_mldev;\n \n \t/* Reset HEAD and TAIL debug pointer registers */\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);\n \n \t/* (1) Write firmware images for ACC's two A35 cores to the ML region in LLC / DRAM. */\n \trte_memcpy(PLT_PTR_ADD(fw->data, FW_LINKER_OFFSET), buffer, size);\n \n \t/* (2) Set ML(0)_MLR_BASE = Base IOVA of the ML region in LLC/DRAM. */\n \treg_val64 = PLT_PTR_SUB_U64_CAST(fw->data, rte_eal_get_baseaddr());\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_MLR_BASE);\n-\tplt_ml_dbg(\"ML_MLR_BASE => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));\n-\troc_ml_reg_save(&mldev->roc, ML_MLR_BASE);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_MLR_BASE);\n+\tplt_ml_dbg(\"ML_MLR_BASE => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));\n+\troc_ml_reg_save(&cn10k_mldev->roc, ML_MLR_BASE);\n \n \t/* (3) Set ML(0)_AXI_BRIDGE_CTRL(1) = 0x184003 to remove back-pressure check on DMA AXI\n \t * bridge.\n@@ -605,9 +611,9 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \treg_val64 = (ROC_ML_AXI_BRIDGE_CTRL_AXI_RESP_CTRL |\n \t\t     ROC_ML_AXI_BRIDGE_CTRL_BRIDGE_CTRL_MODE | ROC_ML_AXI_BRIDGE_CTRL_NCB_WR_BLK |\n \t\t     ROC_ML_AXI_BRIDGE_CTRL_FORCE_WRESP_OK | ROC_ML_AXI_BRIDGE_CTRL_FORCE_RRESP_OK);\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_AXI_BRIDGE_CTRL(1));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_AXI_BRIDGE_CTRL(1));\n \tplt_ml_dbg(\"ML_AXI_BRIDGE_CTRL(1) => 0x%016lx\",\n-\t\t   roc_ml_reg_read64(&mldev->roc, ML_AXI_BRIDGE_CTRL(1)));\n+\t\t   roc_ml_reg_read64(&cn10k_mldev->roc, ML_AXI_BRIDGE_CTRL(1)));\n \n \t/* (4) Set ML(0)_ANB(0..2)_BACKP_DISABLE = 0x3 to remove back-pressure on the AXI to NCB\n \t * bridges.\n@@ -615,9 +621,9 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \tfor (i = 0; i < ML_ANBX_NR; i++) {\n \t\treg_val64 = (ROC_ML_ANBX_BACKP_DISABLE_EXTMSTR_B_BACKP_DISABLE |\n \t\t\t     ROC_ML_ANBX_BACKP_DISABLE_EXTMSTR_R_BACKP_DISABLE);\n-\t\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_BACKP_DISABLE(i));\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_BACKP_DISABLE(i));\n \t\tplt_ml_dbg(\"ML_ANBX_BACKP_DISABLE(%u) => 0x%016lx\", i,\n-\t\t\t   roc_ml_reg_read64(&mldev->roc, ML_ANBX_BACKP_DISABLE(i)));\n+\t\t\t   roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_BACKP_DISABLE(i)));\n \t}\n \n \t/* (5) Set ML(0)_ANB(0..2)_NCBI_P_OVR = 0x3000 and ML(0)_ANB(0..2)_NCBI_NP_OVR = 0x3000 to\n@@ -626,39 
+632,40 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \tfor (i = 0; i < ML_ANBX_NR; i++) {\n \t\treg_val64 = (ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_NS_OVR |\n \t\t\t     ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_NS_OVR_VLD);\n-\t\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));\n \t\tplt_ml_dbg(\"ML_ANBX_NCBI_P_OVR(%u) => 0x%016lx\", i,\n-\t\t\t   roc_ml_reg_read64(&mldev->roc, ML_ANBX_NCBI_P_OVR(i)));\n+\t\t\t   roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_NCBI_P_OVR(i)));\n \n \t\treg_val64 |= (ML_ANBX_NCBI_NP_OVR_ANB_NCBI_NP_NS_OVR |\n \t\t\t      ML_ANBX_NCBI_NP_OVR_ANB_NCBI_NP_NS_OVR_VLD);\n-\t\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_NCBI_NP_OVR(i));\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_NCBI_NP_OVR(i));\n \t\tplt_ml_dbg(\"ML_ANBX_NCBI_NP_OVR(%u) => 0x%016lx\", i,\n-\t\t\t   roc_ml_reg_read64(&mldev->roc, ML_ANBX_NCBI_NP_OVR(i)));\n+\t\t\t   roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_NCBI_NP_OVR(i)));\n \t}\n \n \t/* (6) Set ML(0)_CFG[MLIP_CLK_FORCE] = 1, to force turning on the MLIP clock. */\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 |= ROC_ML_CFG_MLIP_CLK_FORCE;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n \t/* (7) Set ML(0)_JOB_MGR_CTRL[STALL_ON_IDLE] = 0, to make sure the boot request is accepted\n \t * when there is no job in the command queue.\n \t */\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL);\n \treg_val64 &= ~ROC_ML_JOB_MGR_CTRL_STALL_ON_IDLE;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_JOB_MGR_CTRL);\n-\tplt_ml_dbg(\"ML_JOB_MGR_CTRL => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_JOB_MGR_CTRL);\n+\tplt_ml_dbg(\"ML_JOB_MGR_CTRL => 0x%016lx\",\n+\t\t   roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL));\n \n \t/* (8) Set ML(0)_CFG[ENA] = 0 and ML(0)_CFG[MLIP_ENA] = 1 to bring MLIP out of reset while\n \t * keeping the job manager disabled.\n \t */\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 |= ROC_ML_CFG_MLIP_ENA;\n \treg_val64 &= ~ROC_ML_CFG_ENA;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n \t/* (9) Wait at least 70 coprocessor clock cycles. */\n \tplt_delay_us(FW_WAIT_CYCLES);\n@@ -669,53 +676,57 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \t * AXI outbound address divided by 4. 
Read after write.\n \t */\n \toffset = PLT_PTR_ADD_U64_CAST(\n-\t\tfw->data, FW_LINKER_OFFSET - roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));\n+\t\tfw->data, FW_LINKER_OFFSET - roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));\n \ta35_0_rst_vector_base.s.addr = (offset + ML_AXI_START_ADDR) / 4;\n \ta35_1_rst_vector_base.s.addr = (offset + ML_AXI_START_ADDR) / 4;\n \n-\troc_ml_reg_write32(&mldev->roc, a35_0_rst_vector_base.w.w0, ML_A35_0_RST_VECTOR_BASE_W(0));\n-\treg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(0));\n+\troc_ml_reg_write32(&cn10k_mldev->roc, a35_0_rst_vector_base.w.w0,\n+\t\t\t   ML_A35_0_RST_VECTOR_BASE_W(0));\n+\treg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(0));\n \tplt_ml_dbg(\"ML_A35_0_RST_VECTOR_BASE_W(0) => 0x%08x\", reg_val32);\n \n-\troc_ml_reg_write32(&mldev->roc, a35_0_rst_vector_base.w.w1, ML_A35_0_RST_VECTOR_BASE_W(1));\n-\treg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(1));\n+\troc_ml_reg_write32(&cn10k_mldev->roc, a35_0_rst_vector_base.w.w1,\n+\t\t\t   ML_A35_0_RST_VECTOR_BASE_W(1));\n+\treg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(1));\n \tplt_ml_dbg(\"ML_A35_0_RST_VECTOR_BASE_W(1) => 0x%08x\", reg_val32);\n \n-\troc_ml_reg_write32(&mldev->roc, a35_1_rst_vector_base.w.w0, ML_A35_1_RST_VECTOR_BASE_W(0));\n-\treg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(0));\n+\troc_ml_reg_write32(&cn10k_mldev->roc, a35_1_rst_vector_base.w.w0,\n+\t\t\t   ML_A35_1_RST_VECTOR_BASE_W(0));\n+\treg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(0));\n \tplt_ml_dbg(\"ML_A35_1_RST_VECTOR_BASE_W(0) => 0x%08x\", reg_val32);\n \n-\troc_ml_reg_write32(&mldev->roc, a35_1_rst_vector_base.w.w1, ML_A35_1_RST_VECTOR_BASE_W(1));\n-\treg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(1));\n+\troc_ml_reg_write32(&cn10k_mldev->roc, a35_1_rst_vector_base.w.w1,\n+\t\t\t   ML_A35_1_RST_VECTOR_BASE_W(1));\n+\treg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(1));\n \tplt_ml_dbg(\"ML_A35_1_RST_VECTOR_BASE_W(1) => 0x%08x\", reg_val32);\n \n \t/* (11) Clear MLIP's ML(0)_SW_RST_CTRL[ACC_RST]. This will bring the ACC cores and other\n \t * MLIP components out of reset. The cores will execute firmware from the ML region as\n \t * written in step 1.\n \t */\n-\treg_val32 = roc_ml_reg_read32(&mldev->roc, ML_SW_RST_CTRL);\n+\treg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_SW_RST_CTRL);\n \treg_val32 &= ~ROC_ML_SW_RST_CTRL_ACC_RST;\n-\troc_ml_reg_write32(&mldev->roc, reg_val32, ML_SW_RST_CTRL);\n-\treg_val32 = roc_ml_reg_read32(&mldev->roc, ML_SW_RST_CTRL);\n+\troc_ml_reg_write32(&cn10k_mldev->roc, reg_val32, ML_SW_RST_CTRL);\n+\treg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_SW_RST_CTRL);\n \tplt_ml_dbg(\"ML_SW_RST_CTRL => 0x%08x\", reg_val32);\n \n \t/* (12) Wait for notification from firmware that ML is ready for job execution. 
*/\n \tfw->req->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->status);\n \tfw->req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;\n-\tfw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &fw->req->result);\n+\tfw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->result);\n \tfw->req->jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);\n-\tplt_write64(ML_CN10K_POLL_JOB_START, &fw->req->status);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &fw->req->status);\n \tplt_wmb();\n \n \t/* Enqueue FW load through scratch registers */\n \ttimeout = true;\n-\ttimeout_cycle = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();\n-\troc_ml_scratch_enqueue(&mldev->roc, &fw->req->jd);\n+\ttimeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n+\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &fw->req->jd);\n \n \tplt_rmb();\n \tdo {\n-\t\tif (roc_ml_scratch_is_done_bit_set(&mldev->roc) &&\n-\t\t    (plt_read64(&fw->req->status) == ML_CN10K_POLL_JOB_FINISH)) {\n+\t\tif (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&\n+\t\t    (plt_read64(&fw->req->status) == ML_CNXK_POLL_JOB_FINISH)) {\n \t\t\ttimeout = false;\n \t\t\tbreak;\n \t\t}\n@@ -727,11 +738,11 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \t} else {\n \t\t/* Set ML to disable new jobs */\n \t\treg_val64 = (ROC_ML_CFG_JD_SIZE | ROC_ML_CFG_MLIP_ENA);\n-\t\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n \n \t\t/* Clear scratch registers */\n-\t\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n-\t\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n \n \t\tif (timeout) {\n \t\t\tplt_err(\"Firmware load timeout\");\n@@ -747,49 +758,51 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)\n \t/* (13) Set ML(0)_JOB_MGR_CTRL[STALL_ON_IDLE] = 0x1; this is needed to shut down the MLIP\n \t * clock when there are no more jobs to process.\n \t */\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL);\n \treg_val64 |= ROC_ML_JOB_MGR_CTRL_STALL_ON_IDLE;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_JOB_MGR_CTRL);\n-\tplt_ml_dbg(\"ML_JOB_MGR_CTRL => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_JOB_MGR_CTRL);\n+\tplt_ml_dbg(\"ML_JOB_MGR_CTRL => 0x%016lx\",\n+\t\t   roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL));\n \n \t/* (14) Set ML(0)_CFG[MLIP_CLK_FORCE] = 0; the MLIP clock will be turned on/off based on job\n \t * activities.\n \t */\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 &= ~ROC_ML_CFG_MLIP_CLK_FORCE;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n \t/* (15) Set ML(0)_CFG[ENA] to enable ML job execution. 
*/\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 |= ROC_ML_CFG_ENA;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n \t/* Reset scratch registers */\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n \n \t/* Disable job execution, to be enabled in start */\n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 &= ~ROC_ML_CFG_ENA;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n \t/* Additional fixes: Set RO bit to fix O2D DMA bandwidth issue on cn10ka */\n \tfor (i = 0; i < ML_ANBX_NR; i++) {\n-\t\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_ANBX_NCBI_P_OVR(i));\n+\t\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_NCBI_P_OVR(i));\n \t\treg_val64 |= (ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_RO_OVR |\n \t\t\t      ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_RO_OVR_VLD);\n-\t\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));\n+\t\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));\n \t}\n \n \treturn ret;\n }\n \n int\n-cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)\n+cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n \tconst struct plt_memzone *mz;\n \tstruct cn10k_ml_fw *fw;\n \tchar *fw_buffer = NULL;\n@@ -797,8 +810,9 @@ cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)\n \tuint64_t fw_size = 0;\n \tint ret = 0;\n \n-\tfw = &mldev->fw;\n-\tfw->mldev = mldev;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tfw = &cn10k_mldev->fw;\n+\tfw->cn10k_mldev = cn10k_mldev;\n \n \tif (roc_env_is_emulator() || roc_env_is_hw()) {\n \t\t/* Read firmware image to a buffer */\n@@ -829,8 +843,8 @@ cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)\n \tmemset(&fw->req->jd.fw_load.version[0], '\\0', MLDEV_FIRMWARE_VERSION_LENGTH);\n \n \t/* Reset device, if in active state */\n-\tif (roc_ml_mlip_is_enabled(&mldev->roc))\n-\t\troc_ml_mlip_reset(&mldev->roc, true);\n+\tif (roc_ml_mlip_is_enabled(&cn10k_mldev->roc))\n+\t\troc_ml_mlip_reset(&cn10k_mldev->roc, true);\n \n \t/* Load firmware */\n \tif (roc_env_is_emulator() || roc_env_is_hw()) {\n@@ -843,22 +857,25 @@ cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)\n \t}\n \n \tif (ret < 0)\n-\t\tcn10k_ml_fw_unload(mldev);\n+\t\tcn10k_ml_fw_unload(cnxk_mldev);\n \n \treturn ret;\n }\n \n void\n-cn10k_ml_fw_unload(struct cn10k_ml_dev *mldev)\n+cn10k_ml_fw_unload(struct cnxk_ml_dev *cnxk_mldev)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n \tconst struct plt_memzone *mz;\n \tuint64_t reg_val;\n \n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\n \t/* Disable and reset device */\n-\treg_val = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val &= ~ROC_ML_CFG_MLIP_ENA;\n-\troc_ml_reg_write64(&mldev->roc, reg_val, 
ML_CFG);\n-\troc_ml_mlip_reset(&mldev->roc, true);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val, ML_CFG);\n+\troc_ml_mlip_reset(&cn10k_mldev->roc, true);\n \n \tmz = plt_memzone_lookup(FW_MEMZONE_NAME);\n \tif (mz != NULL)\ndiff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h\nindex 4aaeecff03..f9da1548c4 100644\n--- a/drivers/ml/cnxk/cn10k_ml_dev.h\n+++ b/drivers/ml/cnxk/cn10k_ml_dev.h\n@@ -9,6 +9,9 @@\n \n #include \"cn10k_ml_ocm.h\"\n \n+/* Dummy Device ops */\n+extern struct rte_ml_dev_ops ml_dev_dummy_ops;\n+\n /* Marvell OCTEON CN10K ML PMD device name */\n #define MLDEV_NAME_CN10K_PMD ml_cn10k\n \n@@ -36,17 +39,10 @@\n /* Maximum number of segments for IO data */\n #define ML_CN10K_MAX_SEGMENTS 1\n \n-/* ML command timeout in seconds */\n-#define ML_CN10K_CMD_TIMEOUT 5\n-\n /* ML slow-path job flags */\n #define ML_CN10K_SP_FLAGS_OCM_NONRELOCATABLE BIT(0)\n #define ML_CN10K_SP_FLAGS_EXTENDED_LOAD_JD   BIT(1)\n \n-/* Poll mode job state */\n-#define ML_CN10K_POLL_JOB_START\t 0\n-#define ML_CN10K_POLL_JOB_FINISH 1\n-\n /* Memory barrier macros */\n #if defined(RTE_ARCH_ARM)\n #define dmb_st ({ asm volatile(\"dmb st\" : : : \"memory\"); })\n@@ -56,6 +52,7 @@\n #define dsb_st\n #endif\n \n+struct cnxk_ml_dev;\n struct cn10k_ml_req;\n struct cn10k_ml_qp;\n \n@@ -68,21 +65,6 @@ enum cn10k_ml_job_type {\n \tML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST,\n };\n \n-/* Device configuration state enum */\n-enum cn10k_ml_dev_state {\n-\t/* Probed and not configured */\n-\tML_CN10K_DEV_STATE_PROBED = 0,\n-\n-\t/* Configured */\n-\tML_CN10K_DEV_STATE_CONFIGURED,\n-\n-\t/* Started */\n-\tML_CN10K_DEV_STATE_STARTED,\n-\n-\t/* Closed */\n-\tML_CN10K_DEV_STATE_CLOSED\n-};\n-\n /* Error types enumeration */\n enum cn10k_ml_error_etype {\n \t/* 0x0 */ ML_ETYPE_NO_ERROR = 0, /* No error */\n@@ -379,7 +361,7 @@ struct cn10k_ml_jd {\n /* ML firmware structure */\n struct cn10k_ml_fw {\n \t/* Device reference */\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n \n \t/* Firmware file path */\n \tconst char *path;\n@@ -485,27 +467,12 @@ struct cn10k_ml_dev {\n \t/* Device ROC */\n \tstruct roc_ml roc;\n \n-\t/* Configuration state */\n-\tenum cn10k_ml_dev_state state;\n-\n \t/* Firmware */\n \tstruct cn10k_ml_fw fw;\n \n \t/* OCM info */\n \tstruct cn10k_ml_ocm ocm;\n \n-\t/* Number of models loaded */\n-\tuint16_t nb_models_loaded;\n-\n-\t/* Number of models unloaded */\n-\tuint16_t nb_models_unloaded;\n-\n-\t/* Number of models started */\n-\tuint16_t nb_models_started;\n-\n-\t/* Number of models stopped */\n-\tuint16_t nb_models_stopped;\n-\n \t/* Extended stats data */\n \tstruct cn10k_ml_xstats xstats;\n \n@@ -528,7 +495,7 @@ struct cn10k_ml_dev {\n };\n \n uint64_t cn10k_ml_fw_flags_get(struct cn10k_ml_fw *fw);\n-int cn10k_ml_fw_load(struct cn10k_ml_dev *mldev);\n-void cn10k_ml_fw_unload(struct cn10k_ml_dev *mldev);\n+int cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev);\n+void cn10k_ml_fw_unload(struct cnxk_ml_dev *cnxk_mldev);\n \n #endif /* _CN10K_ML_DEV_H_ */\ndiff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c\nindex e0b750cd8e..d146535866 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.c\n+++ b/drivers/ml/cnxk/cn10k_ml_model.c\n@@ -10,6 +10,8 @@\n #include \"cn10k_ml_model.h\"\n #include \"cn10k_ml_ocm.h\"\n \n+#include \"cnxk_ml_dev.h\"\n+\n static enum rte_ml_io_type\n cn10k_ml_io_type_map(uint8_t type)\n {\n@@ -461,7 +463,7 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_\n }\n \n 
int\n-cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, uint8_t *buffer,\n+cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *cn10k_mldev, uint16_t model_id, uint8_t *buffer,\n \t\t\t       uint16_t *wb_pages, uint16_t *scratch_pages)\n {\n \tstruct cn10k_ml_model_metadata *metadata;\n@@ -470,7 +472,7 @@ cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui\n \tuint64_t wb_size;\n \n \tmetadata = (struct cn10k_ml_model_metadata *)buffer;\n-\tocm = &mldev->ocm;\n+\tocm = &cn10k_mldev->ocm;\n \n \t/* Assume wb_size is zero for non-relocatable models */\n \tif (metadata->model.ocm_relocatable)\n@@ -494,11 +496,11 @@ cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui\n \t\t   scratch_size, *scratch_pages);\n \n \t/* Check if the model can be loaded on OCM */\n-\tif ((*wb_pages + *scratch_pages) > mldev->ocm.num_pages) {\n+\tif ((*wb_pages + *scratch_pages) > cn10k_mldev->ocm.num_pages) {\n \t\tplt_err(\"Cannot create the model, OCM relocatable = %u\",\n \t\t\tmetadata->model.ocm_relocatable);\n \t\tplt_err(\"wb_pages (%u) + scratch_pages (%u) > %u\", *wb_pages, *scratch_pages,\n-\t\t\tmldev->ocm.num_pages);\n+\t\t\tcn10k_mldev->ocm.num_pages);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -506,8 +508,8 @@ cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui\n \t * prevent the library from allocating the remaining space on the tile to other models.\n \t */\n \tif (!metadata->model.ocm_relocatable)\n-\t\t*scratch_pages =\n-\t\t\tPLT_MAX(PLT_U64_CAST(*scratch_pages), PLT_U64_CAST(mldev->ocm.num_pages));\n+\t\t*scratch_pages = PLT_MAX(PLT_U64_CAST(*scratch_pages),\n+\t\t\t\t\t PLT_U64_CAST(cn10k_mldev->ocm.num_pages));\n \n \treturn 0;\n }\ndiff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h\nindex 4cc0744891..3128b28db7 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.h\n+++ b/drivers/ml/cnxk/cn10k_ml_model.h\n@@ -13,6 +13,8 @@\n #include \"cn10k_ml_ocm.h\"\n #include \"cn10k_ml_ops.h\"\n \n+struct cnxk_ml_dev;\n+\n /* Model state */\n enum cn10k_ml_model_state {\n \tML_CN10K_MODEL_STATE_LOADED,\n@@ -489,7 +491,7 @@ struct cn10k_ml_model_stats {\n /* Model Object */\n struct cn10k_ml_model {\n \t/* Device reference */\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cnxk_ml_dev *mldev;\n \n \t/* Name */\n \tchar name[RTE_ML_STR_MAX];\n@@ -537,8 +539,8 @@ int cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size);\n void cn10k_ml_model_metadata_update(struct cn10k_ml_model_metadata *metadata);\n void cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer,\n \t\t\t\tuint8_t *base_dma_addr);\n-int cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, uint8_t *buffer,\n-\t\t\t\t   uint16_t *wb_pages, uint16_t *scratch_pages);\n+int cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *cn10k_mldev, uint16_t model_id,\n+\t\t\t\t   uint8_t *buffer, uint16_t *wb_pages, uint16_t *scratch_pages);\n void cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model);\n \n #endif /* _CN10K_ML_MODEL_H_ */\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ocm.c b/drivers/ml/cnxk/cn10k_ml_ocm.c\nindex 6fb0bb620e..aa376284d5 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ocm.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ocm.c\n@@ -4,11 +4,13 @@\n \n #include <rte_mldev_pmd.h>\n \n+#include <roc_api.h>\n+\n #include \"cn10k_ml_dev.h\"\n #include \"cn10k_ml_model.h\"\n #include \"cn10k_ml_ocm.h\"\n \n-#include \"roc_api.h\"\n+#include \"cnxk_ml_dev.h\"\n \n /* OCM 
macros */\n #define BYTE_LEN\t   8\n@@ -217,7 +219,8 @@ int\n cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t wb_pages,\n \t\t\t   uint16_t scratch_pages, uint64_t *tilemask)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \n \tuint16_t used_scratch_pages_max;\n@@ -236,8 +239,9 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w\n \tint max_slot_sz;\n \tint page_id;\n \n-\tmldev = dev->data->dev_private;\n-\tocm = &mldev->ocm;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n \n \tif (num_tiles > ML_CN10K_OCM_NUMTILES) {\n \t\tplt_err(\"Invalid num_tiles = %u (> %u)\", num_tiles, ML_CN10K_OCM_NUMTILES);\n@@ -254,8 +258,8 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w\n \ttile_start = 0;\n \tsearch_end_tile = ocm->num_tiles - num_tiles;\n \n-\t/* allocate for local ocm mask */\n-\tlocal_ocm_mask = rte_zmalloc(\"local_ocm_mask\", mldev->ocm.mask_words, RTE_CACHE_LINE_SIZE);\n+\t/* Allocate for local ocm mask */\n+\tlocal_ocm_mask = rte_zmalloc(\"local_ocm_mask\", ocm->mask_words, RTE_CACHE_LINE_SIZE);\n \tif (local_ocm_mask == NULL) {\n \t\tplt_err(\"Unable to allocate memory for local_ocm_mask\");\n \t\treturn -1;\n@@ -271,7 +275,7 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w\n \t\t\tPLT_MAX(ocm->tile_ocm_info[tile_id].last_wb_page, used_last_wb_page_max);\n \t}\n \n-\tmemset(local_ocm_mask, 0, mldev->ocm.mask_words);\n+\tmemset(local_ocm_mask, 0, ocm->mask_words);\n \tfor (tile_id = tile_start; tile_id < tile_start + num_tiles; tile_id++) {\n \t\tfor (word_id = 0; word_id < ocm->mask_words; word_id++)\n \t\t\tlocal_ocm_mask[word_id] |= ocm->tile_ocm_info[tile_id].ocm_mask[word_id];\n@@ -333,8 +337,9 @@ void\n cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint64_t tilemask,\n \t\t\t   int wb_page_start, uint16_t wb_pages, uint16_t scratch_pages)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \n \tint scratch_page_start;\n@@ -345,8 +350,9 @@ cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint64_t t\n \tint tile_id;\n \tint page_id;\n \n-\tmldev = dev->data->dev_private;\n-\tocm = &mldev->ocm;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n \tmodel = dev->data->models[model_id];\n \n \t/* Get first set bit, tile_start */\n@@ -391,8 +397,9 @@ void\n cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id)\n {\n \tstruct cn10k_ml_model *local_model;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \n \tint scratch_resize_pages;\n@@ -404,8 +411,9 @@ cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id)\n \tint page_id;\n \tuint16_t i;\n \n-\tmldev = dev->data->dev_private;\n-\tocm = &mldev->ocm;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n \tmodel = dev->data->models[model_id];\n \n \t/* Update OCM info for WB memory */\n@@ -453,35 +461,37 @@ cn10k_ml_ocm_pagemask_to_str(struct cn10k_ml_ocm_tile_info *tile_info, uint16_t\n \tchar *p = str;\n \tint 
word;\n \n-\t/* add prefix 0x */\n+\t/* Add prefix 0x */\n \t*p++ = '0';\n \t*p++ = 'x';\n \n-\t/* build one word at a time */\n+\t/* Build hex string */\n \tfor (word = nwords - 1; word >= 0; word--) {\n \t\tsprintf(p, \"%02X\", tile_info->ocm_mask[word]);\n \t\tp += 2;\n \t}\n \n-\t/* terminate */\n+\t/* Terminate */\n \t*p++ = 0;\n }\n \n void\n cn10k_ml_ocm_print(struct rte_ml_dev *dev, FILE *fp)\n {\n-\tchar *str;\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \tuint8_t tile_id;\n \tuint8_t word_id;\n \tint wb_pages;\n+\tchar *str;\n \n-\tmldev = dev->data->dev_private;\n-\tocm = &mldev->ocm;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n \n-\t/* nibbles + prefix '0x' */\n-\tstr = rte_zmalloc(\"ocm_mask_str\", mldev->ocm.num_pages / 4 + 2, RTE_CACHE_LINE_SIZE);\n+\t/* Nibbles + prefix '0x' */\n+\tstr = rte_zmalloc(\"ocm_mask_str\", ocm->num_pages / 4 + 2, RTE_CACHE_LINE_SIZE);\n \tif (str == NULL) {\n \t\tplt_err(\"Unable to allocate memory for ocm_mask_str\");\n \t\treturn;\n@@ -492,7 +502,7 @@ cn10k_ml_ocm_print(struct rte_ml_dev *dev, FILE *fp)\n \t\tcn10k_ml_ocm_pagemask_to_str(&ocm->tile_ocm_info[tile_id], ocm->mask_words, str);\n \n \t\twb_pages = 0 - ocm->tile_ocm_info[tile_id].scratch_pages;\n-\t\tfor (word_id = 0; word_id < mldev->ocm.mask_words; word_id++)\n+\t\tfor (word_id = 0; word_id < ocm->mask_words; word_id++)\n \t\t\twb_pages +=\n \t\t\t\trte_popcount32(ocm->tile_ocm_info[tile_id].ocm_mask[word_id]);\n \ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex 11531afd8c..3385bf50c0 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -11,6 +11,8 @@\n #include \"cn10k_ml_model.h\"\n #include \"cn10k_ml_ops.h\"\n \n+#include \"cnxk_ml_dev.h\"\n+\n /* ML model macros */\n #define CN10K_ML_MODEL_MEMZONE_NAME \"ml_cn10k_model_mz\"\n \n@@ -85,7 +87,7 @@ cn10k_ml_set_poll_addr(struct cn10k_ml_req *req)\n static inline void\n cn10k_ml_set_poll_ptr(struct cn10k_ml_req *req)\n {\n-\tplt_write64(ML_CN10K_POLL_JOB_START, req->compl_W1);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, req->compl_W1);\n }\n \n static inline uint64_t\n@@ -175,7 +177,7 @@ cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_des\n \tqp->queue.reqs = (struct cn10k_ml_req *)va;\n \tqp->queue.head = 0;\n \tqp->queue.tail = 0;\n-\tqp->queue.wait_cycles = ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();\n+\tqp->queue.wait_cycles = ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n \tqp->nb_desc = nb_desc;\n \tqp->stats.enqueued_count = 0;\n \tqp->stats.dequeued_count = 0;\n@@ -199,16 +201,17 @@ cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_des\n static void\n cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n {\n-\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \tchar str[STR_LEN];\n \tuint8_t i;\n \tuint8_t j;\n \n-\tmldev = dev->data->dev_private;\n-\tocm = &mldev->ocm;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n \tmodel = dev->data->models[model_id];\n \n \t/* Print debug info */\n@@ -249,7 +252,7 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n \t\tfprintf(fp, \"%*s : 0x%0*\" PRIx64 \"\\n\", FIELD_LEN, 
\"tilemask\",\n \t\t\tML_CN10K_OCM_NUMTILES / 4, model->model_mem_map.tilemask);\n \t\tfprintf(fp, \"%*s : 0x%\" PRIx64 \"\\n\", FIELD_LEN, \"ocm_wb_start\",\n-\t\t\tmodel->model_mem_map.wb_page_start * mldev->ocm.page_size);\n+\t\t\tmodel->model_mem_map.wb_page_start * cn10k_mldev->ocm.page_size);\n \t}\n \n \tfprintf(fp, \"%*s : %u\\n\", FIELD_LEN, \"num_inputs\", model->metadata.model.num_input);\n@@ -325,7 +328,7 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n }\n \n static void\n-cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_model *model,\n+cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cn10k_ml_model *model,\n \t\t\t\tstruct cn10k_ml_req *req, enum cn10k_ml_job_type job_type)\n {\n \tstruct cn10k_ml_model_metadata *metadata;\n@@ -340,7 +343,7 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode\n \treq->jd.hdr.model_id = model->model_id;\n \treq->jd.hdr.job_type = job_type;\n \treq->jd.hdr.fp_flags = 0x0;\n-\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &req->result);\n+\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);\n \n \tif (job_type == ML_CN10K_JOB_TYPE_MODEL_START) {\n \t\tif (!model->metadata.model.ocm_relocatable)\n@@ -350,9 +353,9 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode\n \n \t\treq->jd.hdr.sp_flags |= ML_CN10K_SP_FLAGS_EXTENDED_LOAD_JD;\n \t\treq->jd.model_start.extended_args =\n-\t\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, &req->extended_args));\n+\t\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->extended_args));\n \t\treq->jd.model_start.model_dst_ddr_addr =\n-\t\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, addr->init_run_addr));\n+\t\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, addr->init_run_addr));\n \t\treq->jd.model_start.model_init_offset = 0x0;\n \t\treq->jd.model_start.model_main_offset = metadata->init_model.file_size;\n \t\treq->jd.model_start.model_finish_offset =\n@@ -372,7 +375,7 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode\n \t\treq->jd.model_start.ocm_wb_range_start = metadata->model.ocm_wb_range_start;\n \t\treq->jd.model_start.ocm_wb_range_end = metadata->model.ocm_wb_range_end;\n \t\treq->jd.model_start.ddr_wb_base_address = PLT_U64_CAST(roc_ml_addr_ap2mlip(\n-\t\t\t&mldev->roc,\n+\t\t\t&cn10k_mldev->roc,\n \t\t\tPLT_PTR_ADD(addr->finish_load_addr, metadata->finish_model.file_size)));\n \t\treq->jd.model_start.ddr_wb_range_start = metadata->model.ddr_wb_range_start;\n \t\treq->jd.model_start.ddr_wb_range_end = metadata->model.ddr_wb_range_end;\n@@ -383,7 +386,7 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode\n \t\treq->jd.model_start.output.s.ddr_range_end = metadata->model.ddr_output_range_end;\n \n \t\treq->extended_args.start.ddr_scratch_base_address = PLT_U64_CAST(\n-\t\t\troc_ml_addr_ap2mlip(&mldev->roc, model->addr.scratch_base_addr));\n+\t\t\troc_ml_addr_ap2mlip(&cn10k_mldev->roc, model->addr.scratch_base_addr));\n \t\treq->extended_args.start.ddr_scratch_range_start =\n \t\t\tmetadata->model.ddr_scratch_range_start;\n \t\treq->extended_args.start.ddr_scratch_range_end =\n@@ -392,24 +395,20 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode\n }\n \n static __rte_always_inline void\n-cn10k_ml_prep_fp_job_descriptor(struct rte_ml_dev *dev, struct cn10k_ml_req *req,\n+cn10k_ml_prep_fp_job_descriptor(struct 
cn10k_ml_dev *cn10k_mldev, struct cn10k_ml_req *req,\n \t\t\t\tstruct rte_ml_op *op)\n {\n-\tstruct cn10k_ml_dev *mldev;\n-\n-\tmldev = dev->data->dev_private;\n-\n \treq->jd.hdr.jce.w0.u64 = 0;\n \treq->jd.hdr.jce.w1.u64 = req->compl_W1;\n \treq->jd.hdr.model_id = op->model_id;\n \treq->jd.hdr.job_type = ML_CN10K_JOB_TYPE_MODEL_RUN;\n \treq->jd.hdr.fp_flags = ML_FLAGS_POLL_COMPL;\n \treq->jd.hdr.sp_flags = 0x0;\n-\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &req->result);\n+\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);\n \treq->jd.model_run.input_ddr_addr =\n-\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->input[0]->addr));\n+\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, op->input[0]->addr));\n \treq->jd.model_run.output_ddr_addr =\n-\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->output[0]->addr));\n+\t\tPLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, op->output[0]->addr));\n \treq->jd.model_run.num_batches = op->nb_batches;\n }\n \n@@ -436,66 +435,69 @@ static const struct xstat_info model_stats[] = {\n static int\n cn10k_ml_xstats_init(struct rte_ml_dev *dev)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint16_t nb_stats;\n \tuint16_t stat_id;\n \tuint16_t model;\n \tuint16_t i;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \t/* Allocate memory for xstats entries. Don't allocate during reconfigure */\n \tnb_stats = RTE_DIM(device_stats) + ML_CN10K_MAX_MODELS * RTE_DIM(model_stats);\n-\tif (mldev->xstats.entries == NULL)\n-\t\tmldev->xstats.entries = rte_zmalloc(\"cn10k_ml_xstats\",\n-\t\t\t\t\t\t    sizeof(struct cn10k_ml_xstats_entry) * nb_stats,\n-\t\t\t\t\t\t    PLT_CACHE_LINE_SIZE);\n+\tif (cn10k_mldev->xstats.entries == NULL)\n+\t\tcn10k_mldev->xstats.entries = rte_zmalloc(\n+\t\t\t\"cn10k_ml_xstats\", sizeof(struct cn10k_ml_xstats_entry) * nb_stats,\n+\t\t\tPLT_CACHE_LINE_SIZE);\n \n-\tif (mldev->xstats.entries == NULL)\n+\tif (cn10k_mldev->xstats.entries == NULL)\n \t\treturn -ENOMEM;\n \n \t/* Initialize device xstats */\n \tstat_id = 0;\n \tfor (i = 0; i < RTE_DIM(device_stats); i++) {\n-\t\tmldev->xstats.entries[stat_id].map.id = stat_id;\n-\t\tsnprintf(mldev->xstats.entries[stat_id].map.name,\n-\t\t\t sizeof(mldev->xstats.entries[stat_id].map.name), \"%s\",\n+\t\tcn10k_mldev->xstats.entries[stat_id].map.id = stat_id;\n+\t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n+\t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), \"%s\",\n \t\t\t device_stats[i].name);\n \n-\t\tmldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;\n-\t\tmldev->xstats.entries[stat_id].type = device_stats[i].type;\n-\t\tmldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_DEVICE;\n-\t\tmldev->xstats.entries[stat_id].obj_idx = 0;\n-\t\tmldev->xstats.entries[stat_id].reset_allowed = device_stats[i].reset_allowed;\n+\t\tcn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;\n+\t\tcn10k_mldev->xstats.entries[stat_id].type = device_stats[i].type;\n+\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_DEVICE;\n+\t\tcn10k_mldev->xstats.entries[stat_id].obj_idx = 0;\n+\t\tcn10k_mldev->xstats.entries[stat_id].reset_allowed = device_stats[i].reset_allowed;\n \t\tstat_id++;\n \t}\n-\tmldev->xstats.count_mode_device = stat_id;\n+\tcn10k_mldev->xstats.count_mode_device = stat_id;\n \n \t/* Initialize model xstats */\n \tfor (model = 0; 
model < ML_CN10K_MAX_MODELS; model++) {\n-\t\tmldev->xstats.offset_for_model[model] = stat_id;\n+\t\tcn10k_mldev->xstats.offset_for_model[model] = stat_id;\n \n \t\tfor (i = 0; i < RTE_DIM(model_stats); i++) {\n-\t\t\tmldev->xstats.entries[stat_id].map.id = stat_id;\n-\t\t\tmldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;\n-\t\t\tmldev->xstats.entries[stat_id].type = model_stats[i].type;\n-\t\t\tmldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_MODEL;\n-\t\t\tmldev->xstats.entries[stat_id].obj_idx = model;\n-\t\t\tmldev->xstats.entries[stat_id].reset_allowed = model_stats[i].reset_allowed;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].map.id = stat_id;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].type = model_stats[i].type;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_MODEL;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].obj_idx = model;\n+\t\t\tcn10k_mldev->xstats.entries[stat_id].reset_allowed =\n+\t\t\t\tmodel_stats[i].reset_allowed;\n \n \t\t\t/* Name of xstat is updated during model load */\n-\t\t\tsnprintf(mldev->xstats.entries[stat_id].map.name,\n-\t\t\t\t sizeof(mldev->xstats.entries[stat_id].map.name), \"Model-%u-%s\",\n-\t\t\t\t model, model_stats[i].name);\n+\t\t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n+\t\t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name),\n+\t\t\t\t \"Model-%u-%s\", model, model_stats[i].name);\n \n \t\t\tstat_id++;\n \t\t}\n \n-\t\tmldev->xstats.count_per_model[model] = RTE_DIM(model_stats);\n+\t\tcn10k_mldev->xstats.count_per_model[model] = RTE_DIM(model_stats);\n \t}\n \n-\tmldev->xstats.count_mode_model = stat_id - mldev->xstats.count_mode_device;\n-\tmldev->xstats.count = stat_id;\n+\tcn10k_mldev->xstats.count_mode_model = stat_id - cn10k_mldev->xstats.count_mode_device;\n+\tcn10k_mldev->xstats.count = stat_id;\n \n \treturn 0;\n }\n@@ -503,28 +505,32 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)\n static void\n cn10k_ml_xstats_uninit(struct rte_ml_dev *dev)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n-\trte_free(mldev->xstats.entries);\n-\tmldev->xstats.entries = NULL;\n+\trte_free(cn10k_mldev->xstats.entries);\n+\tcn10k_mldev->xstats.entries = NULL;\n \n-\tmldev->xstats.count = 0;\n+\tcn10k_mldev->xstats.count = 0;\n }\n \n static void\n cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tuint16_t rclk_freq;\n \tuint16_t sclk_freq;\n \tuint16_t stat_id;\n \tchar suffix[8];\n \tuint16_t i;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tmodel = dev->data->models[model_id];\n \tstat_id = RTE_DIM(device_stats) + model_id * RTE_DIM(model_stats);\n \n@@ -536,8 +542,8 @@ cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)\n \n \t/* Update xstat name based on model name and sclk availability */\n \tfor (i = 0; i < RTE_DIM(model_stats); i++) {\n-\t\tsnprintf(mldev->xstats.entries[stat_id].map.name,\n-\t\t\t sizeof(mldev->xstats.entries[stat_id].map.name), \"%s-%s-%s\",\n+\t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n+\t\t\t 
sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), \"%s-%s-%s\",\n \t\t\t model->metadata.model.name, model_stats[i].name, suffix);\n \t\tstat_id++;\n \t}\n@@ -547,19 +553,19 @@ static uint64_t\n cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,\n \t\t       enum cn10k_ml_xstats_type type)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n \n \tswitch (type) {\n \tcase nb_models_loaded:\n-\t\treturn mldev->nb_models_loaded;\n+\t\treturn cnxk_mldev->nb_models_loaded;\n \tcase nb_models_unloaded:\n-\t\treturn mldev->nb_models_unloaded;\n+\t\treturn cnxk_mldev->nb_models_unloaded;\n \tcase nb_models_started:\n-\t\treturn mldev->nb_models_started;\n+\t\treturn cnxk_mldev->nb_models_started;\n \tcase nb_models_stopped:\n-\t\treturn mldev->nb_models_stopped;\n+\t\treturn cnxk_mldev->nb_models_stopped;\n \tdefault:\n \t\treturn -1;\n \t}\n@@ -651,15 +657,17 @@ static int\n cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[], uint16_t nb_ids)\n {\n \tstruct cn10k_ml_xstats_entry *xs;\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint16_t nb_stats;\n \tuint16_t stat_id;\n \tuint32_t i;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \tif (stat_ids == NULL)\n-\t\tnb_stats = mldev->xstats.count_mode_device;\n+\t\tnb_stats = cn10k_mldev->xstats.count_mode_device;\n \telse\n \t\tnb_stats = nb_ids;\n \n@@ -669,10 +677,10 @@ cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[],\n \t\telse\n \t\t\tstat_id = stat_ids[i];\n \n-\t\tif (stat_id >= mldev->xstats.count_mode_device)\n+\t\tif (stat_id >= cn10k_mldev->xstats.count_mode_device)\n \t\t\treturn -EINVAL;\n \n-\t\txs = &mldev->xstats.entries[stat_id];\n+\t\txs = &cn10k_mldev->xstats.entries[stat_id];\n \t\tif (!xs->reset_allowed)\n \t\t\tcontinue;\n \n@@ -740,15 +748,17 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint\n \t\t\t    uint16_t nb_ids)\n {\n \tstruct cn10k_ml_xstats_entry *xs;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tint32_t lcl_model_id = 0;\n \tuint16_t start_id;\n \tuint16_t end_id;\n \tint32_t i;\n \tint32_t j;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tfor (i = 0; i < ML_CN10K_MAX_MODELS; i++) {\n \t\tif (model_id == -1) {\n \t\t\tmodel = dev->data->models[i];\n@@ -765,12 +775,13 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint\n \t\t\t}\n \t\t}\n \n-\t\tstart_id = mldev->xstats.offset_for_model[i];\n-\t\tend_id = mldev->xstats.offset_for_model[i] + mldev->xstats.count_per_model[i] - 1;\n+\t\tstart_id = cn10k_mldev->xstats.offset_for_model[i];\n+\t\tend_id = cn10k_mldev->xstats.offset_for_model[i] +\n+\t\t\t cn10k_mldev->xstats.count_per_model[i] - 1;\n \n \t\tif (stat_ids == NULL) {\n \t\t\tfor (j = start_id; j <= end_id; j++) {\n-\t\t\t\txs = &mldev->xstats.entries[j];\n+\t\t\t\txs = &cn10k_mldev->xstats.entries[j];\n \t\t\t\tcn10k_ml_reset_model_stat(dev, i, xs->type);\n \t\t\t}\n \t\t} else {\n@@ -780,7 +791,7 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint\n \t\t\t\t\t\tstat_ids[j], lcl_model_id);\n \t\t\t\t\treturn 
-EINVAL;\n \t\t\t\t}\n-\t\t\t\txs = &mldev->xstats.entries[stat_ids[j]];\n+\t\t\t\txs = &cn10k_mldev->xstats.entries[stat_ids[j]];\n \t\t\t\tcn10k_ml_reset_model_stat(dev, i, xs->type);\n \t\t\t}\n \t\t}\n@@ -854,17 +865,19 @@ cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id)\n static int\n cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \n \tif (dev_info == NULL)\n \t\treturn -EINVAL;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \tmemset(dev_info, 0, sizeof(struct rte_ml_dev_info));\n \tdev_info->driver_name = dev->device->driver->name;\n \tdev_info->max_models = ML_CN10K_MAX_MODELS;\n-\tif (mldev->hw_queue_lock)\n+\tif (cn10k_mldev->hw_queue_lock)\n \t\tdev_info->max_queue_pairs = ML_CN10K_MAX_QP_PER_DEVICE_SL;\n \telse\n \t\tdev_info->max_queue_pairs = ML_CN10K_MAX_QP_PER_DEVICE_LF;\n@@ -881,8 +894,9 @@ static int\n cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *conf)\n {\n \tstruct rte_ml_dev_info dev_info;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \tstruct cn10k_ml_qp *qp;\n \tuint16_t model_id;\n@@ -895,7 +909,8 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c\n \t\treturn -EINVAL;\n \n \t/* Get CN10K device handle */\n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \tcn10k_ml_dev_info_get(dev, &dev_info);\n \tif (conf->nb_models > dev_info.max_models) {\n@@ -908,21 +923,21 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c\n \t\treturn -EINVAL;\n \t}\n \n-\tif (mldev->state == ML_CN10K_DEV_STATE_PROBED) {\n+\tif (cnxk_mldev->state == ML_CNXK_DEV_STATE_PROBED) {\n \t\tplt_ml_dbg(\"Configuring ML device, nb_queue_pairs = %u, nb_models = %u\",\n \t\t\t   conf->nb_queue_pairs, conf->nb_models);\n \n \t\t/* Load firmware */\n-\t\tret = cn10k_ml_fw_load(mldev);\n+\t\tret = cn10k_ml_fw_load(cnxk_mldev);\n \t\tif (ret != 0)\n \t\t\treturn ret;\n-\t} else if (mldev->state == ML_CN10K_DEV_STATE_CONFIGURED) {\n+\t} else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_CONFIGURED) {\n \t\tplt_ml_dbg(\"Re-configuring ML device, nb_queue_pairs = %u, nb_models = %u\",\n \t\t\t   conf->nb_queue_pairs, conf->nb_models);\n-\t} else if (mldev->state == ML_CN10K_DEV_STATE_STARTED) {\n+\t} else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_STARTED) {\n \t\tplt_err(\"Device can't be reconfigured in started state\\n\");\n \t\treturn -ENOTSUP;\n-\t} else if (mldev->state == ML_CN10K_DEV_STATE_CLOSED) {\n+\t} else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_CLOSED) {\n \t\tplt_err(\"Device can't be reconfigured after close\\n\");\n \t\treturn -ENOTSUP;\n \t}\n@@ -1013,10 +1028,10 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c\n \t}\n \tdev->data->nb_models = conf->nb_models;\n \n-\tocm = &mldev->ocm;\n+\tocm = &cn10k_mldev->ocm;\n \tocm->num_tiles = ML_CN10K_OCM_NUMTILES;\n \tocm->size_per_tile = ML_CN10K_OCM_TILESIZE;\n-\tocm->page_size = mldev->ocm_page_size;\n+\tocm->page_size = cn10k_mldev->ocm_page_size;\n \tocm->num_pages = ocm->size_per_tile / ocm->page_size;\n \tocm->mask_words = ocm->num_pages / (8 * sizeof(uint8_t));\n \n@@ 
-1044,25 +1059,25 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c\n \t}\n \n \t/* Set JCMDQ enqueue function */\n-\tif (mldev->hw_queue_lock == 1)\n-\t\tmldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_sl;\n+\tif (cn10k_mldev->hw_queue_lock == 1)\n+\t\tcn10k_mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_sl;\n \telse\n-\t\tmldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_lf;\n+\t\tcn10k_mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_lf;\n \n \t/* Set polling function pointers */\n-\tmldev->set_poll_addr = cn10k_ml_set_poll_addr;\n-\tmldev->set_poll_ptr = cn10k_ml_set_poll_ptr;\n-\tmldev->get_poll_ptr = cn10k_ml_get_poll_ptr;\n+\tcn10k_mldev->set_poll_addr = cn10k_ml_set_poll_addr;\n+\tcn10k_mldev->set_poll_ptr = cn10k_ml_set_poll_ptr;\n+\tcn10k_mldev->get_poll_ptr = cn10k_ml_get_poll_ptr;\n \n \tdev->enqueue_burst = cn10k_ml_enqueue_burst;\n \tdev->dequeue_burst = cn10k_ml_dequeue_burst;\n \tdev->op_error_get = cn10k_ml_op_error_get;\n \n-\tmldev->nb_models_loaded = 0;\n-\tmldev->nb_models_started = 0;\n-\tmldev->nb_models_stopped = 0;\n-\tmldev->nb_models_unloaded = 0;\n-\tmldev->state = ML_CN10K_DEV_STATE_CONFIGURED;\n+\tcnxk_mldev->nb_models_loaded = 0;\n+\tcnxk_mldev->nb_models_started = 0;\n+\tcnxk_mldev->nb_models_stopped = 0;\n+\tcnxk_mldev->nb_models_unloaded = 0;\n+\tcnxk_mldev->state = ML_CNXK_DEV_STATE_CONFIGURED;\n \n \treturn 0;\n \n@@ -1077,8 +1092,9 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c\n static int\n cn10k_ml_dev_close(struct rte_ml_dev *dev)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_qp *qp;\n \tuint16_t model_id;\n \tuint16_t qp_id;\n@@ -1086,10 +1102,11 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)\n \tif (dev == NULL)\n \t\treturn -EINVAL;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \t/* Release ocm_mask memory */\n-\trte_free(mldev->ocm.ocm_mask);\n+\trte_free(cn10k_mldev->ocm.ocm_mask);\n \n \t/* Stop and unload all models */\n \tfor (model_id = 0; model_id < dev->data->nb_models; model_id++) {\n@@ -1125,21 +1142,21 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)\n \tcn10k_ml_xstats_uninit(dev);\n \n \t/* Unload firmware */\n-\tcn10k_ml_fw_unload(mldev);\n+\tcn10k_ml_fw_unload(cnxk_mldev);\n \n \t/* Clear scratch registers */\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n \n \t/* Reset ML_MLR_BASE */\n-\troc_ml_reg_write64(&mldev->roc, 0, ML_MLR_BASE);\n-\tplt_ml_dbg(\"ML_MLR_BASE = 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, 0, 
ML_MLR_BASE);\n+\tplt_ml_dbg(\"ML_MLR_BASE = 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));\n \n-\tmldev->state = ML_CN10K_DEV_STATE_CLOSED;\n+\tcnxk_mldev->state = ML_CNXK_DEV_STATE_CLOSED;\n \n \t/* Remove PCI device */\n \treturn rte_dev_remove(dev->device);\n@@ -1148,17 +1165,19 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)\n static int\n cn10k_ml_dev_start(struct rte_ml_dev *dev)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint64_t reg_val64;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 |= ROC_ML_CFG_ENA;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n-\tmldev->state = ML_CN10K_DEV_STATE_STARTED;\n+\tcnxk_mldev->state = ML_CNXK_DEV_STATE_STARTED;\n \n \treturn 0;\n }\n@@ -1166,17 +1185,19 @@ cn10k_ml_dev_start(struct rte_ml_dev *dev)\n static int\n cn10k_ml_dev_stop(struct rte_ml_dev *dev)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint64_t reg_val64;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n-\treg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);\n+\treg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);\n \treg_val64 &= ~ROC_ML_CFG_ENA;\n-\troc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);\n-\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&mldev->roc, ML_CFG));\n+\troc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);\n+\tplt_ml_dbg(\"ML_CFG => 0x%016lx\", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));\n \n-\tmldev->state = ML_CN10K_DEV_STATE_CONFIGURED;\n+\tcnxk_mldev->state = ML_CNXK_DEV_STATE_CONFIGURED;\n \n \treturn 0;\n }\n@@ -1259,22 +1280,24 @@ cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mod\n \t\t\t      int32_t model_id, struct rte_ml_dev_xstats_map *xstats_map,\n \t\t\t      uint32_t size)\n {\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint32_t xstats_mode_count;\n \tuint32_t idx = 0;\n \tuint32_t i;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \txstats_mode_count = 0;\n \tswitch (mode) {\n \tcase RTE_ML_DEV_XSTATS_DEVICE:\n-\t\txstats_mode_count = mldev->xstats.count_mode_device;\n+\t\txstats_mode_count = cn10k_mldev->xstats.count_mode_device;\n \t\tbreak;\n \tcase RTE_ML_DEV_XSTATS_MODEL:\n \t\tif (model_id >= ML_CN10K_MAX_MODELS)\n \t\t\tbreak;\n-\t\txstats_mode_count = mldev->xstats.count_per_model[model_id];\n+\t\txstats_mode_count = cn10k_mldev->xstats.count_per_model[model_id];\n \t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n@@ -1283,16 +1306,17 @@ cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mod\n \tif (xstats_mode_count > size || xstats_map == NULL)\n \t\treturn xstats_mode_count;\n \n-\tfor (i = 0; i < mldev->xstats.count && idx < size; i++) {\n-\t\tif (mldev->xstats.entries[i].mode != mode)\n+\tfor (i = 0; i < cn10k_mldev->xstats.count && idx < 
size; i++) {\n+\t\tif (cn10k_mldev->xstats.entries[i].mode != mode)\n \t\t\tcontinue;\n \n \t\tif (mode != RTE_ML_DEV_XSTATS_DEVICE &&\n-\t\t    model_id != mldev->xstats.entries[i].obj_idx)\n+\t\t    model_id != cn10k_mldev->xstats.entries[i].obj_idx)\n \t\t\tcontinue;\n \n-\t\tstrncpy(xstats_map[idx].name, mldev->xstats.entries[i].map.name, RTE_ML_STR_MAX);\n-\t\txstats_map[idx].id = mldev->xstats.entries[i].map.id;\n+\t\tstrncpy(xstats_map[idx].name, cn10k_mldev->xstats.entries[i].map.name,\n+\t\t\tRTE_ML_STR_MAX);\n+\t\txstats_map[idx].id = cn10k_mldev->xstats.entries[i].map.id;\n \t\tidx++;\n \t}\n \n@@ -1304,13 +1328,15 @@ cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16\n \t\t\t\tuint64_t *value)\n {\n \tstruct cn10k_ml_xstats_entry *xs;\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tcn10k_ml_xstats_fn fn;\n \tuint32_t i;\n \n-\tmldev = dev->data->dev_private;\n-\tfor (i = 0; i < mldev->xstats.count; i++) {\n-\t\txs = &mldev->xstats.entries[i];\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tfor (i = 0; i < cn10k_mldev->xstats.count; i++) {\n+\t\txs = &cn10k_mldev->xstats.entries[i];\n \t\tif (strncmp(xs->map.name, name, RTE_ML_STR_MAX) == 0) {\n \t\t\tif (stat_id != NULL)\n \t\t\t\t*stat_id = xs->map.id;\n@@ -1344,24 +1370,26 @@ cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode\n \t\t\tconst uint16_t stat_ids[], uint64_t values[], uint16_t nb_ids)\n {\n \tstruct cn10k_ml_xstats_entry *xs;\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tuint32_t xstats_mode_count;\n \tcn10k_ml_xstats_fn fn;\n \tuint64_t val;\n \tuint32_t idx;\n \tuint32_t i;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \txstats_mode_count = 0;\n \n \tswitch (mode) {\n \tcase RTE_ML_DEV_XSTATS_DEVICE:\n-\t\txstats_mode_count = mldev->xstats.count_mode_device;\n+\t\txstats_mode_count = cn10k_mldev->xstats.count_mode_device;\n \t\tbreak;\n \tcase RTE_ML_DEV_XSTATS_MODEL:\n \t\tif (model_id >= ML_CN10K_MAX_MODELS)\n \t\t\treturn -EINVAL;\n-\t\txstats_mode_count = mldev->xstats.count_per_model[model_id];\n+\t\txstats_mode_count = cn10k_mldev->xstats.count_per_model[model_id];\n \t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n@@ -1369,8 +1397,8 @@ cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode\n \n \tidx = 0;\n \tfor (i = 0; i < nb_ids && idx < xstats_mode_count; i++) {\n-\t\txs = &mldev->xstats.entries[stat_ids[i]];\n-\t\tif (stat_ids[i] > mldev->xstats.count || xs->mode != mode)\n+\t\txs = &cn10k_mldev->xstats.entries[stat_ids[i]];\n+\t\tif (stat_ids[i] > cn10k_mldev->xstats.count || xs->mode != mode)\n \t\t\tcontinue;\n \n \t\tif (mode == RTE_ML_DEV_XSTATS_MODEL && model_id != xs->obj_idx) {\n@@ -1418,8 +1446,9 @@ cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mo\n static int\n cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_fw *fw;\n \n \tuint32_t head_loc;\n@@ -1432,8 +1461,9 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)\n \tif (roc_env_is_asim())\n \t\treturn 0;\n \n-\tmldev = dev->data->dev_private;\n-\tfw = &mldev->fw;\n+\tcnxk_mldev = 
dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tfw = &cn10k_mldev->fw;\n \n \t/* Dump model info */\n \tfor (model_id = 0; model_id < dev->data->nb_models; model_id++) {\n@@ -1451,15 +1481,19 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)\n \tfor (core_id = 0; core_id <= 1; core_id++) {\n \t\tbufsize = fw->req->jd.fw_load.debug.debug_buffer_size;\n \t\tif (core_id == 0) {\n-\t\t\thead_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n-\t\t\ttail_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n+\t\t\thead_loc =\n+\t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C0);\n+\t\t\ttail_loc =\n+\t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C0);\n \t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core0_debug_ptr);\n-\t\t\thead_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);\n+\t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);\n \t\t} else {\n-\t\t\thead_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n-\t\t\ttail_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n+\t\t\thead_loc =\n+\t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C1);\n+\t\t\ttail_loc =\n+\t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C1);\n \t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core1_debug_ptr);\n-\t\t\thead_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);\n+\t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);\n \t\t}\n \t\tif (head_loc < tail_loc) {\n \t\t\tfprintf(fp, \"%.*s\\n\", tail_loc - head_loc, &head_ptr[head_loc]);\n@@ -1473,18 +1507,18 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)\n \tfor (core_id = 0; core_id <= 1; core_id++) {\n \t\tbufsize = fw->req->jd.fw_load.debug.exception_state_size;\n \t\tif ((core_id == 0) &&\n-\t\t    (roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) != 0)) {\n+\t\t    (roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) != 0)) {\n \t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core0_exception_buffer);\n \t\t\tfprintf(fp, \"ML_SCRATCH_EXCEPTION_SP_C0 = 0x%016lx\",\n-\t\t\t\troc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0));\n-\t\t\thead_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);\n+\t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0));\n+\t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);\n \t\t\tfprintf(fp, \"%.*s\", bufsize, head_ptr);\n-\t\t} else if ((core_id == 1) &&\n-\t\t\t   (roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1) != 0)) {\n+\t\t} else if ((core_id == 1) && (roc_ml_reg_read64(&cn10k_mldev->roc,\n+\t\t\t\t\t\t\t\tML_SCRATCH_EXCEPTION_SP_C1) != 0)) {\n \t\t\thead_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core1_exception_buffer);\n \t\t\tfprintf(fp, \"ML_SCRATCH_EXCEPTION_SP_C1 = 0x%016lx\",\n-\t\t\t\troc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1));\n-\t\t\thead_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);\n+\t\t\t\troc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1));\n+\t\t\thead_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);\n \t\t\tfprintf(fp, \"%.*s\", bufsize, head_ptr);\n \t\t}\n \t}\n@@ -1495,14 +1529,16 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)\n static int\n cn10k_ml_dev_selftest(struct rte_ml_dev *dev)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tconst struct plt_memzone *mz;\n-\tstruct cn10k_ml_dev *mldev;\n 
\tstruct cn10k_ml_req *req;\n \tuint64_t timeout_cycle;\n \tbool timeout;\n \tint ret;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tmz = plt_memzone_reserve_aligned(\"dev_selftest\", sizeof(struct cn10k_ml_req), 0,\n \t\t\t\t\t ML_CN10K_ALIGN_SIZE);\n \tif (mz == NULL) {\n@@ -1515,20 +1551,20 @@ cn10k_ml_dev_selftest(struct rte_ml_dev *dev)\n \tmemset(&req->jd, 0, sizeof(struct cn10k_ml_jd));\n \treq->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&req->status);\n \treq->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST;\n-\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &req->result);\n-\treq->jd.fw_load.flags = cn10k_ml_fw_flags_get(&mldev->fw);\n-\tplt_write64(ML_CN10K_POLL_JOB_START, &req->status);\n+\treq->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);\n+\treq->jd.fw_load.flags = cn10k_ml_fw_flags_get(&cn10k_mldev->fw);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &req->status);\n \tplt_wmb();\n \n \t/* Enqueue firmware selftest request through scratch registers */\n \ttimeout = true;\n-\ttimeout_cycle = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();\n-\troc_ml_scratch_enqueue(&mldev->roc, &req->jd);\n+\ttimeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n+\troc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);\n \n \tplt_rmb();\n \tdo {\n-\t\tif (roc_ml_scratch_is_done_bit_set(&mldev->roc) &&\n-\t\t    (plt_read64(&req->status) == ML_CN10K_POLL_JOB_FINISH)) {\n+\t\tif (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&\n+\t\t    (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH)) {\n \t\t\ttimeout = false;\n \t\t\tbreak;\n \t\t}\n@@ -1552,8 +1588,8 @@ int\n cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params, uint16_t *model_id)\n {\n \tstruct cn10k_ml_model_metadata *metadata;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \n \tchar str[RTE_MEMZONE_NAMESIZE];\n \tconst struct plt_memzone *mz;\n@@ -1574,7 +1610,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \tif (ret != 0)\n \t\treturn ret;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n \n \t/* Find model ID */\n \tfound = false;\n@@ -1591,7 +1627,8 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \t}\n \n \t/* Get WB and scratch pages, check if model can be loaded. 
*/\n-\tret = cn10k_ml_model_ocm_pages_count(mldev, idx, params->addr, &wb_pages, &scratch_pages);\n+\tret = cn10k_ml_model_ocm_pages_count(&cnxk_mldev->cn10k_mldev, idx, params->addr, &wb_pages,\n+\t\t\t\t\t     &scratch_pages);\n \tif (ret < 0)\n \t\treturn ret;\n \n@@ -1623,7 +1660,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \t}\n \n \tmodel = mz->addr;\n-\tmodel->mldev = mldev;\n+\tmodel->mldev = cnxk_mldev;\n \tmodel->model_id = idx;\n \n \trte_memcpy(&model->metadata, params->addr, sizeof(struct cn10k_ml_model_metadata));\n@@ -1680,7 +1717,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,\n \tplt_spinlock_init(&model->lock);\n \tmodel->state = ML_CN10K_MODEL_STATE_LOADED;\n \tdev->data->models[idx] = model;\n-\tmldev->nb_models_loaded++;\n+\tcnxk_mldev->nb_models_loaded++;\n \n \t/* Update xstats names */\n \tcn10k_ml_xstats_model_name_update(dev, idx);\n@@ -1695,9 +1732,9 @@ cn10k_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id)\n {\n \tchar str[RTE_MEMZONE_NAMESIZE];\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n \tmodel = dev->data->models[model_id];\n \n \tif (model == NULL) {\n@@ -1711,7 +1748,7 @@ cn10k_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id)\n \t}\n \n \tdev->data->models[model_id] = NULL;\n-\tmldev->nb_models_unloaded++;\n+\tcnxk_mldev->nb_models_unloaded++;\n \n \tsnprintf(str, RTE_MEMZONE_NAMESIZE, \"%s_%u\", CN10K_ML_MODEL_MEMZONE_NAME, model_id);\n \treturn plt_memzone_free(plt_memzone_lookup(str));\n@@ -1720,8 +1757,9 @@ cn10k_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id)\n int\n cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \tstruct cn10k_ml_req *req;\n \n@@ -1735,8 +1773,9 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \tbool locked;\n \tint ret = 0;\n \n-\tmldev = dev->data->dev_private;\n-\tocm = &mldev->ocm;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n \tmodel = dev->data->models[model_id];\n \n \tif (model == NULL) {\n@@ -1746,11 +1785,11 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \n \t/* Prepare JD */\n \treq = model->req;\n-\tcn10k_ml_prep_sp_job_descriptor(mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_START);\n+\tcn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_START);\n \treq->result.error_code.u64 = 0x0;\n \treq->result.user_ptr = NULL;\n \n-\tplt_write64(ML_CN10K_POLL_JOB_START, &req->status);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &req->status);\n \tplt_wmb();\n \n \tnum_tiles = model->metadata.model.tile_end - model->metadata.model.tile_start + 1;\n@@ -1815,26 +1854,26 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \tjob_dequeued = false;\n \tdo {\n \t\tif (!job_enqueued) {\n-\t\t\treq->timeout = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();\n-\t\t\tjob_enqueued = roc_ml_scratch_enqueue(&mldev->roc, &req->jd);\n+\t\t\treq->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n+\t\t\tjob_enqueued = roc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);\n \t\t}\n \n \t\tif (job_enqueued && !job_dequeued)\n-\t\t\tjob_dequeued = 
roc_ml_scratch_dequeue(&mldev->roc, &req->jd);\n+\t\t\tjob_dequeued = roc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->jd);\n \n \t\tif (job_dequeued)\n \t\t\tbreak;\n \t} while (plt_tsc_cycles() < req->timeout);\n \n \tif (job_dequeued) {\n-\t\tif (plt_read64(&req->status) == ML_CN10K_POLL_JOB_FINISH) {\n+\t\tif (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH) {\n \t\t\tif (req->result.error_code.u64 == 0)\n \t\t\t\tret = 0;\n \t\t\telse\n \t\t\t\tret = -1;\n \t\t}\n \t} else { /* Reset scratch registers */\n-\t\troc_ml_scratch_queue_reset(&mldev->roc);\n+\t\troc_ml_scratch_queue_reset(&cn10k_mldev->roc);\n \t\tret = -ETIME;\n \t}\n \n@@ -1843,7 +1882,7 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \t\tif (plt_spinlock_trylock(&model->lock) != 0) {\n \t\t\tif (ret == 0) {\n \t\t\t\tmodel->state = ML_CN10K_MODEL_STATE_STARTED;\n-\t\t\t\tmldev->nb_models_started++;\n+\t\t\t\tcnxk_mldev->nb_models_started++;\n \t\t\t} else {\n \t\t\t\tmodel->state = ML_CN10K_MODEL_STATE_UNKNOWN;\n \t\t\t}\n@@ -1867,7 +1906,7 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n \tif (ret < 0) { /* Call unload to update model and FW state, ignore error */\n \t\trte_ml_model_stop(dev->data->dev_id, model_id);\n \t} else {\n-\t\tif (mldev->cache_model_data && roc_model_is_cn10ka())\n+\t\tif (cn10k_mldev->cache_model_data && roc_model_is_cn10ka())\n \t\t\tret = cn10k_ml_cache_model_data(dev, model_id);\n \t}\n \n@@ -1877,8 +1916,9 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)\n int\n cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \tstruct cn10k_ml_req *req;\n \n@@ -1887,8 +1927,9 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \tbool locked;\n \tint ret = 0;\n \n-\tmldev = dev->data->dev_private;\n-\tocm = &mldev->ocm;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n+\tocm = &cn10k_mldev->ocm;\n \tmodel = dev->data->models[model_id];\n \n \tif (model == NULL) {\n@@ -1898,11 +1939,11 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \n \t/* Prepare JD */\n \treq = model->req;\n-\tcn10k_ml_prep_sp_job_descriptor(mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_STOP);\n+\tcn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_STOP);\n \treq->result.error_code.u64 = 0x0;\n \treq->result.user_ptr = NULL;\n \n-\tplt_write64(ML_CN10K_POLL_JOB_START, &req->status);\n+\tplt_write64(ML_CNXK_POLL_JOB_START, &req->status);\n \tplt_wmb();\n \n \tlocked = false;\n@@ -1941,33 +1982,33 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)\n \tjob_dequeued = false;\n \tdo {\n \t\tif (!job_enqueued) {\n-\t\t\treq->timeout = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();\n-\t\t\tjob_enqueued = roc_ml_scratch_enqueue(&mldev->roc, &req->jd);\n+\t\t\treq->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n+\t\t\tjob_enqueued = roc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);\n \t\t}\n \n \t\tif (job_enqueued && !job_dequeued)\n-\t\t\tjob_dequeued = roc_ml_scratch_dequeue(&mldev->roc, &req->jd);\n+\t\t\tjob_dequeued = roc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->jd);\n \n \t\tif (job_dequeued)\n \t\t\tbreak;\n \t} while (plt_tsc_cycles() < req->timeout);\n \n \tif (job_dequeued) {\n-\t\tif (plt_read64(&req->status) == ML_CN10K_POLL_JOB_FINISH) 
{\n+\t\tif (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH) {\n \t\t\tif (req->result.error_code.u64 == 0x0)\n \t\t\t\tret = 0;\n \t\t\telse\n \t\t\t\tret = -1;\n \t\t}\n \t} else {\n-\t\troc_ml_scratch_queue_reset(&mldev->roc);\n+\t\troc_ml_scratch_queue_reset(&cn10k_mldev->roc);\n \t\tret = -ETIME;\n \t}\n \n \tlocked = false;\n \twhile (!locked) {\n \t\tif (plt_spinlock_trylock(&model->lock) != 0) {\n-\t\t\tmldev->nb_models_stopped++;\n+\t\t\tcnxk_mldev->nb_models_stopped++;\n \t\t\tmodel->state = ML_CN10K_MODEL_STATE_LOADED;\n \t\t\tplt_spinlock_unlock(&model->lock);\n \t\t\tlocked = true;\n@@ -2211,8 +2252,9 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result\n \t\t       struct rte_ml_op *op)\n {\n \tstruct cn10k_ml_model_stats *stats;\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_qp *qp;\n \tuint64_t hw_latency;\n \tuint64_t fw_latency;\n@@ -2258,14 +2300,16 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result\n \n \t\t/* Handle driver error */\n \t\tif (result->error_code.s.etype == ML_ETYPE_DRIVER) {\n-\t\t\tmldev = dev->data->dev_private;\n+\t\t\tcnxk_mldev = dev->data->dev_private;\n+\t\t\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \n \t\t\t/* Check for exception */\n-\t\t\tif ((roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) != 0) ||\n-\t\t\t    (roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1) != 0))\n+\t\t\tif ((roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) !=\n+\t\t\t     0) ||\n+\t\t\t    (roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1) != 0))\n \t\t\t\tresult->error_code.s.stype = ML_DRIVER_ERR_EXCEPTION;\n-\t\t\telse if ((roc_ml_reg_read64(&mldev->roc, ML_CORE_INT_LO) != 0) ||\n-\t\t\t\t (roc_ml_reg_read64(&mldev->roc, ML_CORE_INT_HI) != 0))\n+\t\t\telse if ((roc_ml_reg_read64(&cn10k_mldev->roc, ML_CORE_INT_LO) != 0) ||\n+\t\t\t\t (roc_ml_reg_read64(&cn10k_mldev->roc, ML_CORE_INT_HI) != 0))\n \t\t\t\tresult->error_code.s.stype = ML_DRIVER_ERR_FW_ERROR;\n \t\t\telse\n \t\t\t\tresult->error_code.s.stype = ML_DRIVER_ERR_UNKNOWN;\n@@ -2282,8 +2326,9 @@ __rte_hot uint16_t\n cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,\n \t\t       uint16_t nb_ops)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_queue *queue;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_req *req;\n \tstruct cn10k_ml_qp *qp;\n \tstruct rte_ml_op *op;\n@@ -2292,7 +2337,8 @@ cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \tuint64_t head;\n \tbool enqueued;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tqp = dev->data->queue_pairs[qp_id];\n \tqueue = &qp->queue;\n \n@@ -2307,15 +2353,15 @@ cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \top = ops[count];\n \treq = &queue->reqs[head];\n \n-\tmldev->set_poll_addr(req);\n-\tcn10k_ml_prep_fp_job_descriptor(dev, req, op);\n+\tcn10k_mldev->set_poll_addr(req);\n+\tcn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);\n \n \tmemset(&req->result, 0, sizeof(struct cn10k_ml_result));\n \treq->result.error_code.s.etype = ML_ETYPE_UNKNOWN;\n \treq->result.user_ptr = op->user_ptr;\n \n-\tmldev->set_poll_ptr(req);\n-\tenqueued = mldev->ml_jcmdq_enqueue(&mldev->roc, 
&req->jcmd);\n+\tcn10k_mldev->set_poll_ptr(req);\n+\tenqueued = cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->jcmd);\n \tif (unlikely(!enqueued))\n \t\tgoto jcmdq_full;\n \n@@ -2339,8 +2385,9 @@ __rte_hot uint16_t\n cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,\n \t\t       uint16_t nb_ops)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_queue *queue;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_req *req;\n \tstruct cn10k_ml_qp *qp;\n \n@@ -2348,7 +2395,8 @@ cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \tuint16_t count;\n \tuint64_t tail;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tqp = dev->data->queue_pairs[qp_id];\n \tqueue = &qp->queue;\n \n@@ -2361,8 +2409,8 @@ cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op\n \n dequeue_req:\n \treq = &queue->reqs[tail];\n-\tstatus = mldev->get_poll_ptr(req);\n-\tif (unlikely(status != ML_CN10K_POLL_JOB_FINISH)) {\n+\tstatus = cn10k_mldev->get_poll_ptr(req);\n+\tif (unlikely(status != ML_CNXK_POLL_JOB_FINISH)) {\n \t\tif (plt_tsc_cycles() < req->timeout)\n \t\t\tgoto empty_or_active;\n \t\telse /* Timeout, set indication of driver error */\n@@ -2420,30 +2468,32 @@ cn10k_ml_op_error_get(struct rte_ml_dev *dev, struct rte_ml_op *op, struct rte_m\n __rte_hot int\n cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)\n {\n+\tstruct cn10k_ml_dev *cn10k_mldev;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cn10k_ml_model *model;\n-\tstruct cn10k_ml_dev *mldev;\n \tstruct cn10k_ml_req *req;\n \tbool timeout;\n \tint ret = 0;\n \n-\tmldev = dev->data->dev_private;\n+\tcnxk_mldev = dev->data->dev_private;\n+\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n \tmodel = dev->data->models[op->model_id];\n \treq = model->req;\n \n \tcn10k_ml_set_poll_addr(req);\n-\tcn10k_ml_prep_fp_job_descriptor(dev, req, op);\n+\tcn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);\n \n \tmemset(&req->result, 0, sizeof(struct cn10k_ml_result));\n \treq->result.error_code.s.etype = ML_ETYPE_UNKNOWN;\n \treq->result.user_ptr = op->user_ptr;\n \n-\tmldev->set_poll_ptr(req);\n+\tcn10k_mldev->set_poll_ptr(req);\n \treq->jcmd.w1.s.jobptr = PLT_U64_CAST(&req->jd);\n \n \ttimeout = true;\n-\treq->timeout = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();\n+\treq->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();\n \tdo {\n-\t\tif (mldev->ml_jcmdq_enqueue(&mldev->roc, &req->jcmd)) {\n+\t\tif (cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->jcmd)) {\n \t\t\treq->op = op;\n \t\t\ttimeout = false;\n \t\t\tbreak;\n@@ -2457,7 +2507,7 @@ cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)\n \n \ttimeout = true;\n \tdo {\n-\t\tif (mldev->get_poll_ptr(req) == ML_CN10K_POLL_JOB_FINISH) {\n+\t\tif (cn10k_mldev->get_poll_ptr(req) == ML_CNXK_POLL_JOB_FINISH) {\n \t\t\ttimeout = false;\n \t\t\tbreak;\n \t\t}\ndiff --git a/drivers/ml/cnxk/cnxk_ml_dev.c b/drivers/ml/cnxk/cnxk_ml_dev.c\nnew file mode 100644\nindex 0000000000..2a5c17c973\n--- /dev/null\n+++ b/drivers/ml/cnxk/cnxk_ml_dev.c\n@@ -0,0 +1,11 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 Marvell.\n+ */\n+\n+#include <rte_mldev.h>\n+#include <rte_mldev_pmd.h>\n+\n+#include \"cnxk_ml_dev.h\"\n+\n+/* Dummy operations for ML device */\n+struct rte_ml_dev_ops ml_dev_dummy_ops = {0};\ndiff --git 
a/drivers/ml/cnxk/cnxk_ml_dev.h b/drivers/ml/cnxk/cnxk_ml_dev.h\nnew file mode 100644\nindex 0000000000..51315de622\n--- /dev/null\n+++ b/drivers/ml/cnxk/cnxk_ml_dev.h\n@@ -0,0 +1,58 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 Marvell.\n+ */\n+\n+#ifndef _CNXK_ML_DEV_H_\n+#define _CNXK_ML_DEV_H_\n+\n+#include <roc_api.h>\n+\n+#include \"cn10k_ml_dev.h\"\n+\n+/* ML command timeout in seconds */\n+#define ML_CNXK_CMD_TIMEOUT 5\n+\n+/* Poll mode job state */\n+#define ML_CNXK_POLL_JOB_START\t0\n+#define ML_CNXK_POLL_JOB_FINISH 1\n+\n+/* Device configuration state enum */\n+enum cnxk_ml_dev_state {\n+\t/* Probed and not configured */\n+\tML_CNXK_DEV_STATE_PROBED = 0,\n+\n+\t/* Configured */\n+\tML_CNXK_DEV_STATE_CONFIGURED,\n+\n+\t/* Started */\n+\tML_CNXK_DEV_STATE_STARTED,\n+\n+\t/* Closed */\n+\tML_CNXK_DEV_STATE_CLOSED\n+};\n+\n+/* Device private data */\n+struct cnxk_ml_dev {\n+\t/* RTE device */\n+\tstruct rte_ml_dev *mldev;\n+\n+\t/* Configuration state */\n+\tenum cnxk_ml_dev_state state;\n+\n+\t/* Number of models loaded */\n+\tuint16_t nb_models_loaded;\n+\n+\t/* Number of models unloaded */\n+\tuint16_t nb_models_unloaded;\n+\n+\t/* Number of models started */\n+\tuint16_t nb_models_started;\n+\n+\t/* Number of models stopped */\n+\tuint16_t nb_models_stopped;\n+\n+\t/* CN10K device structure */\n+\tstruct cn10k_ml_dev cn10k_mldev;\n+};\n+\n+#endif /* _CNXK_ML_DEV_H_ */\ndiff --git a/drivers/ml/cnxk/meson.build b/drivers/ml/cnxk/meson.build\nindex 94fa4283b1..03a2d4ecf2 100644\n--- a/drivers/ml/cnxk/meson.build\n+++ b/drivers/ml/cnxk/meson.build\n@@ -12,6 +12,7 @@ driver_sdk_headers = files(\n         'cn10k_ml_ops.h',\n         'cn10k_ml_model.h',\n         'cn10k_ml_ocm.h',\n+        'cnxk_ml_dev.h',\n )\n \n sources = files(\n@@ -19,6 +20,7 @@ sources = files(\n         'cn10k_ml_ops.c',\n         'cn10k_ml_model.c',\n         'cn10k_ml_ocm.c',\n+        'cnxk_ml_dev.c',\n )\n \n deps += ['mldev', 'common_cnxk', 'kvargs', 'hash']\n",
    "prefixes": [
        "v2",
        "03/34"
    ]
}