get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (full update).

GET /api/patches/132045/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132045,
    "url": "http://patches.dpdk.org/api/patches/132045/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230927183052.17347-16-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230927183052.17347-16-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230927183052.17347-16-syalavarthi@marvell.com",
    "date": "2023-09-27T18:30:28",
    "name": "[v3,15/35] ml/cnxk: update device and model xstats functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "6036ee5ccc92031d5b88f44fb7729c9fe06a4cb4",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230927183052.17347-16-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 29661,
            "url": "http://patches.dpdk.org/api/series/29661/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29661",
            "date": "2023-09-27T18:30:13",
            "name": "Implemenation of revised ml/cnxk driver",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/29661/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/132045/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/132045/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DCBFF4262B;\n\tWed, 27 Sep 2023 20:33:13 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 5F2E84161A;\n\tWed, 27 Sep 2023 20:31:26 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 7FF5940A87\n for <dev@dpdk.org>; Wed, 27 Sep 2023 20:31:10 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 38RCK78t015701 for <dev@dpdk.org>; Wed, 27 Sep 2023 11:31:09 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3tcma5sftc-5\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Wed, 27 Sep 2023 11:31:09 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Wed, 27 Sep 2023 11:31:07 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Wed, 27 Sep 2023 11:31:07 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 4837E3F7090;\n Wed, 27 Sep 2023 11:31:07 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=Xp17mEVZYM0J/bsZcCK8QmXbj9ZLvDxUguTR1mr1D2A=;\n b=Fh8w6mZHrUTTMZ2ioq/BfvCW4aMo5wayKDY4eD3zxLWgffFYAR7nhp3WVzo9jIlLmzoK\n ZV2Tlchf8NH9LZB3XHFrvm5+H7y8vz4RejB76ZUwD5j9IMq4qZu1s+qRFcaDkYYP4mjK\n g0pYW/Bb3Uwl394OyaqsJnbqqmhXOYDWcF/lTx2fG0DU7GldCw5UNFF1XLjWOcEcGWFe\n Nb/d64gcrrAGH45S34aqMtwr6s5pzl8q2qPzrP/mga78NHEVvaMwm7CX2UG32mfvc5vZ\n lURuR66xewedAAu8VxK9upzgI2F9rfkArzxvoZRYDKxk0V11ZUn29NglrINMGPPPiK0F Eg==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v3 15/35] ml/cnxk: update device and model xstats functions",
        "Date": "Wed, 27 Sep 2023 11:30:28 -0700",
        "Message-ID": "<20230927183052.17347-16-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.41.0",
        "In-Reply-To": "<20230927183052.17347-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20230927183052.17347-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "fnXf2jfsxyJOyn1FFJZtfeoYt6dFK05u",
        "X-Proofpoint-ORIG-GUID": "fnXf2jfsxyJOyn1FFJZtfeoYt6dFK05u",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-09-27_12,2023-09-27_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Added cnxk wrapper function to handle ML device and model\nextended stats. Handling resources for the xstats is done\nin the cnxk layer. Introduced internal xstats group.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_dev.h   |   4 -\n drivers/ml/cnxk/cn10k_ml_ops.c   | 531 +++----------------------------\n drivers/ml/cnxk/cn10k_ml_ops.h   |  16 +-\n drivers/ml/cnxk/cnxk_ml_dev.h    |   5 +\n drivers/ml/cnxk/cnxk_ml_ops.c    | 481 +++++++++++++++++++++++++++-\n drivers/ml/cnxk/cnxk_ml_xstats.h |  21 +-\n 6 files changed, 551 insertions(+), 507 deletions(-)",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h\nindex be989e0a20..bde9d08901 100644\n--- a/drivers/ml/cnxk/cn10k_ml_dev.h\n+++ b/drivers/ml/cnxk/cn10k_ml_dev.h\n@@ -10,7 +10,6 @@\n #include \"cn10k_ml_ocm.h\"\n \n #include \"cnxk_ml_io.h\"\n-#include \"cnxk_ml_xstats.h\"\n \n /* Dummy Device ops */\n extern struct rte_ml_dev_ops ml_dev_dummy_ops;\n@@ -133,9 +132,6 @@ struct cn10k_ml_dev {\n \t/* OCM info */\n \tstruct cn10k_ml_ocm ocm;\n \n-\t/* Extended stats data */\n-\tstruct cnxk_ml_xstats xstats;\n-\n \t/* Enable / disable model data caching */\n \tint cache_model_data;\n \ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex 27d255a830..776ad60401 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -198,107 +198,21 @@ cn10k_ml_prep_fp_job_descriptor(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_r\n \treq->cn10k_req.jd.model_run.num_batches = op->nb_batches;\n }\n \n-static int\n-cn10k_ml_xstats_init(struct rte_ml_dev *dev)\n-{\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\tuint16_t nb_stats;\n-\tuint16_t stat_id;\n-\tuint16_t model;\n-\tuint16_t i;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\n-\t/* Allocate memory for xstats entries. Don't allocate during reconfigure */\n-\tnb_stats = RTE_DIM(device_xstats) + ML_CNXK_MAX_MODELS * RTE_DIM(layer_xstats);\n-\tif (cn10k_mldev->xstats.entries == NULL)\n-\t\tcn10k_mldev->xstats.entries = rte_zmalloc(\n-\t\t\t\"cn10k_ml_xstats\", sizeof(struct cnxk_ml_xstats_entry) * nb_stats,\n-\t\t\tPLT_CACHE_LINE_SIZE);\n-\n-\tif (cn10k_mldev->xstats.entries == NULL)\n-\t\treturn -ENOMEM;\n-\n-\t/* Initialize device xstats */\n-\tstat_id = 0;\n-\tfor (i = 0; i < RTE_DIM(device_xstats); i++) {\n-\t\tcn10k_mldev->xstats.entries[stat_id].map.id = stat_id;\n-\t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n-\t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), \"%s\",\n-\t\t\t device_xstats[i].name);\n-\n-\t\tcn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;\n-\t\tcn10k_mldev->xstats.entries[stat_id].type = device_xstats[i].type;\n-\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_DEVICE;\n-\t\tcn10k_mldev->xstats.entries[stat_id].obj_idx = 0;\n-\t\tcn10k_mldev->xstats.entries[stat_id].reset_allowed = device_xstats[i].reset_allowed;\n-\t\tstat_id++;\n-\t}\n-\tcn10k_mldev->xstats.count_mode_device = stat_id;\n-\n-\t/* Initialize model xstats */\n-\tfor (model = 0; model < ML_CNXK_MAX_MODELS; model++) {\n-\t\tcn10k_mldev->xstats.offset_for_model[model] = stat_id;\n-\n-\t\tfor (i = 0; i < RTE_DIM(layer_xstats); i++) {\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].map.id = stat_id;\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].type = layer_xstats[i].type;\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_MODEL;\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].obj_idx = model;\n-\t\t\tcn10k_mldev->xstats.entries[stat_id].reset_allowed =\n-\t\t\t\tlayer_xstats[i].reset_allowed;\n-\n-\t\t\t/* Name of xstat is updated during model load */\n-\t\t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n-\t\t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name),\n-\t\t\t\t \"Model-%u-%s\", model, layer_xstats[i].name);\n-\n-\t\t\tstat_id++;\n-\t\t}\n-\n-\t\tcn10k_mldev->xstats.count_per_model[model] = 
RTE_DIM(layer_xstats);\n-\t}\n-\n-\tcn10k_mldev->xstats.count_mode_model = stat_id - cn10k_mldev->xstats.count_mode_device;\n-\tcn10k_mldev->xstats.count = stat_id;\n-\n-\treturn 0;\n-}\n-\n static void\n-cn10k_ml_xstats_uninit(struct rte_ml_dev *dev)\n+cn10k_ml_xstats_layer_name_update(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id,\n+\t\t\t\t  uint16_t layer_id)\n {\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\n-\trte_free(cn10k_mldev->xstats.entries);\n-\tcn10k_mldev->xstats.entries = NULL;\n-\n-\tcn10k_mldev->xstats.count = 0;\n-}\n-\n-static void\n-cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)\n-{\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n \tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_layer *layer;\n \tuint16_t rclk_freq;\n \tuint16_t sclk_freq;\n \tuint16_t stat_id;\n \tchar suffix[8];\n \tuint16_t i;\n \n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tmodel = dev->data->models[model_id];\n-\tstat_id = RTE_DIM(device_xstats) + model_id * RTE_DIM(layer_xstats);\n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\tlayer = &model->layer[layer_id];\n+\tstat_id = cnxk_mldev->xstats.offset_for_layer[model_id][layer_id];\n \n \troc_clk_freq_get(&rclk_freq, &sclk_freq);\n \tif (sclk_freq == 0)\n@@ -306,270 +220,94 @@ cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)\n \telse\n \t\tstrcpy(suffix, \"ns\");\n \n-\t/* Update xstat name based on model name and sclk availability */\n+\t/* Update xstat name based on layer name and sclk availability */\n \tfor (i = 0; i < RTE_DIM(layer_xstats); i++) {\n-\t\tsnprintf(cn10k_mldev->xstats.entries[stat_id].map.name,\n-\t\t\t sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), \"%s-%s-%s\",\n-\t\t\t model->layer[0].glow.metadata.model.name, layer_xstats[i].name, suffix);\n+\t\tsnprintf(cnxk_mldev->xstats.entries[stat_id].map.name,\n+\t\t\t sizeof(cnxk_mldev->xstats.entries[stat_id].map.name), \"%s-%s-%s\",\n+\t\t\t layer->glow.metadata.model.name, layer_xstats[i].name, suffix);\n \t\tstat_id++;\n \t}\n }\n \n-static uint64_t\n-cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,\n-\t\t       enum cnxk_ml_xstats_type type)\n-{\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\n-\tswitch (type) {\n-\tcase nb_models_loaded:\n-\t\treturn cnxk_mldev->nb_models_loaded;\n-\tcase nb_models_unloaded:\n-\t\treturn cnxk_mldev->nb_models_unloaded;\n-\tcase nb_models_started:\n-\t\treturn cnxk_mldev->nb_models_started;\n-\tcase nb_models_stopped:\n-\t\treturn cnxk_mldev->nb_models_stopped;\n-\tdefault:\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-#define ML_AVG_FOREACH_QP(dev, model, qp_id, str, value, count)                                    \\\n+#define ML_AVG_FOREACH_QP(cnxk_mldev, layer, qp_id, str, value, count)                             \\\n \tdo {                                                                                       \\\n \t\tvalue = 0;                                                                         \\\n-\t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n-\t\t\tvalue += model->layer[0].glow.burst_xstats[qp_id].str##_latency_tot;       \\\n-\t\t\tcount += model->layer[0].glow.burst_xstats[qp_id].dequeued_count -         \\\n-\t\t\t\t 
model->layer[0].glow.burst_xstats[qp_id].str##_reset_count;       \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {        \\\n+\t\t\tvalue += layer->glow.burst_xstats[qp_id].str##_latency_tot;                \\\n+\t\t\tcount += layer->glow.burst_xstats[qp_id].dequeued_count -                  \\\n+\t\t\t\t layer->glow.burst_xstats[qp_id].str##_reset_count;                \\\n \t\t}                                                                                  \\\n+\t\tvalue += layer->glow.sync_xstats->str##_latency_tot;                               \\\n+\t\tcount += layer->glow.sync_xstats->dequeued_count -                                 \\\n+\t\t\t layer->glow.sync_xstats->str##_reset_count;                               \\\n \t\tif (count != 0)                                                                    \\\n \t\t\tvalue = value / count;                                                     \\\n \t} while (0)\n \n-#define ML_MIN_FOREACH_QP(dev, model, qp_id, str, value, count)                                    \\\n+#define ML_MIN_FOREACH_QP(cnxk_mldev, layer, qp_id, str, value, count)                             \\\n \tdo {                                                                                       \\\n \t\tvalue = UINT64_MAX;                                                                \\\n-\t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n-\t\t\tvalue = PLT_MIN(                                                           \\\n-\t\t\t\tvalue,                                                             \\\n-\t\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_min);       \\\n-\t\t\tcount += model->layer[0].glow.burst_xstats[qp_id].dequeued_count -         \\\n-\t\t\t\t model->layer[0].glow.burst_xstats[qp_id].str##_reset_count;       \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {        \\\n+\t\t\tvalue = PLT_MIN(value, layer->glow.burst_xstats[qp_id].str##_latency_min); \\\n+\t\t\tcount += layer->glow.burst_xstats[qp_id].dequeued_count -                  \\\n+\t\t\t\t layer->glow.burst_xstats[qp_id].str##_reset_count;                \\\n \t\t}                                                                                  \\\n+\t\tvalue = PLT_MIN(value, layer->glow.sync_xstats->str##_latency_min);                \\\n+\t\tcount += layer->glow.sync_xstats->dequeued_count -                                 \\\n+\t\t\t layer->glow.sync_xstats->str##_reset_count;                               \\\n \t\tif (count == 0)                                                                    \\\n \t\t\tvalue = 0;                                                                 \\\n \t} while (0)\n \n-#define ML_MAX_FOREACH_QP(dev, model, qp_id, str, value, count)                                    \\\n+#define ML_MAX_FOREACH_QP(cnxk_mldev, layer, qp_id, str, value, count)                             \\\n \tdo {                                                                                       \\\n \t\tvalue = 0;                                                                         \\\n-\t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n-\t\t\tvalue = PLT_MAX(                                                           \\\n-\t\t\t\tvalue,                                                             \\\n-\t\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_max);       \\\n-\t\t\tcount += 
model->layer[0].glow.burst_xstats[qp_id].dequeued_count -         \\\n-\t\t\t\t model->layer[0].glow.burst_xstats[qp_id].str##_reset_count;       \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {        \\\n+\t\t\tvalue = PLT_MAX(value, layer->glow.burst_xstats[qp_id].str##_latency_max); \\\n+\t\t\tcount += layer->glow.burst_xstats[qp_id].dequeued_count -                  \\\n+\t\t\t\t layer->glow.burst_xstats[qp_id].str##_reset_count;                \\\n \t\t}                                                                                  \\\n+\t\tvalue = PLT_MAX(value, layer->glow.sync_xstats->str##_latency_max);                \\\n+\t\tcount += layer->glow.sync_xstats->dequeued_count -                                 \\\n+\t\t\t layer->glow.sync_xstats->str##_reset_count;                               \\\n \t\tif (count == 0)                                                                    \\\n \t\t\tvalue = 0;                                                                 \\\n \t} while (0)\n \n-static uint64_t\n-cn10k_ml_model_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx, enum cnxk_ml_xstats_type type)\n+uint64_t\n+cn10k_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer,\n+\t\t\t enum cnxk_ml_xstats_type type)\n {\n-\tstruct cnxk_ml_model *model;\n-\tuint16_t rclk_freq; /* MHz */\n-\tuint16_t sclk_freq; /* MHz */\n \tuint64_t count = 0;\n-\tuint64_t value;\n+\tuint64_t value = 0;\n \tuint32_t qp_id;\n \n-\tmodel = dev->data->models[obj_idx];\n-\tif (model == NULL)\n-\t\treturn 0;\n-\n \tswitch (type) {\n \tcase avg_hw_latency:\n-\t\tML_AVG_FOREACH_QP(dev, model, qp_id, hw, value, count);\n+\t\tML_AVG_FOREACH_QP(cnxk_mldev, layer, qp_id, hw, value, count);\n \t\tbreak;\n \tcase min_hw_latency:\n-\t\tML_MIN_FOREACH_QP(dev, model, qp_id, hw, value, count);\n+\t\tML_MIN_FOREACH_QP(cnxk_mldev, layer, qp_id, hw, value, count);\n \t\tbreak;\n \tcase max_hw_latency:\n-\t\tML_MAX_FOREACH_QP(dev, model, qp_id, hw, value, count);\n+\t\tML_MAX_FOREACH_QP(cnxk_mldev, layer, qp_id, hw, value, count);\n \t\tbreak;\n \tcase avg_fw_latency:\n-\t\tML_AVG_FOREACH_QP(dev, model, qp_id, fw, value, count);\n+\t\tML_AVG_FOREACH_QP(cnxk_mldev, layer, qp_id, fw, value, count);\n \t\tbreak;\n \tcase min_fw_latency:\n-\t\tML_MIN_FOREACH_QP(dev, model, qp_id, fw, value, count);\n+\t\tML_MIN_FOREACH_QP(cnxk_mldev, layer, qp_id, fw, value, count);\n \t\tbreak;\n \tcase max_fw_latency:\n-\t\tML_MAX_FOREACH_QP(dev, model, qp_id, fw, value, count);\n+\t\tML_MAX_FOREACH_QP(cnxk_mldev, layer, qp_id, fw, value, count);\n \t\tbreak;\n \tdefault:\n \t\tvalue = 0;\n \t}\n \n-\troc_clk_freq_get(&rclk_freq, &sclk_freq);\n-\tif (sclk_freq != 0) /* return in ns */\n-\t\tvalue = (value * 1000ULL) / sclk_freq;\n-\n \treturn value;\n }\n \n-static int\n-cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[], uint16_t nb_ids)\n-{\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_xstats_entry *xs;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\tuint16_t nb_stats;\n-\tuint16_t stat_id;\n-\tuint32_t i;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\n-\tif (stat_ids == NULL)\n-\t\tnb_stats = cn10k_mldev->xstats.count_mode_device;\n-\telse\n-\t\tnb_stats = nb_ids;\n-\n-\tfor (i = 0; i < nb_stats; i++) {\n-\t\tif (stat_ids == NULL)\n-\t\t\tstat_id = i;\n-\t\telse\n-\t\t\tstat_id = stat_ids[i];\n-\n-\t\tif (stat_id >= cn10k_mldev->xstats.count_mode_device)\n-\t\t\treturn -EINVAL;\n-\n-\t\txs = 
&cn10k_mldev->xstats.entries[stat_id];\n-\t\tif (!xs->reset_allowed)\n-\t\t\tcontinue;\n-\n-\t\txs->reset_value = cn10k_ml_dev_xstat_get(dev, xs->obj_idx, xs->type);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-#define ML_AVG_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \\\n-\tdo {                                                                                       \\\n-\t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {                      \\\n-\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_tot = 0;            \\\n-\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_reset_count =               \\\n-\t\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].dequeued_count;           \\\n-\t\t}                                                                                  \\\n-\t} while (0)\n-\n-#define ML_MIN_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \\\n-\tdo {                                                                                       \\\n-\t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)                        \\\n-\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_min = UINT64_MAX;   \\\n-\t} while (0)\n-\n-#define ML_MAX_RESET_FOREACH_QP(dev, model, qp_id, str)                                            \\\n-\tdo {                                                                                       \\\n-\t\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)                        \\\n-\t\t\tmodel->layer[0].glow.burst_xstats[qp_id].str##_latency_max = 0;            \\\n-\t} while (0)\n-\n-static void\n-cn10k_ml_reset_model_stat(struct rte_ml_dev *dev, uint16_t model_id, enum cnxk_ml_xstats_type type)\n-{\n-\tstruct cnxk_ml_model *model;\n-\tuint32_t qp_id;\n-\n-\tmodel = dev->data->models[model_id];\n-\n-\tswitch (type) {\n-\tcase avg_hw_latency:\n-\t\tML_AVG_RESET_FOREACH_QP(dev, model, qp_id, hw);\n-\t\tbreak;\n-\tcase min_hw_latency:\n-\t\tML_MIN_RESET_FOREACH_QP(dev, model, qp_id, hw);\n-\t\tbreak;\n-\tcase max_hw_latency:\n-\t\tML_MAX_RESET_FOREACH_QP(dev, model, qp_id, hw);\n-\t\tbreak;\n-\tcase avg_fw_latency:\n-\t\tML_AVG_RESET_FOREACH_QP(dev, model, qp_id, fw);\n-\t\tbreak;\n-\tcase min_fw_latency:\n-\t\tML_MIN_RESET_FOREACH_QP(dev, model, qp_id, fw);\n-\t\tbreak;\n-\tcase max_fw_latency:\n-\t\tML_MAX_RESET_FOREACH_QP(dev, model, qp_id, fw);\n-\t\tbreak;\n-\tdefault:\n-\t\treturn;\n-\t}\n-}\n-\n-static int\n-cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint16_t stat_ids[],\n-\t\t\t    uint16_t nb_ids)\n-{\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_xstats_entry *xs;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\tstruct cnxk_ml_model *model;\n-\tint32_t lcl_model_id = 0;\n-\tuint16_t start_id;\n-\tuint16_t end_id;\n-\tint32_t i;\n-\tint32_t j;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tfor (i = 0; i < ML_CNXK_MAX_MODELS; i++) {\n-\t\tif (model_id == -1) {\n-\t\t\tmodel = dev->data->models[i];\n-\t\t\tif (model == NULL) /* Skip inactive models */\n-\t\t\t\tcontinue;\n-\t\t} else {\n-\t\t\tif (model_id != i)\n-\t\t\t\tcontinue;\n-\n-\t\t\tmodel = dev->data->models[model_id];\n-\t\t\tif (model == NULL) {\n-\t\t\t\tplt_err(\"Invalid model_id = %d\\n\", model_id);\n-\t\t\t\treturn -EINVAL;\n-\t\t\t}\n-\t\t}\n-\n-\t\tstart_id = cn10k_mldev->xstats.offset_for_model[i];\n-\t\tend_id = cn10k_mldev->xstats.offset_for_model[i] +\n-\t\t\t 
cn10k_mldev->xstats.count_per_model[i] - 1;\n-\n-\t\tif (stat_ids == NULL) {\n-\t\t\tfor (j = start_id; j <= end_id; j++) {\n-\t\t\t\txs = &cn10k_mldev->xstats.entries[j];\n-\t\t\t\tcn10k_ml_reset_model_stat(dev, i, xs->type);\n-\t\t\t}\n-\t\t} else {\n-\t\t\tfor (j = 0; j < nb_ids; j++) {\n-\t\t\t\tif (stat_ids[j] < start_id || stat_ids[j] > end_id) {\n-\t\t\t\t\tplt_err(\"Invalid stat_ids[%d] = %d for model_id = %d\\n\", j,\n-\t\t\t\t\t\tstat_ids[j], lcl_model_id);\n-\t\t\t\t\treturn -EINVAL;\n-\t\t\t\t}\n-\t\t\t\txs = &cn10k_mldev->xstats.entries[stat_ids[j]];\n-\t\t\t\tcn10k_ml_reset_model_stat(dev, i, xs->type);\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n static int\n cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer)\n {\n@@ -654,7 +392,6 @@ cn10k_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_c\n \tstruct cn10k_ml_dev *cn10k_mldev;\n \tstruct cn10k_ml_ocm *ocm;\n \tuint16_t tile_id;\n-\tint ret;\n \n \tRTE_SET_USED(conf);\n \n@@ -682,13 +419,6 @@ cn10k_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_c\n \n \trte_spinlock_init(&ocm->lock);\n \n-\t/* Initialize xstats */\n-\tret = cn10k_ml_xstats_init(cnxk_mldev->mldev);\n-\tif (ret != 0) {\n-\t\tplt_err(\"Failed to initialize xstats\");\n-\t\treturn ret;\n-\t}\n-\n \t/* Set JCMDQ enqueue function */\n \tif (cn10k_mldev->hw_queue_lock == 1)\n \t\tcn10k_mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_sl;\n@@ -717,9 +447,6 @@ cn10k_ml_dev_close(struct cnxk_ml_dev *cnxk_mldev)\n \t/* Release ocm_mask memory */\n \trte_free(cn10k_mldev->ocm.ocm_mask);\n \n-\t/* Un-initialize xstats */\n-\tcn10k_ml_xstats_uninit(cnxk_mldev->mldev);\n-\n \t/* Unload firmware */\n \tcn10k_ml_fw_unload(cnxk_mldev);\n \n@@ -770,174 +497,6 @@ cn10k_ml_dev_stop(struct cnxk_ml_dev *cnxk_mldev)\n \treturn 0;\n }\n \n-int\n-cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n-\t\t\t      int32_t model_id, struct rte_ml_dev_xstats_map *xstats_map,\n-\t\t\t      uint32_t size)\n-{\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\tuint32_t xstats_mode_count;\n-\tuint32_t idx = 0;\n-\tuint32_t i;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\n-\txstats_mode_count = 0;\n-\tswitch (mode) {\n-\tcase RTE_ML_DEV_XSTATS_DEVICE:\n-\t\txstats_mode_count = cn10k_mldev->xstats.count_mode_device;\n-\t\tbreak;\n-\tcase RTE_ML_DEV_XSTATS_MODEL:\n-\t\tif (model_id >= ML_CNXK_MAX_MODELS)\n-\t\t\tbreak;\n-\t\txstats_mode_count = cn10k_mldev->xstats.count_per_model[model_id];\n-\t\tbreak;\n-\tdefault:\n-\t\treturn -EINVAL;\n-\t};\n-\n-\tif (xstats_mode_count > size || xstats_map == NULL)\n-\t\treturn xstats_mode_count;\n-\n-\tfor (i = 0; i < cn10k_mldev->xstats.count && idx < size; i++) {\n-\t\tif (cn10k_mldev->xstats.entries[i].mode != mode)\n-\t\t\tcontinue;\n-\n-\t\tif (mode != RTE_ML_DEV_XSTATS_DEVICE &&\n-\t\t    model_id != cn10k_mldev->xstats.entries[i].obj_idx)\n-\t\t\tcontinue;\n-\n-\t\tstrncpy(xstats_map[idx].name, cn10k_mldev->xstats.entries[i].map.name,\n-\t\t\tRTE_ML_STR_MAX);\n-\t\txstats_map[idx].id = cn10k_mldev->xstats.entries[i].map.id;\n-\t\tidx++;\n-\t}\n-\n-\treturn idx;\n-}\n-\n-int\n-cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,\n-\t\t\t\tuint64_t *value)\n-{\n-\tstruct cnxk_ml_xstats_entry *xs;\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\tcnxk_ml_xstats_fn fn;\n-\tuint32_t 
i;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\tfor (i = 0; i < cn10k_mldev->xstats.count; i++) {\n-\t\txs = &cn10k_mldev->xstats.entries[i];\n-\t\tif (strncmp(xs->map.name, name, RTE_ML_STR_MAX) == 0) {\n-\t\t\tif (stat_id != NULL)\n-\t\t\t\t*stat_id = xs->map.id;\n-\n-\t\t\tswitch (xs->fn_id) {\n-\t\t\tcase CNXK_ML_XSTATS_FN_DEVICE:\n-\t\t\t\tfn = cn10k_ml_dev_xstat_get;\n-\t\t\t\tbreak;\n-\t\t\tcase CNXK_ML_XSTATS_FN_MODEL:\n-\t\t\t\tfn = cn10k_ml_model_xstat_get;\n-\t\t\t\tbreak;\n-\t\t\tdefault:\n-\t\t\t\tplt_err(\"Unexpected xstat fn_id = %d\", xs->fn_id);\n-\t\t\t\treturn -EINVAL;\n-\t\t\t}\n-\n-\t\t\t*value = fn(dev, xs->obj_idx, xs->type) - xs->reset_value;\n-\n-\t\t\treturn 0;\n-\t\t}\n-\t}\n-\n-\tif (stat_id != NULL)\n-\t\t*stat_id = (uint16_t)-1;\n-\n-\treturn -EINVAL;\n-}\n-\n-int\n-cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode, int32_t model_id,\n-\t\t\tconst uint16_t stat_ids[], uint64_t values[], uint16_t nb_ids)\n-{\n-\tstruct cn10k_ml_dev *cn10k_mldev;\n-\tstruct cnxk_ml_xstats_entry *xs;\n-\tstruct cnxk_ml_dev *cnxk_mldev;\n-\tuint32_t xstats_mode_count;\n-\tcnxk_ml_xstats_fn fn;\n-\tuint64_t val;\n-\tuint32_t idx;\n-\tuint32_t i;\n-\n-\tcnxk_mldev = dev->data->dev_private;\n-\tcn10k_mldev = &cnxk_mldev->cn10k_mldev;\n-\txstats_mode_count = 0;\n-\n-\tswitch (mode) {\n-\tcase RTE_ML_DEV_XSTATS_DEVICE:\n-\t\txstats_mode_count = cn10k_mldev->xstats.count_mode_device;\n-\t\tbreak;\n-\tcase RTE_ML_DEV_XSTATS_MODEL:\n-\t\tif (model_id >= ML_CNXK_MAX_MODELS)\n-\t\t\treturn -EINVAL;\n-\t\txstats_mode_count = cn10k_mldev->xstats.count_per_model[model_id];\n-\t\tbreak;\n-\tdefault:\n-\t\treturn -EINVAL;\n-\t};\n-\n-\tidx = 0;\n-\tfor (i = 0; i < nb_ids && idx < xstats_mode_count; i++) {\n-\t\txs = &cn10k_mldev->xstats.entries[stat_ids[i]];\n-\t\tif (stat_ids[i] > cn10k_mldev->xstats.count || xs->mode != mode)\n-\t\t\tcontinue;\n-\n-\t\tif (mode == RTE_ML_DEV_XSTATS_MODEL && model_id != xs->obj_idx) {\n-\t\t\tplt_err(\"Invalid stats_id[%d] = %d for model_id = %d\\n\", i, stat_ids[i],\n-\t\t\t\tmodel_id);\n-\t\t\treturn -EINVAL;\n-\t\t}\n-\n-\t\tswitch (xs->fn_id) {\n-\t\tcase CNXK_ML_XSTATS_FN_DEVICE:\n-\t\t\tfn = cn10k_ml_dev_xstat_get;\n-\t\t\tbreak;\n-\t\tcase CNXK_ML_XSTATS_FN_MODEL:\n-\t\t\tfn = cn10k_ml_model_xstat_get;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tplt_err(\"Unexpected xstat fn_id = %d\", xs->fn_id);\n-\t\t\treturn -EINVAL;\n-\t\t}\n-\n-\t\tval = fn(dev, xs->obj_idx, xs->type);\n-\t\tif (values)\n-\t\t\tvalues[idx] = val;\n-\n-\t\tidx++;\n-\t}\n-\n-\treturn idx;\n-}\n-\n-int\n-cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n-\t\t\t  int32_t model_id, const uint16_t stat_ids[], uint16_t nb_ids)\n-{\n-\tswitch (mode) {\n-\tcase RTE_ML_DEV_XSTATS_DEVICE:\n-\t\treturn cn10k_ml_device_xstats_reset(dev, stat_ids, nb_ids);\n-\tcase RTE_ML_DEV_XSTATS_MODEL:\n-\t\treturn cn10k_ml_model_xstats_reset(dev, model_id, stat_ids, nb_ids);\n-\t};\n-\n-\treturn 0;\n-}\n-\n int\n cn10k_ml_dev_dump(struct cnxk_ml_dev *cnxk_mldev, FILE *fp)\n {\n@@ -1211,7 +770,7 @@ cn10k_ml_layer_load(void *device, uint16_t model_id, const char *layer_name, uin\n \t\t\t\t\t\t\t      sizeof(struct cn10k_ml_layer_xstats));\n \n \t/* Update xstats names */\n-\tcn10k_ml_xstats_model_name_update(cnxk_mldev->mldev, idx);\n+\tcn10k_ml_xstats_layer_name_update(cnxk_mldev, model_id, layer_id);\n \n \tlayer->state = ML_CNXK_LAYER_STATE_LOADED;\n \tcnxk_mldev->index_map[idx].model_id = 
model->model_id;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.h b/drivers/ml/cnxk/cn10k_ml_ops.h\nindex 47e7cb12af..4d76164dba 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.h\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.h\n@@ -13,6 +13,7 @@\n struct cnxk_ml_dev;\n struct cnxk_ml_qp;\n struct cnxk_ml_model;\n+struct cnxk_ml_layer;\n \n /* Firmware version string length */\n #define MLDEV_FIRMWARE_VERSION_LENGTH 32\n@@ -298,17 +299,6 @@ int cn10k_ml_dev_stop(struct cnxk_ml_dev *cnxk_mldev);\n int cn10k_ml_dev_dump(struct cnxk_ml_dev *cnxk_mldev, FILE *fp);\n int cn10k_ml_dev_selftest(struct cnxk_ml_dev *cnxk_mldev);\n \n-int cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n-\t\t\t\t  int32_t model_id, struct rte_ml_dev_xstats_map *xstats_map,\n-\t\t\t\t  uint32_t size);\n-int cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,\n-\t\t\t\t    uint64_t *value);\n-int cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n-\t\t\t    int32_t model_id, const uint16_t stat_ids[], uint64_t values[],\n-\t\t\t    uint16_t nb_ids);\n-int cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n-\t\t\t      int32_t model_id, const uint16_t stat_ids[], uint16_t nb_ids);\n-\n /* Slow-path ops */\n int cn10k_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *params,\n \t\t\tstruct cnxk_ml_model *model);\n@@ -337,4 +327,8 @@ int cn10k_ml_layer_unload(void *device, uint16_t model_id, const char *layer_nam\n int cn10k_ml_layer_start(void *device, uint16_t model_id, const char *layer_name);\n int cn10k_ml_layer_stop(void *device, uint16_t model_id, const char *layer_name);\n \n+/* xstats ops */\n+uint64_t cn10k_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer,\n+\t\t\t\t  enum cnxk_ml_xstats_type type);\n+\n #endif /* _CN10K_ML_OPS_H_ */\ndiff --git a/drivers/ml/cnxk/cnxk_ml_dev.h b/drivers/ml/cnxk/cnxk_ml_dev.h\nindex 1590249abd..3ce9338f1f 100644\n--- a/drivers/ml/cnxk/cnxk_ml_dev.h\n+++ b/drivers/ml/cnxk/cnxk_ml_dev.h\n@@ -9,6 +9,8 @@\n \n #include \"cn10k_ml_dev.h\"\n \n+#include \"cnxk_ml_xstats.h\"\n+\n /* ML command timeout in seconds */\n #define ML_CNXK_CMD_TIMEOUT 5\n \n@@ -51,6 +53,9 @@ struct cnxk_ml_dev {\n \t/* Configuration state */\n \tenum cnxk_ml_dev_state state;\n \n+\t/* Extended stats data */\n+\tstruct cnxk_ml_xstats xstats;\n+\n \t/* Number of models loaded */\n \tuint16_t nb_models_loaded;\n \ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.c b/drivers/ml/cnxk/cnxk_ml_ops.c\nindex c75317d6da..6a423d9eda 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.c\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.c\n@@ -115,6 +115,285 @@ cnxk_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_desc\n \treturn NULL;\n }\n \n+static int\n+cnxk_ml_xstats_init(struct cnxk_ml_dev *cnxk_mldev)\n+{\n+\tuint16_t nb_stats;\n+\tuint16_t stat_id;\n+\tuint16_t model;\n+\tuint16_t layer;\n+\tuint16_t i;\n+\n+\t/* Allocate memory for xstats entries. 
Don't allocate during reconfigure */\n+\tnb_stats = RTE_DIM(device_xstats) +\n+\t\t   RTE_DIM(layer_xstats) * ML_CNXK_MAX_MODELS * ML_CNXK_MODEL_MAX_LAYERS;\n+\tif (cnxk_mldev->xstats.entries == NULL)\n+\t\tcnxk_mldev->xstats.entries = rte_zmalloc(\n+\t\t\t\"cnxk_ml_xstats\", sizeof(struct cnxk_ml_xstats_entry) * nb_stats,\n+\t\t\tPLT_CACHE_LINE_SIZE);\n+\n+\tif (cnxk_mldev->xstats.entries == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/* Initialize device xstats */\n+\tstat_id = 0;\n+\tfor (i = 0; i < RTE_DIM(device_xstats); i++) {\n+\t\tcnxk_mldev->xstats.entries[stat_id].map.id = stat_id;\n+\t\tsnprintf(cnxk_mldev->xstats.entries[stat_id].map.name,\n+\t\t\t sizeof(cnxk_mldev->xstats.entries[stat_id].map.name), \"%s\",\n+\t\t\t device_xstats[i].name);\n+\n+\t\tcnxk_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;\n+\t\tcnxk_mldev->xstats.entries[stat_id].group = CNXK_ML_XSTATS_GROUP_DEVICE;\n+\t\tcnxk_mldev->xstats.entries[stat_id].type = device_xstats[i].type;\n+\t\tcnxk_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_DEVICE;\n+\t\tcnxk_mldev->xstats.entries[stat_id].obj_idx = 0;\n+\t\tcnxk_mldev->xstats.entries[stat_id].reset_allowed = device_xstats[i].reset_allowed;\n+\t\tstat_id++;\n+\t}\n+\tcnxk_mldev->xstats.count_mode_device = stat_id;\n+\n+\t/* Initialize model xstats */\n+\tfor (model = 0; model < ML_CNXK_MAX_MODELS; model++) {\n+\t\tcnxk_mldev->xstats.offset_for_model[model] = stat_id;\n+\n+\t\tfor (layer = 0; layer < ML_CNXK_MODEL_MAX_LAYERS; layer++) {\n+\t\t\tcnxk_mldev->xstats.offset_for_layer[model][layer] = stat_id;\n+\n+\t\t\tfor (i = 0; i < RTE_DIM(layer_xstats); i++) {\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].map.id = stat_id;\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].group =\n+\t\t\t\t\tCNXK_ML_XSTATS_GROUP_LAYER;\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].type = layer_xstats[i].type;\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_MODEL;\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].obj_idx = model;\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].layer_id = layer;\n+\t\t\t\tcnxk_mldev->xstats.entries[stat_id].reset_allowed =\n+\t\t\t\t\tlayer_xstats[i].reset_allowed;\n+\n+\t\t\t\t/* Name of xstat is updated during model load */\n+\t\t\t\tsnprintf(cnxk_mldev->xstats.entries[stat_id].map.name,\n+\t\t\t\t\t sizeof(cnxk_mldev->xstats.entries[stat_id].map.name),\n+\t\t\t\t\t \"Layer-%u-%u-%s\", model, layer, layer_xstats[i].name);\n+\n+\t\t\t\tstat_id++;\n+\t\t\t}\n+\n+\t\t\tcnxk_mldev->xstats.count_per_layer[model][layer] = RTE_DIM(layer_xstats);\n+\t\t}\n+\n+\t\tcnxk_mldev->xstats.count_per_model[model] = RTE_DIM(layer_xstats);\n+\t}\n+\n+\tcnxk_mldev->xstats.count_mode_model = stat_id - cnxk_mldev->xstats.count_mode_device;\n+\tcnxk_mldev->xstats.count = stat_id;\n+\n+\treturn 0;\n+}\n+\n+static void\n+cnxk_ml_xstats_uninit(struct cnxk_ml_dev *cnxk_mldev)\n+{\n+\trte_free(cnxk_mldev->xstats.entries);\n+\tcnxk_mldev->xstats.entries = NULL;\n+\n+\tcnxk_mldev->xstats.count = 0;\n+}\n+\n+static uint64_t\n+cnxk_ml_dev_xstat_get(struct cnxk_ml_dev *cnxk_mldev, uint16_t obj_idx __rte_unused,\n+\t\t      int32_t layer_id __rte_unused, enum cnxk_ml_xstats_type type)\n+{\n+\tswitch (type) {\n+\tcase nb_models_loaded:\n+\t\treturn cnxk_mldev->nb_models_loaded;\n+\tcase nb_models_unloaded:\n+\t\treturn cnxk_mldev->nb_models_unloaded;\n+\tcase nb_models_started:\n+\t\treturn cnxk_mldev->nb_models_started;\n+\tcase nb_models_stopped:\n+\t\treturn 
cnxk_mldev->nb_models_stopped;\n+\tdefault:\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static uint64_t\n+cnxk_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, uint16_t obj_idx, int32_t layer_id,\n+\t\t\tenum cnxk_ml_xstats_type type)\n+{\n+\tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_layer *layer;\n+\tuint16_t rclk_freq; /* MHz */\n+\tuint16_t sclk_freq; /* MHz */\n+\tuint64_t value = 0;\n+\n+\tmodel = cnxk_mldev->mldev->data->models[obj_idx];\n+\tif (model == NULL)\n+\t\treturn 0;\n+\n+\tif (layer_id >= 0)\n+\t\tlayer = &model->layer[layer_id];\n+\telse\n+\t\treturn 0;\n+\n+\tvalue = cn10k_ml_model_xstat_get(cnxk_mldev, layer, type);\n+\n+\troc_clk_freq_get(&rclk_freq, &sclk_freq);\n+\tif (sclk_freq != 0) /* return in ns */\n+\t\tvalue = (value * 1000ULL) / sclk_freq;\n+\n+\treturn value;\n+}\n+\n+static int\n+cnxk_ml_device_xstats_reset(struct cnxk_ml_dev *cnxk_mldev, const uint16_t stat_ids[],\n+\t\t\t    uint16_t nb_ids)\n+{\n+\tstruct cnxk_ml_xstats_entry *xs;\n+\tuint16_t nb_stats;\n+\tuint16_t stat_id;\n+\tuint32_t i;\n+\n+\tif (stat_ids == NULL)\n+\t\tnb_stats = cnxk_mldev->xstats.count_mode_device;\n+\telse\n+\t\tnb_stats = nb_ids;\n+\n+\tfor (i = 0; i < nb_stats; i++) {\n+\t\tif (stat_ids == NULL)\n+\t\t\tstat_id = i;\n+\t\telse\n+\t\t\tstat_id = stat_ids[i];\n+\n+\t\tif (stat_id >= cnxk_mldev->xstats.count_mode_device)\n+\t\t\treturn -EINVAL;\n+\n+\t\txs = &cnxk_mldev->xstats.entries[stat_id];\n+\t\tif (!xs->reset_allowed)\n+\t\t\tcontinue;\n+\n+\t\txs->reset_value =\n+\t\t\tcnxk_ml_dev_xstat_get(cnxk_mldev, xs->obj_idx, xs->layer_id, xs->type);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+#define ML_AVG_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, str)                                     \\\n+\tdo {                                                                                       \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {        \\\n+\t\t\tlayer->glow.burst_xstats[qp_id].str##_latency_tot = 0;                     \\\n+\t\t\tlayer->glow.burst_xstats[qp_id].str##_reset_count =                        \\\n+\t\t\t\tlayer->glow.burst_xstats[qp_id].dequeued_count;                    \\\n+\t\t}                                                                                  \\\n+\t} while (0)\n+\n+#define ML_MIN_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, str)                                     \\\n+\tdo {                                                                                       \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++)          \\\n+\t\t\tlayer->glow.burst_xstats[qp_id].str##_latency_min = UINT64_MAX;            \\\n+\t} while (0)\n+\n+#define ML_MAX_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, str)                                     \\\n+\tdo {                                                                                       \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++)          \\\n+\t\t\tlayer->glow.burst_xstats[qp_id].str##_latency_max = 0;                     \\\n+\t} while (0)\n+\n+static void\n+cnxk_ml_reset_model_stat(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id,\n+\t\t\t enum cnxk_ml_xstats_type type)\n+{\n+\tstruct cnxk_ml_model *model;\n+\tstruct cnxk_ml_layer *layer;\n+\tuint16_t layer_id = 0;\n+\tuint32_t qp_id;\n+\n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\tlayer = &model->layer[layer_id];\n+\n+\tswitch (type) {\n+\tcase avg_hw_latency:\n+\t\tML_AVG_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, 
hw);\n+\t\tbreak;\n+\tcase min_hw_latency:\n+\t\tML_MIN_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, hw);\n+\t\tbreak;\n+\tcase max_hw_latency:\n+\t\tML_MAX_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, hw);\n+\t\tbreak;\n+\tcase avg_fw_latency:\n+\t\tML_AVG_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, fw);\n+\t\tbreak;\n+\tcase min_fw_latency:\n+\t\tML_MIN_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, fw);\n+\t\tbreak;\n+\tcase max_fw_latency:\n+\t\tML_MAX_RESET_FOREACH_QP(cnxk_mldev, layer, qp_id, fw);\n+\t\tbreak;\n+\tdefault:\n+\t\treturn;\n+\t}\n+}\n+\n+static int\n+cnxk_ml_model_xstats_reset(struct cnxk_ml_dev *cnxk_mldev, int32_t model_id,\n+\t\t\t   const uint16_t stat_ids[], uint16_t nb_ids)\n+{\n+\tstruct cnxk_ml_xstats_entry *xs;\n+\tstruct cnxk_ml_model *model;\n+\tint32_t lcl_model_id = 0;\n+\tuint16_t layer_id = 0;\n+\tuint16_t start_id;\n+\tuint16_t end_id;\n+\tint32_t i;\n+\tint32_t j;\n+\n+\tfor (i = 0; i < ML_CNXK_MAX_MODELS; i++) {\n+\t\tif (model_id == -1) {\n+\t\t\tmodel = cnxk_mldev->mldev->data->models[i];\n+\t\t\tif (model == NULL) /* skip inactive models */\n+\t\t\t\tcontinue;\n+\t\t} else {\n+\t\t\tif (model_id != i)\n+\t\t\t\tcontinue;\n+\n+\t\t\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\t\t\tif (model == NULL) {\n+\t\t\t\tplt_err(\"Invalid model_id = %d\\n\", model_id);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\t\t}\n+\n+\t\tstart_id = cnxk_mldev->xstats.offset_for_layer[i][layer_id];\n+\t\tend_id = cnxk_mldev->xstats.offset_for_layer[i][layer_id] +\n+\t\t\t cnxk_mldev->xstats.count_per_layer[i][layer_id] - 1;\n+\n+\t\tif (stat_ids == NULL) {\n+\t\t\tfor (j = start_id; j <= end_id; j++) {\n+\t\t\t\txs = &cnxk_mldev->xstats.entries[j];\n+\t\t\t\tcnxk_ml_reset_model_stat(cnxk_mldev, i, xs->type);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tfor (j = 0; j < nb_ids; j++) {\n+\t\t\t\tif (stat_ids[j] < start_id || stat_ids[j] > end_id) {\n+\t\t\t\t\tplt_err(\"Invalid stat_ids[%d] = %d for model_id = %d\\n\", j,\n+\t\t\t\t\t\tstat_ids[j], lcl_model_id);\n+\t\t\t\t\treturn -EINVAL;\n+\t\t\t\t}\n+\t\t\t\txs = &cnxk_mldev->xstats.entries[stat_ids[j]];\n+\t\t\t\tcnxk_ml_reset_model_stat(cnxk_mldev, i, xs->type);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n static int\n cnxk_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info)\n {\n@@ -294,6 +573,13 @@ cnxk_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *co\n \tfor (i = 0; i < cnxk_mldev->max_nb_layers; i++)\n \t\tcnxk_mldev->index_map[i].active = false;\n \n+\t/* Initialize xstats */\n+\tret = cnxk_ml_xstats_init(cnxk_mldev);\n+\tif (ret != 0) {\n+\t\tplt_err(\"Failed to initialize xstats\");\n+\t\tgoto error;\n+\t}\n+\n \tcnxk_mldev->nb_models_loaded = 0;\n \tcnxk_mldev->nb_models_started = 0;\n \tcnxk_mldev->nb_models_stopped = 0;\n@@ -323,6 +609,9 @@ cnxk_ml_dev_close(struct rte_ml_dev *dev)\n \n \tcnxk_mldev = dev->data->dev_private;\n \n+\t/* Un-initialize xstats */\n+\tcnxk_ml_xstats_uninit(cnxk_mldev);\n+\n \tif (cn10k_ml_dev_close(cnxk_mldev) != 0)\n \t\tplt_err(\"Failed to close CN10K ML Device\");\n \n@@ -521,6 +810,190 @@ cnxk_ml_dev_stats_reset(struct rte_ml_dev *dev)\n \t}\n }\n \n+static int\n+cnxk_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n+\t\t\t     int32_t model_id, struct rte_ml_dev_xstats_map *xstats_map,\n+\t\t\t     uint32_t size)\n+{\n+\tstruct cnxk_ml_xstats_entry *xs;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n+\tuint32_t xstats_mode_count;\n+\tuint16_t layer_id = 0;\n+\tuint32_t idx = 0;\n+\tuint32_t i;\n+\n+\tif (dev == 
NULL)\n+\t\treturn -EINVAL;\n+\n+\tcnxk_mldev = dev->data->dev_private;\n+\txstats_mode_count = 0;\n+\n+\tswitch (mode) {\n+\tcase RTE_ML_DEV_XSTATS_DEVICE:\n+\t\txstats_mode_count = cnxk_mldev->xstats.count_mode_device;\n+\t\tbreak;\n+\tcase RTE_ML_DEV_XSTATS_MODEL:\n+\t\tif (model_id >= ML_CNXK_MAX_MODELS)\n+\t\t\tbreak;\n+\t\txstats_mode_count = cnxk_mldev->xstats.count_per_layer[model_id][layer_id];\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t};\n+\n+\tif (xstats_mode_count > size || xstats_map == NULL)\n+\t\treturn xstats_mode_count;\n+\n+\tfor (i = 0; i < cnxk_mldev->xstats.count && idx < size; i++) {\n+\t\txs = &cnxk_mldev->xstats.entries[i];\n+\t\tif (xs->mode != mode)\n+\t\t\tcontinue;\n+\n+\t\tif (mode == RTE_ML_DEV_XSTATS_MODEL &&\n+\t\t    (model_id != xs->obj_idx || layer_id != xs->layer_id))\n+\t\t\tcontinue;\n+\n+\t\tstrncpy(xstats_map[idx].name, xs->map.name, RTE_ML_STR_MAX);\n+\t\txstats_map[idx].id = xs->map.id;\n+\t\tidx++;\n+\t}\n+\n+\treturn idx;\n+}\n+\n+static int\n+cnxk_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16_t *stat_id,\n+\t\t\t       uint64_t *value)\n+{\n+\tstruct cnxk_ml_xstats_entry *xs;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n+\tcnxk_ml_xstats_fn fn;\n+\tuint32_t i;\n+\n+\tif (dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\tcnxk_mldev = dev->data->dev_private;\n+\n+\tfor (i = 0; i < cnxk_mldev->xstats.count; i++) {\n+\t\txs = &cnxk_mldev->xstats.entries[i];\n+\t\tif (strncmp(xs->map.name, name, RTE_ML_STR_MAX) == 0) {\n+\t\t\tif (stat_id != NULL)\n+\t\t\t\t*stat_id = xs->map.id;\n+\n+\t\t\tswitch (xs->fn_id) {\n+\t\t\tcase CNXK_ML_XSTATS_FN_DEVICE:\n+\t\t\t\tfn = cnxk_ml_dev_xstat_get;\n+\t\t\t\tbreak;\n+\t\t\tcase CNXK_ML_XSTATS_FN_MODEL:\n+\t\t\t\tfn = cnxk_ml_model_xstat_get;\n+\t\t\t\tbreak;\n+\t\t\tdefault:\n+\t\t\t\tplt_err(\"Unexpected xstat fn_id = %d\", xs->fn_id);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\t*value = fn(cnxk_mldev, xs->obj_idx, xs->layer_id, xs->type) -\n+\t\t\t\t xs->reset_value;\n+\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\tif (stat_id != NULL)\n+\t\t*stat_id = (uint16_t)-1;\n+\n+\treturn -EINVAL;\n+}\n+\n+static int\n+cnxk_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode, int32_t model_id,\n+\t\t       const uint16_t stat_ids[], uint64_t values[], uint16_t nb_ids)\n+{\n+\tstruct cnxk_ml_xstats_entry *xs;\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n+\tuint32_t xstats_mode_count;\n+\tuint16_t layer_id = 0;\n+\tcnxk_ml_xstats_fn fn;\n+\tuint64_t val;\n+\tuint32_t idx;\n+\tuint32_t i;\n+\n+\tif (dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\tcnxk_mldev = dev->data->dev_private;\n+\txstats_mode_count = 0;\n+\n+\tswitch (mode) {\n+\tcase RTE_ML_DEV_XSTATS_DEVICE:\n+\t\txstats_mode_count = cnxk_mldev->xstats.count_mode_device;\n+\t\tbreak;\n+\tcase RTE_ML_DEV_XSTATS_MODEL:\n+\t\tif (model_id >= ML_CNXK_MAX_MODELS)\n+\t\t\treturn -EINVAL;\n+\t\txstats_mode_count = cnxk_mldev->xstats.count_per_layer[model_id][layer_id];\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t};\n+\n+\tidx = 0;\n+\tfor (i = 0; i < nb_ids && idx < xstats_mode_count; i++) {\n+\t\txs = &cnxk_mldev->xstats.entries[stat_ids[i]];\n+\t\tif (stat_ids[i] > cnxk_mldev->xstats.count || xs->mode != mode)\n+\t\t\tcontinue;\n+\n+\t\tif (mode == RTE_ML_DEV_XSTATS_MODEL &&\n+\t\t    (model_id != xs->obj_idx || layer_id != xs->layer_id)) {\n+\t\t\tplt_err(\"Invalid stats_id[%d] = %d for model_id = %d\\n\", i, stat_ids[i],\n+\t\t\t\tmodel_id);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tswitch (xs->fn_id) {\n+\t\tcase 
CNXK_ML_XSTATS_FN_DEVICE:\n+\t\t\tfn = cnxk_ml_dev_xstat_get;\n+\t\t\tbreak;\n+\t\tcase CNXK_ML_XSTATS_FN_MODEL:\n+\t\t\tfn = cnxk_ml_model_xstat_get;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tplt_err(\"Unexpected xstat fn_id = %d\", xs->fn_id);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tval = fn(cnxk_mldev, xs->obj_idx, xs->layer_id, xs->type);\n+\t\tif (values)\n+\t\t\tvalues[idx] = val;\n+\n+\t\tidx++;\n+\t}\n+\n+\treturn idx;\n+}\n+\n+static int\n+cnxk_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode, int32_t model_id,\n+\t\t\t const uint16_t stat_ids[], uint16_t nb_ids)\n+{\n+\tstruct cnxk_ml_dev *cnxk_mldev;\n+\n+\tif (dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\tcnxk_mldev = dev->data->dev_private;\n+\n+\tswitch (mode) {\n+\tcase RTE_ML_DEV_XSTATS_DEVICE:\n+\t\treturn cnxk_ml_device_xstats_reset(cnxk_mldev, stat_ids, nb_ids);\n+\tcase RTE_ML_DEV_XSTATS_MODEL:\n+\t\treturn cnxk_ml_model_xstats_reset(cnxk_mldev, model_id, stat_ids, nb_ids);\n+\t};\n+\n+\treturn 0;\n+}\n+\n static int\n cnxk_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params, uint16_t *model_id)\n {\n@@ -806,10 +1279,10 @@ struct rte_ml_dev_ops cnxk_ml_ops = {\n \t/* Stats ops */\n \t.dev_stats_get = cnxk_ml_dev_stats_get,\n \t.dev_stats_reset = cnxk_ml_dev_stats_reset,\n-\t.dev_xstats_names_get = cn10k_ml_dev_xstats_names_get,\n-\t.dev_xstats_by_name_get = cn10k_ml_dev_xstats_by_name_get,\n-\t.dev_xstats_get = cn10k_ml_dev_xstats_get,\n-\t.dev_xstats_reset = cn10k_ml_dev_xstats_reset,\n+\t.dev_xstats_names_get = cnxk_ml_dev_xstats_names_get,\n+\t.dev_xstats_by_name_get = cnxk_ml_dev_xstats_by_name_get,\n+\t.dev_xstats_get = cnxk_ml_dev_xstats_get,\n+\t.dev_xstats_reset = cnxk_ml_dev_xstats_reset,\n \n \t/* Model ops */\n \t.model_load = cnxk_ml_model_load,\ndiff --git a/drivers/ml/cnxk/cnxk_ml_xstats.h b/drivers/ml/cnxk/cnxk_ml_xstats.h\nindex 0d405679ca..5e02bb876c 100644\n--- a/drivers/ml/cnxk/cnxk_ml_xstats.h\n+++ b/drivers/ml/cnxk/cnxk_ml_xstats.h\n@@ -7,6 +7,8 @@\n \n #include \"cnxk_ml_io.h\"\n \n+struct cnxk_ml_dev;\n+\n /* Extended stats types enum */\n enum cnxk_ml_xstats_type {\n \t/* Number of models loaded */\n@@ -58,9 +60,21 @@ enum cnxk_ml_xstats_fn_type {\n \tCNXK_ML_XSTATS_FN_MODEL,\n };\n \n+/* Extended stats group */\n+enum cnxk_ml_xstats_group {\n+\t/* Device stats */\n+\tCNXK_ML_XSTATS_GROUP_DEVICE,\n+\n+\t/* Model stats */\n+\tCNXK_ML_XSTATS_GROUP_MODEL,\n+\n+\t/* Layer stats */\n+\tCNXK_ML_XSTATS_GROUP_LAYER,\n+};\n+\n /* Function pointer to get xstats for a type */\n-typedef uint64_t (*cnxk_ml_xstats_fn)(struct rte_ml_dev *cnxk_mldev, uint16_t obj_idx,\n-\t\t\t\t      enum cnxk_ml_xstats_type stat);\n+typedef uint64_t (*cnxk_ml_xstats_fn)(struct cnxk_ml_dev *cnxk_mldev, uint16_t obj_idx,\n+\t\t\t\t      int32_t layer_id, enum cnxk_ml_xstats_type stat);\n \n /* Extended stats entry structure */\n struct cnxk_ml_xstats_entry {\n@@ -70,6 +84,9 @@ struct cnxk_ml_xstats_entry {\n \t/* xstats mode, device or model */\n \tenum rte_ml_dev_xstats_mode mode;\n \n+\t/* xstats group */\n+\tenum cnxk_ml_xstats_group group;\n+\n \t/* Type of xstats */\n \tenum cnxk_ml_xstats_type type;\n \n",
    "prefixes": [
        "v3",
        "15/35"
    ]
}
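
As a follow-up, a minimal sketch (same assumptions as above: Python 3 plus the "requests" package) of using the "mbox" field from the response to download this patch in mbox form; the chosen output filename is illustrative only. The saved file can then be applied to a DPDK tree with git am.

# Hedged example: download the single-patch mbox referenced by the "mbox" field.
import requests

url = "http://patches.dpdk.org/api/patches/132045/"
patch = requests.get(url, headers={"Accept": "application/json"}, timeout=30).json()

mbox = requests.get(patch["mbox"], timeout=30)
mbox.raise_for_status()
with open("132045.mbox", "wb") as f:  # hypothetical local filename
    f.write(mbox.content)
# Apply with: git am 132045.mbox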