get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch (full update; all writable fields are replaced).

GET /api/patches/133411/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133411,
    "url": "http://patches.dpdk.org/api/patches/133411/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231026124347.22477-30-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231026124347.22477-30-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231026124347.22477-30-syalavarthi@marvell.com",
    "date": "2023-10-26T12:43:38",
    "name": "[v9,29/34] ml/cnxk: enable reporting model runtime as xstats",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "788df5cd511842c3a8336e329226a70886aa897d",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231026124347.22477-30-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 30002,
            "url": "http://patches.dpdk.org/api/series/30002/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=30002",
            "date": "2023-10-26T12:43:09",
            "name": "Implementation of revised ml/cnxk driver",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/30002/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/133411/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/133411/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CCF4743208;\n\tThu, 26 Oct 2023 14:48:37 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8280E42EA4;\n\tThu, 26 Oct 2023 14:44:27 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id A35FF42E04\n for <dev@dpdk.org>; Thu, 26 Oct 2023 14:44:03 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 39QAKqcd006841 for <dev@dpdk.org>; Thu, 26 Oct 2023 05:44:03 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3txcsr25pj-14\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Thu, 26 Oct 2023 05:44:02 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Thu, 26 Oct 2023 05:44:01 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Thu, 26 Oct 2023 05:44:01 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 3592B5B6938;\n Thu, 26 Oct 2023 05:44:01 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=y+YQG5WuWSmGxR7dZPA84XtcC0zIr0rh33lV48E3QiA=;\n b=aS+brzWsm0GJi7C1B9/dGHFzSORwJ2XXNqvbDi3cNs4BuBjiWApCDiIlKfa15dKtVy9x\n 7d2FWI93aXDRf1n42d712Eo2O0ECpe17oacFo25pB2LQI8zTn6Ld3mQne+3BYLc66kjp\n Rg2JDsn1KJbABKXnH84a+nQe2Aloxvc8NBLdljC8NeWFdzVp9cVtX2tU4FMmt6gMeK0J\n ngmDxh9cUMWc+EZkbV0hHMW3QcucH1Y+umcZ8Br/yaCJC1JzwYwFqqRLNkxRe6hsiOVH\n MfxZGaKN4+E+S1qZX62nHr2I612b2GNjWqeXbgsTywvq+zDP1taHWBGcPLd2rKG9RoCk dg==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v9 29/34] ml/cnxk: enable reporting model runtime as xstats",
        "Date": "Thu, 26 Oct 2023 05:43:38 -0700",
        "Message-ID": "<20231026124347.22477-30-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.42.0",
        "In-Reply-To": "<20231026124347.22477-1-syalavarthi@marvell.com>",
        "References": "<20230830155927.3566-1-syalavarthi@marvell.com>\n <20231026124347.22477-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "C_xgyVzuJYavKNWJ2IriB2WV74uSu5ye",
        "X-Proofpoint-GUID": "C_xgyVzuJYavKNWJ2IriB2WV74uSu5ye",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.272,Aquarius:18.0.987,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-10-26_10,2023-10-26_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Added model xstats entries to compute runtime latency.\nAllocated internal resources for TVM model xstats.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_ops.c   |   9 +++\n drivers/ml/cnxk/cn10k_ml_ops.h   |   2 +\n drivers/ml/cnxk/cnxk_ml_ops.c    | 131 +++++++++++++++++++++++++++----\n drivers/ml/cnxk/cnxk_ml_ops.h    |   1 +\n drivers/ml/cnxk/cnxk_ml_xstats.h |   7 ++\n drivers/ml/cnxk/mvtvm_ml_model.h |  24 ++++++\n drivers/ml/cnxk/mvtvm_ml_ops.c   |  96 +++++++++++++++++++++-\n drivers/ml/cnxk/mvtvm_ml_ops.h   |   8 ++\n drivers/ml/cnxk/mvtvm_ml_stubs.c |  23 ++++++\n drivers/ml/cnxk/mvtvm_ml_stubs.h |   6 ++\n 10 files changed, 289 insertions(+), 18 deletions(-)",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex 2d308802cf..0c67ce7b40 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -197,6 +197,15 @@ cn10k_ml_xstats_layer_name_update(struct cnxk_ml_dev *cnxk_mldev, uint16_t model\n \t}\n }\n \n+void\n+cn10k_ml_xstat_model_name_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t      uint16_t stat_id, uint16_t entry, char *suffix)\n+{\n+\tsnprintf(cnxk_mldev->xstats.entries[stat_id].map.name,\n+\t\t sizeof(cnxk_mldev->xstats.entries[stat_id].map.name), \"%s-%s-%s\",\n+\t\t model->glow.metadata.model.name, model_xstats[entry].name, suffix);\n+}\n+\n #define ML_AVG_FOREACH_QP(cnxk_mldev, layer, qp_id, str, value, count)                             \\\n \tdo {                                                                                       \\\n \t\tvalue = 0;                                                                         \\\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.h b/drivers/ml/cnxk/cn10k_ml_ops.h\nindex 3d18303ed3..045e2e6cd2 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.h\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.h\n@@ -331,6 +331,8 @@ int cn10k_ml_layer_start(void *device, uint16_t model_id, const char *layer_name\n int cn10k_ml_layer_stop(void *device, uint16_t model_id, const char *layer_name);\n \n /* xstats ops */\n+void cn10k_ml_xstat_model_name_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t\t   uint16_t stat_id, uint16_t entry, char *suffix);\n uint64_t cn10k_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer,\n \t\t\t\t  enum cnxk_ml_xstats_type type);\n \ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.c b/drivers/ml/cnxk/cnxk_ml_ops.c\nindex c38c60bf76..2632d70d8c 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.c\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.c\n@@ -138,7 +138,8 @@ cnxk_ml_xstats_init(struct cnxk_ml_dev *cnxk_mldev)\n \n \t/* Allocate memory for xstats 
entries. Don't allocate during reconfigure */\n \tnb_stats = RTE_DIM(device_xstats) +\n-\t\t   RTE_DIM(layer_xstats) * ML_CNXK_MAX_MODELS * ML_CNXK_MODEL_MAX_LAYERS;\n+\t\t   RTE_DIM(layer_xstats) * ML_CNXK_MAX_MODELS * ML_CNXK_MODEL_MAX_LAYERS +\n+\t\t   RTE_DIM(model_xstats) * ML_CNXK_MAX_MODELS;\n \tif (cnxk_mldev->xstats.entries == NULL)\n \t\tcnxk_mldev->xstats.entries = rte_zmalloc(\n \t\t\t\"cnxk_ml_xstats\", sizeof(struct cnxk_ml_xstats_entry) * nb_stats,\n@@ -169,6 +170,25 @@ cnxk_ml_xstats_init(struct cnxk_ml_dev *cnxk_mldev)\n \tfor (model = 0; model < ML_CNXK_MAX_MODELS; model++) {\n \t\tcnxk_mldev->xstats.offset_for_model[model] = stat_id;\n \n+\t\tfor (i = 0; i < RTE_DIM(model_xstats); i++) {\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].map.id = stat_id;\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].group = CNXK_ML_XSTATS_GROUP_MODEL;\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].type = model_xstats[i].type;\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].fn_id = CNXK_ML_XSTATS_FN_MODEL;\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].obj_idx = model;\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].layer_id = -1;\n+\t\t\tcnxk_mldev->xstats.entries[stat_id].reset_allowed =\n+\t\t\t\tmodel_xstats[i].reset_allowed;\n+\n+\t\t\t/* Name of xstat is updated during model load */\n+\t\t\tsnprintf(cnxk_mldev->xstats.entries[stat_id].map.name,\n+\t\t\t\t sizeof(cnxk_mldev->xstats.entries[stat_id].map.name),\n+\t\t\t\t \"Model-%u-%s\", model, model_xstats[i].name);\n+\n+\t\t\tstat_id++;\n+\t\t}\n+\n \t\tfor (layer = 0; layer < ML_CNXK_MODEL_MAX_LAYERS; layer++) {\n \t\t\tcnxk_mldev->xstats.offset_for_layer[model][layer] = stat_id;\n \n@@ -195,7 +215,8 @@ cnxk_ml_xstats_init(struct cnxk_ml_dev *cnxk_mldev)\n \t\t\tcnxk_mldev->xstats.count_per_layer[model][layer] = RTE_DIM(layer_xstats);\n \t\t}\n \n-\t\tcnxk_mldev->xstats.count_per_model[model] = 
RTE_DIM(layer_xstats);\n+\t\tcnxk_mldev->xstats.count_per_model[model] =\n+\t\t\tRTE_DIM(layer_xstats) + ML_CNXK_MODEL_MAX_LAYERS * RTE_DIM(model_xstats);\n \t}\n \n \tcnxk_mldev->xstats.count_mode_model = stat_id - cnxk_mldev->xstats.count_mode_device;\n@@ -204,6 +225,36 @@ cnxk_ml_xstats_init(struct cnxk_ml_dev *cnxk_mldev)\n \treturn 0;\n }\n \n+void\n+cnxk_ml_xstats_model_name_update(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id)\n+{\n+\tstruct cnxk_ml_model *model;\n+\tuint16_t rclk_freq;\n+\tuint16_t sclk_freq;\n+\tuint16_t stat_id;\n+\tchar suffix[8];\n+\tuint16_t i;\n+\n+\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\tstat_id = cnxk_mldev->xstats.offset_for_model[model_id];\n+\n+\troc_clk_freq_get(&rclk_freq, &sclk_freq);\n+\tif (sclk_freq == 0)\n+\t\trte_strscpy(suffix, \"cycles\", 7);\n+\telse\n+\t\trte_strscpy(suffix, \"ns\", 3);\n+\n+\t/* Update xstat name based on layer name and sclk availability */\n+\tfor (i = 0; i < RTE_DIM(model_xstats); i++) {\n+\t\tif (model->type == ML_CNXK_MODEL_TYPE_GLOW)\n+\t\t\tcn10k_ml_xstat_model_name_set(cnxk_mldev, model, stat_id, i, suffix);\n+\t\telse\n+\t\t\tmvtvm_ml_model_xstat_name_set(cnxk_mldev, model, stat_id, i, suffix);\n+\n+\t\tstat_id++;\n+\t}\n+}\n+\n static void\n cnxk_ml_xstats_uninit(struct cnxk_ml_dev *cnxk_mldev)\n {\n@@ -247,13 +298,22 @@ cnxk_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, uint16_t obj_idx, int32_\n \tif (model == NULL)\n \t\treturn 0;\n \n-\tif (layer_id >= 0)\n+\tif (layer_id >= 0) {\n \t\tlayer = &model->layer[layer_id];\n-\telse\n-\t\treturn 0;\n+\t\tgoto layer_xstats;\n+\t} else {\n+\t\tlayer = NULL;\n+\t\tgoto model_xstats;\n+\t}\n \n+layer_xstats:\n \tvalue = cn10k_ml_model_xstat_get(cnxk_mldev, layer, type);\n+\tgoto exit_xstats;\n \n+model_xstats:\n+\tvalue = mvtvm_ml_model_xstat_get(cnxk_mldev, model, type);\n+\n+exit_xstats:\n \troc_clk_freq_get(&rclk_freq, &sclk_freq);\n \tif (sclk_freq != 0) /* return in ns */\n \t\tvalue = (value * 1000ULL) / 
sclk_freq;\n@@ -836,8 +896,9 @@ cnxk_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode\n {\n \tstruct cnxk_ml_xstats_entry *xs;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n+\tstruct cnxk_ml_model *model;\n \tuint32_t xstats_mode_count;\n-\tuint16_t layer_id = 0;\n+\tuint16_t layer_id;\n \tuint32_t idx = 0;\n \tuint32_t i;\n \n@@ -854,7 +915,17 @@ cnxk_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode\n \tcase RTE_ML_DEV_XSTATS_MODEL:\n \t\tif (model_id >= ML_CNXK_MAX_MODELS)\n \t\t\tbreak;\n-\t\txstats_mode_count = cnxk_mldev->xstats.count_per_layer[model_id][layer_id];\n+\n+\t\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\t\tfor (layer_id = 0; layer_id < model->nb_layers; layer_id++) {\n+\t\t\tif (model->layer[layer_id].type == ML_CNXK_LAYER_TYPE_MRVL)\n+\t\t\t\txstats_mode_count +=\n+\t\t\t\t\tcnxk_mldev->xstats.count_per_layer[model_id][layer_id];\n+\t\t}\n+\n+\t\tif ((model->type == ML_CNXK_MODEL_TYPE_TVM) &&\n+\t\t    (model->subtype != ML_CNXK_MODEL_SUBTYPE_TVM_MRVL))\n+\t\t\txstats_mode_count += RTE_DIM(model_xstats);\n \t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n@@ -868,9 +939,20 @@ cnxk_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode\n \t\tif (xs->mode != mode)\n \t\t\tcontinue;\n \n-\t\tif (mode == RTE_ML_DEV_XSTATS_MODEL &&\n-\t\t    (model_id != xs->obj_idx || layer_id != xs->layer_id))\n-\t\t\tcontinue;\n+\t\tif (mode == RTE_ML_DEV_XSTATS_MODEL) {\n+\t\t\tif (model_id != xs->obj_idx)\n+\t\t\t\tcontinue;\n+\n+\t\t\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\t\t\tif ((model->type == ML_CNXK_MODEL_TYPE_GLOW ||\n+\t\t\t     model->subtype == ML_CNXK_MODEL_SUBTYPE_TVM_MRVL) &&\n+\t\t\t    xs->group == CNXK_ML_XSTATS_GROUP_MODEL)\n+\t\t\t\tcontinue;\n+\n+\t\t\tif (model->type == ML_CNXK_MODEL_TYPE_TVM &&\n+\t\t\t    model->layer[xs->layer_id].type == ML_CNXK_LAYER_TYPE_LLVM)\n+\t\t\t\tcontinue;\n+\t\t}\n \n \t\trte_strscpy(xstats_map[idx].name, xs->map.name, 
RTE_ML_STR_MAX);\n \t\txstats_map[idx].id = xs->map.id;\n@@ -931,9 +1013,10 @@ cnxk_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n {\n \tstruct cnxk_ml_xstats_entry *xs;\n \tstruct cnxk_ml_dev *cnxk_mldev;\n+\tstruct cnxk_ml_model *model;\n \tuint32_t xstats_mode_count;\n-\tuint16_t layer_id = 0;\n \tcnxk_ml_xstats_fn fn;\n+\tuint16_t layer_id;\n \tuint64_t val;\n \tuint32_t idx;\n \tuint32_t i;\n@@ -951,7 +1034,14 @@ cnxk_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n \tcase RTE_ML_DEV_XSTATS_MODEL:\n \t\tif (model_id >= ML_CNXK_MAX_MODELS)\n \t\t\treturn -EINVAL;\n-\t\txstats_mode_count = cnxk_mldev->xstats.count_per_layer[model_id][layer_id];\n+\n+\t\tmodel = cnxk_mldev->mldev->data->models[model_id];\n+\t\tfor (layer_id = 0; layer_id < model->nb_layers; layer_id++)\n+\t\t\txstats_mode_count += cnxk_mldev->xstats.count_per_layer[model_id][layer_id];\n+\n+\t\tif ((model->type == ML_CNXK_MODEL_TYPE_TVM) &&\n+\t\t    (model->subtype != ML_CNXK_MODEL_SUBTYPE_TVM_MRVL))\n+\t\t\txstats_mode_count += RTE_DIM(model_xstats);\n \t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n@@ -963,11 +1053,18 @@ cnxk_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode,\n \t\tif (stat_ids[i] > cnxk_mldev->xstats.count || xs->mode != mode)\n \t\t\tcontinue;\n \n-\t\tif (mode == RTE_ML_DEV_XSTATS_MODEL &&\n-\t\t    (model_id != xs->obj_idx || layer_id != xs->layer_id)) {\n-\t\t\tplt_err(\"Invalid stats_id[%d] = %d for model_id = %d\\n\", i, stat_ids[i],\n-\t\t\t\tmodel_id);\n-\t\t\treturn -EINVAL;\n+\t\tif (mode == RTE_ML_DEV_XSTATS_MODEL) {\n+\t\t\tif (model_id != xs->obj_idx)\n+\t\t\t\tcontinue;\n+\n+\t\t\tmodel = cnxk_mldev->mldev->data->models[xs->obj_idx];\n+\t\t\tif ((model->type == ML_CNXK_MODEL_TYPE_GLOW ||\n+\t\t\t     model->subtype == ML_CNXK_MODEL_SUBTYPE_TVM_MRVL) &&\n+\t\t\t    xs->group == CNXK_ML_XSTATS_GROUP_MODEL)\n+\t\t\t\tcontinue;\n+\n+\t\t\tif (xs->layer_id == -1 && xs->group == 
CNXK_ML_XSTATS_GROUP_LAYER)\n+\t\t\t\tcontinue;\n \t\t}\n \n \t\tswitch (xs->fn_id) {\ndiff --git a/drivers/ml/cnxk/cnxk_ml_ops.h b/drivers/ml/cnxk/cnxk_ml_ops.h\nindex b22a2b0d95..ab32676b3e 100644\n--- a/drivers/ml/cnxk/cnxk_ml_ops.h\n+++ b/drivers/ml/cnxk/cnxk_ml_ops.h\n@@ -70,6 +70,7 @@ extern struct rte_ml_dev_ops cnxk_ml_ops;\n \n int cnxk_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id);\n int cnxk_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id);\n+void cnxk_ml_xstats_model_name_update(struct cnxk_ml_dev *cnxk_mldev, uint16_t model_id);\n \n __rte_hot uint16_t cnxk_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id,\n \t\t\t\t\t struct rte_ml_op **ops, uint16_t nb_ops);\ndiff --git a/drivers/ml/cnxk/cnxk_ml_xstats.h b/drivers/ml/cnxk/cnxk_ml_xstats.h\nindex 5e02bb876c..a2c9adfe4a 100644\n--- a/drivers/ml/cnxk/cnxk_ml_xstats.h\n+++ b/drivers/ml/cnxk/cnxk_ml_xstats.h\n@@ -142,4 +142,11 @@ static const struct cnxk_ml_xstat_info layer_xstats[] = {\n \t{\"Min-FW-Latency\", min_fw_latency, 1}, {\"Max-FW-Latency\", max_fw_latency, 1},\n };\n \n+/* Model xstats */\n+static const struct cnxk_ml_xstat_info model_xstats[] = {\n+\t{\"Avg-RT-Latency\", avg_rt_latency, 1},\n+\t{\"Min-RT-Latency\", min_rt_latency, 1},\n+\t{\"Max-RT-Latency\", max_rt_latency, 1},\n+};\n+\n #endif /* _CNXK_ML_XSTATS_H_ */\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_model.h b/drivers/ml/cnxk/mvtvm_ml_model.h\nindex 900ba44fa0..66c3af18e1 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_model.h\n+++ b/drivers/ml/cnxk/mvtvm_ml_model.h\n@@ -33,6 +33,27 @@ struct mvtvm_ml_model_object {\n \tint64_t size;\n };\n \n+/* Model fast-path stats */\n+struct mvtvm_ml_model_xstats {\n+\t/* Total TVM runtime latency, sum of all inferences */\n+\tuint64_t tvm_rt_latency_tot;\n+\n+\t/* TVM runtime latency */\n+\tuint64_t tvm_rt_latency;\n+\n+\t/* Minimum TVM runtime latency */\n+\tuint64_t tvm_rt_latency_min;\n+\n+\t/* Maximum TVM runtime latency */\n+\tuint64_t tvm_rt_latency_max;\n+\n+\t/* Total 
jobs dequeued */\n+\tuint64_t dequeued_count;\n+\n+\t/* Hardware stats reset index */\n+\tuint64_t tvm_rt_reset_count;\n+};\n+\n struct mvtvm_ml_model_data {\n \t/* Model metadata */\n \tstruct tvmdp_model_metadata metadata;\n@@ -45,6 +66,9 @@ struct mvtvm_ml_model_data {\n \n \t/* Model I/O info */\n \tstruct cnxk_ml_io_info info;\n+\n+\t/* Stats for burst ops */\n+\tstruct mvtvm_ml_model_xstats *burst_xstats;\n };\n \n enum cnxk_ml_model_type mvtvm_ml_model_type_get(struct rte_ml_model_params *params);\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_ops.c b/drivers/ml/cnxk/mvtvm_ml_ops.c\nindex c6872cd89a..abfbae2b3a 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_ops.c\n+++ b/drivers/ml/cnxk/mvtvm_ml_ops.c\n@@ -10,10 +10,83 @@\n #include \"cnxk_ml_dev.h\"\n #include \"cnxk_ml_model.h\"\n #include \"cnxk_ml_ops.h\"\n+#include \"cnxk_ml_xstats.h\"\n \n /* ML model macros */\n #define MVTVM_ML_MODEL_MEMZONE_NAME \"ml_mvtvm_model_mz\"\n \n+void\n+mvtvm_ml_model_xstat_name_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t      uint16_t stat_id, uint16_t entry, char *suffix)\n+{\n+\tsnprintf(cnxk_mldev->xstats.entries[stat_id].map.name,\n+\t\t sizeof(cnxk_mldev->xstats.entries[stat_id].map.name), \"%s-%s-%s\",\n+\t\t model->mvtvm.metadata.model.name, model_xstats[entry].name, suffix);\n+}\n+\n+#define ML_AVG_FOREACH_QP_MVTVM(cnxk_mldev, model, qp_id, value, count)                            \\\n+\tdo {                                                                                       \\\n+\t\tvalue = 0;                                                                         \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {        \\\n+\t\t\tvalue += model->mvtvm.burst_xstats[qp_id].tvm_rt_latency_tot;              \\\n+\t\t\tcount += model->mvtvm.burst_xstats[qp_id].dequeued_count -                 \\\n+\t\t\t\t model->mvtvm.burst_xstats[qp_id].tvm_rt_reset_count;              \\\n+\t\t}                                       
                                           \\\n+\t\tif (count != 0)                                                                    \\\n+\t\t\tvalue = value / count;                                                     \\\n+\t} while (0)\n+\n+#define ML_MIN_FOREACH_QP_MVTVM(cnxk_mldev, model, qp_id, value, count)                            \\\n+\tdo {                                                                                       \\\n+\t\tvalue = UINT64_MAX;                                                                \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {        \\\n+\t\t\tvalue = PLT_MIN(value,                                                     \\\n+\t\t\t\t\tmodel->mvtvm.burst_xstats[qp_id].tvm_rt_latency_min);      \\\n+\t\t\tcount += model->mvtvm.burst_xstats[qp_id].dequeued_count -                 \\\n+\t\t\t\t model->mvtvm.burst_xstats[qp_id].tvm_rt_reset_count;              \\\n+\t\t}                                                                                  \\\n+\t\tif (count == 0)                                                                    \\\n+\t\t\tvalue = 0;                                                                 \\\n+\t} while (0)\n+\n+#define ML_MAX_FOREACH_QP_MVTVM(cnxk_mldev, model, qp_id, value, count)                            \\\n+\tdo {                                                                                       \\\n+\t\tvalue = 0;                                                                         \\\n+\t\tfor (qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {        \\\n+\t\t\tvalue = PLT_MAX(value,                                                     \\\n+\t\t\t\t\tmodel->mvtvm.burst_xstats[qp_id].tvm_rt_latency_max);      \\\n+\t\t\tcount += model->mvtvm.burst_xstats[qp_id].dequeued_count -                 \\\n+\t\t\t\t model->mvtvm.burst_xstats[qp_id].tvm_rt_reset_count;              \\\n+\t\t}                                     
                                             \\\n+\t\tif (count == 0)                                                                    \\\n+\t\t\tvalue = 0;                                                                 \\\n+\t} while (0)\n+\n+uint64_t\n+mvtvm_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t enum cnxk_ml_xstats_type type)\n+{\n+\tuint64_t count = 0;\n+\tuint64_t value = 0;\n+\tuint32_t qp_id;\n+\n+\tswitch (type) {\n+\tcase avg_rt_latency:\n+\t\tML_AVG_FOREACH_QP_MVTVM(cnxk_mldev, model, qp_id, value, count);\n+\t\tbreak;\n+\tcase min_rt_latency:\n+\t\tML_MIN_FOREACH_QP_MVTVM(cnxk_mldev, model, qp_id, value, count);\n+\t\tbreak;\n+\tcase max_rt_latency:\n+\t\tML_MAX_FOREACH_QP_MVTVM(cnxk_mldev, model, qp_id, value, count);\n+\t\tbreak;\n+\tdefault:\n+\t\tvalue = 0;\n+\t}\n+\n+\treturn value;\n+}\n+\n int\n mvtvm_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_config *conf)\n {\n@@ -53,6 +126,7 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *\n \tchar str[RTE_MEMZONE_NAMESIZE];\n \tconst struct plt_memzone *mz;\n \tsize_t model_object_size = 0;\n+\tsize_t model_xstats_size = 0;\n \tuint16_t nb_mrvl_layers;\n \tuint16_t nb_llvm_layers;\n \tuint8_t layer_id = 0;\n@@ -68,7 +142,11 @@ mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *\n \tmodel_object_size = RTE_ALIGN_CEIL(object[0].size, RTE_CACHE_LINE_MIN_SIZE) +\n \t\t\t    RTE_ALIGN_CEIL(object[1].size, RTE_CACHE_LINE_MIN_SIZE) +\n \t\t\t    RTE_ALIGN_CEIL(object[2].size, RTE_CACHE_LINE_MIN_SIZE);\n-\tmz_size += model_object_size;\n+\n+\tmodel_xstats_size =\n+\t\tcnxk_mldev->mldev->data->nb_queue_pairs * sizeof(struct mvtvm_ml_model_xstats);\n+\n+\tmz_size += model_object_size + model_xstats_size;\n \n \t/* Allocate memzone for model object */\n \tsnprintf(str, RTE_MEMZONE_NAMESIZE, \"%s_%u\", MVTVM_ML_MODEL_MEMZONE_NAME, model->model_id);\n@@ -181,6 +259,22 @@ 
mvtvm_ml_model_load(struct cnxk_ml_dev *cnxk_mldev, struct rte_ml_model_params *\n \t/* Set model info */\n \tmvtvm_ml_model_info_set(cnxk_mldev, model);\n \n+\t/* Update model xstats name */\n+\tcnxk_ml_xstats_model_name_update(cnxk_mldev, model->model_id);\n+\n+\tmodel->mvtvm.burst_xstats = RTE_PTR_ADD(\n+\t\tmodel->mvtvm.object.params.addr,\n+\t\tRTE_ALIGN_CEIL(model->mvtvm.object.params.size, RTE_CACHE_LINE_MIN_SIZE));\n+\n+\tfor (int qp_id = 0; qp_id < cnxk_mldev->mldev->data->nb_queue_pairs; qp_id++) {\n+\t\tmodel->mvtvm.burst_xstats[qp_id].tvm_rt_latency_tot = 0;\n+\t\tmodel->mvtvm.burst_xstats[qp_id].tvm_rt_latency = 0;\n+\t\tmodel->mvtvm.burst_xstats[qp_id].tvm_rt_latency_min = UINT64_MAX;\n+\t\tmodel->mvtvm.burst_xstats[qp_id].tvm_rt_latency_max = 0;\n+\t\tmodel->mvtvm.burst_xstats[qp_id].tvm_rt_reset_count = 0;\n+\t\tmodel->mvtvm.burst_xstats[qp_id].dequeued_count = 0;\n+\t}\n+\n \treturn 0;\n \n error:\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_ops.h b/drivers/ml/cnxk/mvtvm_ml_ops.h\nindex 55459f9f7f..22e0340146 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_ops.h\n+++ b/drivers/ml/cnxk/mvtvm_ml_ops.h\n@@ -11,8 +11,11 @@\n \n #include <rte_mldev.h>\n \n+#include \"cnxk_ml_xstats.h\"\n+\n struct cnxk_ml_dev;\n struct cnxk_ml_model;\n+struct cnxk_ml_layer;\n \n int mvtvm_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_config *conf);\n int mvtvm_ml_dev_close(struct cnxk_ml_dev *cnxk_mldev);\n@@ -22,4 +25,9 @@ int mvtvm_ml_model_unload(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *\n int mvtvm_ml_model_start(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model);\n int mvtvm_ml_model_stop(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model);\n \n+void mvtvm_ml_model_xstat_name_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t\t   uint16_t stat_id, uint16_t entry, char *suffix);\n+uint64_t mvtvm_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t\t  enum 
cnxk_ml_xstats_type type);\n+\n #endif /* _MVTVM_ML_OPS_H_ */\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_stubs.c b/drivers/ml/cnxk/mvtvm_ml_stubs.c\nindex 260a051b08..19af1d2703 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_stubs.c\n+++ b/drivers/ml/cnxk/mvtvm_ml_stubs.c\n@@ -8,6 +8,7 @@\n \n #include \"cnxk_ml_dev.h\"\n #include \"cnxk_ml_model.h\"\n+#include \"cnxk_ml_xstats.h\"\n \n enum cnxk_ml_model_type\n mvtvm_ml_model_type_get(struct rte_ml_model_params *params)\n@@ -44,6 +45,28 @@ mvtvm_ml_layer_print(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer\n \tRTE_SET_USED(fp);\n }\n \n+void\n+mvtvm_ml_model_xstat_name_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t      uint16_t stat_id, uint16_t entry, char *suffix)\n+{\n+\tRTE_SET_USED(cnxk_mldev);\n+\tRTE_SET_USED(model);\n+\tRTE_SET_USED(stat_id);\n+\tRTE_SET_USED(entry);\n+\tRTE_SET_USED(suffix);\n+}\n+\n+uint64_t\n+mvtvm_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t enum cnxk_ml_xstats_type type)\n+{\n+\tRTE_SET_USED(cnxk_mldev);\n+\tRTE_SET_USED(model);\n+\tRTE_SET_USED(type);\n+\n+\treturn 0;\n+}\n+\n int\n mvtvm_ml_dev_configure(struct cnxk_ml_dev *cnxk_mldev, const struct rte_ml_dev_config *conf)\n {\ndiff --git a/drivers/ml/cnxk/mvtvm_ml_stubs.h b/drivers/ml/cnxk/mvtvm_ml_stubs.h\nindex d6d0edbcf1..3fd1f04c35 100644\n--- a/drivers/ml/cnxk/mvtvm_ml_stubs.h\n+++ b/drivers/ml/cnxk/mvtvm_ml_stubs.h\n@@ -7,6 +7,8 @@\n \n #include <rte_mldev.h>\n \n+#include \"cnxk_ml_xstats.h\"\n+\n struct cnxk_ml_dev;\n struct cnxk_ml_model;\n struct cnxk_ml_layer;\n@@ -24,5 +26,9 @@ int mvtvm_ml_model_get_layer_id(struct cnxk_ml_model *model, const char *layer_n\n \t\t\t\tuint16_t *layer_id);\n struct cnxk_ml_io_info *mvtvm_ml_model_io_info_get(struct cnxk_ml_model *model, uint16_t layer_id);\n void mvtvm_ml_layer_print(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer, FILE *fp);\n+void mvtvm_ml_model_xstat_name_set(struct cnxk_ml_dev 
*cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t\t   uint16_t stat_id, uint16_t entry, char *suffix);\n+uint64_t mvtvm_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *model,\n+\t\t\t\t  enum cnxk_ml_xstats_type type);\n \n #endif /* _MVTVM_ML_STUBS_H_ */\n",
    "prefixes": [
        "v9",
        "29/34"
    ]
}