get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
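
For reference, an object like the one shown below can be fetched with a plain GET. This is a minimal client sketch, assuming the third-party Python "requests" library; the host and patch ID are taken from the example response that follows.

    import requests

    BASE = "http://patches.dpdk.org/api"
    PATCH_ID = 126426  # patch ID from the example response below

    # Unauthenticated GET is enough for read-only access.
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[v1,2/3] ml/cnxk: update model metadata to v2301"
    print(patch["state"])  # "accepted"
    print(patch["mbox"])   # mbox URL for the patch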

GET /api/patches/126426/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 126426,
    "url": "http://patches.dpdk.org/api/patches/126426/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230423050814.825-3-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230423050814.825-3-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230423050814.825-3-syalavarthi@marvell.com",
    "date": "2023-04-23T05:08:13",
    "name": "[v1,2/3] ml/cnxk: update model metadata to v2301",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "b102da0903acd08a1cd3cf4eb1da08b02bcdaf7d",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230423050814.825-3-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 27829,
            "url": "http://patches.dpdk.org/api/series/27829/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=27829",
            "date": "2023-04-23T05:08:11",
            "name": "Add support for 32 I/O per model",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/27829/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/126426/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/126426/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 42DB9429BA;\n\tSun, 23 Apr 2023 07:08:33 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E66C140156;\n\tSun, 23 Apr 2023 07:08:23 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 9E18540A80\n for <dev@dpdk.org>; Sun, 23 Apr 2023 07:08:22 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 33N4iPB6009043 for <dev@dpdk.org>; Sat, 22 Apr 2023 22:08:22 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3q4f3p225y-2\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Sat, 22 Apr 2023 22:08:21 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Sat, 22 Apr 2023 22:08:19 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Sat, 22 Apr 2023 22:08:19 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id C4D403F704D;\n Sat, 22 Apr 2023 22:08:19 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=P8RtKTKy6O5Do/m3yKoK5dVHcSdviHgsqrHl2Y2U1T8=;\n b=OcTN4U3Uk960GHR09e/uI7rsy2SoWzV3mQMKGiRJ8rfanRpKea4Jj3eqRx1B0FYlAnch\n +LPI0HlvtfjTMGviluolZBtbrjgQjZ8EzCiJAa/dKSUpd5qVLJZx9rgQXSJmEGkkB2Qf\n dY2X2COovs3YQXs8MHlkKQfttQWHyrqHg7o/hZc9qg+PZohbXkU+F6lKb38NYUCRWEGX\n 9b/WbnOHAikqR0IbrOXL7XZt+s4rbDlANbO6CHZVZ37TS8Ktu3A3Yjofo+J13mto1Kq+\n /QXNym6gsxaocdEx7b57813H7/UJ77cBs//SB+Vgur3Vme87Eher0h/50lQDKuWcnhv8 5A==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v1 2/3] ml/cnxk: update model metadata to v2301",
        "Date": "Sat, 22 Apr 2023 22:08:13 -0700",
        "Message-ID": "<20230423050814.825-3-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20230423050814.825-1-syalavarthi@marvell.com>",
        "References": "<20230423050814.825-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "52hnG146c6F1iP38_Buy8XaS0a_ZkdGE",
        "X-Proofpoint-GUID": "52hnG146c6F1iP38_Buy8XaS0a_ZkdGE",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.254,Aquarius:18.0.942,Hydra:6.0.573,FMLib:17.11.170.22\n definitions=2023-04-23_02,2023-04-21_01,2023-02-09_01",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Update model metadata to v2301. Revised metadata introduces\nfields to support up to 32 inputs/outputs per model, scratch\nrelocation and updates to names of existing fields. Update\ndriver files to include changes in names of metadata fields.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_model.c | 111 ++++++++++++++++---------------\n drivers/ml/cnxk/cn10k_ml_model.h |  36 +++++++---\n drivers/ml/cnxk/cn10k_ml_ops.c   |  50 +++++++-------\n 3 files changed, 106 insertions(+), 91 deletions(-)",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c\nindex c0b7b061f5..a15df700aa 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.c\n+++ b/drivers/ml/cnxk/cn10k_ml_model.c\n@@ -83,11 +83,11 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)\n \n \t/* Header version */\n \trte_memcpy(version, metadata->header.version, 4 * sizeof(uint8_t));\n-\tif (version[0] * 1000 + version[1] * 100 < MRVL_ML_MODEL_VERSION) {\n+\tif (version[0] * 1000 + version[1] * 100 != MRVL_ML_MODEL_VERSION_MIN) {\n \t\tplt_err(\"Metadata version = %u.%u.%u.%u (< %u.%u.%u.%u) not supported\", version[0],\n-\t\t\tversion[1], version[2], version[3], (MRVL_ML_MODEL_VERSION / 1000) % 10,\n-\t\t\t(MRVL_ML_MODEL_VERSION / 100) % 10, (MRVL_ML_MODEL_VERSION / 10) % 10,\n-\t\t\tMRVL_ML_MODEL_VERSION % 10);\n+\t\t\tversion[1], version[2], version[3], (MRVL_ML_MODEL_VERSION_MIN / 1000) % 10,\n+\t\t\t(MRVL_ML_MODEL_VERSION_MIN / 100) % 10,\n+\t\t\t(MRVL_ML_MODEL_VERSION_MIN / 10) % 10, MRVL_ML_MODEL_VERSION_MIN % 10);\n \t\treturn -ENOTSUP;\n \t}\n \n@@ -125,36 +125,36 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)\n \t}\n \n \t/* Check input count */\n-\tif (metadata->model.num_input > MRVL_ML_INPUT_OUTPUT_SIZE) {\n+\tif (metadata->model.num_input > MRVL_ML_NUM_INPUT_OUTPUT_1) {\n \t\tplt_err(\"Invalid metadata, num_input  = %u (> %u)\", metadata->model.num_input,\n-\t\t\tMRVL_ML_INPUT_OUTPUT_SIZE);\n+\t\t\tMRVL_ML_NUM_INPUT_OUTPUT_1);\n \t\treturn -EINVAL;\n \t}\n \n \t/* Check output count */\n-\tif (metadata->model.num_output > MRVL_ML_INPUT_OUTPUT_SIZE) {\n+\tif (metadata->model.num_output > MRVL_ML_NUM_INPUT_OUTPUT_1) {\n \t\tplt_err(\"Invalid metadata, num_output  = %u (> %u)\", metadata->model.num_output,\n-\t\t\tMRVL_ML_INPUT_OUTPUT_SIZE);\n+\t\t\tMRVL_ML_NUM_INPUT_OUTPUT_1);\n \t\treturn -EINVAL;\n \t}\n \n \t/* Inputs */\n \tfor (i = 0; i < metadata->model.num_input; i++) {\n-\t\tif (rte_ml_io_type_size_get(cn10k_ml_io_type_map(metadata->input[i].input_type)) <=\n+\t\tif (rte_ml_io_type_size_get(cn10k_ml_io_type_map(metadata->input1[i].input_type)) <=\n \t\t    0) {\n \t\t\tplt_err(\"Invalid metadata, input[%u] : input_type = %u\", i,\n-\t\t\t\tmetadata->input[i].input_type);\n+\t\t\t\tmetadata->input1[i].input_type);\n \t\t\treturn -EINVAL;\n \t\t}\n \n \t\tif (rte_ml_io_type_size_get(\n-\t\t\t    cn10k_ml_io_type_map(metadata->input[i].model_input_type)) <= 0) {\n+\t\t\t    cn10k_ml_io_type_map(metadata->input1[i].model_input_type)) <= 0) {\n \t\t\tplt_err(\"Invalid metadata, input[%u] : model_input_type = %u\", i,\n-\t\t\t\tmetadata->input[i].model_input_type);\n+\t\t\t\tmetadata->input1[i].model_input_type);\n \t\t\treturn -EINVAL;\n \t\t}\n \n-\t\tif (metadata->input[i].relocatable != 1) {\n+\t\tif (metadata->input1[i].relocatable != 1) {\n \t\t\tplt_err(\"Model not supported, non-relocatable input: %u\", i);\n \t\t\treturn -ENOTSUP;\n \t\t}\n@@ -163,20 +163,20 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)\n \t/* Outputs */\n \tfor (i = 0; i < metadata->model.num_output; i++) {\n \t\tif (rte_ml_io_type_size_get(\n-\t\t\t    cn10k_ml_io_type_map(metadata->output[i].output_type)) <= 0) {\n+\t\t\t    cn10k_ml_io_type_map(metadata->output1[i].output_type)) <= 0) {\n \t\t\tplt_err(\"Invalid metadata, output[%u] : output_type = %u\", i,\n-\t\t\t\tmetadata->output[i].output_type);\n+\t\t\t\tmetadata->output1[i].output_type);\n \t\t\treturn -EINVAL;\n \t\t}\n \n \t\tif (rte_ml_io_type_size_get(\n-\t\t\t    
cn10k_ml_io_type_map(metadata->output[i].model_output_type)) <= 0) {\n+\t\t\t    cn10k_ml_io_type_map(metadata->output1[i].model_output_type)) <= 0) {\n \t\t\tplt_err(\"Invalid metadata, output[%u] : model_output_type = %u\", i,\n-\t\t\t\tmetadata->output[i].model_output_type);\n+\t\t\t\tmetadata->output1[i].model_output_type);\n \t\t\treturn -EINVAL;\n \t\t}\n \n-\t\tif (metadata->output[i].relocatable != 1) {\n+\t\tif (metadata->output1[i].relocatable != 1) {\n \t\t\tplt_err(\"Model not supported, non-relocatable output: %u\", i);\n \t\t\treturn -ENOTSUP;\n \t\t}\n@@ -191,28 +191,29 @@ cn10k_ml_model_metadata_update(struct cn10k_ml_model_metadata *metadata)\n \tuint8_t i;\n \n \tfor (i = 0; i < metadata->model.num_input; i++) {\n-\t\tmetadata->input[i].input_type = cn10k_ml_io_type_map(metadata->input[i].input_type);\n-\t\tmetadata->input[i].model_input_type =\n-\t\t\tcn10k_ml_io_type_map(metadata->input[i].model_input_type);\n+\t\tmetadata->input1[i].input_type =\n+\t\t\tcn10k_ml_io_type_map(metadata->input1[i].input_type);\n+\t\tmetadata->input1[i].model_input_type =\n+\t\t\tcn10k_ml_io_type_map(metadata->input1[i].model_input_type);\n \n-\t\tif (metadata->input[i].shape.w == 0)\n-\t\t\tmetadata->input[i].shape.w = 1;\n+\t\tif (metadata->input1[i].shape.w == 0)\n+\t\t\tmetadata->input1[i].shape.w = 1;\n \n-\t\tif (metadata->input[i].shape.x == 0)\n-\t\t\tmetadata->input[i].shape.x = 1;\n+\t\tif (metadata->input1[i].shape.x == 0)\n+\t\t\tmetadata->input1[i].shape.x = 1;\n \n-\t\tif (metadata->input[i].shape.y == 0)\n-\t\t\tmetadata->input[i].shape.y = 1;\n+\t\tif (metadata->input1[i].shape.y == 0)\n+\t\t\tmetadata->input1[i].shape.y = 1;\n \n-\t\tif (metadata->input[i].shape.z == 0)\n-\t\t\tmetadata->input[i].shape.z = 1;\n+\t\tif (metadata->input1[i].shape.z == 0)\n+\t\t\tmetadata->input1[i].shape.z = 1;\n \t}\n \n \tfor (i = 0; i < metadata->model.num_output; i++) {\n-\t\tmetadata->output[i].output_type =\n-\t\t\tcn10k_ml_io_type_map(metadata->output[i].output_type);\n-\t\tmetadata->output[i].model_output_type =\n-\t\t\tcn10k_ml_io_type_map(metadata->output[i].model_output_type);\n+\t\tmetadata->output1[i].output_type =\n+\t\t\tcn10k_ml_io_type_map(metadata->output1[i].output_type);\n+\t\tmetadata->output1[i].model_output_type =\n+\t\t\tcn10k_ml_io_type_map(metadata->output1[i].model_output_type);\n \t}\n }\n \n@@ -272,31 +273,31 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_\n \taddr->total_input_sz_q = 0;\n \tfor (i = 0; i < metadata->model.num_input; i++) {\n \t\taddr->input[i].nb_elements =\n-\t\t\tmetadata->input[i].shape.w * metadata->input[i].shape.x *\n-\t\t\tmetadata->input[i].shape.y * metadata->input[i].shape.z;\n+\t\t\tmetadata->input1[i].shape.w * metadata->input1[i].shape.x *\n+\t\t\tmetadata->input1[i].shape.y * metadata->input1[i].shape.z;\n \t\taddr->input[i].sz_d = addr->input[i].nb_elements *\n-\t\t\t\t      rte_ml_io_type_size_get(metadata->input[i].input_type);\n+\t\t\t\t      rte_ml_io_type_size_get(metadata->input1[i].input_type);\n \t\taddr->input[i].sz_q = addr->input[i].nb_elements *\n-\t\t\t\t      rte_ml_io_type_size_get(metadata->input[i].model_input_type);\n+\t\t\t\t      rte_ml_io_type_size_get(metadata->input1[i].model_input_type);\n \t\taddr->total_input_sz_d += addr->input[i].sz_d;\n \t\taddr->total_input_sz_q += addr->input[i].sz_q;\n \n \t\tplt_ml_dbg(\"model_id = %u, input[%u] - w:%u x:%u y:%u z:%u, sz_d = %u sz_q = %u\",\n-\t\t\t   model->model_id, i, metadata->input[i].shape.w,\n-\t\t\t   
metadata->input[i].shape.x, metadata->input[i].shape.y,\n-\t\t\t   metadata->input[i].shape.z, addr->input[i].sz_d, addr->input[i].sz_q);\n+\t\t\t   model->model_id, i, metadata->input1[i].shape.w,\n+\t\t\t   metadata->input1[i].shape.x, metadata->input1[i].shape.y,\n+\t\t\t   metadata->input1[i].shape.z, addr->input[i].sz_d, addr->input[i].sz_q);\n \t}\n \n \t/* Outputs */\n \taddr->total_output_sz_q = 0;\n \taddr->total_output_sz_d = 0;\n \tfor (i = 0; i < metadata->model.num_output; i++) {\n-\t\taddr->output[i].nb_elements = metadata->output[i].size;\n+\t\taddr->output[i].nb_elements = metadata->output1[i].size;\n \t\taddr->output[i].sz_d = addr->output[i].nb_elements *\n-\t\t\t\t       rte_ml_io_type_size_get(metadata->output[i].output_type);\n+\t\t\t\t       rte_ml_io_type_size_get(metadata->output1[i].output_type);\n \t\taddr->output[i].sz_q =\n \t\t\taddr->output[i].nb_elements *\n-\t\t\trte_ml_io_type_size_get(metadata->output[i].model_output_type);\n+\t\t\trte_ml_io_type_size_get(metadata->output1[i].model_output_type);\n \t\taddr->total_output_sz_q += addr->output[i].sz_q;\n \t\taddr->total_output_sz_d += addr->output[i].sz_d;\n \n@@ -388,24 +389,24 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model)\n \n \t/* Set input info */\n \tfor (i = 0; i < info->nb_inputs; i++) {\n-\t\trte_memcpy(input[i].name, metadata->input[i].input_name, MRVL_ML_INPUT_NAME_LEN);\n-\t\tinput[i].dtype = metadata->input[i].input_type;\n-\t\tinput[i].qtype = metadata->input[i].model_input_type;\n-\t\tinput[i].shape.format = metadata->input[i].shape.format;\n-\t\tinput[i].shape.w = metadata->input[i].shape.w;\n-\t\tinput[i].shape.x = metadata->input[i].shape.x;\n-\t\tinput[i].shape.y = metadata->input[i].shape.y;\n-\t\tinput[i].shape.z = metadata->input[i].shape.z;\n+\t\trte_memcpy(input[i].name, metadata->input1[i].input_name, MRVL_ML_INPUT_NAME_LEN);\n+\t\tinput[i].dtype = metadata->input1[i].input_type;\n+\t\tinput[i].qtype = metadata->input1[i].model_input_type;\n+\t\tinput[i].shape.format = metadata->input1[i].shape.format;\n+\t\tinput[i].shape.w = metadata->input1[i].shape.w;\n+\t\tinput[i].shape.x = metadata->input1[i].shape.x;\n+\t\tinput[i].shape.y = metadata->input1[i].shape.y;\n+\t\tinput[i].shape.z = metadata->input1[i].shape.z;\n \t}\n \n \t/* Set output info */\n \tfor (i = 0; i < info->nb_outputs; i++) {\n-\t\trte_memcpy(output[i].name, metadata->output[i].output_name,\n+\t\trte_memcpy(output[i].name, metadata->output1[i].output_name,\n \t\t\t   MRVL_ML_OUTPUT_NAME_LEN);\n-\t\toutput[i].dtype = metadata->output[i].output_type;\n-\t\toutput[i].qtype = metadata->output[i].model_output_type;\n+\t\toutput[i].dtype = metadata->output1[i].output_type;\n+\t\toutput[i].qtype = metadata->output1[i].model_output_type;\n \t\toutput[i].shape.format = RTE_ML_IO_FORMAT_1D;\n-\t\toutput[i].shape.w = metadata->output[i].size;\n+\t\toutput[i].shape.w = metadata->output1[i].size;\n \t\toutput[i].shape.x = 1;\n \t\toutput[i].shape.y = 1;\n \t\toutput[i].shape.z = 1;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h\nindex b30ad5a981..bd863a8c12 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.h\n+++ b/drivers/ml/cnxk/cn10k_ml_model.h\n@@ -21,14 +21,15 @@ enum cn10k_ml_model_state {\n \tML_CN10K_MODEL_STATE_UNKNOWN,\n };\n \n-/* Model Metadata : v 2.1.0.2 */\n+/* Model Metadata : v 2.3.0.1 */\n #define MRVL_ML_MODEL_MAGIC_STRING \"MRVL\"\n #define MRVL_ML_MODEL_TARGET_ARCH  128\n-#define MRVL_ML_MODEL_VERSION\t   2100\n+#define MRVL_ML_MODEL_VERSION_MIN  
2100\n #define MRVL_ML_MODEL_NAME_LEN\t   64\n #define MRVL_ML_INPUT_NAME_LEN\t   16\n #define MRVL_ML_OUTPUT_NAME_LEN\t   16\n-#define MRVL_ML_INPUT_OUTPUT_SIZE  8\n+#define MRVL_ML_NUM_INPUT_OUTPUT_1 8\n+#define MRVL_ML_NUM_INPUT_OUTPUT_2 24\n \n /* Header (256-byte) */\n struct cn10k_ml_model_metadata_header {\n@@ -101,10 +102,10 @@ struct cn10k_ml_model_metadata_model {\n \t/* Inference batch size */\n \tuint8_t batch_size;\n \n-\t/* Number of input tensors (Max 8) */\n+\t/* Number of input tensors (Max 32) */\n \tuint8_t num_input;\n \n-\t/* Number of output tensors (Max 8) */\n+\t/* Number of output tensors (Max 32) */\n \tuint8_t num_output;\n \tuint8_t reserved_1;\n \n@@ -159,7 +160,14 @@ struct cn10k_ml_model_metadata_model {\n \t * 1 - Yes\n \t */\n \tuint8_t supports_lower_batch_size_optimization;\n-\tuint8_t reserved_3[59];\n+\tuint8_t reserved_3[3];\n+\n+\t/* Relative DDR start address of scratch space */\n+\tuint64_t ddr_scratch_range_start;\n+\n+\t/* Relative DDR end address of scratch space */\n+\tuint64_t ddr_scratch_range_end;\n+\tuint8_t reserved_4[40];\n };\n \n /* Init section (64-byte) */\n@@ -303,7 +311,7 @@ struct cn10k_ml_model_metadata_output_section {\n \n /* Model data */\n struct cn10k_ml_model_metadata_data_section {\n-\tuint8_t reserved[4068];\n+\tuint8_t reserved[996];\n \n \t/* Beta: xx.xx.xx.xx,\n \t * Later: YYYYMM.xx.xx\n@@ -337,13 +345,19 @@ struct cn10k_ml_model_metadata {\n \tstruct cn10k_ml_model_metadata_weights_bias_section weights_bias;\n \n \t/* Input (512-bytes, 64-byte per input) provisioned for 8 inputs */\n-\tstruct cn10k_ml_model_metadata_input_section input[MRVL_ML_INPUT_OUTPUT_SIZE];\n+\tstruct cn10k_ml_model_metadata_input_section input1[MRVL_ML_NUM_INPUT_OUTPUT_1];\n \n \t/* Output (512-bytes, 64-byte per output) provisioned for 8 outputs */\n-\tstruct cn10k_ml_model_metadata_output_section output[MRVL_ML_INPUT_OUTPUT_SIZE];\n+\tstruct cn10k_ml_model_metadata_output_section output1[MRVL_ML_NUM_INPUT_OUTPUT_1];\n \n \tuint8_t reserved_2[1792];\n \n+\t/* Input (1536-bytes, 64-byte per input) provisioned for 24 inputs */\n+\tstruct cn10k_ml_model_metadata_input_section input2[MRVL_ML_NUM_INPUT_OUTPUT_2];\n+\n+\t/* Output (1536-bytes, 64-byte per output) provisioned for 24 outputs */\n+\tstruct cn10k_ml_model_metadata_output_section output2[MRVL_ML_NUM_INPUT_OUTPUT_2];\n+\n \t/* Model data */\n \tstruct cn10k_ml_model_metadata_data_section data;\n \n@@ -399,7 +413,7 @@ struct cn10k_ml_model_addr {\n \n \t\t/* Quantized input size */\n \t\tuint32_t sz_q;\n-\t} input[MRVL_ML_INPUT_OUTPUT_SIZE];\n+\t} input[MRVL_ML_NUM_INPUT_OUTPUT_1];\n \n \t/* Output address and size */\n \tstruct {\n@@ -411,7 +425,7 @@ struct cn10k_ml_model_addr {\n \n \t\t/* Quantized output size */\n \t\tuint32_t sz_q;\n-\t} output[MRVL_ML_INPUT_OUTPUT_SIZE];\n+\t} output[MRVL_ML_NUM_INPUT_OUTPUT_1];\n \n \t/* Total size of quantized input */\n \tuint32_t total_input_sz_q;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex b5eaa24e83..aecc6e74ad 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -325,13 +325,13 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n \tprint_line(fp, LINE_LEN);\n \tfor (i = 0; i < model->metadata.model.num_input; i++) {\n \t\tfprintf(fp, \"%8u  \", i);\n-\t\tfprintf(fp, \"%*s  \", 16, model->metadata.input[i].input_name);\n-\t\trte_ml_io_type_to_str(model->metadata.input[i].input_type, str, STR_LEN);\n+\t\tfprintf(fp, \"%*s  \", 16, 
model->metadata.input1[i].input_name);\n+\t\trte_ml_io_type_to_str(model->metadata.input1[i].input_type, str, STR_LEN);\n \t\tfprintf(fp, \"%*s  \", 12, str);\n-\t\trte_ml_io_type_to_str(model->metadata.input[i].model_input_type, str, STR_LEN);\n+\t\trte_ml_io_type_to_str(model->metadata.input1[i].model_input_type, str, STR_LEN);\n \t\tfprintf(fp, \"%*s  \", 18, str);\n-\t\tfprintf(fp, \"%*s\", 12, (model->metadata.input[i].quantize == 1 ? \"Yes\" : \"No\"));\n-\t\trte_ml_io_format_to_str(model->metadata.input[i].shape.format, str, STR_LEN);\n+\t\tfprintf(fp, \"%*s\", 12, (model->metadata.input1[i].quantize == 1 ? \"Yes\" : \"No\"));\n+\t\trte_ml_io_format_to_str(model->metadata.input1[i].shape.format, str, STR_LEN);\n \t\tfprintf(fp, \"%*s\", 16, str);\n \t\tfprintf(fp, \"\\n\");\n \t}\n@@ -343,12 +343,12 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n \tprint_line(fp, LINE_LEN);\n \tfor (i = 0; i < model->metadata.model.num_output; i++) {\n \t\tfprintf(fp, \"%8u  \", i);\n-\t\tfprintf(fp, \"%*s  \", 16, model->metadata.output[i].output_name);\n-\t\trte_ml_io_type_to_str(model->metadata.output[i].output_type, str, STR_LEN);\n+\t\tfprintf(fp, \"%*s  \", 16, model->metadata.output1[i].output_name);\n+\t\trte_ml_io_type_to_str(model->metadata.output1[i].output_type, str, STR_LEN);\n \t\tfprintf(fp, \"%*s  \", 12, str);\n-\t\trte_ml_io_type_to_str(model->metadata.output[i].model_output_type, str, STR_LEN);\n+\t\trte_ml_io_type_to_str(model->metadata.output1[i].model_output_type, str, STR_LEN);\n \t\tfprintf(fp, \"%*s  \", 18, str);\n-\t\tfprintf(fp, \"%*s\", 12, (model->metadata.output[i].dequantize == 1 ? \"Yes\" : \"No\"));\n+\t\tfprintf(fp, \"%*s\", 12, (model->metadata.output1[i].dequantize == 1 ? \"Yes\" : \"No\"));\n \t\tfprintf(fp, \"\\n\");\n \t}\n \tfprintf(fp, \"\\n\");\n@@ -1882,28 +1882,28 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc\n \n next_batch:\n \tfor (i = 0; i < model->metadata.model.num_input; i++) {\n-\t\tif (model->metadata.input[i].input_type ==\n-\t\t    model->metadata.input[i].model_input_type) {\n+\t\tif (model->metadata.input1[i].input_type ==\n+\t\t    model->metadata.input1[i].model_input_type) {\n \t\t\trte_memcpy(lcl_qbuffer, lcl_dbuffer, model->addr.input[i].sz_d);\n \t\t} else {\n-\t\t\tswitch (model->metadata.input[i].model_input_type) {\n+\t\t\tswitch (model->metadata.input1[i].model_input_type) {\n \t\t\tcase RTE_ML_IO_TYPE_INT8:\n-\t\t\t\tret = rte_ml_io_float32_to_int8(model->metadata.input[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_int8(model->metadata.input1[i].qscale,\n \t\t\t\t\t\t\t\tmodel->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\tlcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT8:\n-\t\t\t\tret = rte_ml_io_float32_to_uint8(model->metadata.input[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_uint8(model->metadata.input1[i].qscale,\n \t\t\t\t\t\t\t\t model->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_INT16:\n-\t\t\t\tret = rte_ml_io_float32_to_int16(model->metadata.input[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_int16(model->metadata.input1[i].qscale,\n \t\t\t\t\t\t\t\t model->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT16:\n-\t\t\t\tret = rte_ml_io_float32_to_uint16(model->metadata.input[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_uint16(model->metadata.input1[i].qscale,\n 
\t\t\t\t\t\t\t\t  model->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\t  lcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n@@ -1913,7 +1913,7 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc\n \t\t\t\tbreak;\n \t\t\tdefault:\n \t\t\t\tplt_err(\"Unsupported model_input_type[%u] : %u\", i,\n-\t\t\t\t\tmodel->metadata.input[i].model_input_type);\n+\t\t\t\t\tmodel->metadata.input1[i].model_input_type);\n \t\t\t\tret = -ENOTSUP;\n \t\t\t}\n \t\t\tif (ret < 0)\n@@ -1955,28 +1955,28 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba\n \n next_batch:\n \tfor (i = 0; i < model->metadata.model.num_output; i++) {\n-\t\tif (model->metadata.output[i].output_type ==\n-\t\t    model->metadata.output[i].model_output_type) {\n+\t\tif (model->metadata.output1[i].output_type ==\n+\t\t    model->metadata.output1[i].model_output_type) {\n \t\t\trte_memcpy(lcl_dbuffer, lcl_qbuffer, model->addr.output[i].sz_q);\n \t\t} else {\n-\t\t\tswitch (model->metadata.output[i].model_output_type) {\n+\t\t\tswitch (model->metadata.output1[i].model_output_type) {\n \t\t\tcase RTE_ML_IO_TYPE_INT8:\n-\t\t\t\tret = rte_ml_io_int8_to_float32(model->metadata.output[i].dscale,\n+\t\t\t\tret = rte_ml_io_int8_to_float32(model->metadata.output1[i].dscale,\n \t\t\t\t\t\t\t\tmodel->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\tlcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT8:\n-\t\t\t\tret = rte_ml_io_uint8_to_float32(model->metadata.output[i].dscale,\n+\t\t\t\tret = rte_ml_io_uint8_to_float32(model->metadata.output1[i].dscale,\n \t\t\t\t\t\t\t\t model->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_INT16:\n-\t\t\t\tret = rte_ml_io_int16_to_float32(model->metadata.output[i].dscale,\n+\t\t\t\tret = rte_ml_io_int16_to_float32(model->metadata.output1[i].dscale,\n \t\t\t\t\t\t\t\t model->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT16:\n-\t\t\t\tret = rte_ml_io_uint16_to_float32(model->metadata.output[i].dscale,\n+\t\t\t\tret = rte_ml_io_uint16_to_float32(model->metadata.output1[i].dscale,\n \t\t\t\t\t\t\t\t  model->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\t  lcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n@@ -1987,7 +1987,7 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba\n \t\t\t\tbreak;\n \t\t\tdefault:\n \t\t\t\tplt_err(\"Unsupported model_output_type[%u] : %u\", i,\n-\t\t\t\t\tmodel->metadata.output[i].model_output_type);\n+\t\t\t\t\tmodel->metadata.output1[i].model_output_type);\n \t\t\t\tret = -ENOTSUP;\n \t\t\t}\n \t\t\tif (ret < 0)\n",
    "prefixes": [
        "v1",
        "2/3"
    ]
}
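
Two common follow-ups on a response like the one above are downloading the patch in mbox form (for a local "git am") and updating writable fields such as "state" or "delegate" via PATCH. The sketch below again assumes the Python "requests" library; the API token is a placeholder, and token authentication ("Authorization: Token ...") is assumed to be enabled on the instance, since writes require credentials.

    import requests

    patch_url = "http://patches.dpdk.org/api/patches/126426/"
    patch = requests.get(patch_url).json()

    # 1) Save the patch as an mbox file, e.g. to apply locally with `git am`.
    mbox = requests.get(patch["mbox"])
    with open("patch-126426.mbox", "wb") as f:
        f.write(mbox.content)

    # 2) Hypothetical maintainer update: set the patch state via PATCH.
    #    The token below is a placeholder, not a real credential.
    headers = {"Authorization": "Token <your-api-token>"}
    resp = requests.patch(patch_url, json={"state": "accepted"}, headers=headers)
    print(resp.status_code)
    if resp.ok:
        print(resp.json()["state"])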