get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
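
The patch record reproduced below can be retrieved with a plain GET request; no authentication is needed for reads on this instance. A minimal sketch using Python's requests library (an assumption; any HTTP client works) that fetches this record and reads a few of its fields:

# Minimal sketch: fetch the patch record shown below.
# Assumes the "requests" package is installed; any HTTP client works.
import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/126427/"

resp = requests.get(PATCH_URL, timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[v1,3/3] ml/cnxk: add support for 32 I/O per model"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # mbox URL for this patch, suitable for git am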

GET /api/patches/126427/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 126427,
    "url": "http://patches.dpdk.org/api/patches/126427/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230423050814.825-4-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230423050814.825-4-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230423050814.825-4-syalavarthi@marvell.com",
    "date": "2023-04-23T05:08:14",
    "name": "[v1,3/3] ml/cnxk: add support for 32 I/O per model",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "9bb5c95dad8f8aba1343a04c43bf26cd07c08222",
    "submitter": {
        "id": 2480,
        "url": "http://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230423050814.825-4-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 27829,
            "url": "http://patches.dpdk.org/api/series/27829/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=27829",
            "date": "2023-04-23T05:08:11",
            "name": "Add support for 32 I/O per model",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/27829/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/126427/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/126427/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 24C64429BA;\n\tSun, 23 Apr 2023 07:08:41 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 79D1442D29;\n\tSun, 23 Apr 2023 07:08:27 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 9AD6C42D0E\n for <dev@dpdk.org>; Sun, 23 Apr 2023 07:08:24 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 33N2AvmS020680 for <dev@dpdk.org>; Sat, 22 Apr 2023 22:08:24 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3q4f3p2261-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Sat, 22 Apr 2023 22:08:23 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Sat, 22 Apr 2023 22:08:21 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Sat, 22 Apr 2023 22:08:21 -0700",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 126793F704D;\n Sat, 22 Apr 2023 22:08:21 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=G+BPxCmf2UZt32VVZK7PTEYFaAgKg8lFeW6p2kEwDcY=;\n b=agNUA3LOfgG6pvuyUy/7UKuxKBjV34eNlySelTqsthUPjfH71jsyTs4FoKofVpef1Ii0\n tUbJcX2o6iq8Vjne8tP/+A1PmwCswh9mU2L1D9Upi7KeBzw9VFUWCa3d3ZBM8x8f/3+f\n W4oNZoQkYCAP+84USYKo2WCRxer9EjuYdRs1i6nOiSTZ3WSCotEF0rdUN3Vk08f2+KlS\n JJBuEpOthFPCw6NRBlypargnl/4kxILKJcGeQ7T/cvTs7zxXh+zk3EOmt5kWbgsh6PDt\n xSKJTibl0sV512UFBZC/RAMPUxhxNa6lRPUMFO8TqWT5t41yueoZPHtSqjjVgCDQAWJe gQ==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "CC": "<dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH v1 3/3] ml/cnxk: add support for 32 I/O per model",
        "Date": "Sat, 22 Apr 2023 22:08:14 -0700",
        "Message-ID": "<20230423050814.825-4-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20230423050814.825-1-syalavarthi@marvell.com>",
        "References": "<20230423050814.825-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "-GvP8x3BiDv6dYg-zDw6paNF71IyufwR",
        "X-Proofpoint-GUID": "-GvP8x3BiDv6dYg-zDw6paNF71IyufwR",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.254,Aquarius:18.0.942,Hydra:6.0.573,FMLib:17.11.170.22\n definitions=2023-04-23_02,2023-04-21_01,2023-02-09_01",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Added support for 32 inputs and outputs per model.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n drivers/ml/cnxk/cn10k_ml_model.c | 374 ++++++++++++++++++++++---------\n drivers/ml/cnxk/cn10k_ml_model.h |   5 +-\n drivers/ml/cnxk/cn10k_ml_ops.c   | 125 ++++++++---\n 3 files changed, 367 insertions(+), 137 deletions(-)",
    "diff": "diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c\nindex a15df700aa..92c47d39ba 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.c\n+++ b/drivers/ml/cnxk/cn10k_ml_model.c\n@@ -41,8 +41,9 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)\n \tstruct cn10k_ml_model_metadata *metadata;\n \tuint32_t payload_crc32c;\n \tuint32_t header_crc32c;\n-\tuint8_t version[4];\n+\tuint32_t version;\n \tuint8_t i;\n+\tuint8_t j;\n \n \tmetadata = (struct cn10k_ml_model_metadata *)buffer;\n \n@@ -82,10 +83,13 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)\n \t}\n \n \t/* Header version */\n-\trte_memcpy(version, metadata->header.version, 4 * sizeof(uint8_t));\n-\tif (version[0] * 1000 + version[1] * 100 != MRVL_ML_MODEL_VERSION_MIN) {\n-\t\tplt_err(\"Metadata version = %u.%u.%u.%u (< %u.%u.%u.%u) not supported\", version[0],\n-\t\t\tversion[1], version[2], version[3], (MRVL_ML_MODEL_VERSION_MIN / 1000) % 10,\n+\tversion = metadata->header.version[0] * 1000 + metadata->header.version[1] * 100 +\n+\t\t  metadata->header.version[2] * 10 + metadata->header.version[3];\n+\tif (version < MRVL_ML_MODEL_VERSION_MIN) {\n+\t\tplt_err(\"Metadata version = %u.%u.%u.%u (< %u.%u.%u.%u) not supported\",\n+\t\t\tmetadata->header.version[0], metadata->header.version[1],\n+\t\t\tmetadata->header.version[2], metadata->header.version[3],\n+\t\t\t(MRVL_ML_MODEL_VERSION_MIN / 1000) % 10,\n \t\t\t(MRVL_ML_MODEL_VERSION_MIN / 100) % 10,\n \t\t\t(MRVL_ML_MODEL_VERSION_MIN / 10) % 10, MRVL_ML_MODEL_VERSION_MIN % 10);\n \t\treturn -ENOTSUP;\n@@ -125,60 +129,119 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)\n \t}\n \n \t/* Check input count */\n-\tif (metadata->model.num_input > MRVL_ML_NUM_INPUT_OUTPUT_1) {\n-\t\tplt_err(\"Invalid metadata, num_input  = %u (> %u)\", metadata->model.num_input,\n-\t\t\tMRVL_ML_NUM_INPUT_OUTPUT_1);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* Check output count */\n-\tif (metadata->model.num_output > MRVL_ML_NUM_INPUT_OUTPUT_1) {\n-\t\tplt_err(\"Invalid metadata, num_output  = %u (> %u)\", metadata->model.num_output,\n-\t\t\tMRVL_ML_NUM_INPUT_OUTPUT_1);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* Inputs */\n-\tfor (i = 0; i < metadata->model.num_input; i++) {\n-\t\tif (rte_ml_io_type_size_get(cn10k_ml_io_type_map(metadata->input1[i].input_type)) <=\n-\t\t    0) {\n-\t\t\tplt_err(\"Invalid metadata, input[%u] : input_type = %u\", i,\n-\t\t\t\tmetadata->input1[i].input_type);\n+\tif (version < 2301) {\n+\t\tif (metadata->model.num_input > MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tplt_err(\"Invalid metadata, num_input  = %u (> %u)\",\n+\t\t\t\tmetadata->model.num_input, MRVL_ML_NUM_INPUT_OUTPUT_1);\n \t\t\treturn -EINVAL;\n \t\t}\n \n-\t\tif (rte_ml_io_type_size_get(\n-\t\t\t    cn10k_ml_io_type_map(metadata->input1[i].model_input_type)) <= 0) {\n-\t\t\tplt_err(\"Invalid metadata, input[%u] : model_input_type = %u\", i,\n-\t\t\t\tmetadata->input1[i].model_input_type);\n+\t\t/* Check output count */\n+\t\tif (metadata->model.num_output > MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tplt_err(\"Invalid metadata, num_output  = %u (> %u)\",\n+\t\t\t\tmetadata->model.num_output, MRVL_ML_NUM_INPUT_OUTPUT_1);\n \t\t\treturn -EINVAL;\n \t\t}\n-\n-\t\tif (metadata->input1[i].relocatable != 1) {\n-\t\t\tplt_err(\"Model not supported, non-relocatable input: %u\", i);\n-\t\t\treturn -ENOTSUP;\n+\t} else {\n+\t\tif (metadata->model.num_input > MRVL_ML_NUM_INPUT_OUTPUT) {\n+\t\t\tplt_err(\"Invalid metadata, num_input  = %u (> 
%u)\",\n+\t\t\t\tmetadata->model.num_input, MRVL_ML_NUM_INPUT_OUTPUT);\n+\t\t\treturn -EINVAL;\n \t\t}\n-\t}\n \n-\t/* Outputs */\n-\tfor (i = 0; i < metadata->model.num_output; i++) {\n-\t\tif (rte_ml_io_type_size_get(\n-\t\t\t    cn10k_ml_io_type_map(metadata->output1[i].output_type)) <= 0) {\n-\t\t\tplt_err(\"Invalid metadata, output[%u] : output_type = %u\", i,\n-\t\t\t\tmetadata->output1[i].output_type);\n+\t\t/* Check output count */\n+\t\tif (metadata->model.num_output > MRVL_ML_NUM_INPUT_OUTPUT) {\n+\t\t\tplt_err(\"Invalid metadata, num_output  = %u (> %u)\",\n+\t\t\t\tmetadata->model.num_output, MRVL_ML_NUM_INPUT_OUTPUT);\n \t\t\treturn -EINVAL;\n \t\t}\n+\t}\n \n-\t\tif (rte_ml_io_type_size_get(\n-\t\t\t    cn10k_ml_io_type_map(metadata->output1[i].model_output_type)) <= 0) {\n-\t\t\tplt_err(\"Invalid metadata, output[%u] : model_output_type = %u\", i,\n-\t\t\t\tmetadata->output1[i].model_output_type);\n-\t\t\treturn -EINVAL;\n+\t/* Inputs */\n+\tfor (i = 0; i < metadata->model.num_input; i++) {\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tif (rte_ml_io_type_size_get(\n+\t\t\t\t    cn10k_ml_io_type_map(metadata->input1[i].input_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, input1[%u] : input_type = %u\", i,\n+\t\t\t\t\tmetadata->input1[i].input_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (rte_ml_io_type_size_get(cn10k_ml_io_type_map(\n+\t\t\t\t    metadata->input1[i].model_input_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, input1[%u] : model_input_type = %u\", i,\n+\t\t\t\t\tmetadata->input1[i].model_input_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (metadata->input1[i].relocatable != 1) {\n+\t\t\t\tplt_err(\"Model not supported, non-relocatable input1: %u\", i);\n+\t\t\t\treturn -ENOTSUP;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\tif (rte_ml_io_type_size_get(\n+\t\t\t\t    cn10k_ml_io_type_map(metadata->input2[j].input_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, input2[%u] : input_type = %u\", j,\n+\t\t\t\t\tmetadata->input2[j].input_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (rte_ml_io_type_size_get(cn10k_ml_io_type_map(\n+\t\t\t\t    metadata->input2[j].model_input_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, input2[%u] : model_input_type = %u\", j,\n+\t\t\t\t\tmetadata->input2[j].model_input_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (metadata->input2[j].relocatable != 1) {\n+\t\t\t\tplt_err(\"Model not supported, non-relocatable input2: %u\", j);\n+\t\t\t\treturn -ENOTSUP;\n+\t\t\t}\n \t\t}\n+\t}\n \n-\t\tif (metadata->output1[i].relocatable != 1) {\n-\t\t\tplt_err(\"Model not supported, non-relocatable output: %u\", i);\n-\t\t\treturn -ENOTSUP;\n+\t/* Outputs */\n+\tfor (i = 0; i < metadata->model.num_output; i++) {\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tif (rte_ml_io_type_size_get(\n+\t\t\t\t    cn10k_ml_io_type_map(metadata->output1[i].output_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, output1[%u] : output_type = %u\", i,\n+\t\t\t\t\tmetadata->output1[i].output_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (rte_ml_io_type_size_get(cn10k_ml_io_type_map(\n+\t\t\t\t    metadata->output1[i].model_output_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, output1[%u] : model_output_type = %u\", i,\n+\t\t\t\t\tmetadata->output1[i].model_output_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (metadata->output1[i].relocatable != 1) {\n+\t\t\t\tplt_err(\"Model not supported, non-relocatable output1: %u\", 
i);\n+\t\t\t\treturn -ENOTSUP;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\tif (rte_ml_io_type_size_get(\n+\t\t\t\t    cn10k_ml_io_type_map(metadata->output2[j].output_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, output2[%u] : output_type = %u\", j,\n+\t\t\t\t\tmetadata->output2[j].output_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (rte_ml_io_type_size_get(cn10k_ml_io_type_map(\n+\t\t\t\t    metadata->output2[j].model_output_type)) <= 0) {\n+\t\t\t\tplt_err(\"Invalid metadata, output2[%u] : model_output_type = %u\", j,\n+\t\t\t\t\tmetadata->output2[j].model_output_type);\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\n+\t\t\tif (metadata->output2[j].relocatable != 1) {\n+\t\t\t\tplt_err(\"Model not supported, non-relocatable output2: %u\", j);\n+\t\t\t\treturn -ENOTSUP;\n+\t\t\t}\n \t\t}\n \t}\n \n@@ -189,31 +252,60 @@ void\n cn10k_ml_model_metadata_update(struct cn10k_ml_model_metadata *metadata)\n {\n \tuint8_t i;\n+\tuint8_t j;\n \n \tfor (i = 0; i < metadata->model.num_input; i++) {\n-\t\tmetadata->input1[i].input_type =\n-\t\t\tcn10k_ml_io_type_map(metadata->input1[i].input_type);\n-\t\tmetadata->input1[i].model_input_type =\n-\t\t\tcn10k_ml_io_type_map(metadata->input1[i].model_input_type);\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tmetadata->input1[i].input_type =\n+\t\t\t\tcn10k_ml_io_type_map(metadata->input1[i].input_type);\n+\t\t\tmetadata->input1[i].model_input_type =\n+\t\t\t\tcn10k_ml_io_type_map(metadata->input1[i].model_input_type);\n+\n+\t\t\tif (metadata->input1[i].shape.w == 0)\n+\t\t\t\tmetadata->input1[i].shape.w = 1;\n+\n+\t\t\tif (metadata->input1[i].shape.x == 0)\n+\t\t\t\tmetadata->input1[i].shape.x = 1;\n+\n+\t\t\tif (metadata->input1[i].shape.y == 0)\n+\t\t\t\tmetadata->input1[i].shape.y = 1;\n \n-\t\tif (metadata->input1[i].shape.w == 0)\n-\t\t\tmetadata->input1[i].shape.w = 1;\n+\t\t\tif (metadata->input1[i].shape.z == 0)\n+\t\t\t\tmetadata->input1[i].shape.z = 1;\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\tmetadata->input2[j].input_type =\n+\t\t\t\tcn10k_ml_io_type_map(metadata->input2[j].input_type);\n+\t\t\tmetadata->input2[j].model_input_type =\n+\t\t\t\tcn10k_ml_io_type_map(metadata->input2[j].model_input_type);\n \n-\t\tif (metadata->input1[i].shape.x == 0)\n-\t\t\tmetadata->input1[i].shape.x = 1;\n+\t\t\tif (metadata->input2[j].shape.w == 0)\n+\t\t\t\tmetadata->input2[j].shape.w = 1;\n \n-\t\tif (metadata->input1[i].shape.y == 0)\n-\t\t\tmetadata->input1[i].shape.y = 1;\n+\t\t\tif (metadata->input2[j].shape.x == 0)\n+\t\t\t\tmetadata->input2[j].shape.x = 1;\n \n-\t\tif (metadata->input1[i].shape.z == 0)\n-\t\t\tmetadata->input1[i].shape.z = 1;\n+\t\t\tif (metadata->input2[j].shape.y == 0)\n+\t\t\t\tmetadata->input2[j].shape.y = 1;\n+\n+\t\t\tif (metadata->input2[j].shape.z == 0)\n+\t\t\t\tmetadata->input2[j].shape.z = 1;\n+\t\t}\n \t}\n \n \tfor (i = 0; i < metadata->model.num_output; i++) {\n-\t\tmetadata->output1[i].output_type =\n-\t\t\tcn10k_ml_io_type_map(metadata->output1[i].output_type);\n-\t\tmetadata->output1[i].model_output_type =\n-\t\t\tcn10k_ml_io_type_map(metadata->output1[i].model_output_type);\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tmetadata->output1[i].output_type =\n+\t\t\t\tcn10k_ml_io_type_map(metadata->output1[i].output_type);\n+\t\t\tmetadata->output1[i].model_output_type =\n+\t\t\t\tcn10k_ml_io_type_map(metadata->output1[i].model_output_type);\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\tmetadata->output2[j].output_type 
=\n+\t\t\t\tcn10k_ml_io_type_map(metadata->output2[j].output_type);\n+\t\t\tmetadata->output2[j].model_output_type =\n+\t\t\t\tcn10k_ml_io_type_map(metadata->output2[j].model_output_type);\n+\t\t}\n \t}\n }\n \n@@ -226,6 +318,7 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_\n \tuint8_t *dma_addr_load;\n \tuint8_t *dma_addr_run;\n \tuint8_t i;\n+\tuint8_t j;\n \tint fpos;\n \n \tmetadata = &model->metadata;\n@@ -272,37 +365,80 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_\n \taddr->total_input_sz_d = 0;\n \taddr->total_input_sz_q = 0;\n \tfor (i = 0; i < metadata->model.num_input; i++) {\n-\t\taddr->input[i].nb_elements =\n-\t\t\tmetadata->input1[i].shape.w * metadata->input1[i].shape.x *\n-\t\t\tmetadata->input1[i].shape.y * metadata->input1[i].shape.z;\n-\t\taddr->input[i].sz_d = addr->input[i].nb_elements *\n-\t\t\t\t      rte_ml_io_type_size_get(metadata->input1[i].input_type);\n-\t\taddr->input[i].sz_q = addr->input[i].nb_elements *\n-\t\t\t\t      rte_ml_io_type_size_get(metadata->input1[i].model_input_type);\n-\t\taddr->total_input_sz_d += addr->input[i].sz_d;\n-\t\taddr->total_input_sz_q += addr->input[i].sz_q;\n-\n-\t\tplt_ml_dbg(\"model_id = %u, input[%u] - w:%u x:%u y:%u z:%u, sz_d = %u sz_q = %u\",\n-\t\t\t   model->model_id, i, metadata->input1[i].shape.w,\n-\t\t\t   metadata->input1[i].shape.x, metadata->input1[i].shape.y,\n-\t\t\t   metadata->input1[i].shape.z, addr->input[i].sz_d, addr->input[i].sz_q);\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\taddr->input[i].nb_elements =\n+\t\t\t\tmetadata->input1[i].shape.w * metadata->input1[i].shape.x *\n+\t\t\t\tmetadata->input1[i].shape.y * metadata->input1[i].shape.z;\n+\t\t\taddr->input[i].sz_d =\n+\t\t\t\taddr->input[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->input1[i].input_type);\n+\t\t\taddr->input[i].sz_q =\n+\t\t\t\taddr->input[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->input1[i].model_input_type);\n+\t\t\taddr->total_input_sz_d += addr->input[i].sz_d;\n+\t\t\taddr->total_input_sz_q += addr->input[i].sz_q;\n+\n+\t\t\tplt_ml_dbg(\n+\t\t\t\t\"model_id = %u, input[%u] - w:%u x:%u y:%u z:%u, sz_d = %u sz_q = %u\",\n+\t\t\t\tmodel->model_id, i, metadata->input1[i].shape.w,\n+\t\t\t\tmetadata->input1[i].shape.x, metadata->input1[i].shape.y,\n+\t\t\t\tmetadata->input1[i].shape.z, addr->input[i].sz_d,\n+\t\t\t\taddr->input[i].sz_q);\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\taddr->input[i].nb_elements =\n+\t\t\t\tmetadata->input2[j].shape.w * metadata->input2[j].shape.x *\n+\t\t\t\tmetadata->input2[j].shape.y * metadata->input2[j].shape.z;\n+\t\t\taddr->input[i].sz_d =\n+\t\t\t\taddr->input[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->input2[j].input_type);\n+\t\t\taddr->input[i].sz_q =\n+\t\t\t\taddr->input[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->input2[j].model_input_type);\n+\t\t\taddr->total_input_sz_d += addr->input[i].sz_d;\n+\t\t\taddr->total_input_sz_q += addr->input[i].sz_q;\n+\n+\t\t\tplt_ml_dbg(\n+\t\t\t\t\"model_id = %u, input2[%u] - w:%u x:%u y:%u z:%u, sz_d = %u sz_q = %u\",\n+\t\t\t\tmodel->model_id, j, metadata->input2[j].shape.w,\n+\t\t\t\tmetadata->input2[j].shape.x, metadata->input2[j].shape.y,\n+\t\t\t\tmetadata->input2[j].shape.z, addr->input[i].sz_d,\n+\t\t\t\taddr->input[i].sz_q);\n+\t\t}\n \t}\n \n \t/* Outputs */\n \taddr->total_output_sz_q = 0;\n \taddr->total_output_sz_d = 0;\n \tfor (i = 0; i < metadata->model.num_output; i++) 
{\n-\t\taddr->output[i].nb_elements = metadata->output1[i].size;\n-\t\taddr->output[i].sz_d = addr->output[i].nb_elements *\n-\t\t\t\t       rte_ml_io_type_size_get(metadata->output1[i].output_type);\n-\t\taddr->output[i].sz_q =\n-\t\t\taddr->output[i].nb_elements *\n-\t\t\trte_ml_io_type_size_get(metadata->output1[i].model_output_type);\n-\t\taddr->total_output_sz_q += addr->output[i].sz_q;\n-\t\taddr->total_output_sz_d += addr->output[i].sz_d;\n-\n-\t\tplt_ml_dbg(\"model_id = %u, output[%u] - sz_d = %u, sz_q = %u\", model->model_id, i,\n-\t\t\t   addr->output[i].sz_d, addr->output[i].sz_q);\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\taddr->output[i].nb_elements = metadata->output1[i].size;\n+\t\t\taddr->output[i].sz_d =\n+\t\t\t\taddr->output[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->output1[i].output_type);\n+\t\t\taddr->output[i].sz_q =\n+\t\t\t\taddr->output[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->output1[i].model_output_type);\n+\t\t\taddr->total_output_sz_q += addr->output[i].sz_q;\n+\t\t\taddr->total_output_sz_d += addr->output[i].sz_d;\n+\n+\t\t\tplt_ml_dbg(\"model_id = %u, output[%u] - sz_d = %u, sz_q = %u\",\n+\t\t\t\t   model->model_id, i, addr->output[i].sz_d, addr->output[i].sz_q);\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\taddr->output[i].nb_elements = metadata->output2[j].size;\n+\t\t\taddr->output[i].sz_d =\n+\t\t\t\taddr->output[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->output2[j].output_type);\n+\t\t\taddr->output[i].sz_q =\n+\t\t\t\taddr->output[i].nb_elements *\n+\t\t\t\trte_ml_io_type_size_get(metadata->output2[j].model_output_type);\n+\t\t\taddr->total_output_sz_q += addr->output[i].sz_q;\n+\t\t\taddr->total_output_sz_d += addr->output[i].sz_d;\n+\n+\t\t\tplt_ml_dbg(\"model_id = %u, output2[%u] - sz_d = %u, sz_q = %u\",\n+\t\t\t\t   model->model_id, j, addr->output[i].sz_d, addr->output[i].sz_q);\n+\t\t}\n \t}\n }\n \n@@ -366,6 +502,7 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model)\n \tstruct rte_ml_io_info *output;\n \tstruct rte_ml_io_info *input;\n \tuint8_t i;\n+\tuint8_t j;\n \n \tmetadata = &model->metadata;\n \tinfo = PLT_PTR_CAST(model->info);\n@@ -389,26 +526,53 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model)\n \n \t/* Set input info */\n \tfor (i = 0; i < info->nb_inputs; i++) {\n-\t\trte_memcpy(input[i].name, metadata->input1[i].input_name, MRVL_ML_INPUT_NAME_LEN);\n-\t\tinput[i].dtype = metadata->input1[i].input_type;\n-\t\tinput[i].qtype = metadata->input1[i].model_input_type;\n-\t\tinput[i].shape.format = metadata->input1[i].shape.format;\n-\t\tinput[i].shape.w = metadata->input1[i].shape.w;\n-\t\tinput[i].shape.x = metadata->input1[i].shape.x;\n-\t\tinput[i].shape.y = metadata->input1[i].shape.y;\n-\t\tinput[i].shape.z = metadata->input1[i].shape.z;\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\trte_memcpy(input[i].name, metadata->input1[i].input_name,\n+\t\t\t\t   MRVL_ML_INPUT_NAME_LEN);\n+\t\t\tinput[i].dtype = metadata->input1[i].input_type;\n+\t\t\tinput[i].qtype = metadata->input1[i].model_input_type;\n+\t\t\tinput[i].shape.format = metadata->input1[i].shape.format;\n+\t\t\tinput[i].shape.w = metadata->input1[i].shape.w;\n+\t\t\tinput[i].shape.x = metadata->input1[i].shape.x;\n+\t\t\tinput[i].shape.y = metadata->input1[i].shape.y;\n+\t\t\tinput[i].shape.z = metadata->input1[i].shape.z;\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\trte_memcpy(input[i].name, 
metadata->input2[j].input_name,\n+\t\t\t\t   MRVL_ML_INPUT_NAME_LEN);\n+\t\t\tinput[i].dtype = metadata->input2[j].input_type;\n+\t\t\tinput[i].qtype = metadata->input2[j].model_input_type;\n+\t\t\tinput[i].shape.format = metadata->input2[j].shape.format;\n+\t\t\tinput[i].shape.w = metadata->input2[j].shape.w;\n+\t\t\tinput[i].shape.x = metadata->input2[j].shape.x;\n+\t\t\tinput[i].shape.y = metadata->input2[j].shape.y;\n+\t\t\tinput[i].shape.z = metadata->input2[j].shape.z;\n+\t\t}\n \t}\n \n \t/* Set output info */\n \tfor (i = 0; i < info->nb_outputs; i++) {\n-\t\trte_memcpy(output[i].name, metadata->output1[i].output_name,\n-\t\t\t   MRVL_ML_OUTPUT_NAME_LEN);\n-\t\toutput[i].dtype = metadata->output1[i].output_type;\n-\t\toutput[i].qtype = metadata->output1[i].model_output_type;\n-\t\toutput[i].shape.format = RTE_ML_IO_FORMAT_1D;\n-\t\toutput[i].shape.w = metadata->output1[i].size;\n-\t\toutput[i].shape.x = 1;\n-\t\toutput[i].shape.y = 1;\n-\t\toutput[i].shape.z = 1;\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\trte_memcpy(output[i].name, metadata->output1[i].output_name,\n+\t\t\t\t   MRVL_ML_OUTPUT_NAME_LEN);\n+\t\t\toutput[i].dtype = metadata->output1[i].output_type;\n+\t\t\toutput[i].qtype = metadata->output1[i].model_output_type;\n+\t\t\toutput[i].shape.format = RTE_ML_IO_FORMAT_1D;\n+\t\t\toutput[i].shape.w = metadata->output1[i].size;\n+\t\t\toutput[i].shape.x = 1;\n+\t\t\toutput[i].shape.y = 1;\n+\t\t\toutput[i].shape.z = 1;\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\trte_memcpy(output[i].name, metadata->output2[j].output_name,\n+\t\t\t\t   MRVL_ML_OUTPUT_NAME_LEN);\n+\t\t\toutput[i].dtype = metadata->output2[j].output_type;\n+\t\t\toutput[i].qtype = metadata->output2[j].model_output_type;\n+\t\t\toutput[i].shape.format = RTE_ML_IO_FORMAT_1D;\n+\t\t\toutput[i].shape.w = metadata->output2[j].size;\n+\t\t\toutput[i].shape.x = 1;\n+\t\t\toutput[i].shape.y = 1;\n+\t\t\toutput[i].shape.z = 1;\n+\t\t}\n \t}\n }\ndiff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h\nindex bd863a8c12..5c34e4d747 100644\n--- a/drivers/ml/cnxk/cn10k_ml_model.h\n+++ b/drivers/ml/cnxk/cn10k_ml_model.h\n@@ -30,6 +30,7 @@ enum cn10k_ml_model_state {\n #define MRVL_ML_OUTPUT_NAME_LEN\t   16\n #define MRVL_ML_NUM_INPUT_OUTPUT_1 8\n #define MRVL_ML_NUM_INPUT_OUTPUT_2 24\n+#define MRVL_ML_NUM_INPUT_OUTPUT   (MRVL_ML_NUM_INPUT_OUTPUT_1 + MRVL_ML_NUM_INPUT_OUTPUT_2)\n \n /* Header (256-byte) */\n struct cn10k_ml_model_metadata_header {\n@@ -413,7 +414,7 @@ struct cn10k_ml_model_addr {\n \n \t\t/* Quantized input size */\n \t\tuint32_t sz_q;\n-\t} input[MRVL_ML_NUM_INPUT_OUTPUT_1];\n+\t} input[MRVL_ML_NUM_INPUT_OUTPUT];\n \n \t/* Output address and size */\n \tstruct {\n@@ -425,7 +426,7 @@ struct cn10k_ml_model_addr {\n \n \t\t/* Quantized output size */\n \t\tuint32_t sz_q;\n-\t} output[MRVL_ML_NUM_INPUT_OUTPUT_1];\n+\t} output[MRVL_ML_NUM_INPUT_OUTPUT];\n \n \t/* Total size of quantized input */\n \tuint32_t total_input_sz_q;\ndiff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c\nindex aecc6e74ad..1033afb1b0 100644\n--- a/drivers/ml/cnxk/cn10k_ml_ops.c\n+++ b/drivers/ml/cnxk/cn10k_ml_ops.c\n@@ -269,6 +269,7 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n \tstruct cn10k_ml_ocm *ocm;\n \tchar str[STR_LEN];\n \tuint8_t i;\n+\tuint8_t j;\n \n \tmldev = dev->data->dev_private;\n \tocm = &mldev->ocm;\n@@ -324,16 +325,36 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n 
\t\t\"model_input_type\", \"quantize\", \"format\");\n \tprint_line(fp, LINE_LEN);\n \tfor (i = 0; i < model->metadata.model.num_input; i++) {\n-\t\tfprintf(fp, \"%8u  \", i);\n-\t\tfprintf(fp, \"%*s  \", 16, model->metadata.input1[i].input_name);\n-\t\trte_ml_io_type_to_str(model->metadata.input1[i].input_type, str, STR_LEN);\n-\t\tfprintf(fp, \"%*s  \", 12, str);\n-\t\trte_ml_io_type_to_str(model->metadata.input1[i].model_input_type, str, STR_LEN);\n-\t\tfprintf(fp, \"%*s  \", 18, str);\n-\t\tfprintf(fp, \"%*s\", 12, (model->metadata.input1[i].quantize == 1 ? \"Yes\" : \"No\"));\n-\t\trte_ml_io_format_to_str(model->metadata.input1[i].shape.format, str, STR_LEN);\n-\t\tfprintf(fp, \"%*s\", 16, str);\n-\t\tfprintf(fp, \"\\n\");\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tfprintf(fp, \"%8u  \", i);\n+\t\t\tfprintf(fp, \"%*s  \", 16, model->metadata.input1[i].input_name);\n+\t\t\trte_ml_io_type_to_str(model->metadata.input1[i].input_type, str, STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 12, str);\n+\t\t\trte_ml_io_type_to_str(model->metadata.input1[i].model_input_type, str,\n+\t\t\t\t\t      STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 18, str);\n+\t\t\tfprintf(fp, \"%*s\", 12,\n+\t\t\t\t(model->metadata.input1[i].quantize == 1 ? \"Yes\" : \"No\"));\n+\t\t\trte_ml_io_format_to_str(model->metadata.input1[i].shape.format, str,\n+\t\t\t\t\t\tSTR_LEN);\n+\t\t\tfprintf(fp, \"%*s\", 16, str);\n+\t\t\tfprintf(fp, \"\\n\");\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\tfprintf(fp, \"%8u  \", i);\n+\t\t\tfprintf(fp, \"%*s  \", 16, model->metadata.input2[j].input_name);\n+\t\t\trte_ml_io_type_to_str(model->metadata.input2[j].input_type, str, STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 12, str);\n+\t\t\trte_ml_io_type_to_str(model->metadata.input2[j].model_input_type, str,\n+\t\t\t\t\t      STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 18, str);\n+\t\t\tfprintf(fp, \"%*s\", 12,\n+\t\t\t\t(model->metadata.input2[j].quantize == 1 ? \"Yes\" : \"No\"));\n+\t\t\trte_ml_io_format_to_str(model->metadata.input2[j].shape.format, str,\n+\t\t\t\t\t\tSTR_LEN);\n+\t\t\tfprintf(fp, \"%*s\", 16, str);\n+\t\t\tfprintf(fp, \"\\n\");\n+\t\t}\n \t}\n \tfprintf(fp, \"\\n\");\n \n@@ -342,14 +363,30 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)\n \t\t\"model_output_type\", \"dequantize\");\n \tprint_line(fp, LINE_LEN);\n \tfor (i = 0; i < model->metadata.model.num_output; i++) {\n-\t\tfprintf(fp, \"%8u  \", i);\n-\t\tfprintf(fp, \"%*s  \", 16, model->metadata.output1[i].output_name);\n-\t\trte_ml_io_type_to_str(model->metadata.output1[i].output_type, str, STR_LEN);\n-\t\tfprintf(fp, \"%*s  \", 12, str);\n-\t\trte_ml_io_type_to_str(model->metadata.output1[i].model_output_type, str, STR_LEN);\n-\t\tfprintf(fp, \"%*s  \", 18, str);\n-\t\tfprintf(fp, \"%*s\", 12, (model->metadata.output1[i].dequantize == 1 ? \"Yes\" : \"No\"));\n-\t\tfprintf(fp, \"\\n\");\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tfprintf(fp, \"%8u  \", i);\n+\t\t\tfprintf(fp, \"%*s  \", 16, model->metadata.output1[i].output_name);\n+\t\t\trte_ml_io_type_to_str(model->metadata.output1[i].output_type, str, STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 12, str);\n+\t\t\trte_ml_io_type_to_str(model->metadata.output1[i].model_output_type, str,\n+\t\t\t\t\t      STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 18, str);\n+\t\t\tfprintf(fp, \"%*s\", 12,\n+\t\t\t\t(model->metadata.output1[i].dequantize == 1 ? 
\"Yes\" : \"No\"));\n+\t\t\tfprintf(fp, \"\\n\");\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\tfprintf(fp, \"%8u  \", i);\n+\t\t\tfprintf(fp, \"%*s  \", 16, model->metadata.output2[j].output_name);\n+\t\t\trte_ml_io_type_to_str(model->metadata.output2[j].output_type, str, STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 12, str);\n+\t\t\trte_ml_io_type_to_str(model->metadata.output2[j].model_output_type, str,\n+\t\t\t\t\t      STR_LEN);\n+\t\t\tfprintf(fp, \"%*s  \", 18, str);\n+\t\t\tfprintf(fp, \"%*s\", 12,\n+\t\t\t\t(model->metadata.output2[j].dequantize == 1 ? \"Yes\" : \"No\"));\n+\t\t\tfprintf(fp, \"\\n\");\n+\t\t}\n \t}\n \tfprintf(fp, \"\\n\");\n \tprint_line(fp, LINE_LEN);\n@@ -1863,10 +1900,14 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc\n \t\t     void *qbuffer)\n {\n \tstruct cn10k_ml_model *model;\n+\tuint8_t model_input_type;\n \tuint8_t *lcl_dbuffer;\n \tuint8_t *lcl_qbuffer;\n+\tuint8_t input_type;\n \tuint32_t batch_id;\n+\tfloat qscale;\n \tuint32_t i;\n+\tuint32_t j;\n \tint ret;\n \n \tmodel = dev->data->models[model_id];\n@@ -1882,28 +1923,38 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc\n \n next_batch:\n \tfor (i = 0; i < model->metadata.model.num_input; i++) {\n-\t\tif (model->metadata.input1[i].input_type ==\n-\t\t    model->metadata.input1[i].model_input_type) {\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\tinput_type = model->metadata.input1[i].input_type;\n+\t\t\tmodel_input_type = model->metadata.input1[i].model_input_type;\n+\t\t\tqscale = model->metadata.input1[i].qscale;\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\tinput_type = model->metadata.input2[j].input_type;\n+\t\t\tmodel_input_type = model->metadata.input2[j].model_input_type;\n+\t\t\tqscale = model->metadata.input2[j].qscale;\n+\t\t}\n+\n+\t\tif (input_type == model_input_type) {\n \t\t\trte_memcpy(lcl_qbuffer, lcl_dbuffer, model->addr.input[i].sz_d);\n \t\t} else {\n \t\t\tswitch (model->metadata.input1[i].model_input_type) {\n \t\t\tcase RTE_ML_IO_TYPE_INT8:\n-\t\t\t\tret = rte_ml_io_float32_to_int8(model->metadata.input1[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_int8(qscale,\n \t\t\t\t\t\t\t\tmodel->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\tlcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT8:\n-\t\t\t\tret = rte_ml_io_float32_to_uint8(model->metadata.input1[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_uint8(qscale,\n \t\t\t\t\t\t\t\t model->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_INT16:\n-\t\t\t\tret = rte_ml_io_float32_to_int16(model->metadata.input1[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_int16(qscale,\n \t\t\t\t\t\t\t\t model->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT16:\n-\t\t\t\tret = rte_ml_io_float32_to_uint16(model->metadata.input1[i].qscale,\n+\t\t\t\tret = rte_ml_io_float32_to_uint16(qscale,\n \t\t\t\t\t\t\t\t  model->addr.input[i].nb_elements,\n \t\t\t\t\t\t\t\t  lcl_dbuffer, lcl_qbuffer);\n \t\t\t\tbreak;\n@@ -1936,10 +1987,14 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba\n \t\t       void *qbuffer, void *dbuffer)\n {\n \tstruct cn10k_ml_model *model;\n+\tuint8_t model_output_type;\n \tuint8_t *lcl_qbuffer;\n \tuint8_t *lcl_dbuffer;\n+\tuint8_t output_type;\n \tuint32_t batch_id;\n+\tfloat dscale;\n \tuint32_t i;\n+\tuint32_t 
j;\n \tint ret;\n \n \tmodel = dev->data->models[model_id];\n@@ -1955,28 +2010,38 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba\n \n next_batch:\n \tfor (i = 0; i < model->metadata.model.num_output; i++) {\n-\t\tif (model->metadata.output1[i].output_type ==\n-\t\t    model->metadata.output1[i].model_output_type) {\n+\t\tif (i < MRVL_ML_NUM_INPUT_OUTPUT_1) {\n+\t\t\toutput_type = model->metadata.output1[i].output_type;\n+\t\t\tmodel_output_type = model->metadata.output1[i].model_output_type;\n+\t\t\tdscale = model->metadata.output1[i].dscale;\n+\t\t} else {\n+\t\t\tj = i - MRVL_ML_NUM_INPUT_OUTPUT_1;\n+\t\t\toutput_type = model->metadata.output2[j].output_type;\n+\t\t\tmodel_output_type = model->metadata.output2[j].model_output_type;\n+\t\t\tdscale = model->metadata.output2[j].dscale;\n+\t\t}\n+\n+\t\tif (output_type == model_output_type) {\n \t\t\trte_memcpy(lcl_dbuffer, lcl_qbuffer, model->addr.output[i].sz_q);\n \t\t} else {\n \t\t\tswitch (model->metadata.output1[i].model_output_type) {\n \t\t\tcase RTE_ML_IO_TYPE_INT8:\n-\t\t\t\tret = rte_ml_io_int8_to_float32(model->metadata.output1[i].dscale,\n+\t\t\t\tret = rte_ml_io_int8_to_float32(dscale,\n \t\t\t\t\t\t\t\tmodel->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\tlcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT8:\n-\t\t\t\tret = rte_ml_io_uint8_to_float32(model->metadata.output1[i].dscale,\n+\t\t\t\tret = rte_ml_io_uint8_to_float32(dscale,\n \t\t\t\t\t\t\t\t model->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_INT16:\n-\t\t\t\tret = rte_ml_io_int16_to_float32(model->metadata.output1[i].dscale,\n+\t\t\t\tret = rte_ml_io_int16_to_float32(dscale,\n \t\t\t\t\t\t\t\t model->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\t lcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n \t\t\tcase RTE_ML_IO_TYPE_UINT16:\n-\t\t\t\tret = rte_ml_io_uint16_to_float32(model->metadata.output1[i].dscale,\n+\t\t\t\tret = rte_ml_io_uint16_to_float32(dscale,\n \t\t\t\t\t\t\t\t  model->addr.output[i].nb_elements,\n \t\t\t\t\t\t\t\t  lcl_qbuffer, lcl_dbuffer);\n \t\t\t\tbreak;\n",
    "prefixes": [
        "v1",
        "3/3"
    ]
}
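
The put and patch operations listed at the top modify patch metadata such as state, delegate, or the archived flag. A hedged sketch of a partial update follows; it assumes Patchwork's token authentication and an account with maintainer rights on the project, and the token value and field values are placeholders rather than data taken from this page:

# Hedged sketch: partially update this patch via the PATCH operation.
# Assumes an API token belonging to a project maintainer; the token string
# and the new field values below are placeholders, not data from this page.
import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/126427/"
API_TOKEN = "0123456789abcdef"  # hypothetical token

resp = requests.patch(
    PATCH_URL,
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted", "archived": True},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])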