Show a cover letter.

GET /api/covers/123256/?format=api
HTTP 200 OK
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 123256,
    "url": "https://patches.dpdk.org/api/covers/123256/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/cover/20230207151316.835441-1-jerinj@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230207151316.835441-1-jerinj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230207151316.835441-1-jerinj@marvell.com",
    "date": "2023-02-07T15:13:04",
    "name": "[v3,00/12] mldev: introduce machine learning device library",
    "submitter": {
        "id": 1188,
        "url": "https://patches.dpdk.org/api/people/1188/?format=api",
        "name": "Jerin Jacob Kollanukkaran",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/cover/20230207151316.835441-1-jerinj@marvell.com/mbox/",
    "series": [
        {
            "id": 26852,
            "url": "https://patches.dpdk.org/api/series/26852/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=26852",
            "date": "2023-02-07T15:13:04",
            "name": "mldev: introduce machine learning device library",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/26852/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/covers/123256/comments/",
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DE48C41C30;\n\tTue,  7 Feb 2023 16:13:33 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id BB20C40E6E;\n\tTue,  7 Feb 2023 16:13:33 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 5650F4021F\n for <dev@dpdk.org>; Tue,  7 Feb 2023 16:13:32 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 317BJhJY017823; Tue, 7 Feb 2023 07:13:31 -0800",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3nhqrtme2d-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Tue, 07 Feb 2023 07:13:31 -0800",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.42;\n Tue, 7 Feb 2023 07:13:29 -0800",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.42 via Frontend\n Transport; Tue, 7 Feb 2023 07:13:29 -0800",
            "from jerin-lab.marvell.com (jerin-lab.marvell.com [10.28.34.14])\n by maili.marvell.com (Postfix) with ESMTP id B6F433F7082;\n Tue,  7 Feb 2023 07:13:26 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=HvspDDMQm8+Eh39sV+9E8GhCl/mVbos64Zz+N6Y42H4=;\n b=gee2iJD/T6kpXUeU1gBkyXbjkA0cCAcAQ0h0bcIJO8nvH4ztFdaemQemEr66nbx9PRp/\n o5FyGUHp7FbZ9WsHO2ifsB37X1LzMVKPIEC5lxxGqWas+KXfI9t5oBgAgGkOMx/48EzL\n VPqWql36PKbzEkqP3eSUxLZWQlUsYErk+XsdG4K7JuGJO8psqmIQ3q1gm7ROXR6DOCgK\n ZnPKM9dpqUz7Krf2LUVHpbI76Gkj90Xppk2TIls/zNeED5VjvZcXBJRk4oPFRduuIfxs\n PSToYDYi5DyvnBLmcAIC1coVRJCK96jLA9ZqwE4RNb37BE/79wpJ6M1KooYlT9r22Uru +Q==",
        "From": "<jerinj@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<thomas@monjalon.net>, <ferruh.yigit@xilinx.com>,\n <stephen@networkplumber.org>, <dchickles@marvell.com>,\n <sshankarnara@marvell.com>, Jerin Jacob <jerinj@marvell.com>",
        "Subject": "[dpdk-dev] [PATCH v3 00/12] mldev: introduce machine learning device\n library",
        "Date": "Tue, 7 Feb 2023 20:43:04 +0530",
        "Message-ID": "<20230207151316.835441-1-jerinj@marvell.com>",
        "X-Mailer": "git-send-email 2.39.1",
        "In-Reply-To": "<20230206202453.336280-1-jerinj@marvell.com>",
        "References": "<20230206202453.336280-1-jerinj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "HtNZeqXv7joGZckOyX3woi-qpWqsvlLS",
        "X-Proofpoint-ORIG-GUID": "HtNZeqXv7joGZckOyX3woi-qpWqsvlLS",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.219,Aquarius:18.0.930,Hydra:6.0.562,FMLib:17.11.122.1\n definitions=2023-02-07_07,2023-02-06_03,2022-06-22_01",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Jerin Jacob <jerinj@marvell.com>\n\nMachine learning inference library\n==================================\n\nDefinition of machine learning inference\n----------------------------------------\nInference in machine learning is the process of making an output prediction\nbased on new input data using a pre-trained machine learning model.\n\nThe scope of the RFC would include only inferencing with pre-trained machine learning models,\ntraining and building/compiling the ML models is out of scope for this RFC or\nDPDK mldev API. Use existing machine learning compiler frameworks for model creation.\n\nMotivation for the new library\n------------------------------\nMultiple semiconductor vendors are offering accelerator products such as DPU\n(often called Smart-NIC), FPGA, GPU, etc., which have ML inferencing capabilities\nintegrated as part of the product. Use of ML inferencing is increasing in the domain\nof packet processing for flow classification, intrusion, malware and anomaly detection.\n\nLack of inferencing support through DPDK APIs will involve complexities and\nincreased latency from moving data across frameworks (i.e, dataplane to\nnon dataplane ML frameworks and vice-versa). Having a standardized DPDK APIs for ML\ninferencing would enable the dataplane solutions to harness the benefit of inline\ninferencing supported by the hardware.\n\nContents\n---------------\nA) API specification for:\n\n1) Discovery of ML capabilities (e.g., device specific features) in a vendor\nindependent fashion\n2) Definition of functions to handle ML devices, which includes probing,\ninitialization and termination of the devices.\n3) Definition of functions to handle ML models used to perform inference operations.\n4) Definition of function to handle quantize and dequantize operations\n\nB) Common code for above specification\n\nrfc..v1:\n- Added programmer guide documentation\n- Added implementation for common code\n\nv2..v1:\n- Moved dynamic log (Stephen)\n- model id to uint16_t from int16t_t (Stephen)\n- added release note updates\n\nv3..v2:\n- Introduced rte_ml_dev_init() similar to rte_gpu_init() (Stephen, Thomas)\n- In struct rte_ml_dev_data, removed reserved[3] and   __rte_cache_aligned.\nAlso, moved name field to the end(Stephen)\n \nMachine learning library framework\n----------------------------------\n\nThe ML framework is built on the following model:\n\n\n    +-----------------+               rte_ml_[en|de]queue_burst()\n    |                 |                          |\n    |     Machine     o------+     +--------+    |\n    |     Learning    |      |     | queue  |    |    +------+\n    |     Inference   o------+-----o        |<===o===>|Core 0|\n    |     Engine      |      |     | pair 0 |         +------+\n    |                 o----+ |     +--------+\n    |                 |    | |\n    +-----------------+    | |     +--------+\n             ^             | |     | queue  |         +------+\n             |             | +-----o        |<=======>|Core 1|\n             |             |       | pair 1 |         +------+\n             |             |       +--------+\n    +--------+--------+    |\n    | +-------------+ |    |       +--------+\n    | |   Model 0   | |    |       | queue  |         +------+\n    | +-------------+ |    +-------o        |<=======>|Core N|\n    | +-------------+ |            | pair N |         +------+\n    | |   Model 1   | |            +--------+\n    | +-------------+ |\n    | +-------------+ |<------- rte_ml_model_load()\n    | |   
    | |   Model ..  | |-------> rte_ml_model_info()\n    | +-------------+ |<------- rte_ml_model_start()\n    | +-------------+ |<------- rte_ml_model_stop()\n    | |   Model N   | |<------- rte_ml_model_params_update()\n    | +-------------+ |<------- rte_ml_model_unload()\n    +-----------------+\n\nML Device: A hardware or software-based implementation of the ML device API for\nrunning inferences using a pre-trained ML model.\n\nML Model: An ML model is an algorithm trained over a dataset. A model consists of a\nprocedure/algorithm and the data/pattern required to make predictions on live data.\nOnce the model is created and trained outside of the DPDK scope, it can be loaded\nvia rte_ml_model_load() and then started using the rte_ml_model_start() API.\nrte_ml_model_params_update() can be used to update model parameters such as weights\nand biases without unloading the model via rte_ml_model_unload().\n\nML Inference: ML inference is the process of feeding data to the model via the\nrte_ml_enqueue_burst() API and using the rte_ml_dequeue_burst() API to get the calculated\noutputs/predictions from the started model.\n\nIn all functions of the ML device API, the ML device is designated by an\ninteger >= 0 called the device identifier *dev_id*.\n\nThe functions exported by the ML device API to set up a device designated by\nits device identifier must be invoked in the following order:\n\n     - rte_ml_dev_configure()\n     - rte_ml_dev_queue_pair_setup()\n     - rte_ml_dev_start()\n\nA model is required to run the inference operations with the user-specified inputs.\nThe application needs to invoke the ML model API in the following order before queueing\ninference jobs:\n\n     - rte_ml_model_load()\n     - rte_ml_model_start()\n\nThe rte_ml_model_info() API is provided to retrieve information related to the model.\nThe information includes the shape and type of input and output required for the inference.\n\nData quantization and dequantization are among the main aspects of the ML domain. This involves\nconversion of input data from a higher precision to a lower precision data type and vice-versa\nfor the output. APIs are provided to enable quantization through rte_ml_io_quantize() and\ndequantization through rte_ml_io_dequantize(). These APIs are able to handle input\nand output buffers holding data for multiple batches.\nTwo utility APIs, rte_ml_io_input_size_get() and rte_ml_io_output_size_get(), can be used to get the\nsize of quantized and dequantized multi-batch input and output buffers.\n\nThe user can optionally update the model parameters with rte_ml_model_params_update() after\ninvoking the rte_ml_model_stop() API on a given model ID.\n\nThe application can invoke, in any order, the functions exported by the ML API to enqueue\ninference jobs and dequeue inference responses.\n\nIf the application wants to change the device configuration (i.e., call\nrte_ml_dev_configure() or rte_ml_dev_queue_pair_setup()), then the application must stop the\ndevice using the rte_ml_dev_stop() API. Likewise, if model parameters need to be updated, then\nthe application must call rte_ml_model_stop() followed by the rte_ml_model_params_update() API\nfor the given model. The application does not need to call the rte_ml_dev_stop() API for\n
any model re-configuration such as rte_ml_model_params_update(), rte_ml_model_unload(), etc.\n\nOnce the device has been started using the rte_ml_dev_start() API and the model has been started\nusing the rte_ml_model_start() API, the application can call\nrte_ml_enqueue_burst() and rte_ml_dequeue_burst() on the destined device and model ID.\n\nFinally, an application can close an ML device by invoking the rte_ml_dev_close() function.\n\nTypical application utilisation of the ML API will follow this\nprogramming flow:\n\n- rte_ml_dev_configure()\n- rte_ml_dev_queue_pair_setup()\n- rte_ml_model_load()\n- rte_ml_model_start()\n- rte_ml_model_info()\n- rte_ml_dev_start()\n- rte_ml_enqueue_burst()\n- rte_ml_dequeue_burst()\n- rte_ml_model_stop()\n- rte_ml_model_unload()\n- rte_ml_dev_stop()\n- rte_ml_dev_close()\n\nRegarding multi-threading, by default, all the functions of the ML Device API exported by a PMD\nare lock-free functions which are assumed not to be invoked in parallel on different logical cores\non the same target object. For instance, the dequeue function of a poll mode driver cannot be\ninvoked in parallel on two logical cores to operate on the same queue pair. Of course, this function\ncan be invoked in parallel by different logical cores on different queue pairs.\nIt is the responsibility of the user application to enforce this rule.\n\nExample application usage for ML inferencing\n--------------------------------------------\nThis example application demonstrates the programming model of the ML device\nlibrary. This example omits some error checks to simplify the application. This\nexample also assumes that the input data received is quantized and the expected output\nis also quantized. To handle non-quantized inputs and outputs, users can\n
invoke rte_ml_io_quantize() or rte_ml_io_dequantize() for data type conversions.\n\n#include <getopt.h>\n#include <limits.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include <rte_eal.h>\n#include <rte_memzone.h>\n#include <rte_mempool.h>\n#include <rte_mldev.h>\n\n#define ML_MODEL_NAME \"model\"\n#define IO_MZ \"io_mz\"\n\nstruct app_ctx {\n\tchar model_file[PATH_MAX];\n\tchar inp_file[PATH_MAX];\n\tchar out_file[PATH_MAX];\n\n\tstruct rte_ml_model_params params;\n\tstruct rte_ml_model_info info;\n\tuint16_t id;\n\n\tuint64_t input_size;\n\tuint64_t output_size;\n\tuint8_t *input_buffer;\n\tuint8_t *output_buffer;\n} __rte_cache_aligned;\n\nstruct app_ctx ctx;\n\nstatic int\nparse_args(int argc, char **argv)\n{\n\tint opt, option_index;\n\tstatic struct option lgopts[] = {{\"model\", required_argument, NULL, 'm'},\n\t\t\t\t\t {\"input\", required_argument, NULL, 'i'},\n\t\t\t\t\t {\"output\", required_argument, NULL, 'o'},\n\t\t\t\t\t {NULL, 0, NULL, 0}};\n\n\twhile ((opt = getopt_long(argc, argv, \"m:i:o:\", lgopts, &option_index)) != EOF)\n\t\tswitch (opt) {\n\t\tcase 'm':\n\t\t\tstrncpy(ctx.model_file, optarg, PATH_MAX - 1);\n\t\t\tbreak;\n\t\tcase 'i':\n\t\t\tstrncpy(ctx.inp_file, optarg, PATH_MAX - 1);\n\t\t\tbreak;\n\t\tcase 'o':\n\t\t\tstrncpy(ctx.out_file, optarg, PATH_MAX - 1);\n\t\t\tbreak;\n\t\tdefault:\n\t\t\treturn -1;\n\t\t}\n\n\treturn 0;\n}\n\nint\nmain(int argc, char **argv)\n{\n\tstruct rte_ml_dev_qp_conf qp_conf;\n\tstruct rte_ml_dev_config config;\n\tstruct rte_ml_dev_info dev_info;\n\tconst struct rte_memzone *mz;\n\tstruct rte_mempool *op_pool;\n\tstruct rte_ml_op *op_enq;\n\tstruct rte_ml_op *op_deq;\n\n\tFILE *fp;\n\tint rc;\n\n\t/* Initialize EAL */\n\trc = rte_eal_init(argc, argv);\n\tif (rc < 0)\n\t\trte_exit(EXIT_FAILURE, \"Invalid EAL arguments\\n\");\n\targc -= rc;\n\targv += rc;\n\n\t/* Parse application arguments (after the EAL args) */\n\tif (parse_args(argc, argv) < 0)\n\t\trte_exit(EXIT_FAILURE, \"Invalid application arguments\\n\");\n\n\t/* Step 1: Check for ML devices */\n\tif (rte_ml_dev_count() <= 0)\n\t\trte_exit(EXIT_FAILURE, \"Failed to find ML devices\\n\");\n\n\t/* Step 2: Get device info */\n\tif (rte_ml_dev_info_get(0, &dev_info) != 0)\n\t\trte_exit(EXIT_FAILURE, \"Failed to get device info\\n\");\n\n\t/* Step 3: Configure ML device, use device 0 */\n\tconfig.socket_id = rte_ml_dev_socket_id(0);\n\tconfig.max_nb_models = dev_info.max_models;\n\tconfig.nb_queue_pairs = dev_info.max_queue_pairs;\n\tif (rte_ml_dev_configure(0, &config) != 0)\n\t\trte_exit(EXIT_FAILURE, \"Device configuration failed\\n\");\n\n\t/* Step 4: Setup queue pairs, use qp_id = 0 */\n\tqp_conf.nb_desc = 1;\n\tif (rte_ml_dev_queue_pair_setup(0, 0, &qp_conf, config.socket_id) != 0)\n\t\trte_exit(EXIT_FAILURE, \"Queue-pair setup failed\\n\");\n\n\t/* Step 5: Start device */\n\tif (rte_ml_dev_start(0) != 0)\n\t\trte_exit(EXIT_FAILURE, \"Device start failed\\n\");\n\n\t/* Step 6: Read model data and update load params structure */\n\tfp = fopen(ctx.model_file, \"r\");\n\tif (fp == NULL)\n\t\trte_exit(EXIT_FAILURE, \"Failed to open model file\\n\");\n\n\tfseek(fp, 0, SEEK_END);\n\tctx.params.size = ftell(fp);\n\tfseek(fp, 0, SEEK_SET);\n\n\tctx.params.addr = malloc(ctx.params.size);\n\tif (fread(ctx.params.addr, 1, ctx.params.size, fp) != ctx.params.size) {\n\t\tfclose(fp);\n\t\trte_exit(EXIT_FAILURE, \"Failed to read model\\n\");\n\t}\n\tfclose(fp);\n\tstrcpy(ctx.params.name, ML_MODEL_NAME);\n\n\t/* Step 7: Load the model */\n\tif (rte_ml_model_load(0, &ctx.params, &ctx.id) != 0)\n\t\trte_exit(EXIT_FAILURE, \"Failed to load model\\n\");\n\tfree(ctx.params.addr);\n\n
\t/* Step 8: Start the model */\n\tif (rte_ml_model_start(0, ctx.id) != 0)\n\t\trte_exit(EXIT_FAILURE, \"Failed to start model\\n\");\n\n\t/* Step 9: Allocate buffers for quantized input and output */\n\n\t/* Get model information */\n\tif (rte_ml_model_info_get(0, ctx.id, &ctx.info) != 0)\n\t\trte_exit(EXIT_FAILURE, \"Failed to get model info\\n\");\n\n\t/* Get the buffer size for input and output */\n\trte_ml_io_input_size_get(0, ctx.id, ctx.info.batch_size, &ctx.input_size, NULL);\n\trte_ml_io_output_size_get(0, ctx.id, ctx.info.batch_size, &ctx.output_size, NULL);\n\n\tmz = rte_memzone_reserve(IO_MZ, ctx.input_size + ctx.output_size, config.socket_id, 0);\n\tif (mz == NULL)\n\t\trte_exit(EXIT_FAILURE, \"Failed to create IO memzone\\n\");\n\n\tctx.input_buffer = mz->addr;\n\tctx.output_buffer = ctx.input_buffer + ctx.input_size;\n\n\t/* Step 10: Fill the input data */\n\tfp = fopen(ctx.inp_file, \"r\");\n\tif (fp == NULL)\n\t\trte_exit(EXIT_FAILURE, \"Failed to open input file\\n\");\n\n\tif (fread(ctx.input_buffer, 1, ctx.input_size, fp) != ctx.input_size) {\n\t\tfclose(fp);\n\t\trte_exit(EXIT_FAILURE, \"Failed to read input file\\n\");\n\t}\n\tfclose(fp);\n\n\t/* Step 11: Create ML op mempool */\n\top_pool = rte_ml_op_pool_create(\"ml_op_pool\", 1, 0, 0, config.socket_id);\n\tif (op_pool == NULL)\n\t\trte_exit(EXIT_FAILURE, \"Failed to create op pool\\n\");\n\n\t/* Step 12: Form an ML op */\n\trte_mempool_get_bulk(op_pool, (void **)&op_enq, 1);\n\top_enq->model_id = ctx.id;\n\top_enq->nb_batches = ctx.info.batch_size;\n\top_enq->mempool = op_pool;\n\top_enq->input.addr = ctx.input_buffer;\n\top_enq->input.length = ctx.input_size;\n\top_enq->input.next = NULL;\n\top_enq->output.addr = ctx.output_buffer;\n\top_enq->output.length = ctx.output_size;\n\top_enq->output.next = NULL;\n\n\t/* Step 13: Enqueue jobs */\n\trte_ml_enqueue_burst(0, 0, &op_enq, 1);\n\n\t/* Step 14: Dequeue jobs and release op pool */\n\twhile (rte_ml_dequeue_burst(0, 0, &op_deq, 1) != 1)\n\t\t;\n\n\t/* Step 15: Write output */\n\tfp = fopen(ctx.out_file, \"w\");\n\tif (fp == NULL)\n\t\trte_exit(EXIT_FAILURE, \"Failed to open output file\\n\");\n\tfwrite(ctx.output_buffer, 1, ctx.output_size, fp);\n\tfclose(fp);\n\n\t/* Step 16: Clean up */\n\t/* Stop ML model */\n\trte_ml_model_stop(0, ctx.id);\n\t/* Unload ML model */\n\trte_ml_model_unload(0, ctx.id);\n\t/* Free input/output memory */\n\trte_memzone_free(rte_memzone_lookup(IO_MZ));\n\t/* Free the ml op back to pool */\n\trte_mempool_put_bulk(op_pool, (void **)&op_deq, 1);\n\t/* Free ml op pool */\n\trte_mempool_free(op_pool);\n\t/* Stop the device */\n\trte_ml_dev_stop(0);\n\trte_ml_dev_close(0);\n\trte_eal_cleanup();\n\n\treturn 0;\n}\n\n\nJerin Jacob (1):\n  mldev: introduce machine learning device library\n\nSrikanth Yalavarthi (11):\n  mldev: support PMD functions for ML device\n  mldev: support ML device handling functions\n  mldev: support ML device queue-pair setup\n  mldev: support handling ML models\n  mldev: support input and output data handling\n  mldev: support ML op pool and ops\n  mldev: support inference enqueue and dequeue\n  mldev: support device statistics\n  mldev: support device extended statistics\n  mldev: support to retrieve error information\n  mldev: support to get debug info and test device\n\n MAINTAINERS                              |    5 +\n doc/api/doxy-api-index.md                |    1 +\n doc/api/doxy-api.conf.in                 |    1 +\n doc/guides/prog_guide/img/mldev_flow.svg |  714 ++++++++++++++\n doc/guides/prog_guide/index.rst          |    1 +\n doc/guides/prog_guide/mldev.rst          |  186 ++++\n doc/guides/rel_notes/release_23_03.rst   |    5 +\n lib/meson.build                          |    1 +\n lib/mldev/meson.build                    |   27 +\n lib/mldev/rte_mldev.c                    |  947 ++++++++++++++++++\n lib/mldev/rte_mldev.h                    | 1119 ++++++++++++++++++++++\n lib/mldev/rte_mldev_core.h               |  717 ++++++++++++++\n lib/mldev/rte_mldev_pmd.c                |   62 ++\n lib/mldev/rte_mldev_pmd.h                |  151 +++\n lib/mldev/version.map                    |   51 +\n 15 files changed, 3988 insertions(+)\n create mode 100644 doc/guides/prog_guide/img/mldev_flow.svg\n create mode 100644 doc/guides/prog_guide/mldev.rst\n create mode 100644 lib/mldev/meson.build\n create mode 100644 lib/mldev/rte_mldev.c\n create mode 100644 lib/mldev/rte_mldev.h\n create mode 100644 lib/mldev/rte_mldev_core.h\n create mode 100644 lib/mldev/rte_mldev_pmd.c\n create mode 100644 lib/mldev/rte_mldev_pmd.h\n create mode 100644 lib/mldev/version.map"
}
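
To work with this record programmatically rather than through the browsable ?format=api view shown above, the same endpoint can be requested as plain JSON. The following is a minimal sketch, not part of the patch series: it assumes Python 3 with only the standard library, network access to patches.dpdk.org, and that the endpoint honors ?format=json (standard Django REST Framework behavior). The cover ID (123256), the "comments" URL, and the series "mbox" URL all come from the response above; the helper name get_json and the output filename mldev-v3-series.mbox are illustrative.

    import json
    import urllib.request

    API = "https://patches.dpdk.org/api"

    def get_json(url):
        # Fetch a Patchwork API URL and decode the JSON body.
        with urllib.request.urlopen(url) as resp:
            return json.load(resp)

    # Fetch the cover letter shown above, as JSON instead of the browsable view.
    cover = get_json(API + "/covers/123256/?format=json")
    print(cover["name"])                # "[v3,00/12] mldev: introduce ..."
    print(cover["submitter"]["email"])  # "jerinj@marvell.com"

    # Follow the linked resources embedded in the record.
    comments = get_json(cover["comments"])   # review comments on this cover
    series = cover["series"][0]              # the series this cover belongs to

    # Download the series mbox; it can then be applied with `git am`.
    with urllib.request.urlopen(series["mbox"]) as resp:
        with open("mldev-v3-series.mbox", "wb") as out:
            out.write(resp.read())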