get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body).

put:
Update a patch.
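A minimal sketch of driving these methods from a script, assuming the Python requests library and a Patchwork API token (the token value and target state below are illustrative placeholders; state updates require maintainer rights on the project):

import requests

URL = "https://patches.dpdk.org/api/patches/135784/"
# Hypothetical token value; real tokens come from the Patchwork user profile.
HEADERS = {"Authorization": "Token 0123456789abcdef"}

# get: fetch the patch as JSON.
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# patch: partially update the patch, e.g. move it to a new state.
resp = requests.patch(URL, headers=HEADERS, json={"state": "accepted"})
resp.raise_for_status()

The response to the GET request is shown below.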

GET /api/patches/135784/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 135784,
    "url": "https://patches.dpdk.org/api/patches/135784/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20240107153454.3909-7-syalavarthi@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240107153454.3909-7-syalavarthi@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240107153454.3909-7-syalavarthi@marvell.com",
    "date": "2024-01-07T15:34:45",
    "name": "[06/11] event/ml: add support for service function",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "5e0d1f6b899058be7207165f05bf7596d0b54e4a",
    "submitter": {
        "id": 2480,
        "url": "https://patches.dpdk.org/api/people/2480/?format=api",
        "name": "Srikanth Yalavarthi",
        "email": "syalavarthi@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20240107153454.3909-7-syalavarthi@marvell.com/mbox/",
    "series": [
        {
            "id": 30752,
            "url": "https://patches.dpdk.org/api/series/30752/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=30752",
            "date": "2024-01-07T15:34:39",
            "name": "Introduce Event ML Adapter",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/30752/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/135784/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/135784/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1FB6E43857;\n\tSun,  7 Jan 2024 16:35:59 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CFA3740A7A;\n\tSun,  7 Jan 2024 16:35:26 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 0FAFC40698\n for <dev@dpdk.org>; Sun,  7 Jan 2024 16:35:23 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id\n 407DlRPu022570 for <dev@dpdk.org>; Sun, 7 Jan 2024 07:35:23 -0800",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3vf78n2a1r-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Sun, 07 Jan 2024 07:35:23 -0800 (PST)",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Sun, 7 Jan 2024 07:35:20 -0800",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Sun, 7 Jan 2024 07:35:20 -0800",
            "from ml-host-33.caveonetworks.com (unknown [10.110.143.233])\n by maili.marvell.com (Postfix) with ESMTP id 0C74E3F7093;\n Sun,  7 Jan 2024 07:35:20 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=\n from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding:content-type; s=\n pfpt0220; bh=OeqVFzzJXsdOnja2urJytWOV15I8Dy3akCa4PvG6nE8=; b=Ir3\n fWt8u+z2Eb/3YxWlR/Deb7gGH+2jcBiFirstxAF4o7D/OzaPA6BBMPLJYGllZaJ3\n APl2a+kmeTLAsiMFhRRRUIgrXhkjcEcKPEIqamEzum9CduO+DLrH9wIidektYp2d\n QvtNhaOqawmYibICbhQASThUBZ5KA6lA7zFw+iAiHwQMR3Sdjp7kge6Vw50ioxl6\n H7I/wZ6rRugoLkb0w0VxlPEL4kz2u8kcGHM+SqMes9o+r6bfkaSK4cU63inyTWDM\n PdiVQRmUTwwXr5Yy3fUYr3fzP6taSAntbX5rg6BH5kvF77u0MWBGnZVudaLZdzo5\n kHych7P6DTYtifvWBdA==",
        "From": "Srikanth Yalavarthi <syalavarthi@marvell.com>",
        "To": "Srikanth Yalavarthi <syalavarthi@marvell.com>, Jerin Jacob\n <jerinj@marvell.com>",
        "CC": "<dev@dpdk.org>, <aprabhu@marvell.com>, <sshankarnara@marvell.com>,\n <ptakkar@marvell.com>",
        "Subject": "[PATCH 06/11] event/ml: add support for service function",
        "Date": "Sun, 7 Jan 2024 07:34:45 -0800",
        "Message-ID": "<20240107153454.3909-7-syalavarthi@marvell.com>",
        "X-Mailer": "git-send-email 2.42.0",
        "In-Reply-To": "<20240107153454.3909-1-syalavarthi@marvell.com>",
        "References": "<20240107153454.3909-1-syalavarthi@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "MbGUJ5JqTxXIHoV9eWmvsw83PezpDNFk",
        "X-Proofpoint-GUID": "MbGUJ5JqTxXIHoV9eWmvsw83PezpDNFk",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.272,Aquarius:18.0.997,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-12-09_02,2023-12-07_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Added support for ML adapter service function for software\nbased event devices.\n\nSigned-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>\n---\n lib/eventdev/rte_event_ml_adapter.c | 538 ++++++++++++++++++++++++++++\n 1 file changed, 538 insertions(+)",
    "diff": "diff --git a/lib/eventdev/rte_event_ml_adapter.c b/lib/eventdev/rte_event_ml_adapter.c\nindex 9d441c5d967..95f566b1025 100644\n--- a/lib/eventdev/rte_event_ml_adapter.c\n+++ b/lib/eventdev/rte_event_ml_adapter.c\n@@ -5,6 +5,7 @@\n #include \"rte_event_ml_adapter.h\"\n #include \"rte_eventdev.h\"\n #include <rte_mldev.h>\n+#include <rte_service_component.h>\n \n #include \"eventdev_pmd.h\"\n #include \"rte_mldev_pmd.h\"\n@@ -13,6 +14,9 @@\n #define ML_DEFAULT_MAX_NB      128\n #define ML_ADAPTER_BUFFER_SIZE 1024\n \n+#define ML_BATCH_SIZE\t\t   32\n+#define ML_ADAPTER_OPS_BUFFER_SIZE (ML_BATCH_SIZE + ML_BATCH_SIZE)\n+\n #define ML_ADAPTER_ARRAY \"event_ml_adapter_array\"\n \n /* ML ops circular buffer */\n@@ -54,6 +58,9 @@ struct ml_device_info {\n \t * be invoked if not already invoked\n \t */\n \tuint16_t num_qpairs;\n+\n+\t/* Next queue pair to be processed */\n+\tuint16_t next_queue_pair_id;\n } __rte_cache_aligned;\n \n struct event_ml_adapter {\n@@ -78,6 +85,9 @@ struct event_ml_adapter {\n \t/* ML device structure array */\n \tstruct ml_device_info *mldevs;\n \n+\t/* Next ML device to be processed */\n+\tint16_t next_mldev_id;\n+\n \t/* Circular buffer for processing ML ops to eventdev */\n \tstruct ml_ops_circular_buffer ebuf;\n \n@@ -92,6 +102,26 @@ struct event_ml_adapter {\n \n \t/* No. of queue pairs configured */\n \tuint16_t nb_qps;\n+\n+\t/* Per adapter EAL service ID */\n+\tuint32_t service_id;\n+\n+\t/* Service initialization state */\n+\tuint8_t service_initialized;\n+\n+\t/* Max ML ops processed in any service function invocation */\n+\tuint32_t max_nb;\n+\n+\t/* Store event port's implicit release capability */\n+\tuint8_t implicit_release_disabled;\n+\n+\t/* Flag to indicate backpressure at mldev\n+\t * Stop further dequeuing events from eventdev\n+\t */\n+\tbool stop_enq_to_mldev;\n+\n+\t/* Loop counter to flush ml ops */\n+\tuint16_t transmit_loop_count;\n } __rte_cache_aligned;\n \n static struct event_ml_adapter **event_ml_adapter;\n@@ -133,6 +163,18 @@ emla_array_init(void)\n \treturn 0;\n }\n \n+static inline bool\n+emla_circular_buffer_batch_ready(struct ml_ops_circular_buffer *bufp)\n+{\n+\treturn bufp->count >= ML_BATCH_SIZE;\n+}\n+\n+static inline bool\n+emla_circular_buffer_space_for_batch(struct ml_ops_circular_buffer *bufp)\n+{\n+\treturn (bufp->size - bufp->count) >= ML_BATCH_SIZE;\n+}\n+\n static inline int\n emla_circular_buffer_init(const char *name, struct ml_ops_circular_buffer *buf, uint16_t sz)\n {\n@@ -151,6 +193,49 @@ emla_circular_buffer_free(struct ml_ops_circular_buffer *buf)\n \trte_free(buf->op_buffer);\n }\n \n+static inline int\n+emla_circular_buffer_add(struct ml_ops_circular_buffer *bufp, struct rte_ml_op *op)\n+{\n+\tuint16_t *tail = &bufp->tail;\n+\n+\tbufp->op_buffer[*tail] = op;\n+\n+\t/* circular buffer, go round */\n+\t*tail = (*tail + 1) % bufp->size;\n+\tbufp->count++;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+emla_circular_buffer_flush_to_mldev(struct ml_ops_circular_buffer *bufp, uint8_t mldev_id,\n+\t\t\t\t    uint16_t qp_id, uint16_t *nb_ops_flushed)\n+{\n+\tuint16_t n = 0;\n+\tuint16_t *head = &bufp->head;\n+\tuint16_t *tail = &bufp->tail;\n+\tstruct rte_ml_op **ops = bufp->op_buffer;\n+\n+\tif (*tail > *head)\n+\t\tn = *tail - *head;\n+\telse if (*tail < *head)\n+\t\tn = bufp->size - *head;\n+\telse {\n+\t\t*nb_ops_flushed = 0;\n+\t\treturn 0; /* buffer empty */\n+\t}\n+\n+\t*nb_ops_flushed = rte_ml_enqueue_burst(mldev_id, qp_id, &ops[*head], n);\n+\tbufp->count -= *nb_ops_flushed;\n+\tif (!bufp->count) 
{\n+\t\t*head = 0;\n+\t\t*tail = 0;\n+\t} else\n+\t\t*head = (*head + *nb_ops_flushed) % bufp->size;\n+\n+\treturn *nb_ops_flushed == n ? 0 : -1;\n+}\n+\n static int\n emla_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_ml_adapter_conf *conf,\n \t\t       void *arg)\n@@ -361,6 +446,394 @@ rte_event_ml_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)\n \treturn 0;\n }\n \n+static inline unsigned int\n+emla_enq_to_mldev(struct event_ml_adapter *adapter, struct rte_event *ev, unsigned int cnt)\n+{\n+\tunion rte_event_ml_metadata *m_data = NULL;\n+\tstruct ml_queue_pair_info *qp_info = NULL;\n+\tstruct rte_ml_op *ml_op;\n+\tunsigned int i, n;\n+\tuint16_t qp_id, nb_enqueued = 0;\n+\tint16_t mldev_id;\n+\tint ret;\n+\n+\tret = 0;\n+\tn = 0;\n+\n+\tfor (i = 0; i < cnt; i++) {\n+\t\tml_op = ev[i].event_ptr;\n+\t\tif (ml_op == NULL)\n+\t\t\tcontinue;\n+\n+\t\tif (ml_op->private_data_offset)\n+\t\t\tm_data = (union rte_event_ml_metadata *)((uint8_t *)ml_op +\n+\t\t\t\t\t\t\t\t ml_op->private_data_offset);\n+\t\tif (m_data == NULL) {\n+\t\t\tif (ml_op != NULL && ml_op->mempool != NULL)\n+\t\t\t\trte_mempool_put(ml_op->mempool, ml_op);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tmldev_id = m_data->request_info.mldev_id;\n+\t\tqp_id = m_data->request_info.queue_pair_id;\n+\t\tqp_info = &adapter->mldevs[mldev_id].qpairs[qp_id];\n+\t\tif (!qp_info->qp_enabled) {\n+\t\t\tif (ml_op != NULL && ml_op->mempool != NULL)\n+\t\t\t\trte_mempool_put(ml_op->mempool, ml_op);\n+\t\t\tcontinue;\n+\t\t}\n+\t\temla_circular_buffer_add(&qp_info->mlbuf, ml_op);\n+\n+\t\tif (emla_circular_buffer_batch_ready(&qp_info->mlbuf)) {\n+\t\t\tret = emla_circular_buffer_flush_to_mldev(&qp_info->mlbuf, mldev_id, qp_id,\n+\t\t\t\t\t\t\t\t  &nb_enqueued);\n+\t\t\tn += nb_enqueued;\n+\n+\t\t\t/**\n+\t\t\t * If some ml ops failed to flush to mldev and\n+\t\t\t * space for another batch is not available, stop\n+\t\t\t * dequeue from eventdev momentarily\n+\t\t\t */\n+\t\t\tif (unlikely(ret < 0 &&\n+\t\t\t\t     !emla_circular_buffer_space_for_batch(&qp_info->mlbuf)))\n+\t\t\t\tadapter->stop_enq_to_mldev = true;\n+\t\t}\n+\t}\n+\n+\treturn n;\n+}\n+\n+static unsigned int\n+emla_ml_mldev_flush(struct event_ml_adapter *adapter, int16_t mldev_id, uint16_t *nb_ops_flushed)\n+{\n+\tstruct ml_device_info *curr_dev;\n+\tstruct ml_queue_pair_info *curr_queue;\n+\tstruct rte_ml_dev *dev;\n+\tuint16_t nb = 0, nb_enqueued = 0;\n+\tuint16_t qp;\n+\n+\tcurr_dev = &adapter->mldevs[mldev_id];\n+\tdev = rte_ml_dev_pmd_get_dev(mldev_id);\n+\n+\tfor (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {\n+\n+\t\tcurr_queue = &curr_dev->qpairs[qp];\n+\t\tif (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))\n+\t\t\tcontinue;\n+\n+\t\temla_circular_buffer_flush_to_mldev(&curr_queue->mlbuf, mldev_id, qp, &nb_enqueued);\n+\t\t*nb_ops_flushed += curr_queue->mlbuf.count;\n+\t\tnb += nb_enqueued;\n+\t}\n+\n+\treturn nb;\n+}\n+\n+static unsigned int\n+emla_ml_enq_flush(struct event_ml_adapter *adapter)\n+{\n+\tint16_t mldev_id;\n+\tuint16_t nb_enqueued = 0;\n+\tuint16_t nb_ops_flushed = 0;\n+\tuint16_t num_mldev = rte_ml_dev_count();\n+\n+\tfor (mldev_id = 0; mldev_id < num_mldev; mldev_id++)\n+\t\tnb_enqueued += emla_ml_mldev_flush(adapter, mldev_id, &nb_ops_flushed);\n+\t/**\n+\t * Enable dequeue from eventdev if all ops from circular\n+\t * buffer flushed to mldev\n+\t */\n+\tif (!nb_ops_flushed)\n+\t\tadapter->stop_enq_to_mldev = false;\n+\n+\treturn nb_enqueued;\n+}\n+\n+/* Flush an instance's enqueue buffers every 
CRYPTO_ENQ_FLUSH_THRESHOLD\n+ * iterations of emla_ml_adapter_enq_run()\n+ */\n+#define ML_ENQ_FLUSH_THRESHOLD 1024\n+\n+static int\n+emla_ml_adapter_enq_run(struct event_ml_adapter *adapter, unsigned int max_enq)\n+{\n+\tstruct rte_event ev[ML_BATCH_SIZE];\n+\tunsigned int nb_enq, nb_enqueued;\n+\tuint16_t n;\n+\tuint8_t event_dev_id = adapter->eventdev_id;\n+\tuint8_t event_port_id = adapter->event_port_id;\n+\n+\tnb_enqueued = 0;\n+\tif (adapter->mode == RTE_EVENT_ML_ADAPTER_OP_NEW)\n+\t\treturn 0;\n+\n+\tfor (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {\n+\t\tif (unlikely(adapter->stop_enq_to_mldev)) {\n+\t\t\tnb_enqueued += emla_ml_enq_flush(adapter);\n+\n+\t\t\tif (unlikely(adapter->stop_enq_to_mldev))\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tn = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, ML_BATCH_SIZE, 0);\n+\n+\t\tif (!n)\n+\t\t\tbreak;\n+\n+\t\tnb_enqueued += emla_enq_to_mldev(adapter, ev, n);\n+\t}\n+\n+\tif ((++adapter->transmit_loop_count & (ML_ENQ_FLUSH_THRESHOLD - 1)) == 0)\n+\t\tnb_enqueued += emla_ml_enq_flush(adapter);\n+\n+\treturn nb_enqueued;\n+}\n+\n+#define ML_ADAPTER_MAX_EV_ENQ_RETRIES 100\n+\n+static inline uint16_t\n+emla_ops_enqueue_burst(struct event_ml_adapter *adapter, struct rte_ml_op **ops, uint16_t num)\n+{\n+\tunion rte_event_ml_metadata *m_data = NULL;\n+\tuint8_t event_dev_id = adapter->eventdev_id;\n+\tuint8_t event_port_id = adapter->event_port_id;\n+\tstruct rte_event events[ML_BATCH_SIZE];\n+\tuint16_t nb_enqueued, nb_ev;\n+\tuint8_t retry;\n+\tuint8_t i;\n+\n+\tnb_ev = 0;\n+\tretry = 0;\n+\tnb_enqueued = 0;\n+\tnum = RTE_MIN(num, ML_BATCH_SIZE);\n+\tfor (i = 0; i < num; i++) {\n+\t\tstruct rte_event *ev = &events[nb_ev++];\n+\n+\t\tif (ops[i]->private_data_offset)\n+\t\t\tm_data = (union rte_event_ml_metadata *)((uint8_t *)ops[i] +\n+\t\t\t\t\t\t\t\t ops[i]->private_data_offset);\n+\t\tif (unlikely(m_data == NULL)) {\n+\t\t\tif (ops[i] != NULL && ops[i]->mempool != NULL)\n+\t\t\t\trte_mempool_put(ops[i]->mempool, ops[i]);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\trte_memcpy(ev, &m_data->response_info, sizeof(*ev));\n+\t\tev->event_ptr = ops[i];\n+\t\tev->event_type = RTE_EVENT_TYPE_CRYPTODEV;\n+\t\tif (adapter->implicit_release_disabled)\n+\t\t\tev->op = RTE_EVENT_OP_FORWARD;\n+\t\telse\n+\t\t\tev->op = RTE_EVENT_OP_NEW;\n+\t}\n+\n+\tdo {\n+\t\tnb_enqueued += rte_event_enqueue_burst(event_dev_id, event_port_id,\n+\t\t\t\t\t\t       &events[nb_enqueued], nb_ev - nb_enqueued);\n+\n+\t} while (retry++ < ML_ADAPTER_MAX_EV_ENQ_RETRIES && nb_enqueued < nb_ev);\n+\n+\treturn nb_enqueued;\n+}\n+\n+static int\n+emla_circular_buffer_flush_to_evdev(struct event_ml_adapter *adapter,\n+\t\t\t\t    struct ml_ops_circular_buffer *bufp)\n+{\n+\tuint16_t n = 0, nb_ops_flushed;\n+\tuint16_t *head = &bufp->head;\n+\tuint16_t *tail = &bufp->tail;\n+\tstruct rte_ml_op **ops = bufp->op_buffer;\n+\n+\tif (*tail > *head)\n+\t\tn = *tail - *head;\n+\telse if (*tail < *head)\n+\t\tn = bufp->size - *head;\n+\telse\n+\t\treturn 0; /* buffer empty */\n+\n+\tnb_ops_flushed = emla_ops_enqueue_burst(adapter, &ops[*head], n);\n+\tbufp->count -= nb_ops_flushed;\n+\tif (!bufp->count) {\n+\t\t*head = 0;\n+\t\t*tail = 0;\n+\t\treturn 0; /* buffer empty */\n+\t}\n+\n+\t*head = (*head + nb_ops_flushed) % bufp->size;\n+\treturn 1;\n+}\n+\n+static void\n+emla_ops_buffer_flush(struct event_ml_adapter *adapter)\n+{\n+\tif (likely(adapter->ebuf.count == 0))\n+\t\treturn;\n+\n+\twhile (emla_circular_buffer_flush_to_evdev(adapter, &adapter->ebuf))\n+\t\t;\n+}\n+\n+static inline unsigned 
int\n+emla_ml_adapter_deq_run(struct event_ml_adapter *adapter, unsigned int max_deq)\n+{\n+\tstruct ml_device_info *curr_dev;\n+\tstruct ml_queue_pair_info *curr_queue;\n+\tstruct rte_ml_op *ops[ML_BATCH_SIZE];\n+\tuint16_t n, nb_deq, nb_enqueued, i;\n+\tstruct rte_ml_dev *dev;\n+\tint16_t mldev_id;\n+\tuint16_t qp, dev_qps;\n+\tbool done;\n+\tuint16_t num_mldev = rte_ml_dev_count();\n+\n+\tnb_deq = 0;\n+\temla_ops_buffer_flush(adapter);\n+\n+\tdo {\n+\t\tdone = true;\n+\n+\t\tfor (mldev_id = adapter->next_mldev_id; mldev_id < num_mldev; mldev_id++) {\n+\t\t\tuint16_t queues = 0;\n+\n+\t\t\tcurr_dev = &adapter->mldevs[mldev_id];\n+\t\t\tdev = curr_dev->dev;\n+\t\t\tif (unlikely(dev == NULL))\n+\t\t\t\tcontinue;\n+\n+\t\t\tdev_qps = dev->data->nb_queue_pairs;\n+\n+\t\t\tfor (qp = curr_dev->next_queue_pair_id; queues < dev_qps;\n+\t\t\t     qp = (qp + 1) % dev_qps, queues++) {\n+\t\t\t\tcurr_queue = &curr_dev->qpairs[qp];\n+\t\t\t\tif (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tn = rte_ml_dequeue_burst(mldev_id, qp, ops, ML_BATCH_SIZE);\n+\t\t\t\tif (!n)\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tdone = false;\n+\t\t\t\tnb_enqueued = 0;\n+\n+\t\t\t\tif (unlikely(!adapter->ebuf.count))\n+\t\t\t\t\tnb_enqueued = emla_ops_enqueue_burst(adapter, ops, n);\n+\n+\t\t\t\tif (likely(nb_enqueued == n))\n+\t\t\t\t\tgoto check;\n+\n+\t\t\t\t/* Failed to enqueue events case */\n+\t\t\t\tfor (i = nb_enqueued; i < n; i++)\n+\t\t\t\t\temla_circular_buffer_add(&adapter->ebuf, ops[i]);\n+\n+check:\n+\t\t\t\tnb_deq += n;\n+\n+\t\t\t\tif (nb_deq >= max_deq) {\n+\t\t\t\t\tif ((qp + 1) == dev_qps)\n+\t\t\t\t\t\tadapter->next_mldev_id = (mldev_id + 1) % num_mldev;\n+\n+\t\t\t\t\tcurr_dev->next_queue_pair_id =\n+\t\t\t\t\t\t(qp + 1) % dev->data->nb_queue_pairs;\n+\n+\t\t\t\t\treturn nb_deq;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t\tadapter->next_mldev_id = 0;\n+\t} while (done == false);\n+\n+\treturn nb_deq;\n+}\n+\n+static int\n+emla_ml_adapter_run(struct event_ml_adapter *adapter, unsigned int max_ops)\n+{\n+\tunsigned int ops_left = max_ops;\n+\n+\twhile (ops_left > 0) {\n+\t\tunsigned int e_cnt, d_cnt;\n+\n+\t\te_cnt = emla_ml_adapter_deq_run(adapter, ops_left);\n+\t\tops_left -= RTE_MIN(ops_left, e_cnt);\n+\n+\t\td_cnt = emla_ml_adapter_enq_run(adapter, ops_left);\n+\t\tops_left -= RTE_MIN(ops_left, d_cnt);\n+\n+\t\tif (e_cnt == 0 && d_cnt == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (ops_left == max_ops) {\n+\t\trte_event_maintain(adapter->eventdev_id, adapter->event_port_id, 0);\n+\t\treturn -EAGAIN;\n+\t} else\n+\t\treturn 0;\n+}\n+\n+static int\n+emla_service_func(void *args)\n+{\n+\tstruct event_ml_adapter *adapter = args;\n+\tint ret;\n+\n+\tif (rte_spinlock_trylock(&adapter->lock) == 0)\n+\t\treturn 0;\n+\tret = emla_ml_adapter_run(adapter, adapter->max_nb);\n+\trte_spinlock_unlock(&adapter->lock);\n+\n+\treturn ret;\n+}\n+\n+static int\n+emla_init_service(struct event_ml_adapter *adapter, uint8_t id)\n+{\n+\tstruct rte_event_ml_adapter_conf adapter_conf;\n+\tstruct rte_service_spec service;\n+\tint ret;\n+\tuint32_t impl_rel;\n+\n+\tif (adapter->service_initialized)\n+\t\treturn 0;\n+\n+\tmemset(&service, 0, sizeof(service));\n+\tsnprintf(service.name, ML_ADAPTER_NAME_LEN, \"rte_event_ml_adapter_%d\", id);\n+\tservice.socket_id = adapter->socket_id;\n+\tservice.callback = emla_service_func;\n+\tservice.callback_userdata = adapter;\n+\n+\t/* Service function handles locking for queue add/del updates */\n+\tservice.capabilities = RTE_SERVICE_CAP_MT_SAFE;\n+\tret = 
rte_service_component_register(&service, &adapter->service_id);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to register service %s err = %\" PRId32, service.name, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = adapter->conf_cb(id, adapter->eventdev_id, &adapter_conf, adapter->conf_arg);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"configuration callback failed err = %\" PRId32, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tadapter->max_nb = adapter_conf.max_nb;\n+\tadapter->event_port_id = adapter_conf.event_port_id;\n+\n+\tif (rte_event_port_attr_get(adapter->eventdev_id, adapter->event_port_id,\n+\t\t\t\t    RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE, &impl_rel)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get port info for eventdev %\" PRId32,\n+\t\t\t\t adapter->eventdev_id);\n+\t\temla_circular_buffer_free(&adapter->ebuf);\n+\t\trte_free(adapter);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tadapter->implicit_release_disabled = (uint8_t)impl_rel;\n+\tadapter->service_initialized = 1;\n+\n+\treturn ret;\n+}\n+\n static void\n emla_update_qp_info(struct event_ml_adapter *adapter, struct ml_device_info *dev_info,\n \t\t    int32_t queue_pair_id, uint8_t add)\n@@ -389,6 +862,40 @@ emla_update_qp_info(struct event_ml_adapter *adapter, struct ml_device_info *dev\n \t}\n }\n \n+static int\n+emla_add_queue_pair(struct event_ml_adapter *adapter, int16_t mldev_id, int queue_pair_id)\n+{\n+\tstruct ml_device_info *dev_info = &adapter->mldevs[mldev_id];\n+\tstruct ml_queue_pair_info *qpairs;\n+\tuint32_t i;\n+\n+\tif (dev_info->qpairs == NULL) {\n+\t\tdev_info->qpairs = rte_zmalloc_socket(adapter->mem_name,\n+\t\t\t\t\t\t      dev_info->dev->data->nb_queue_pairs *\n+\t\t\t\t\t\t\t      sizeof(struct ml_queue_pair_info),\n+\t\t\t\t\t\t      0, adapter->socket_id);\n+\t\tif (dev_info->qpairs == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tqpairs = dev_info->qpairs;\n+\n+\t\tif (emla_circular_buffer_init(\"mla_mldev_circular_buffer\", &qpairs->mlbuf,\n+\t\t\t\t\t      ML_ADAPTER_OPS_BUFFER_SIZE)) {\n+\t\t\tRTE_EDEV_LOG_ERR(\"Failed to get memory for mldev buffer\");\n+\t\t\trte_free(qpairs);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tif (queue_pair_id == -1) {\n+\t\tfor (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)\n+\t\t\temla_update_qp_info(adapter, dev_info, i, 1);\n+\t} else\n+\t\temla_update_qp_info(adapter, dev_info, (uint16_t)queue_pair_id, 1);\n+\n+\treturn 0;\n+}\n+\n int\n rte_event_ml_adapter_queue_pair_add(uint8_t id, int16_t mldev_id, int32_t queue_pair_id,\n \t\t\t\t    const struct rte_event *event)\n@@ -458,6 +965,36 @@ rte_event_ml_adapter_queue_pair_add(uint8_t id, int16_t mldev_id, int32_t queue_\n \t\t\temla_update_qp_info(adapter, &adapter->mldevs[mldev_id], queue_pair_id, 1);\n \t}\n \n+\t/* In case HW cap is RTE_EVENT_ML_ADAPTER_CAP_INTERNAL_PORT_OP_NEW, or SW adapter, initiate\n+\t * services so the application can choose which ever way it wants to use the adapter.\n+\t *\n+\t * Case 1: RTE_EVENT_ML_ADAPTER_CAP_INTERNAL_PORT_OP_NEW. Application may wants to use one\n+\t * of below two modes\n+\t *\n+\t * a. OP_FORWARD mode -> HW Dequeue + SW enqueue\n+\t * b. OP_NEW mode -> HW Dequeue\n+\t *\n+\t * Case 2: No HW caps, use SW adapter\n+\t *\n+\t * a. OP_FORWARD mode -> SW enqueue & dequeue\n+\t * b. 
OP_NEW mode -> SW Dequeue\n+\t */\n+\tif ((cap & RTE_EVENT_ML_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&\n+\t     !(cap & RTE_EVENT_ML_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&\n+\t     adapter->mode == RTE_EVENT_ML_ADAPTER_OP_FORWARD) ||\n+\t    (!(cap & RTE_EVENT_ML_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&\n+\t     !(cap & RTE_EVENT_ML_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&\n+\t     !(cap & RTE_EVENT_ML_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND))) {\n+\t\trte_spinlock_lock(&adapter->lock);\n+\t\tret = emla_init_service(adapter, id);\n+\t\tif (ret == 0)\n+\t\t\tret = emla_add_queue_pair(adapter, mldev_id, queue_pair_id);\n+\t\trte_spinlock_unlock(&adapter->lock);\n+\n+\t\tif (ret == 0)\n+\t\t\trte_service_component_runstate_set(adapter->service_id, 1);\n+\t}\n+\n \treturn ret;\n }\n \n@@ -529,6 +1066,7 @@ rte_event_ml_adapter_queue_pair_del(uint8_t id, int16_t mldev_id, int32_t queue_\n \t\t}\n \n \t\trte_spinlock_unlock(&adapter->lock);\n+\t\trte_service_component_runstate_set(adapter->service_id, adapter->nb_qps);\n \t}\n \n \treturn ret;\n",
    "prefixes": [
        "06/11"
    ]
}
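The mbox links in the payload are apply-ready. A minimal sketch of pulling this patch (or the whole series) down for git am, again assuming the requests library; the output filename is arbitrary:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/135784/").json()

# Download this single patch as an mbox suitable for `git am`.
with open("event-ml-service-function.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# The series entry carries a combined mbox of all 11 patches.
series_mbox = requests.get(patch["series"][0]["mbox"]).text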