get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.
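
For scripted access, here is a minimal sketch using Python with the third-party "requests" package (an assumption; any HTTP client works), issuing the same request as the raw exchange below:

# Minimal sketch: fetch this patch via the REST API.
# Assumes the third-party "requests" package is installed.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/131849/")
resp.raise_for_status()
patch = resp.json()
# Field names match the JSON body shown below.
print(patch["name"], patch["state"], patch["submitter"]["email"])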

GET /api/patches/131849/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131849,
    "url": "http://patches.dpdk.org/api/patches/131849/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230923133449.3780841-6-amitprakashs@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230923133449.3780841-6-amitprakashs@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230923133449.3780841-6-amitprakashs@marvell.com",
    "date": "2023-09-23T13:34:42",
    "name": "[v3,05/12] eventdev: add support for DMA adapter service function",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "67b453811251783b621baa9b1e7b8c5e3ec73460",
    "submitter": {
        "id": 2699,
        "url": "http://patches.dpdk.org/api/people/2699/?format=api",
        "name": "Amit Prakash Shukla",
        "email": "amitprakashs@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230923133449.3780841-6-amitprakashs@marvell.com/mbox/",
    "series": [
        {
            "id": 29609,
            "url": "http://patches.dpdk.org/api/series/29609/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29609",
            "date": "2023-09-23T13:34:37",
            "name": "event DMA adapter library support",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/29609/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/131849/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/131849/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CD68142624;\n\tSat, 23 Sep 2023 15:35:43 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2433D402E4;\n\tSat, 23 Sep 2023 15:35:36 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id CF672402E2\n for <dev@dpdk.org>; Sat, 23 Sep 2023 15:35:33 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 38NCqMoU005884; Sat, 23 Sep 2023 06:35:33 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3t9yhkr5nt-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Sat, 23 Sep 2023 06:35:33 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Sat, 23 Sep 2023 06:35:30 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Sat, 23 Sep 2023 06:35:30 -0700",
            "from localhost.localdomain (unknown [10.28.36.157])\n by maili.marvell.com (Postfix) with ESMTP id A91DA3F7065;\n Sat, 23 Sep 2023 06:35:26 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=WgKCZX+Fn2PguTmB3JNhQ8eyRST2khe4CDy6C0Efo10=;\n b=BgV1ZmAc75rns9M90froyzup0jF5aIbvpMtOwkqQdEVHv4Vb9fZzbpJQ9+5rcrDNZxQT\n AlMb1qrodUhKkr61PkS5wqoAlafie75SLRH69R7o7W/3uEhMAGcCilWAdOlvy2p8h7Ca\n U9t8HrAZSzYiy+hP+fjNmQeo/Z5T/buYxpf5vVKbMbfjRTZJLgL2oxBtO5pABpnYT/wg\n 54KbeijqdEdFB8Y3QOUZkppeB1t5uAliJaZCwD5TkX5KBVSHidhPoHhrVTGBXZL/SVeg\n VjRRkp8cuNjmMY9MFvtQMuDXBMZtRuCKd7hewKaljZOjjzhXnS+HVf6aBzi333ylAkBs sw==",
        "From": "Amit Prakash Shukla <amitprakashs@marvell.com>",
        "To": "Amit Prakash Shukla <amitprakashs@marvell.com>, Jerin Jacob\n <jerinj@marvell.com>",
        "CC": "<dev@dpdk.org>, <fengchengwen@huawei.com>, <kevin.laatz@intel.com>,\n <bruce.richardson@intel.com>, <conor.walsh@intel.com>,\n <vattunuru@marvell.com>, <g.singh@nxp.com>,\n <sachin.saxena@oss.nxp.com>, <hemant.agrawal@nxp.com>,\n <cheng1.jiang@intel.com>, <ndabilpuram@marvell.com>,\n <anoobj@marvell.com>, <mb@smartsharesystems.com>",
        "Subject": "[PATCH v3 05/12] eventdev: add support for DMA adapter service\n function",
        "Date": "Sat, 23 Sep 2023 19:04:42 +0530",
        "Message-ID": "<20230923133449.3780841-6-amitprakashs@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230923133449.3780841-1-amitprakashs@marvell.com>",
        "References": "<20230922201337.3347666-1-amitprakashs@marvell.com>\n <20230923133449.3780841-1-amitprakashs@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "9h_ShMO3UHVYcWcVjBpbGEGiRQ32sdhE",
        "X-Proofpoint-ORIG-GUID": "9h_ShMO3UHVYcWcVjBpbGEGiRQ32sdhE",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2023-09-23_10,2023-09-21_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Added support for DMA adapter service function for event devices.\nEnqueue and dequeue of event from eventdev and DMA device are done\nbased on the adapter mode and the supported HW capabilities.\n\nSigned-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>\n---\n lib/eventdev/rte_event_dma_adapter.c | 589 +++++++++++++++++++++++++++\n 1 file changed, 589 insertions(+)",
    "diff": "diff --git a/lib/eventdev/rte_event_dma_adapter.c b/lib/eventdev/rte_event_dma_adapter.c\nindex dd58188bf3..8349b95796 100644\n--- a/lib/eventdev/rte_event_dma_adapter.c\n+++ b/lib/eventdev/rte_event_dma_adapter.c\n@@ -2,6 +2,8 @@\n  * Copyright (c) 2023 Marvell.\n  */\n \n+#include <rte_service_component.h>\n+\n #include \"rte_eventdev.h\"\n #include \"eventdev_pmd.h\"\n #include \"rte_event_dma_adapter.h\"\n@@ -69,6 +71,10 @@ struct dma_device_info {\n \n \t/* Number of vchans configured for a DMA device. */\n \tuint16_t num_dma_dev_vchan;\n+\n+\t/* Next queue pair to be processed */\n+\tuint16_t next_vchan_id;\n+\n } __rte_cache_aligned;\n \n struct event_dma_adapter {\n@@ -90,6 +96,9 @@ struct event_dma_adapter {\n \t/* Lock to serialize config updates with service function */\n \trte_spinlock_t lock;\n \n+\t/* Next dma device to be processed */\n+\tuint16_t next_dmadev_id;\n+\n \t/* DMA device structure array */\n \tstruct dma_device_info *dma_devs;\n \n@@ -107,6 +116,26 @@ struct event_dma_adapter {\n \n \t/* No. of vchan queue configured */\n \tuint16_t nb_vchanq;\n+\n+\t/* Per adapter EAL service ID */\n+\tuint32_t service_id;\n+\n+\t/* Service initialization state */\n+\tuint8_t service_initialized;\n+\n+\t/* Max DMA ops processed in any service function invocation */\n+\tuint32_t max_nb;\n+\n+\t/* Store event port's implicit release capability */\n+\tuint8_t implicit_release_disabled;\n+\n+\t/* Flag to indicate backpressure at dma_dev\n+\t * Stop further dequeuing events from eventdev\n+\t */\n+\tbool stop_enq_to_dma_dev;\n+\n+\t/* Loop counter to flush dma ops */\n+\tuint16_t transmit_loop_count;\n } __rte_cache_aligned;\n \n static struct event_dma_adapter **event_dma_adapter;\n@@ -148,6 +177,18 @@ edma_array_init(void)\n \treturn 0;\n }\n \n+static inline bool\n+edma_circular_buffer_batch_ready(struct dma_ops_circular_buffer *bufp)\n+{\n+\treturn bufp->count >= DMA_BATCH_SIZE;\n+}\n+\n+static inline bool\n+edma_circular_buffer_space_for_batch(struct dma_ops_circular_buffer *bufp)\n+{\n+\treturn (bufp->size - bufp->count) >= DMA_BATCH_SIZE;\n+}\n+\n static inline int\n edma_circular_buffer_init(const char *name, struct dma_ops_circular_buffer *buf, uint16_t sz)\n {\n@@ -166,6 +207,67 @@ edma_circular_buffer_free(struct dma_ops_circular_buffer *buf)\n \trte_free(buf->op_buffer);\n }\n \n+static inline int\n+edma_circular_buffer_add(struct dma_ops_circular_buffer *bufp, struct rte_event_dma_adapter_op *op)\n+{\n+\tuint16_t *tail = &bufp->tail;\n+\n+\tbufp->op_buffer[*tail] = op;\n+\n+\t/* circular buffer, go round */\n+\t*tail = (*tail + 1) % bufp->size;\n+\tbufp->count++;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter,\n+\t\t\t\t      struct dma_ops_circular_buffer *bufp, uint8_t dma_dev_id,\n+\t\t\t\t      uint16_t vchan, uint16_t *nb_ops_flushed)\n+{\n+\tstruct rte_event_dma_adapter_op *op;\n+\tstruct dma_vchan_info *tq;\n+\tuint16_t *head = &bufp->head;\n+\tuint16_t *tail = &bufp->tail;\n+\tuint16_t n;\n+\tuint16_t i;\n+\tint ret;\n+\n+\tif (*tail > *head)\n+\t\tn = *tail - *head;\n+\telse if (*tail < *head)\n+\t\tn = bufp->size - *head;\n+\telse {\n+\t\t*nb_ops_flushed = 0;\n+\t\treturn 0; /* buffer empty */\n+\t}\n+\n+\ttq = &adapter->dma_devs[dma_dev_id].tqmap[vchan];\n+\n+\tfor (i = 0; i < n; i++)\t{\n+\t\top = bufp->op_buffer[*head];\n+\t\tret = rte_dma_copy_sg(dma_dev_id, vchan, op->src_seg, op->dst_seg,\n+\t\t\t\t      op->nb_src, op->nb_dst, op->flags);\n+\t\tif (ret < 
0)\n+\t\t\tbreak;\n+\n+\t\t/* Enqueue in transaction queue. */\n+\t\tedma_circular_buffer_add(&tq->dma_buf, op);\n+\n+\t\t*head = (*head + 1) % bufp->size;\n+\t}\n+\n+\t*nb_ops_flushed = i;\n+\tbufp->count -= *nb_ops_flushed;\n+\tif (!bufp->count) {\n+\t\t*head = 0;\n+\t\t*tail = 0;\n+\t}\n+\n+\treturn *nb_ops_flushed == n ? 0 : -1;\n+}\n+\n static int\n edma_default_config_cb(uint8_t id, uint8_t evdev_id, struct rte_event_dma_adapter_conf *conf,\n \t\t       void *arg)\n@@ -360,6 +462,406 @@ rte_event_dma_adapter_free(uint8_t id)\n \treturn 0;\n }\n \n+static inline unsigned int\n+edma_enq_to_dma_dev(struct event_dma_adapter *adapter, struct rte_event *ev, unsigned int cnt)\n+{\n+\tstruct dma_vchan_info *vchan_qinfo = NULL;\n+\tstruct rte_event_dma_adapter_op *dma_op;\n+\tuint16_t vchan, nb_enqueued = 0;\n+\tint16_t dma_dev_id;\n+\tunsigned int i, n;\n+\tint ret;\n+\n+\tret = 0;\n+\tn = 0;\n+\n+\tfor (i = 0; i < cnt; i++) {\n+\t\tdma_op = ev[i].event_ptr;\n+\t\tif (dma_op == NULL)\n+\t\t\tcontinue;\n+\n+\t\t/* Expected to have response info appended to dma_op. */\n+\n+\t\tdma_dev_id = dma_op->dma_dev_id;\n+\t\tvchan = dma_op->vchan;\n+\t\tvchan_qinfo = &adapter->dma_devs[dma_dev_id].vchanq[vchan];\n+\t\tif (!vchan_qinfo->vq_enabled) {\n+\t\t\tif (dma_op != NULL && dma_op->op_mp != NULL)\n+\t\t\t\trte_mempool_put(dma_op->op_mp, dma_op);\n+\t\t\tcontinue;\n+\t\t}\n+\t\tedma_circular_buffer_add(&vchan_qinfo->dma_buf, dma_op);\n+\n+\t\tif (edma_circular_buffer_batch_ready(&vchan_qinfo->dma_buf)) {\n+\t\t\tret = edma_circular_buffer_flush_to_dma_dev(adapter, &vchan_qinfo->dma_buf,\n+\t\t\t\t\t\t\t\t    dma_dev_id, vchan,\n+\t\t\t\t\t\t\t\t    &nb_enqueued);\n+\t\t\tn += nb_enqueued;\n+\n+\t\t\t/**\n+\t\t\t * If some dma ops failed to flush to dma_dev and\n+\t\t\t * space for another batch is not available, stop\n+\t\t\t * dequeue from eventdev momentarily\n+\t\t\t */\n+\t\t\tif (unlikely(ret < 0 &&\n+\t\t\t\t     !edma_circular_buffer_space_for_batch(&vchan_qinfo->dma_buf)))\n+\t\t\t\tadapter->stop_enq_to_dma_dev = true;\n+\t\t}\n+\t}\n+\n+\treturn n;\n+}\n+\n+static unsigned int\n+edma_adapter_dev_flush(struct event_dma_adapter *adapter, int16_t dma_dev_id,\n+\t\t       uint16_t *nb_ops_flushed)\n+{\n+\tstruct dma_vchan_info *vchan_info;\n+\tstruct dma_device_info *dev_info;\n+\tuint16_t nb = 0, nb_enqueued = 0;\n+\tuint16_t vchan, nb_vchans;\n+\n+\tdev_info = &adapter->dma_devs[dma_dev_id];\n+\tnb_vchans = dev_info->num_vchanq;\n+\n+\tfor (vchan = 0; vchan < nb_vchans; vchan++) {\n+\n+\t\tvchan_info = &dev_info->vchanq[vchan];\n+\t\tif (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))\n+\t\t\tcontinue;\n+\n+\t\tedma_circular_buffer_flush_to_dma_dev(adapter, &vchan_info->dma_buf, dma_dev_id,\n+\t\t\t\t\t\t      vchan, &nb_enqueued);\n+\t\t*nb_ops_flushed += vchan_info->dma_buf.count;\n+\t\tnb += nb_enqueued;\n+\t}\n+\n+\treturn nb;\n+}\n+\n+static unsigned int\n+edma_adapter_enq_flush(struct event_dma_adapter *adapter)\n+{\n+\tint16_t dma_dev_id;\n+\tuint16_t nb_enqueued = 0;\n+\tuint16_t nb_ops_flushed = 0;\n+\tuint16_t num_dma_dev = rte_dma_count_avail();\n+\n+\tfor (dma_dev_id = 0; dma_dev_id < num_dma_dev; dma_dev_id++)\n+\t\tnb_enqueued += edma_adapter_dev_flush(adapter, dma_dev_id, &nb_ops_flushed);\n+\t/**\n+\t * Enable dequeue from eventdev if all ops from circular\n+\t * buffer flushed to dma_dev\n+\t */\n+\tif (!nb_ops_flushed)\n+\t\tadapter->stop_enq_to_dma_dev = false;\n+\n+\treturn nb_enqueued;\n+}\n+\n+/* Flush an instance's enqueue buffers every 
DMA_ENQ_FLUSH_THRESHOLD\n+ * iterations of edma_adapter_enq_run()\n+ */\n+#define DMA_ENQ_FLUSH_THRESHOLD 1024\n+\n+static int\n+edma_adapter_enq_run(struct event_dma_adapter *adapter, unsigned int max_enq)\n+{\n+\tuint8_t event_port_id = adapter->event_port_id;\n+\tuint8_t event_dev_id = adapter->eventdev_id;\n+\tstruct rte_event ev[DMA_BATCH_SIZE];\n+\tunsigned int nb_enq, nb_enqueued;\n+\tuint16_t n;\n+\n+\tif (adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)\n+\t\treturn 0;\n+\n+\tnb_enqueued = 0;\n+\tfor (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {\n+\n+\t\tif (unlikely(adapter->stop_enq_to_dma_dev)) {\n+\t\t\tnb_enqueued += edma_adapter_enq_flush(adapter);\n+\n+\t\t\tif (unlikely(adapter->stop_enq_to_dma_dev))\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tn = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, DMA_BATCH_SIZE, 0);\n+\n+\t\tif (!n)\n+\t\t\tbreak;\n+\n+\t\tnb_enqueued += edma_enq_to_dma_dev(adapter, ev, n);\n+\t}\n+\n+\tif ((++adapter->transmit_loop_count & (DMA_ENQ_FLUSH_THRESHOLD - 1)) == 0)\n+\t\tnb_enqueued += edma_adapter_enq_flush(adapter);\n+\n+\treturn nb_enqueued;\n+}\n+\n+#define DMA_ADAPTER_MAX_EV_ENQ_RETRIES 100\n+\n+static inline uint16_t\n+edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_event_dma_adapter_op **ops,\n+\t\t       uint16_t num)\n+{\n+\tuint8_t event_port_id = adapter->event_port_id;\n+\tuint8_t event_dev_id = adapter->eventdev_id;\n+\tstruct rte_event events[DMA_BATCH_SIZE];\n+\tstruct rte_event *response_info;\n+\tuint16_t nb_enqueued, nb_ev;\n+\tuint8_t retry;\n+\tuint8_t i;\n+\n+\tnb_ev = 0;\n+\tretry = 0;\n+\tnb_enqueued = 0;\n+\tnum = RTE_MIN(num, DMA_BATCH_SIZE);\n+\tfor (i = 0; i < num; i++) {\n+\t\tstruct rte_event *ev = &events[nb_ev++];\n+\n+\t\t/* Expected to have response info appended to dma_op. 
*/\n+\t\tresponse_info = (struct rte_event *)((uint8_t *)ops[i] +\n+\t\t\t\t\t\t\t  sizeof(struct rte_event_dma_adapter_op));\n+\t\tif (unlikely(response_info == NULL)) {\n+\t\t\tif (ops[i] != NULL && ops[i]->op_mp != NULL)\n+\t\t\t\trte_mempool_put(ops[i]->op_mp, ops[i]);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\trte_memcpy(ev, response_info, sizeof(struct rte_event));\n+\t\tev->event_ptr = ops[i];\n+\t\tev->event_type = RTE_EVENT_TYPE_DMADEV;\n+\t\tif (adapter->implicit_release_disabled)\n+\t\t\tev->op = RTE_EVENT_OP_FORWARD;\n+\t\telse\n+\t\t\tev->op = RTE_EVENT_OP_NEW;\n+\t}\n+\n+\tdo {\n+\t\tnb_enqueued += rte_event_enqueue_burst(event_dev_id, event_port_id,\n+\t\t\t\t\t\t       &events[nb_enqueued], nb_ev - nb_enqueued);\n+\n+\t} while (retry++ < DMA_ADAPTER_MAX_EV_ENQ_RETRIES && nb_enqueued < nb_ev);\n+\n+\treturn nb_enqueued;\n+}\n+\n+static int\n+edma_circular_buffer_flush_to_evdev(struct event_dma_adapter *adapter,\n+\t\t\t\t    struct dma_ops_circular_buffer *bufp,\n+\t\t\t\t    uint16_t *enqueue_count)\n+{\n+\tstruct rte_event_dma_adapter_op **ops = bufp->op_buffer;\n+\tuint16_t n = 0, nb_ops_flushed;\n+\tuint16_t *head = &bufp->head;\n+\tuint16_t *tail = &bufp->tail;\n+\n+\tif (*tail > *head)\n+\t\tn = *tail - *head;\n+\telse if (*tail < *head)\n+\t\tn = bufp->size - *head;\n+\telse {\n+\t\tif (enqueue_count)\n+\t\t\t*enqueue_count = 0;\n+\t\treturn 0; /* buffer empty */\n+\t}\n+\n+\tif (enqueue_count && n > *enqueue_count)\n+\t\tn = *enqueue_count;\n+\n+\tnb_ops_flushed = edma_ops_enqueue_burst(adapter, &ops[*head], n);\n+\tif (enqueue_count)\n+\t\t*enqueue_count = nb_ops_flushed;\n+\n+\tbufp->count -= nb_ops_flushed;\n+\tif (!bufp->count) {\n+\t\t*head = 0;\n+\t\t*tail = 0;\n+\t\treturn 0; /* buffer empty */\n+\t}\n+\n+\t*head = (*head + nb_ops_flushed) % bufp->size;\n+\treturn 1;\n+}\n+\n+static void\n+edma_ops_buffer_flush(struct event_dma_adapter *adapter)\n+{\n+\tif (likely(adapter->ebuf.count == 0))\n+\t\treturn;\n+\n+\twhile (edma_circular_buffer_flush_to_evdev(adapter, &adapter->ebuf, NULL))\n+\t\t;\n+}\n+\n+static inline unsigned int\n+edma_adapter_deq_run(struct event_dma_adapter *adapter, unsigned int max_deq)\n+{\n+\tstruct dma_vchan_info *vchan_info;\n+\tstruct dma_ops_circular_buffer *tq_buf;\n+\tstruct rte_event_dma_adapter_op *ops;\n+\tuint16_t n, nb_deq, nb_enqueued, i;\n+\tstruct dma_device_info *dev_info;\n+\tuint16_t vchan, num_vchan;\n+\tuint16_t num_dma_dev;\n+\tint16_t dma_dev_id;\n+\tuint16_t index;\n+\tbool done;\n+\tbool err;\n+\n+\tnb_deq = 0;\n+\tedma_ops_buffer_flush(adapter);\n+\n+\tnum_dma_dev = rte_dma_count_avail();\n+\tdo {\n+\t\tdone = true;\n+\n+\t\tfor (dma_dev_id = adapter->next_dmadev_id; dma_dev_id < num_dma_dev; dma_dev_id++) {\n+\t\t\tuint16_t queues = 0;\n+\t\t\tdev_info = &adapter->dma_devs[dma_dev_id];\n+\t\t\tnum_vchan = dev_info->num_vchanq;\n+\n+\t\t\tfor (vchan = dev_info->next_vchan_id; queues < num_vchan;\n+\t\t\t     vchan = (vchan + 1) % num_vchan, queues++) {\n+\n+\t\t\t\tvchan_info = &dev_info->vchanq[vchan];\n+\t\t\t\tif (unlikely(vchan_info == NULL || !vchan_info->vq_enabled))\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tn = rte_dma_completed(dma_dev_id, vchan, DMA_BATCH_SIZE,\n+\t\t\t\t\t\t&index, &err);\n+\t\t\t\tif (!n)\n+\t\t\t\t\tcontinue;\n+\n+\t\t\t\tdone = false;\n+\n+\t\t\t\ttq_buf = &dev_info->tqmap[vchan].dma_buf;\n+\n+\t\t\t\tnb_enqueued = n;\n+\t\t\t\tif (unlikely(!adapter->ebuf.count))\n+\t\t\t\t\tedma_circular_buffer_flush_to_evdev(adapter, tq_buf,\n+\t\t\t\t\t\t\t\t\t    &nb_enqueued);\n+\n+\t\t\t\tif (likely(nb_enqueued 
== n))\n+\t\t\t\t\tgoto check;\n+\n+\t\t\t\t/* Failed to enqueue events case */\n+\t\t\t\tfor (i = nb_enqueued; i < n; i++) {\n+\t\t\t\t\tops = tq_buf->op_buffer[tq_buf->head];\n+\t\t\t\t\tedma_circular_buffer_add(&adapter->ebuf, ops);\n+\t\t\t\t\ttq_buf->head = (tq_buf->head + 1) % tq_buf->size;\n+\t\t\t\t}\n+\n+check:\n+\t\t\t\tnb_deq += n;\n+\t\t\t\tif (nb_deq >= max_deq) {\n+\t\t\t\t\tif ((vchan + 1) == num_vchan)\n+\t\t\t\t\t\tadapter->next_dmadev_id =\n+\t\t\t\t\t\t\t\t(dma_dev_id + 1) % num_dma_dev;\n+\n+\t\t\t\t\tdev_info->next_vchan_id = (vchan + 1) % num_vchan;\n+\n+\t\t\t\t\treturn nb_deq;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t\tadapter->next_dmadev_id = 0;\n+\n+\t} while (done == false);\n+\n+\treturn nb_deq;\n+}\n+\n+static int\n+edma_adapter_run(struct event_dma_adapter *adapter, unsigned int max_ops)\n+{\n+\tunsigned int ops_left = max_ops;\n+\n+\twhile (ops_left > 0) {\n+\t\tunsigned int e_cnt, d_cnt;\n+\n+\t\te_cnt = edma_adapter_deq_run(adapter, ops_left);\n+\t\tops_left -= RTE_MIN(ops_left, e_cnt);\n+\n+\t\td_cnt = edma_adapter_enq_run(adapter, ops_left);\n+\t\tops_left -= RTE_MIN(ops_left, d_cnt);\n+\n+\t\tif (e_cnt == 0 && d_cnt == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (ops_left == max_ops) {\n+\t\trte_event_maintain(adapter->eventdev_id, adapter->event_port_id, 0);\n+\t\treturn -EAGAIN;\n+\t} else\n+\t\treturn 0;\n+}\n+\n+static int\n+edma_service_func(void *args)\n+{\n+\tstruct event_dma_adapter *adapter = args;\n+\tint ret;\n+\n+\tif (rte_spinlock_trylock(&adapter->lock) == 0)\n+\t\treturn 0;\n+\tret = edma_adapter_run(adapter, adapter->max_nb);\n+\trte_spinlock_unlock(&adapter->lock);\n+\n+\treturn ret;\n+}\n+\n+static int\n+edma_init_service(struct event_dma_adapter *adapter, uint8_t id)\n+{\n+\tstruct rte_event_dma_adapter_conf adapter_conf;\n+\tstruct rte_service_spec service;\n+\tuint32_t impl_rel;\n+\tint ret;\n+\n+\tif (adapter->service_initialized)\n+\t\treturn 0;\n+\n+\tmemset(&service, 0, sizeof(service));\n+\tsnprintf(service.name, DMA_ADAPTER_NAME_LEN, \"rte_event_dma_adapter_%d\", id);\n+\tservice.socket_id = adapter->socket_id;\n+\tservice.callback = edma_service_func;\n+\tservice.callback_userdata = adapter;\n+\n+\t/* Service function handles locking for queue add/del updates */\n+\tservice.capabilities = RTE_SERVICE_CAP_MT_SAFE;\n+\tret = rte_service_component_register(&service, &adapter->service_id);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"failed to register service %s err = %\" PRId32, service.name, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = adapter->conf_cb(id, adapter->eventdev_id, &adapter_conf, adapter->conf_arg);\n+\tif (ret) {\n+\t\tRTE_EDEV_LOG_ERR(\"configuration callback failed err = %\" PRId32, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tadapter->max_nb = adapter_conf.max_nb;\n+\tadapter->event_port_id = adapter_conf.event_port_id;\n+\n+\tif (rte_event_port_attr_get(adapter->eventdev_id, adapter->event_port_id,\n+\t\t\t\t    RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE, &impl_rel)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Failed to get port info for eventdev %\" PRId32,\n+\t\t\t\t adapter->eventdev_id);\n+\t\tedma_circular_buffer_free(&adapter->ebuf);\n+\t\trte_free(adapter);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tadapter->implicit_release_disabled = (uint8_t)impl_rel;\n+\tadapter->service_initialized = 1;\n+\n+\treturn ret;\n+}\n+\n static void\n edma_update_vchanq_info(struct event_dma_adapter *adapter, struct dma_device_info *dev_info,\n \t\t\tuint16_t vchan, uint8_t add)\n@@ -391,6 +893,60 @@ edma_update_vchanq_info(struct event_dma_adapter *adapter, struct 
dma_device_inf\n \t}\n }\n \n+static int\n+edma_add_vchan(struct event_dma_adapter *adapter, int16_t dma_dev_id, uint16_t vchan)\n+{\n+\tstruct dma_device_info *dev_info = &adapter->dma_devs[dma_dev_id];\n+\tstruct dma_vchan_info *vchanq;\n+\tstruct dma_vchan_info *tqmap;\n+\tuint16_t nb_vchans;\n+\tuint32_t i;\n+\n+\tif (dev_info->vchanq == NULL) {\n+\t\tnb_vchans = dev_info->num_dma_dev_vchan;\n+\n+\t\tdev_info->vchanq = rte_zmalloc_socket(adapter->mem_name,\n+\t\t\t\tnb_vchans * sizeof(struct dma_vchan_info),\n+\t\t\t\t0, adapter->socket_id);\n+\t\tif (dev_info->vchanq == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tdev_info->tqmap = rte_zmalloc_socket(adapter->mem_name,\n+\t\t\t\tnb_vchans * sizeof(struct dma_vchan_info),\n+\t\t\t\t0, adapter->socket_id);\n+\t\tif (dev_info->tqmap == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tfor (i = 0; i < nb_vchans; i++) {\n+\t\t\tvchanq = &dev_info->vchanq[i];\n+\n+\t\t\tif (edma_circular_buffer_init(\"dma_dev_circular_buffer\", &vchanq->dma_buf,\n+\t\t\t\t\t\tDMA_ADAPTER_OPS_BUFFER_SIZE)) {\n+\t\t\t\tRTE_EDEV_LOG_ERR(\"Failed to get memory for dma_dev buffer\");\n+\t\t\t\trte_free(vchanq);\n+\t\t\t\treturn -ENOMEM;\n+\t\t\t}\n+\n+\t\t\ttqmap = &dev_info->tqmap[i];\n+\t\t\tif (edma_circular_buffer_init(\"dma_dev_circular_trans_buf\", &tqmap->dma_buf,\n+\t\t\t\t\t\tDMA_ADAPTER_OPS_BUFFER_SIZE)) {\n+\t\t\t\tRTE_EDEV_LOG_ERR(\n+\t\t\t\t\t\"Failed to get memory for dma_dev transaction buffer\");\n+\t\t\t\trte_free(tqmap);\n+\t\t\t\treturn -ENOMEM;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (vchan == RTE_DMA_ALL_VCHAN) {\n+\t\tfor (i = 0; i < dev_info->num_dma_dev_vchan; i++)\n+\t\t\tedma_update_vchanq_info(adapter, dev_info, i, 1);\n+\t} else\n+\t\tedma_update_vchanq_info(adapter, dev_info, vchan, 1);\n+\n+\treturn 0;\n+}\n+\n int\n rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,\n \t\t\t\tconst struct rte_event *event)\n@@ -470,6 +1026,38 @@ rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dma_dev_id, uint16_t vchan,\n \t\t\tedma_update_vchanq_info(adapter, &adapter->dma_devs[dma_dev_id], vchan, 1);\n \t}\n \n+\t/* In case HW cap is RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW, or SW adapter, initiate\n+\t * services so the application can choose which ever way it wants to use the adapter.\n+\t *\n+\t * Case 1: RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW. Application may wants to use one\n+\t * of below two modes\n+\t *\n+\t * a. OP_FORWARD mode -> HW Dequeue + SW enqueue\n+\t * b. OP_NEW mode -> HW Dequeue\n+\t *\n+\t * Case 2: No HW caps, use SW adapter\n+\t *\n+\t * a. OP_FORWARD mode -> SW enqueue & dequeue\n+\t * b. 
OP_NEW mode -> SW Dequeue\n+\t */\n+\tif ((cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&\n+\t     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&\n+\t     adapter->mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) ||\n+\t    (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&\n+\t     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&\n+\t     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))) {\n+\t\trte_spinlock_lock(&adapter->lock);\n+\t\tret = edma_init_service(adapter, id);\n+\t\tif (ret == 0)\n+\t\t\tret = edma_add_vchan(adapter, dma_dev_id, vchan);\n+\t\trte_spinlock_unlock(&adapter->lock);\n+\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\trte_service_component_runstate_set(adapter->service_id, 1);\n+\t}\n+\n \treturn 0;\n }\n \n@@ -533,6 +1121,7 @@ rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dma_dev_id, uint16_t vchan)\n \t\t}\n \n \t\trte_spinlock_unlock(&adapter->lock);\n+\t\trte_service_component_runstate_set(adapter->service_id, adapter->nb_vchanq);\n \t}\n \n \treturn ret;\n",
    "prefixes": [
        "v3",
        "05/12"
    ]
}
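
The "mbox" field above points at the raw patch email, which "git am" can apply directly. A minimal sketch, assuming Python with the "requests" package, git on PATH, and the current working directory being a DPDK checkout; the file name "patch.mbox" is illustrative:

# Minimal sketch: download the patch mbox and apply it with git am.
import subprocess
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/131849/").json()
mbox = requests.get(patch["mbox"]).text  # raw patch email in mbox format
with open("patch.mbox", "w", encoding="utf-8") as f:
    f.write(mbox)
subprocess.run(["git", "am", "patch.mbox"], check=True)  # apply to the checkout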