get:
Show a patch.

patch:
Update a patch. Partial update: only the fields supplied in the request body are changed.

put:
Update a patch. Full update: the request body replaces the patch resource.
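
GET is read-only and needs no authentication; PUT and PATCH require an authenticated account with maintainer rights on the project. Below is a minimal sketch of a read and a partial update in Python with the requests library, run against the patch from the example response that follows. The token value is a placeholder, and treating "state" as a maintainer-writable field is an illustrative assumption, not something this page confirms.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "REPLACE_WITH_API_TOKEN"  # placeholder; a real token comes from your Patchwork profile

# GET: show a patch (no authentication needed for reads)
patch = requests.get(f"{BASE}/patches/139445/").json()
print(patch["name"], "-", patch["state"])

# PATCH: partial update; only the fields in the request body change.
# Assumes the caller is a project maintainer and "state" is writable.
resp = requests.patch(
    f"{BASE}/patches/139445/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()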

GET /api/patches/139445/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 139445,
    "url": "http://patches.dpdk.org/api/patches/139445/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20240417082645.4259-1-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240417082645.4259-1-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240417082645.4259-1-pbhagavatula@marvell.com",
    "date": "2024-04-17T08:26:44",
    "name": "[v3,1/2] eventdev/dma: reorganize event DMA ops",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "9865383985b3c3b689e90daee6f0486f02aad7cc",
    "submitter": {
        "id": 1183,
        "url": "http://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20240417082645.4259-1-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 31767,
            "url": "http://patches.dpdk.org/api/series/31767/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31767",
            "date": "2024-04-17T08:26:44",
            "name": "[v3,1/2] eventdev/dma: reorganize event DMA ops",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/31767/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/139445/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/139445/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BD52743E90;\n\tWed, 17 Apr 2024 10:26:55 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2C5FA402AE;\n\tWed, 17 Apr 2024 10:26:55 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id BBAB24029E\n for <dev@dpdk.org>; Wed, 17 Apr 2024 10:26:52 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id\n 43GK1tD3016891 for <dev@dpdk.org>; Wed, 17 Apr 2024 01:26:52 -0700",
            "from dc6wp-exch02.marvell.com ([4.21.29.225])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3xhfdn5hk3-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Wed, 17 Apr 2024 01:26:51 -0700 (PDT)",
            "from DC6WP-EXCH02.marvell.com (10.76.176.209) by\n DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.2.1544.4; Wed, 17 Apr 2024 01:26:50 -0700",
            "from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com\n (10.76.176.209) with Microsoft SMTP Server id 15.2.1544.4 via Frontend\n Transport; Wed, 17 Apr 2024 01:26:50 -0700",
            "from MININT-80QBFE8.corp.innovium.com (MININT-80QBFE8.marvell.com\n [10.28.164.106])\n by maili.marvell.com (Postfix) with ESMTP id 974F83F707E;\n Wed, 17 Apr 2024 01:26:48 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=\n from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding:content-type; s=\n pfpt0220; bh=4O1OWg8q26TsTJGF5YKUbp7UdUL/J/HekLSa/Hrb3lA=; b=b8v\n fNaBnu4GUecPkGCoX4B949EwaOFgj5wDIjJVnsCO0Fs7JpM8k5y7rG5Z6Vn00zJQ\n jdf+IS4ZAxtuNUv5cVobrWafr3Xr/vINYHoDmy0sOFrBCHOEFjCFdXmHDgsBSIgE\n ZTvKCl4nlA6F2wv9gkX8r/xvuIABCL+pHDhakg4/qQ9Sk03Qrd5zttZcrcsOFyro\n 3M2oewDeZU8FCbLMbM53YViIPtuPfzPn6vKGObHh2pBnmbvKPm0tUueDtq+hr87e\n boVydcZ/3AWYK3CT0oj3Hfc9x4gXs/ncuw7jgJ6WUiNbSZQUYQyzb+mxJlqJrmxU\n heFEZ5lhwu9wJM942gw==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, Amit Prakash Shukla <amitprakashs@marvell.com>,\n Vamsi Attunuru <vattunuru@marvell.com>",
        "CC": "<dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>",
        "Subject": "[PATCH v3 1/2] eventdev/dma: reorganize event DMA ops",
        "Date": "Wed, 17 Apr 2024 13:56:44 +0530",
        "Message-ID": "<20240417082645.4259-1-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20240417055830.1935-1-pbhagavatula@marvell.com>",
        "References": "<20240417055830.1935-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "YGZanvBju73L05i-KNXuPOydhjrYxX1Z",
        "X-Proofpoint-ORIG-GUID": "YGZanvBju73L05i-KNXuPOydhjrYxX1Z",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26\n definitions=2024-04-17_07,2024-04-16_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nRe-organize event DMA ops structure to allow holding\nsource and destination pointers without the need for\nadditional memory, the mempool allocating memory for\nrte_event_dma_adapter_ops can size the structure to\naccommodate all the needed source and destination\npointers.\n\nAdd multiple words for holding user metadata, adapter\nimplementation specific metadata and event metadata.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\n---\n v3 Changes:\n - Fix stdatomic compilation.\n v2 Changes:\n - Fix 32bit compilation\n\n app/test-eventdev/test_perf_common.c        | 26 ++++--------\n app/test/test_event_dma_adapter.c           | 20 +++------\n doc/guides/prog_guide/event_dma_adapter.rst |  2 +-\n drivers/dma/cnxk/cnxk_dmadev_fp.c           | 39 +++++++----------\n lib/eventdev/rte_event_dma_adapter.c        | 27 ++++--------\n lib/eventdev/rte_event_dma_adapter.h        | 46 +++++++++++++++------\n 6 files changed, 72 insertions(+), 88 deletions(-)\n\n--\n2.25.1",
    "diff": "diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c\nindex 93e6132de8..db0f9c1f3b 100644\n--- a/app/test-eventdev/test_perf_common.c\n+++ b/app/test-eventdev/test_perf_common.c\n@@ -1503,7 +1503,6 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,\n \t\tprod = 0;\n \t\tfor (; port < perf_nb_event_ports(opt); port++) {\n \t\t\tstruct prod_data *p = &t->prod[port];\n-\t\t\tstruct rte_event *response_info;\n \t\t\tuint32_t flow_id;\n\n \t\t\tp->dev_id = opt->dev_id;\n@@ -1523,13 +1522,10 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,\n \t\t\tfor (flow_id = 0; flow_id < t->nb_flows; flow_id++) {\n \t\t\t\trte_mempool_get(t->da_op_pool, (void **)&op);\n\n-\t\t\t\top->src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);\n-\t\t\t\top->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);\n-\n-\t\t\t\top->src_seg->addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool));\n-\t\t\t\top->dst_seg->addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool));\n-\t\t\t\top->src_seg->length = 1024;\n-\t\t\t\top->dst_seg->length = 1024;\n+\t\t\t\top->src_dst_seg[0].addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool));\n+\t\t\t\top->src_dst_seg[1].addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool));\n+\t\t\t\top->src_dst_seg[0].length = 1024;\n+\t\t\t\top->src_dst_seg[1].length = 1024;\n \t\t\t\top->nb_src = 1;\n \t\t\t\top->nb_dst = 1;\n \t\t\t\top->flags = RTE_DMA_OP_FLAG_SUBMIT;\n@@ -1537,12 +1533,6 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,\n \t\t\t\top->dma_dev_id = dma_dev_id;\n \t\t\t\top->vchan = vchan_id;\n\n-\t\t\t\tresponse_info = (struct rte_event *)((uint8_t *)op +\n-\t\t\t\t\t\t sizeof(struct rte_event_dma_adapter_op));\n-\t\t\t\tresponse_info->queue_id = p->queue_id;\n-\t\t\t\tresponse_info->sched_type = RTE_SCHED_TYPE_ATOMIC;\n-\t\t\t\tresponse_info->flow_id = flow_id;\n-\n \t\t\t\tp->da.dma_op[flow_id] = op;\n \t\t\t}\n\n@@ -2036,7 +2026,7 @@ perf_dmadev_setup(struct evt_test *test, struct evt_options *opt)\n \t\treturn -ENODEV;\n \t}\n\n-\telt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event);\n+\telt_size = sizeof(struct rte_event_dma_adapter_op) + (sizeof(struct rte_dma_sge) * 2);\n \tt->da_op_pool = rte_mempool_create(\"dma_op_pool\", opt->pool_sz, elt_size, 256,\n \t\t\t\t\t   0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);\n \tif (t->da_op_pool == NULL) {\n@@ -2085,10 +2075,8 @@ perf_dmadev_destroy(struct evt_test *test, struct evt_options *opt)\n \t\tfor (flow_id = 0; flow_id < t->nb_flows; flow_id++) {\n \t\t\top = p->da.dma_op[flow_id];\n\n-\t\t\trte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->src_seg->addr);\n-\t\t\trte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->dst_seg->addr);\n-\t\t\trte_free(op->src_seg);\n-\t\t\trte_free(op->dst_seg);\n+\t\t\trte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->src_dst_seg[0].addr);\n+\t\t\trte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->src_dst_seg[1].addr);\n \t\t\trte_mempool_put(op->op_mp, op);\n \t\t}\n\ndiff --git a/app/test/test_event_dma_adapter.c b/app/test/test_event_dma_adapter.c\nindex 35b417b69f..d9dff4ff7d 100644\n--- a/app/test/test_event_dma_adapter.c\n+++ b/app/test/test_event_dma_adapter.c\n@@ -235,7 +235,6 @@ test_op_forward_mode(void)\n \tstruct rte_mbuf *dst_mbuf[TEST_MAX_OP];\n \tstruct rte_event_dma_adapter_op *op;\n \tstruct rte_event ev[TEST_MAX_OP];\n-\tstruct rte_event response_info;\n \tint ret, i;\n\n \tret = rte_pktmbuf_alloc_bulk(params.src_mbuf_pool, 
src_mbuf, TEST_MAX_OP);\n@@ -253,14 +252,11 @@ test_op_forward_mode(void)\n \t\trte_mempool_get(params.op_mpool, (void **)&op);\n \t\tTEST_ASSERT_NOT_NULL(op, \"Failed to allocate dma operation struct\\n\");\n\n-\t\top->src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);\n-\t\top->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);\n-\n \t\t/* Update Op */\n-\t\top->src_seg->addr = rte_pktmbuf_iova(src_mbuf[i]);\n-\t\top->dst_seg->addr = rte_pktmbuf_iova(dst_mbuf[i]);\n-\t\top->src_seg->length = PACKET_LENGTH;\n-\t\top->dst_seg->length = PACKET_LENGTH;\n+\t\top->src_dst_seg[0].addr = rte_pktmbuf_iova(src_mbuf[i]);\n+\t\top->src_dst_seg[1].addr = rte_pktmbuf_iova(dst_mbuf[i]);\n+\t\top->src_dst_seg[0].length = PACKET_LENGTH;\n+\t\top->src_dst_seg[1].length = PACKET_LENGTH;\n \t\top->nb_src = 1;\n \t\top->nb_dst = 1;\n \t\top->flags = RTE_DMA_OP_FLAG_SUBMIT;\n@@ -268,10 +264,6 @@ test_op_forward_mode(void)\n \t\top->dma_dev_id = TEST_DMA_DEV_ID;\n \t\top->vchan = TEST_DMA_VCHAN_ID;\n\n-\t\tresponse_info.event = dma_response_info.event;\n-\t\trte_memcpy((uint8_t *)op + sizeof(struct rte_event_dma_adapter_op), &response_info,\n-\t\t\t   sizeof(struct rte_event));\n-\n \t\t/* Fill in event info and update event_ptr with rte_event_dma_adapter_op */\n \t\tmemset(&ev[i], 0, sizeof(struct rte_event));\n \t\tev[i].event = 0;\n@@ -294,8 +286,6 @@ test_op_forward_mode(void)\n\n \t\tTEST_ASSERT_EQUAL(ret, 0, \"Data mismatch for dma adapter\\n\");\n\n-\t\trte_free(op->src_seg);\n-\t\trte_free(op->dst_seg);\n \t\trte_mempool_put(op->op_mp, op);\n \t}\n\n@@ -400,7 +390,7 @@ configure_dmadev(void)\n \t\t\t\t\t\t       rte_socket_id());\n \tRTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, \"Can't create DMA_DST_MBUFPOOL\\n\");\n\n-\telt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event);\n+\telt_size = sizeof(struct rte_event_dma_adapter_op) + (sizeof(struct rte_dma_sge) * 2);\n \tparams.op_mpool = rte_mempool_create(\"EVENT_DMA_OP_POOL\", DMA_OP_POOL_SIZE, elt_size, 0,\n \t\t\t\t\t     0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);\n \tRTE_TEST_ASSERT_NOT_NULL(params.op_mpool, \"Can't create DMA_OP_POOL\\n\");\ndiff --git a/doc/guides/prog_guide/event_dma_adapter.rst b/doc/guides/prog_guide/event_dma_adapter.rst\nindex 3443b6a803..1fb9b0a07b 100644\n--- a/doc/guides/prog_guide/event_dma_adapter.rst\n+++ b/doc/guides/prog_guide/event_dma_adapter.rst\n@@ -144,7 +144,7 @@ on which it enqueues events towards the DMA adapter using ``rte_event_enqueue_bu\n    uint32_t cap;\n    int ret;\n\n-   /* Fill in event info and update event_ptr with rte_dma_op */\n+   /* Fill in event info and update event_ptr with rte_event_dma_adapter_op */\n    memset(&ev, 0, sizeof(ev));\n    .\n    .\ndiff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c\nindex f6562b603e..9f7f9b2eed 100644\n--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c\n+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c\n@@ -457,7 +457,6 @@ cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)\n \tstruct cnxk_dpi_compl_s *comp_ptr;\n \tstruct cnxk_dpi_conf *dpi_conf;\n \tstruct cnxk_dpi_vf_s *dpivf;\n-\tstruct rte_event *rsp_info;\n \tstruct cn10k_sso_hws *work;\n \tuint16_t nb_src, nb_dst;\n \trte_mcslock_t mcs_lock_me;\n@@ -469,9 +468,7 @@ cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)\n\n \tfor (count = 0; count < nb_events; count++) {\n \t\top = ev[count].event_ptr;\n-\t\trsp_info = (struct rte_event *)((uint8_t *)op +\n-\t\t\t     sizeof(struct 
rte_event_dma_adapter_op));\n-\t\tdpivf =\trte_dma_fp_objs[op->dma_dev_id].dev_private;\n+\t\tdpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;\n \t\tdpi_conf = &dpivf->conf[op->vchan];\n\n \t\tif (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))\n@@ -488,15 +485,14 @@ cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)\n \t\thdr[0] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 54);\n \t\thdr[0] |= (nb_dst << 6) | nb_src;\n \t\thdr[1] = ((uint64_t)comp_ptr);\n-\t\thdr[2] = cnxk_dma_adapter_format_event(rsp_info->event);\n+\t\thdr[2] = cnxk_dma_adapter_format_event(ev[count].event);\n\n-\t\tsrc = &op->src_seg[0];\n-\t\tdst = &op->dst_seg[0];\n+\t\tsrc = &op->src_dst_seg[0];\n+\t\tdst = &op->src_dst_seg[op->nb_src];\n\n \t\tif (CNXK_TAG_IS_HEAD(work->gw_rdata) ||\n \t\t    ((CNXK_TT_FROM_TAG(work->gw_rdata) == SSO_TT_ORDERED) &&\n-\t\t    (rsp_info->sched_type & DPI_HDR_TT_MASK) ==\n-\t\t\t    RTE_SCHED_TYPE_ORDERED))\n+\t\t     (ev[count].sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED))\n \t\t\troc_sso_hws_head_wait(work->base);\n\n \t\trte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);\n@@ -566,12 +562,12 @@ cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_event\n \t\t * For all other cases, src pointers are first pointers.\n \t\t */\n \t\tif (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {\n-\t\t\tfptr = &op->dst_seg[0];\n-\t\t\tlptr = &op->src_seg[0];\n+\t\t\tfptr = &op->src_dst_seg[nb_src];\n+\t\t\tlptr = &op->src_dst_seg[0];\n \t\t\tRTE_SWAP(nb_src, nb_dst);\n \t\t} else {\n-\t\t\tfptr = &op->src_seg[0];\n-\t\t\tlptr = &op->dst_seg[0];\n+\t\t\tfptr = &op->src_dst_seg[0];\n+\t\t\tlptr = &op->src_dst_seg[nb_src];\n \t\t}\n\n \t\thdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;\n@@ -612,7 +608,6 @@ cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)\n \tstruct cnxk_dpi_compl_s *comp_ptr;\n \tstruct cnxk_dpi_conf *dpi_conf;\n \tstruct cnxk_dpi_vf_s *dpivf;\n-\tstruct rte_event *rsp_info;\n \tstruct cn9k_sso_hws *work;\n \tuint16_t nb_src, nb_dst;\n \trte_mcslock_t mcs_lock_me;\n@@ -624,9 +619,7 @@ cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)\n\n \tfor (count = 0; count < nb_events; count++) {\n \t\top = ev[count].event_ptr;\n-\t\trsp_info = (struct rte_event *)((uint8_t *)op +\n-\t\t\t    sizeof(struct rte_event_dma_adapter_op));\n-\t\tdpivf =\trte_dma_fp_objs[op->dma_dev_id].dev_private;\n+\t\tdpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;\n \t\tdpi_conf = &dpivf->conf[op->vchan];\n\n \t\tif (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))\n@@ -647,18 +640,18 @@ cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)\n \t\t * For all other cases, src pointers are first pointers.\n \t\t */\n \t\tif (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {\n-\t\t\tfptr = &op->dst_seg[0];\n-\t\t\tlptr = &op->src_seg[0];\n+\t\t\tfptr = &op->src_dst_seg[nb_src];\n+\t\t\tlptr = &op->src_dst_seg[0];\n \t\t\tRTE_SWAP(nb_src, nb_dst);\n \t\t} else {\n-\t\t\tfptr = &op->src_seg[0];\n-\t\t\tlptr = &op->dst_seg[0];\n+\t\t\tfptr = &op->src_dst_seg[0];\n+\t\t\tlptr = &op->src_dst_seg[nb_src];\n \t\t}\n\n \t\thdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;\n-\t\thdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);\n+\t\thdr[0] |= cnxk_dma_adapter_format_event(ev[count].event);\n\n-\t\tif ((rsp_info->sched_type & DPI_HDR_TT_MASK) 
== RTE_SCHED_TYPE_ORDERED)\n+\t\tif ((ev[count].sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)\n \t\t\troc_sso_hws_head_wait(work->base);\n\n \t\trte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);\ndiff --git a/lib/eventdev/rte_event_dma_adapter.c b/lib/eventdev/rte_event_dma_adapter.c\nindex 24dff556db..e52ef46a1b 100644\n--- a/lib/eventdev/rte_event_dma_adapter.c\n+++ b/lib/eventdev/rte_event_dma_adapter.c\n@@ -236,9 +236,9 @@ edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter,\n \t\t\t\t      uint16_t vchan, uint16_t *nb_ops_flushed)\n {\n \tstruct rte_event_dma_adapter_op *op;\n-\tstruct dma_vchan_info *tq;\n \tuint16_t *head = &bufp->head;\n \tuint16_t *tail = &bufp->tail;\n+\tstruct dma_vchan_info *tq;\n \tuint16_t n;\n \tuint16_t i;\n \tint ret;\n@@ -257,11 +257,13 @@ edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter,\n \tfor (i = 0; i < n; i++)\t{\n \t\top = bufp->op_buffer[*head];\n \t\tif (op->nb_src == 1 && op->nb_dst == 1)\n-\t\t\tret = rte_dma_copy(dma_dev_id, vchan, op->src_seg->addr, op->dst_seg->addr,\n-\t\t\t\t\t   op->src_seg->length, op->flags);\n+\t\t\tret = rte_dma_copy(dma_dev_id, vchan, op->src_dst_seg[0].addr,\n+\t\t\t\t\t   op->src_dst_seg[1].addr, op->src_dst_seg[0].length,\n+\t\t\t\t\t   op->flags);\n \t\telse\n-\t\t\tret = rte_dma_copy_sg(dma_dev_id, vchan, op->src_seg, op->dst_seg,\n-\t\t\t\t\t      op->nb_src, op->nb_dst, op->flags);\n+\t\t\tret = rte_dma_copy_sg(dma_dev_id, vchan, &op->src_dst_seg[0],\n+\t\t\t\t\t      &op->src_dst_seg[op->nb_src], op->nb_src, op->nb_dst,\n+\t\t\t\t\t      op->flags);\n \t\tif (ret < 0)\n \t\t\tbreak;\n\n@@ -511,8 +513,7 @@ edma_enq_to_dma_dev(struct event_dma_adapter *adapter, struct rte_event *ev, uns\n \t\tif (dma_op == NULL)\n \t\t\tcontinue;\n\n-\t\t/* Expected to have response info appended to dma_op. */\n-\n+\t\tdma_op->impl_opaque[0] = ev[i].event;\n \t\tdma_dev_id = dma_op->dma_dev_id;\n \t\tvchan = dma_op->vchan;\n \t\tvchan_qinfo = &adapter->dma_devs[dma_dev_id].vchanq[vchan];\n@@ -647,7 +648,6 @@ edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_event_dma_a\n \tuint8_t event_port_id = adapter->event_port_id;\n \tuint8_t event_dev_id = adapter->eventdev_id;\n \tstruct rte_event events[DMA_BATCH_SIZE];\n-\tstruct rte_event *response_info;\n \tuint16_t nb_enqueued, nb_ev;\n \tuint8_t retry;\n \tuint8_t i;\n@@ -659,16 +659,7 @@ edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_event_dma_a\n \tfor (i = 0; i < num; i++) {\n \t\tstruct rte_event *ev = &events[nb_ev++];\n\n-\t\t/* Expected to have response info appended to dma_op. */\n-\t\tresponse_info = (struct rte_event *)((uint8_t *)ops[i] +\n-\t\t\t\t\t\t\t  sizeof(struct rte_event_dma_adapter_op));\n-\t\tif (unlikely(response_info == NULL)) {\n-\t\t\tif (ops[i] != NULL && ops[i]->op_mp != NULL)\n-\t\t\t\trte_mempool_put(ops[i]->op_mp, ops[i]);\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\trte_memcpy(ev, response_info, sizeof(struct rte_event));\n+\t\tev->event = ops[i]->impl_opaque[0];\n \t\tev->event_ptr = ops[i];\n \t\tev->event_type = RTE_EVENT_TYPE_DMADEV;\n \t\tif (adapter->implicit_release_disabled)\ndiff --git a/lib/eventdev/rte_event_dma_adapter.h b/lib/eventdev/rte_event_dma_adapter.h\nindex e924ab673d..048ddba3f3 100644\n--- a/lib/eventdev/rte_event_dma_adapter.h\n+++ b/lib/eventdev/rte_event_dma_adapter.h\n@@ -157,24 +157,46 @@ extern \"C\" {\n  * instance.\n  */\n struct rte_event_dma_adapter_op {\n-\tstruct rte_dma_sge *src_seg;\n-\t/**< Source segments. 
*/\n-\tstruct rte_dma_sge *dst_seg;\n-\t/**< Destination segments. */\n-\tuint16_t nb_src;\n-\t/**< Number of source segments. */\n-\tuint16_t nb_dst;\n-\t/**< Number of destination segments. */\n \tuint64_t flags;\n \t/**< Flags related to the operation.\n \t * @see RTE_DMA_OP_FLAG_*\n \t */\n-\tint16_t dma_dev_id;\n-\t/**< DMA device ID to be used */\n-\tuint16_t vchan;\n-\t/**< DMA vchan ID to be used */\n \tstruct rte_mempool *op_mp;\n \t/**< Mempool from which op is allocated. */\n+\tenum rte_dma_status_code status;\n+\t/**< Status code for this operation. */\n+\tuint32_t rsvd;\n+\t/**< Reserved for future use. */\n+\tuint64_t impl_opaque[2];\n+\t/**< Implementation-specific opaque data.\n+\t * An dma device implementation use this field to hold\n+\t * implementation specific values to share between dequeue and enqueue\n+\t * operations.\n+\t * The application should not modify this field.\n+\t */\n+\tuint64_t user_meta;\n+\t/**<  Memory to store user specific metadata.\n+\t * The dma device implementation should not modify this area.\n+\t */\n+\tuint64_t event_meta;\n+\t/**< Event metadata that defines event attributes when used in OP_NEW mode.\n+\t * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_NEW\n+\t * @see struct rte_event::event\n+\t */\n+\tint16_t dma_dev_id;\n+\t/**< DMA device ID to be used with OP_FORWARD mode.\n+\t * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD\n+\t */\n+\tuint16_t vchan;\n+\t/**< DMA vchan ID to be used with OP_FORWARD mode\n+\t * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD\n+\t */\n+\tuint16_t nb_src;\n+\t/**< Number of source segments. */\n+\tuint16_t nb_dst;\n+\t/**< Number of destination segments. */\n+\tstruct rte_dma_sge src_dst_seg[0];\n+\t/**< Source and destination segments. */\n };\n\n /**\n",
    "prefixes": [
        "v3",
        "1/2"
    ]
}
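
The response links out to several derived resources: the per-patch mbox, the series mbox, the comments list, and the checks list. Below is a short sketch of following those links with Python's requests library, using only fields present in the response above; the local filename is an arbitrary choice.

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/139445/").json()

# "check" is the aggregate CI verdict; per-check detail lives at "checks".
print("CI:", patch["check"])

# The series mbox bundles every patch in the series, ready for git-am.
mbox = requests.get(patch["series"][0]["mbox"])
with open("series-31767.mbox", "wb") as f:  # arbitrary local filename
    f.write(mbox.content)

# Follow-up discussion is exposed as its own list endpoint.
comments = requests.get(patch["comments"]).json()
print(len(comments), "comment(s)")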